1 //===- LegalizeVectorOps.cpp - Implement SelectionDAG::LegalizeVectors ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the SelectionDAG::LegalizeVectors method.
10 //
11 // The vector legalizer looks for vector operations which might need to be
12 // scalarized and legalizes them. This is a separate step from Legalize because
13 // scalarizing can introduce illegal types. For example, suppose we have an
14 // ISD::SDIV of type v2i64 on x86-32. The type is legal (for example, addition
15 // on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
16 // operation, which introduces nodes with the illegal type i64 which must be
17 // expanded. Similarly, suppose we have an ISD::SRA of type v16i8 on PowerPC;
18 // the operation must be unrolled, which introduces nodes with the illegal
19 // type i8 which must be promoted.
20 //
21 // This does not legalize vector manipulations like ISD::BUILD_VECTOR,
22 // or operations that happen to take a vector which are custom-lowered;
23 // the legalization for such operations never produces nodes
24 // with illegal types, so it's okay to put off legalizing them until
25 // SelectionDAG::Legalize runs.
26 //
27 //===----------------------------------------------------------------------===//
28
29 #include "llvm/ADT/APInt.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/SmallVector.h"
32 #include "llvm/CodeGen/ISDOpcodes.h"
33 #include "llvm/CodeGen/MachineMemOperand.h"
34 #include "llvm/CodeGen/SelectionDAG.h"
35 #include "llvm/CodeGen/SelectionDAGNodes.h"
36 #include "llvm/CodeGen/TargetLowering.h"
37 #include "llvm/CodeGen/ValueTypes.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/Support/Casting.h"
40 #include "llvm/Support/Compiler.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/MachineValueType.h"
44 #include "llvm/Support/MathExtras.h"
45 #include <cassert>
46 #include <cstdint>
47 #include <iterator>
48 #include <utility>
49
50 using namespace llvm;
51
52 #define DEBUG_TYPE "legalizevectorops"
53
54 namespace {
55
class VectorLegalizer {
  SelectionDAG& DAG;
  const TargetLowering &TLI;
  bool Changed = false; // Keep track of whether anything changed

  /// For nodes that are of legal width, and that have more than one use, this
  /// map indicates what regularized operand to use. This allows us to avoid
  /// legalizing the same thing more than once.
  SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes;

  /// Adds a node to the translation cache.
  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));
  }

  /// Legalizes the given node.
  SDValue LegalizeOp(SDValue Op);

  /// Assuming the node is legal, "legalize" the results.
  SDValue TranslateLegalizeResults(SDValue Op, SDNode *Result);

  /// Make sure Results are legal and update the translation cache.
  SDValue RecursivelyLegalizeResults(SDValue Op,
                                     MutableArrayRef<SDValue> Results);

  /// Wrapper to interface LowerOperation with a vector of Results.
  /// Returns false if the target wants to use default expansion. Otherwise
  /// returns true. If return is true and the Results are empty, then the
  /// target wants to keep the input node as is.
  bool LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results);

  /// Implements unrolling a VSETCC.
  SDValue UnrollVSETCC(SDNode *Node);

  /// Implement expand-based legalization of vector operations.
  ///
  /// This is just a high-level routine to dispatch to specific code paths for
  /// operations to legalize them.
  void Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements expansion for FP_TO_UINT; falls back to UnrollVectorOp if
  /// FP_TO_SINT isn't legal.
  void ExpandFP_TO_UINT(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements expansion for UINT_TO_FLOAT; falls back to UnrollVectorOp if
  /// SINT_TO_FLOAT and SHR on vectors isn't legal.
  void ExpandUINT_TO_FLOAT(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
  SDValue ExpandSEXTINREG(SDNode *Node);

  /// Implement expansion for ANY_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and bitcasts to the proper
  /// type. The contents of the bits in the extended part of each element are
  /// undef.
  SDValue ExpandANY_EXTEND_VECTOR_INREG(SDNode *Node);

  /// Implement expansion for SIGN_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place, bitcasts to the proper
  /// type, then shifts left and arithmetic shifts right to introduce a sign
  /// extension.
  SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDNode *Node);

  /// Implement expansion for ZERO_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and blends zeros into
  /// the remaining lanes, finally bitcasting to the proper type.
  SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDNode *Node);

  /// Expand bswap of vectors into a shuffle if legal.
  SDValue ExpandBSWAP(SDNode *Node);

  /// Implement vselect in terms of XOR, AND, OR when blend is not
  /// supported by the target.
  SDValue ExpandVSELECT(SDNode *Node);
  /// Lower a SELECT whose condition is a scalar and whose operands are
  /// vectors by splatting the condition into a mask and blending with
  /// XOR/AND/OR; falls back to unrolling (see definition below).
  SDValue ExpandSELECT(SDNode *Node);
  /// Scalarize an extending vector load; returns {value, chain}.
  std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
  /// Scalarize a truncating vector store; returns the new chain.
  SDValue ExpandStore(SDNode *N);
  // Expansion helpers for the ops named by each method; definitions appear
  // later in this file. Each appends its replacement values to Results
  // (or returns them directly for the SDValue-returning variants).
  SDValue ExpandFNEG(SDNode *Node);
  void ExpandFSUB(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandBITREVERSE(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandUADDSUBO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandSADDSUBO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandMULO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandFixedPointDiv(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  SDValue ExpandStrictFPOp(SDNode *Node);
  void ExpandStrictFPOp(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandREM(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  void UnrollStrictFPOp(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements vector promotion.
  ///
  /// This is essentially just bitcasting the operands to a different type and
  /// bitcasting the result back to the original type.
  void Promote(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements [SU]INT_TO_FP vector promotion.
  ///
  /// This is a [zs]ext of the input operand to a larger integer type.
  void PromoteINT_TO_FP(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements FP_TO_[SU]INT vector promotion of the result type.
  ///
  /// It is promoted to a larger integer type. The result is then
  /// truncated back to the original type.
  void PromoteFP_TO_INT(SDNode *Node, SmallVectorImpl<SDValue> &Results);

public:
  VectorLegalizer(SelectionDAG& dag) :
      DAG(dag), TLI(dag.getTargetLoweringInfo()) {}

  /// Begin legalizing the vector operations in the DAG.
  bool Run();
};
176
177 } // end anonymous namespace
178
bool VectorLegalizer::Run() {
  // Before we start legalizing vector nodes, check if there are any vectors.
  bool HasVectors = false;
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
    // Check if the values of the nodes contain vectors. We don't need to check
    // the operands because we are going to check their values at some point.
    HasVectors = llvm::any_of(I->values(), [](EVT T) { return T.isVector(); });

    // If we found a vector node we can start the legalization.
    if (HasVectors)
      break;
  }

  // If this basic block has no vectors then no need to legalize vectors.
  if (!HasVectors)
    return false;

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves). Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph. In
  // practice however, this causes us to run out of stack space on large basic
  // blocks. To avoid this problem, compute an ordering of the nodes where each
  // node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  // Note the loop bound: E is fixed to the last node that exists *before*
  // legalization starts, and we walk until std::next(E). LegalizeOp may
  // append newly-created nodes to the DAG's node list, but those appended
  // nodes are deliberately not visited by this loop.
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
    LegalizeOp(SDValue(&*I, 0));

  // Finally, it's possible the root changed. Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();

  return Changed;
}
220
TranslateLegalizeResults(SDValue Op,SDNode * Result)221 SDValue VectorLegalizer::TranslateLegalizeResults(SDValue Op, SDNode *Result) {
222 assert(Op->getNumValues() == Result->getNumValues() &&
223 "Unexpected number of results");
224 // Generic legalization: just pass the operand through.
225 for (unsigned i = 0, e = Op->getNumValues(); i != e; ++i)
226 AddLegalizedOperand(Op.getValue(i), SDValue(Result, i));
227 return SDValue(Result, Op.getResNo());
228 }
229
230 SDValue
RecursivelyLegalizeResults(SDValue Op,MutableArrayRef<SDValue> Results)231 VectorLegalizer::RecursivelyLegalizeResults(SDValue Op,
232 MutableArrayRef<SDValue> Results) {
233 assert(Results.size() == Op->getNumValues() &&
234 "Unexpected number of results");
235 // Make sure that the generated code is itself legal.
236 for (unsigned i = 0, e = Results.size(); i != e; ++i) {
237 Results[i] = LegalizeOp(Results[i]);
238 AddLegalizedOperand(Op.getValue(i), Results[i]);
239 }
240
241 return Results[Op.getResNo()];
242 }
243
// Legalize one use of a node: first legalize its operands (recursively, with
// memoization via LegalizedNodes), then decide how to handle the node itself
// based on the target's action for its opcode/type.
SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  // Legalize the operands
  SmallVector<SDValue, 8> Ops;
  for (const SDValue &Oper : Op->op_values())
    Ops.push_back(LegalizeOp(Oper));

  SDNode *Node = DAG.UpdateNodeOperands(Op.getNode(), Ops);

  // Extending vector loads and truncating vector stores get dedicated
  // handling up front, keyed off the target's load-ext / trunc-store action.
  if (Op.getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = cast<LoadSDNode>(Node);
    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD) {
      LLVM_DEBUG(dbgs() << "\nLegalizing extending vector load: ";
                 Node->dump(&DAG));
      switch (TLI.getLoadExtAction(LD->getExtensionType(), LD->getValueType(0),
                                   LD->getMemoryVT())) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Node);
      case TargetLowering::Custom: {
        SmallVector<SDValue, 2> ResultVals;
        if (LowerOperationWrapper(Node, ResultVals)) {
          // Empty ResultVals means "keep the node as is".
          if (ResultVals.empty())
            return TranslateLegalizeResults(Op, Node);

          Changed = true;
          return RecursivelyLegalizeResults(Op, ResultVals);
        }
        // Custom lowering declined; fall back to generic expansion.
        LLVM_FALLTHROUGH;
      }
      case TargetLowering::Expand: {
        Changed = true;
        std::pair<SDValue, SDValue> Tmp = ExpandLoad(Node);
        AddLegalizedOperand(Op.getValue(0), Tmp.first);
        AddLegalizedOperand(Op.getValue(1), Tmp.second);
        // NOTE(review): Tmp.first is the value (result 0) and Tmp.second the
        // chain (result 1), so this conditional looks inverted relative to
        // the AddLegalizedOperand calls above. Callers usually hit the cache
        // rather than this return value, but confirm intent.
        return Op.getResNo() ? Tmp.first : Tmp.second;
      }
      }
    }
  } else if (Op.getOpcode() == ISD::STORE) {
    StoreSDNode *ST = cast<StoreSDNode>(Node);
    EVT StVT = ST->getMemoryVT();
    MVT ValVT = ST->getValue().getSimpleValueType();
    if (StVT.isVector() && ST->isTruncatingStore()) {
      LLVM_DEBUG(dbgs() << "\nLegalizing truncating vector store: ";
                 Node->dump(&DAG));
      switch (TLI.getTruncStoreAction(ValVT, StVT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Node);
      case TargetLowering::Custom: {
        SmallVector<SDValue, 1> ResultVals;
        if (LowerOperationWrapper(Node, ResultVals)) {
          // Empty ResultVals means "keep the node as is".
          if (ResultVals.empty())
            return TranslateLegalizeResults(Op, Node);

          Changed = true;
          return RecursivelyLegalizeResults(Op, ResultVals);
        }
        // Custom lowering declined; fall back to generic expansion.
        LLVM_FALLTHROUGH;
      }
      case TargetLowering::Expand: {
        Changed = true;
        SDValue Chain = ExpandStore(Node);
        AddLegalizedOperand(Op, Chain);
        return Chain;
      }
      }
    }
  }

  // Nothing to do unless the node produces or consumes a vector value.
  bool HasVectorValueOrOp =
      llvm::any_of(Node->values(), [](EVT T) { return T.isVector(); }) ||
      llvm::any_of(Node->op_values(),
                   [](SDValue O) { return O.getValueType().isVector(); });
  if (!HasVectorValueOrOp)
    return TranslateLegalizeResults(Op, Node);

  // Query the target's action for this opcode. Which value type to query on
  // depends on the opcode (result type for most ops, an operand type for
  // conversions and reductions).
  TargetLowering::LegalizeAction Action = TargetLowering::Legal;
  EVT ValVT;
  switch (Op.getOpcode()) {
  default:
    return TranslateLegalizeResults(Op, Node);
  case ISD::MERGE_VALUES:
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    // This operation lies about being legal: when it claims to be legal,
    // it should actually be expanded.
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    // Strict int-to-FP conversions are keyed on the (integer) input type;
    // every other strict op is keyed on its result type.
    ValVT = Node->getValueType(0);
    if (Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
        Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
      ValVT = Node->getOperand(1).getValueType();
    Action = TLI.getOperationAction(Node->getOpcode(), ValVT);
    // If we're asked to expand a strict vector floating-point operation,
    // by default we're going to simply unroll it. That is usually the
    // best approach, except in the case where the resulting strict (scalar)
    // operations would themselves use the fallback mutation to non-strict.
    // In that specific case, just do the fallback on the vector op.
    if (Action == TargetLowering::Expand && !TLI.isStrictFPEnabled() &&
        TLI.getStrictFPOperationAction(Node->getOpcode(), ValVT) ==
            TargetLowering::Legal) {
      EVT EltVT = ValVT.getVectorElementType();
      if (TLI.getOperationAction(Node->getOpcode(), EltVT)
          == TargetLowering::Expand &&
          TLI.getStrictFPOperationAction(Node->getOpcode(), EltVT)
          == TargetLowering::Legal)
        Action = TargetLowering::Legal;
    }
    break;
  // All of the following are keyed on the result type.
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::MULHS:
  case ISD::MULHU:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::FSHL:
  case ISD::FSHR:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::ABS:
  case ISD::BSWAP:
  case ISD::BITREVERSE:
  case ISD::CTLZ:
  case ISD::CTTZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::SELECT:
  case ISD::VSELECT:
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM:
  case ISD::FCOPYSIGN:
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FROUNDEVEN:
  case ISD::FFLOOR:
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
  case ISD::FMA:
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::FCANONICALIZE:
  case ISD::SADDSAT:
  case ISD::UADDSAT:
  case ISD::SSUBSAT:
  case ISD::USUBSAT:
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    break;
  // Fixed-point ops additionally consult the scale operand.
  case ISD::SMULFIX:
  case ISD::SMULFIXSAT:
  case ISD::UMULFIX:
  case ISD::UMULFIXSAT:
  case ISD::SDIVFIX:
  case ISD::SDIVFIXSAT:
  case ISD::UDIVFIX:
  case ISD::UDIVFIXSAT: {
    unsigned Scale = Node->getConstantOperandVal(2);
    Action = TLI.getFixedPointOperationAction(Node->getOpcode(),
                                              Node->getValueType(0), Scale);
    break;
  }
  // Int-to-FP conversions and reductions are keyed on the operand type.
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  }

  LLVM_DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));

  // Dispatch on the chosen action. Custom lowering that declines falls
  // through to Expand.
  SmallVector<SDValue, 8> ResultVals;
  switch (Action) {
  default: llvm_unreachable("This action is not supported yet!");
  case TargetLowering::Promote:
    LLVM_DEBUG(dbgs() << "Promoting\n");
    Promote(Node, ResultVals);
    assert(!ResultVals.empty() && "No results for promotion?");
    break;
  case TargetLowering::Legal:
    LLVM_DEBUG(dbgs() << "Legal node: nothing to do\n");
    break;
  case TargetLowering::Custom:
    LLVM_DEBUG(dbgs() << "Trying custom legalization\n");
    if (LowerOperationWrapper(Node, ResultVals))
      break;
    LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
    LLVM_FALLTHROUGH;
  case TargetLowering::Expand:
    LLVM_DEBUG(dbgs() << "Expanding\n");
    Expand(Node, ResultVals);
    break;
  }

  // No replacement values means the node was kept as is.
  if (ResultVals.empty())
    return TranslateLegalizeResults(Op, Node);

  Changed = true;
  return RecursivelyLegalizeResults(Op, ResultVals);
}
523
// FIXME: This is very similar to the X86 override of
// TargetLowering::LowerOperationWrapper. Can we merge them somehow?
LowerOperationWrapper(SDNode * Node,SmallVectorImpl<SDValue> & Results)526 bool VectorLegalizer::LowerOperationWrapper(SDNode *Node,
527 SmallVectorImpl<SDValue> &Results) {
528 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
529
530 if (!Res.getNode())
531 return false;
532
533 if (Res == SDValue(Node, 0))
534 return true;
535
536 // If the original node has one result, take the return value from
537 // LowerOperation as is. It might not be result number 0.
538 if (Node->getNumValues() == 1) {
539 Results.push_back(Res);
540 return true;
541 }
542
543 // If the original node has multiple results, then the return node should
544 // have the same number of results.
545 assert((Node->getNumValues() == Res->getNumValues()) &&
546 "Lowering returned the wrong number of results!");
547
548 // Places new result values base on N result number.
549 for (unsigned I = 0, E = Node->getNumValues(); I != E; ++I)
550 Results.push_back(Res.getValue(I));
551
552 return true;
553 }
554
Promote(SDNode * Node,SmallVectorImpl<SDValue> & Results)555 void VectorLegalizer::Promote(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
556 // For a few operations there is a specific concept for promotion based on
557 // the operand's type.
558 switch (Node->getOpcode()) {
559 case ISD::SINT_TO_FP:
560 case ISD::UINT_TO_FP:
561 case ISD::STRICT_SINT_TO_FP:
562 case ISD::STRICT_UINT_TO_FP:
563 // "Promote" the operation by extending the operand.
564 PromoteINT_TO_FP(Node, Results);
565 return;
566 case ISD::FP_TO_UINT:
567 case ISD::FP_TO_SINT:
568 case ISD::STRICT_FP_TO_UINT:
569 case ISD::STRICT_FP_TO_SINT:
570 // Promote the operation by extending the operand.
571 PromoteFP_TO_INT(Node, Results);
572 return;
573 case ISD::FP_ROUND:
574 case ISD::FP_EXTEND:
575 // These operations are used to do promotion so they can't be promoted
576 // themselves.
577 llvm_unreachable("Don't know how to promote this operation!");
578 }
579
580 // There are currently two cases of vector promotion:
581 // 1) Bitcasting a vector of integers to a different type to a vector of the
582 // same overall length. For example, x86 promotes ISD::AND v2i32 to v1i64.
583 // 2) Extending a vector of floats to a vector of the same number of larger
584 // floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
585 assert(Node->getNumValues() == 1 &&
586 "Can't promote a vector with multiple results!");
587 MVT VT = Node->getSimpleValueType(0);
588 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
589 SDLoc dl(Node);
590 SmallVector<SDValue, 4> Operands(Node->getNumOperands());
591
592 for (unsigned j = 0; j != Node->getNumOperands(); ++j) {
593 if (Node->getOperand(j).getValueType().isVector())
594 if (Node->getOperand(j)
595 .getValueType()
596 .getVectorElementType()
597 .isFloatingPoint() &&
598 NVT.isVector() && NVT.getVectorElementType().isFloatingPoint())
599 Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(j));
600 else
601 Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(j));
602 else
603 Operands[j] = Node->getOperand(j);
604 }
605
606 SDValue Res =
607 DAG.getNode(Node->getOpcode(), dl, NVT, Operands, Node->getFlags());
608
609 if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) ||
610 (VT.isVector() && VT.getVectorElementType().isFloatingPoint() &&
611 NVT.isVector() && NVT.getVectorElementType().isFloatingPoint()))
612 Res = DAG.getNode(ISD::FP_ROUND, dl, VT, Res, DAG.getIntPtrConstant(0, dl));
613 else
614 Res = DAG.getNode(ISD::BITCAST, dl, VT, Res);
615
616 Results.push_back(Res);
617 }
618
PromoteINT_TO_FP(SDNode * Node,SmallVectorImpl<SDValue> & Results)619 void VectorLegalizer::PromoteINT_TO_FP(SDNode *Node,
620 SmallVectorImpl<SDValue> &Results) {
621 // INT_TO_FP operations may require the input operand be promoted even
622 // when the type is otherwise legal.
623 bool IsStrict = Node->isStrictFPOpcode();
624 MVT VT = Node->getOperand(IsStrict ? 1 : 0).getSimpleValueType();
625 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
626 assert(NVT.getVectorNumElements() == VT.getVectorNumElements() &&
627 "Vectors have different number of elements!");
628
629 SDLoc dl(Node);
630 SmallVector<SDValue, 4> Operands(Node->getNumOperands());
631
632 unsigned Opc = (Node->getOpcode() == ISD::UINT_TO_FP ||
633 Node->getOpcode() == ISD::STRICT_UINT_TO_FP)
634 ? ISD::ZERO_EXTEND
635 : ISD::SIGN_EXTEND;
636 for (unsigned j = 0; j != Node->getNumOperands(); ++j) {
637 if (Node->getOperand(j).getValueType().isVector())
638 Operands[j] = DAG.getNode(Opc, dl, NVT, Node->getOperand(j));
639 else
640 Operands[j] = Node->getOperand(j);
641 }
642
643 if (IsStrict) {
644 SDValue Res = DAG.getNode(Node->getOpcode(), dl,
645 {Node->getValueType(0), MVT::Other}, Operands);
646 Results.push_back(Res);
647 Results.push_back(Res.getValue(1));
648 return;
649 }
650
651 SDValue Res =
652 DAG.getNode(Node->getOpcode(), dl, Node->getValueType(0), Operands);
653 Results.push_back(Res);
654 }
655
// For FP_TO_INT we promote the result type to a vector type with wider
// elements and then truncate the result. This is different from the default
// PromoteVector which uses bitcast to promote thus assuming that the
// promoted vector type has the same overall size.
PromoteFP_TO_INT(SDNode * Node,SmallVectorImpl<SDValue> & Results)660 void VectorLegalizer::PromoteFP_TO_INT(SDNode *Node,
661 SmallVectorImpl<SDValue> &Results) {
662 MVT VT = Node->getSimpleValueType(0);
663 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
664 bool IsStrict = Node->isStrictFPOpcode();
665 assert(NVT.getVectorNumElements() == VT.getVectorNumElements() &&
666 "Vectors have different number of elements!");
667
668 unsigned NewOpc = Node->getOpcode();
669 // Change FP_TO_UINT to FP_TO_SINT if possible.
670 // TODO: Should we only do this if FP_TO_UINT itself isn't legal?
671 if (NewOpc == ISD::FP_TO_UINT &&
672 TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NVT))
673 NewOpc = ISD::FP_TO_SINT;
674
675 if (NewOpc == ISD::STRICT_FP_TO_UINT &&
676 TLI.isOperationLegalOrCustom(ISD::STRICT_FP_TO_SINT, NVT))
677 NewOpc = ISD::STRICT_FP_TO_SINT;
678
679 SDLoc dl(Node);
680 SDValue Promoted, Chain;
681 if (IsStrict) {
682 Promoted = DAG.getNode(NewOpc, dl, {NVT, MVT::Other},
683 {Node->getOperand(0), Node->getOperand(1)});
684 Chain = Promoted.getValue(1);
685 } else
686 Promoted = DAG.getNode(NewOpc, dl, NVT, Node->getOperand(0));
687
688 // Assert that the converted value fits in the original type. If it doesn't
689 // (eg: because the value being converted is too big), then the result of the
690 // original operation was undefined anyway, so the assert is still correct.
691 if (Node->getOpcode() == ISD::FP_TO_UINT ||
692 Node->getOpcode() == ISD::STRICT_FP_TO_UINT)
693 NewOpc = ISD::AssertZext;
694 else
695 NewOpc = ISD::AssertSext;
696
697 Promoted = DAG.getNode(NewOpc, dl, NVT, Promoted,
698 DAG.getValueType(VT.getScalarType()));
699 Promoted = DAG.getNode(ISD::TRUNCATE, dl, VT, Promoted);
700 Results.push_back(Promoted);
701 if (IsStrict)
702 Results.push_back(Chain);
703 }
704
ExpandLoad(SDNode * N)705 std::pair<SDValue, SDValue> VectorLegalizer::ExpandLoad(SDNode *N) {
706 LoadSDNode *LD = cast<LoadSDNode>(N);
707 return TLI.scalarizeVectorLoad(LD, DAG);
708 }
709
ExpandStore(SDNode * N)710 SDValue VectorLegalizer::ExpandStore(SDNode *N) {
711 StoreSDNode *ST = cast<StoreSDNode>(N);
712 SDValue TF = TLI.scalarizeVectorStore(ST, DAG);
713 return TF;
714 }
715
// Dispatch expansion of Node to a specialized routine where one exists.
// Routines that can fail leave Results untouched and break out of the switch,
// in which case we fall back to scalarizing (unrolling) the operation.
void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
  SDValue Tmp;
  switch (Node->getOpcode()) {
  case ISD::MERGE_VALUES:
    // MERGE_VALUES just forwards its operands as its results.
    for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
      Results.push_back(Node->getOperand(i));
    return;
  case ISD::SIGN_EXTEND_INREG:
    Results.push_back(ExpandSEXTINREG(Node));
    return;
  case ISD::ANY_EXTEND_VECTOR_INREG:
    Results.push_back(ExpandANY_EXTEND_VECTOR_INREG(Node));
    return;
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    Results.push_back(ExpandSIGN_EXTEND_VECTOR_INREG(Node));
    return;
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    Results.push_back(ExpandZERO_EXTEND_VECTOR_INREG(Node));
    return;
  case ISD::BSWAP:
    Results.push_back(ExpandBSWAP(Node));
    return;
  case ISD::VSELECT:
    Results.push_back(ExpandVSELECT(Node));
    return;
  case ISD::SELECT:
    Results.push_back(ExpandSELECT(Node));
    return;
  case ISD::FP_TO_UINT:
    ExpandFP_TO_UINT(Node, Results);
    return;
  case ISD::UINT_TO_FP:
    ExpandUINT_TO_FLOAT(Node, Results);
    return;
  case ISD::FNEG:
    Results.push_back(ExpandFNEG(Node));
    return;
  case ISD::FSUB:
    ExpandFSUB(Node, Results);
    return;
  case ISD::SETCC:
    Results.push_back(UnrollVSETCC(Node));
    return;
  // The TLI.expand* helpers below report success/failure; on failure we
  // break and take the generic unroll path at the bottom.
  case ISD::ABS:
    if (TLI.expandABS(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::BITREVERSE:
    ExpandBITREVERSE(Node, Results);
    return;
  case ISD::CTPOP:
    if (TLI.expandCTPOP(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    if (TLI.expandCTLZ(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
    if (TLI.expandCTTZ(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::FSHL:
  case ISD::FSHR:
    if (TLI.expandFunnelShift(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (TLI.expandROT(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
    if (SDValue Expanded = TLI.expandFMINNUM_FMAXNUM(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::UADDO:
  case ISD::USUBO:
    ExpandUADDSUBO(Node, Results);
    return;
  case ISD::SADDO:
  case ISD::SSUBO:
    ExpandSADDSUBO(Node, Results);
    return;
  case ISD::UMULO:
  case ISD::SMULO:
    ExpandMULO(Node, Results);
    return;
  case ISD::USUBSAT:
  case ISD::SSUBSAT:
  case ISD::UADDSAT:
  case ISD::SADDSAT:
    if (SDValue Expanded = TLI.expandAddSubSat(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::SMULFIX:
  case ISD::UMULFIX:
    if (SDValue Expanded = TLI.expandFixedPointMul(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::SMULFIXSAT:
  case ISD::UMULFIXSAT:
    // FIXME: We do not expand SMULFIXSAT/UMULFIXSAT here yet, not sure exactly
    // why. Maybe it results in worse codegen compared to the unroll for some
    // targets? This should probably be investigated. And if we still prefer to
    // unroll an explanation could be helpful.
    break;
  case ISD::SDIVFIX:
  case ISD::UDIVFIX:
    ExpandFixedPointDiv(Node, Results);
    return;
  case ISD::SDIVFIXSAT:
  case ISD::UDIVFIXSAT:
    // Saturating fixed-point division falls through to the unroll path.
    break;
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    ExpandStrictFPOp(Node, Results);
    return;
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
    Results.push_back(TLI.expandVecReduce(Node, DAG));
    return;
  case ISD::SREM:
  case ISD::UREM:
    ExpandREM(Node, Results);
    return;
  }

  // Generic fallback: scalarize the operation element by element.
  Results.push_back(DAG.UnrollVectorOp(Node));
}
879
ExpandSELECT(SDNode * Node)880 SDValue VectorLegalizer::ExpandSELECT(SDNode *Node) {
881 // Lower a select instruction where the condition is a scalar and the
882 // operands are vectors. Lower this select to VSELECT and implement it
883 // using XOR AND OR. The selector bit is broadcasted.
884 EVT VT = Node->getValueType(0);
885 SDLoc DL(Node);
886
887 SDValue Mask = Node->getOperand(0);
888 SDValue Op1 = Node->getOperand(1);
889 SDValue Op2 = Node->getOperand(2);
890
891 assert(VT.isVector() && !Mask.getValueType().isVector()
892 && Op1.getValueType() == Op2.getValueType() && "Invalid type");
893
894 // If we can't even use the basic vector operations of
895 // AND,OR,XOR, we will have to scalarize the op.
896 // Notice that the operation may be 'promoted' which means that it is
897 // 'bitcasted' to another type which is handled.
898 // Also, we need to be able to construct a splat vector using BUILD_VECTOR.
899 if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
900 TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
901 TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
902 TLI.getOperationAction(ISD::BUILD_VECTOR, VT) == TargetLowering::Expand)
903 return DAG.UnrollVectorOp(Node);
904
905 // Generate a mask operand.
906 EVT MaskTy = VT.changeVectorElementTypeToInteger();
907
908 // What is the size of each element in the vector mask.
909 EVT BitTy = MaskTy.getScalarType();
910
911 Mask = DAG.getSelect(DL, BitTy, Mask,
912 DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), DL,
913 BitTy),
914 DAG.getConstant(0, DL, BitTy));
915
916 // Broadcast the mask so that the entire vector is all-one or all zero.
917 Mask = DAG.getSplatBuildVector(MaskTy, DL, Mask);
918
919 // Bitcast the operands to be the same type as the mask.
920 // This is needed when we select between FP types because
921 // the mask is a vector of integers.
922 Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
923 Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);
924
925 SDValue AllOnes = DAG.getConstant(
926 APInt::getAllOnesValue(BitTy.getSizeInBits()), DL, MaskTy);
927 SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes);
928
929 Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
930 Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
931 SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
932 return DAG.getNode(ISD::BITCAST, DL, Node->getValueType(0), Val);
933 }
934
ExpandSEXTINREG(SDNode * Node)935 SDValue VectorLegalizer::ExpandSEXTINREG(SDNode *Node) {
936 EVT VT = Node->getValueType(0);
937
938 // Make sure that the SRA and SHL instructions are available.
939 if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Expand ||
940 TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Expand)
941 return DAG.UnrollVectorOp(Node);
942
943 SDLoc DL(Node);
944 EVT OrigTy = cast<VTSDNode>(Node->getOperand(1))->getVT();
945
946 unsigned BW = VT.getScalarSizeInBits();
947 unsigned OrigBW = OrigTy.getScalarSizeInBits();
948 SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);
949
950 SDValue Op = DAG.getNode(ISD::SHL, DL, VT, Node->getOperand(0), ShiftSz);
951 return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
952 }
953
954 // Generically expand a vector anyext in register to a shuffle of the relevant
955 // lanes into the appropriate locations, with other lanes left undef.
ExpandANY_EXTEND_VECTOR_INREG(SDNode * Node)956 SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDNode *Node) {
957 SDLoc DL(Node);
958 EVT VT = Node->getValueType(0);
959 int NumElements = VT.getVectorNumElements();
960 SDValue Src = Node->getOperand(0);
961 EVT SrcVT = Src.getValueType();
962 int NumSrcElements = SrcVT.getVectorNumElements();
963
964 // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
965 // into a larger vector type.
966 if (SrcVT.bitsLE(VT)) {
967 assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
968 "ANY_EXTEND_VECTOR_INREG vector size mismatch");
969 NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
970 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
971 NumSrcElements);
972 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcVT, DAG.getUNDEF(SrcVT),
973 Src, DAG.getVectorIdxConstant(0, DL));
974 }
975
976 // Build a base mask of undef shuffles.
977 SmallVector<int, 16> ShuffleMask;
978 ShuffleMask.resize(NumSrcElements, -1);
979
980 // Place the extended lanes into the correct locations.
981 int ExtLaneScale = NumSrcElements / NumElements;
982 int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
983 for (int i = 0; i < NumElements; ++i)
984 ShuffleMask[i * ExtLaneScale + EndianOffset] = i;
985
986 return DAG.getNode(
987 ISD::BITCAST, DL, VT,
988 DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getUNDEF(SrcVT), ShuffleMask));
989 }
990
ExpandSIGN_EXTEND_VECTOR_INREG(SDNode * Node)991 SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDNode *Node) {
992 SDLoc DL(Node);
993 EVT VT = Node->getValueType(0);
994 SDValue Src = Node->getOperand(0);
995 EVT SrcVT = Src.getValueType();
996
997 // First build an any-extend node which can be legalized above when we
998 // recurse through it.
999 SDValue Op = DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Src);
1000
1001 // Now we need sign extend. Do this by shifting the elements. Even if these
1002 // aren't legal operations, they have a better chance of being legalized
1003 // without full scalarization than the sign extension does.
1004 unsigned EltWidth = VT.getScalarSizeInBits();
1005 unsigned SrcEltWidth = SrcVT.getScalarSizeInBits();
1006 SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
1007 return DAG.getNode(ISD::SRA, DL, VT,
1008 DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
1009 ShiftAmount);
1010 }
1011
1012 // Generically expand a vector zext in register to a shuffle of the relevant
1013 // lanes into the appropriate locations, a blend of zero into the high bits,
1014 // and a bitcast to the wider element type.
ExpandZERO_EXTEND_VECTOR_INREG(SDNode * Node)1015 SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDNode *Node) {
1016 SDLoc DL(Node);
1017 EVT VT = Node->getValueType(0);
1018 int NumElements = VT.getVectorNumElements();
1019 SDValue Src = Node->getOperand(0);
1020 EVT SrcVT = Src.getValueType();
1021 int NumSrcElements = SrcVT.getVectorNumElements();
1022
1023 // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
1024 // into a larger vector type.
1025 if (SrcVT.bitsLE(VT)) {
1026 assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
1027 "ZERO_EXTEND_VECTOR_INREG vector size mismatch");
1028 NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
1029 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
1030 NumSrcElements);
1031 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcVT, DAG.getUNDEF(SrcVT),
1032 Src, DAG.getVectorIdxConstant(0, DL));
1033 }
1034
1035 // Build up a zero vector to blend into this one.
1036 SDValue Zero = DAG.getConstant(0, DL, SrcVT);
1037
1038 // Shuffle the incoming lanes into the correct position, and pull all other
1039 // lanes from the zero vector.
1040 SmallVector<int, 16> ShuffleMask;
1041 ShuffleMask.reserve(NumSrcElements);
1042 for (int i = 0; i < NumSrcElements; ++i)
1043 ShuffleMask.push_back(i);
1044
1045 int ExtLaneScale = NumSrcElements / NumElements;
1046 int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
1047 for (int i = 0; i < NumElements; ++i)
1048 ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;
1049
1050 return DAG.getNode(ISD::BITCAST, DL, VT,
1051 DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask));
1052 }
1053
createBSWAPShuffleMask(EVT VT,SmallVectorImpl<int> & ShuffleMask)1054 static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
1055 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
1056 for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I)
1057 for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
1058 ShuffleMask.push_back((I * ScalarSizeInBytes) + J);
1059 }
1060
ExpandBSWAP(SDNode * Node)1061 SDValue VectorLegalizer::ExpandBSWAP(SDNode *Node) {
1062 EVT VT = Node->getValueType(0);
1063
1064 // Generate a byte wise shuffle mask for the BSWAP.
1065 SmallVector<int, 16> ShuffleMask;
1066 createBSWAPShuffleMask(VT, ShuffleMask);
1067 EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size());
1068
1069 // Only emit a shuffle if the mask is legal.
1070 if (!TLI.isShuffleMaskLegal(ShuffleMask, ByteVT))
1071 return DAG.UnrollVectorOp(Node);
1072
1073 SDLoc DL(Node);
1074 SDValue Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Node->getOperand(0));
1075 Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT), ShuffleMask);
1076 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
1077 }
1078
// Expand a vector BITREVERSE, choosing (in order of preference): unroll to a
// legal scalar BITREVERSE; BSWAP-shuffle the bytes then BITREVERSE the byte
// vector; defer to LegalizeDAG's shift/mask expansion; or fully unroll.
void VectorLegalizer::ExpandBITREVERSE(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  EVT VT = Node->getValueType(0);

  // If we have the scalar operation, it's probably cheaper to unroll it.
  if (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, VT.getScalarType())) {
    SDValue Tmp = DAG.UnrollVectorOp(Node);
    Results.push_back(Tmp);
    return;
  }

  // If the vector element width is a whole number of bytes, test if its legal
  // to BSWAP shuffle the bytes and then perform the BITREVERSE on the byte
  // vector. This greatly reduces the number of bit shifts necessary.
  unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
  if (ScalarSizeInBits > 8 && (ScalarSizeInBits % 8) == 0) {
    SmallVector<int, 16> BSWAPMask;
    createBSWAPShuffleMask(VT, BSWAPMask);

    EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, BSWAPMask.size());
    // Requirements: the byte-reversing shuffle must be legal, and the i8
    // vector BITREVERSE must either be supported directly or be expandable
    // with vector shifts plus AND/OR.
    if (TLI.isShuffleMaskLegal(BSWAPMask, ByteVT) &&
        (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, ByteVT) ||
         (TLI.isOperationLegalOrCustom(ISD::SHL, ByteVT) &&
          TLI.isOperationLegalOrCustom(ISD::SRL, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::AND, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::OR, ByteVT)))) {
      SDLoc DL(Node);
      // Reversing an element == reversing its byte order, then reversing the
      // bits inside each byte: shuffle first, BITREVERSE the bytes second.
      SDValue Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Node->getOperand(0));
      Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT),
                                BSWAPMask);
      Op = DAG.getNode(ISD::BITREVERSE, DL, ByteVT, Op);
      Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
      Results.push_back(Op);
      return;
    }
  }

  // If we have the appropriate vector bit operations, it is better to use them
  // than unrolling and expanding each component.
  if (TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
      TLI.isOperationLegalOrCustom(ISD::SRL, VT) &&
      TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT) &&
      TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT))
    // Let LegalizeDAG handle this later. Pushing nothing into Results signals
    // the node is left as-is for now.
    return;

  // Otherwise unroll.
  SDValue Tmp = DAG.UnrollVectorOp(Node);
  Results.push_back(Tmp);
}
1129
ExpandVSELECT(SDNode * Node)1130 SDValue VectorLegalizer::ExpandVSELECT(SDNode *Node) {
1131 // Implement VSELECT in terms of XOR, AND, OR
1132 // on platforms which do not support blend natively.
1133 SDLoc DL(Node);
1134
1135 SDValue Mask = Node->getOperand(0);
1136 SDValue Op1 = Node->getOperand(1);
1137 SDValue Op2 = Node->getOperand(2);
1138
1139 EVT VT = Mask.getValueType();
1140
1141 // If we can't even use the basic vector operations of
1142 // AND,OR,XOR, we will have to scalarize the op.
1143 // Notice that the operation may be 'promoted' which means that it is
1144 // 'bitcasted' to another type which is handled.
1145 // This operation also isn't safe with AND, OR, XOR when the boolean
1146 // type is 0/1 as we need an all ones vector constant to mask with.
1147 // FIXME: Sign extend 1 to all ones if thats legal on the target.
1148 if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
1149 TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
1150 TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
1151 TLI.getBooleanContents(Op1.getValueType()) !=
1152 TargetLowering::ZeroOrNegativeOneBooleanContent)
1153 return DAG.UnrollVectorOp(Node);
1154
1155 // If the mask and the type are different sizes, unroll the vector op. This
1156 // can occur when getSetCCResultType returns something that is different in
1157 // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
1158 if (VT.getSizeInBits() != Op1.getValueSizeInBits())
1159 return DAG.UnrollVectorOp(Node);
1160
1161 // Bitcast the operands to be the same type as the mask.
1162 // This is needed when we select between FP types because
1163 // the mask is a vector of integers.
1164 Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1);
1165 Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);
1166
1167 SDValue AllOnes = DAG.getConstant(
1168 APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL, VT);
1169 SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes);
1170
1171 Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
1172 Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
1173 SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
1174 return DAG.getNode(ISD::BITCAST, DL, Node->getValueType(0), Val);
1175 }
1176
ExpandFP_TO_UINT(SDNode * Node,SmallVectorImpl<SDValue> & Results)1177 void VectorLegalizer::ExpandFP_TO_UINT(SDNode *Node,
1178 SmallVectorImpl<SDValue> &Results) {
1179 // Attempt to expand using TargetLowering.
1180 SDValue Result, Chain;
1181 if (TLI.expandFP_TO_UINT(Node, Result, Chain, DAG)) {
1182 Results.push_back(Result);
1183 if (Node->isStrictFPOpcode())
1184 Results.push_back(Chain);
1185 return;
1186 }
1187
1188 // Otherwise go ahead and unroll.
1189 if (Node->isStrictFPOpcode()) {
1190 UnrollStrictFPOp(Node, Results);
1191 return;
1192 }
1193
1194 Results.push_back(DAG.UnrollVectorOp(Node));
1195 }
1196
// Expand [STRICT_]UINT_TO_FP. Falls back to TargetLowering's expansion, then
// to unrolling, and finally lowers via two signed conversions: each element
// is split into high/low half-words, each half is converted with SINT_TO_FP
// (exact, since both halves are non-negative and fit the element width), and
// the result is reassembled as fHI * 2^(BW/2) + fLO.
void VectorLegalizer::ExpandUINT_TO_FLOAT(SDNode *Node,
                                          SmallVectorImpl<SDValue> &Results) {
  bool IsStrict = Node->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0; // Strict nodes carry the chain at op 0.
  SDValue Src = Node->getOperand(OpNo);
  EVT VT = Src.getValueType();
  SDLoc DL(Node);

  // Attempt to expand using TargetLowering.
  SDValue Result;
  SDValue Chain;
  if (TLI.expandUINT_TO_FP(Node, Result, Chain, DAG)) {
    Results.push_back(Result);
    if (IsStrict)
      Results.push_back(Chain);
    return;
  }

  // Make sure that the SINT_TO_FP and SRL instructions are available,
  // otherwise unroll (keeping the chain for strict nodes).
  if (((!IsStrict && TLI.getOperationAction(ISD::SINT_TO_FP, VT) ==
                         TargetLowering::Expand) ||
       (IsStrict && TLI.getOperationAction(ISD::STRICT_SINT_TO_FP, VT) ==
                        TargetLowering::Expand)) ||
      TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand) {
    if (IsStrict) {
      UnrollStrictFPOp(Node, Results);
      return;
    }

    Results.push_back(DAG.UnrollVectorOp(Node));
    return;
  }

  unsigned BW = VT.getScalarSizeInBits();
  assert((BW == 64 || BW == 32) &&
         "Elements in vector-UINT_TO_FP must be 32 or 64 bits wide");

  SDValue HalfWord = DAG.getConstant(BW / 2, DL, VT);

  // Constants to clear the upper part of the word.
  // Notice that we can also use SHL+SHR, but using a constant is slightly
  // faster on x86.
  uint64_t HWMask = (BW == 64) ? 0x00000000FFFFFFFF : 0x0000FFFF;
  SDValue HalfWordMask = DAG.getConstant(HWMask, DL, VT);

  // Two to the power of half-word-size.
  SDValue TWOHW =
      DAG.getConstantFP(1ULL << (BW / 2), DL, Node->getValueType(0));

  // Clear upper part of LO, lower HI
  SDValue HI = DAG.getNode(ISD::SRL, DL, VT, Src, HalfWord);
  SDValue LO = DAG.getNode(ISD::AND, DL, VT, Src, HalfWordMask);

  if (IsStrict) {
    // Convert hi and lo to floats
    // Convert the hi part back to the upper values
    // TODO: Can any fast-math-flags be set on these nodes?
    SDValue fHI = DAG.getNode(ISD::STRICT_SINT_TO_FP, DL,
                              {Node->getValueType(0), MVT::Other},
                              {Node->getOperand(0), HI});
    fHI = DAG.getNode(ISD::STRICT_FMUL, DL, {Node->getValueType(0), MVT::Other},
                      {fHI.getValue(1), fHI, TWOHW});
    SDValue fLO = DAG.getNode(ISD::STRICT_SINT_TO_FP, DL,
                              {Node->getValueType(0), MVT::Other},
                              {Node->getOperand(0), LO});

    // Merge the chains of the two conversions so neither is lost.
    SDValue TF = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, fHI.getValue(1),
                             fLO.getValue(1));

    // Add the two halves
    SDValue Result =
        DAG.getNode(ISD::STRICT_FADD, DL, {Node->getValueType(0), MVT::Other},
                    {TF, fHI, fLO});

    Results.push_back(Result);
    Results.push_back(Result.getValue(1));
    return;
  }

  // Convert hi and lo to floats
  // Convert the hi part back to the upper values
  // TODO: Can any fast-math-flags be set on these nodes?
  SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, Node->getValueType(0), HI);
  fHI = DAG.getNode(ISD::FMUL, DL, Node->getValueType(0), fHI, TWOHW);
  SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, Node->getValueType(0), LO);

  // Add the two halves
  Results.push_back(
      DAG.getNode(ISD::FADD, DL, Node->getValueType(0), fHI, fLO));
}
1287
ExpandFNEG(SDNode * Node)1288 SDValue VectorLegalizer::ExpandFNEG(SDNode *Node) {
1289 if (TLI.isOperationLegalOrCustom(ISD::FSUB, Node->getValueType(0))) {
1290 SDLoc DL(Node);
1291 SDValue Zero = DAG.getConstantFP(-0.0, DL, Node->getValueType(0));
1292 // TODO: If FNEG had fast-math-flags, they'd get propagated to this FSUB.
1293 return DAG.getNode(ISD::FSUB, DL, Node->getValueType(0), Zero,
1294 Node->getOperand(0));
1295 }
1296 return DAG.UnrollVectorOp(Node);
1297 }
1298
ExpandFSUB(SDNode * Node,SmallVectorImpl<SDValue> & Results)1299 void VectorLegalizer::ExpandFSUB(SDNode *Node,
1300 SmallVectorImpl<SDValue> &Results) {
1301 // For floating-point values, (a-b) is the same as a+(-b). If FNEG is legal,
1302 // we can defer this to operation legalization where it will be lowered as
1303 // a+(-b).
1304 EVT VT = Node->getValueType(0);
1305 if (TLI.isOperationLegalOrCustom(ISD::FNEG, VT) &&
1306 TLI.isOperationLegalOrCustom(ISD::FADD, VT))
1307 return; // Defer to LegalizeDAG
1308
1309 SDValue Tmp = DAG.UnrollVectorOp(Node);
1310 Results.push_back(Tmp);
1311 }
1312
ExpandUADDSUBO(SDNode * Node,SmallVectorImpl<SDValue> & Results)1313 void VectorLegalizer::ExpandUADDSUBO(SDNode *Node,
1314 SmallVectorImpl<SDValue> &Results) {
1315 SDValue Result, Overflow;
1316 TLI.expandUADDSUBO(Node, Result, Overflow, DAG);
1317 Results.push_back(Result);
1318 Results.push_back(Overflow);
1319 }
1320
ExpandSADDSUBO(SDNode * Node,SmallVectorImpl<SDValue> & Results)1321 void VectorLegalizer::ExpandSADDSUBO(SDNode *Node,
1322 SmallVectorImpl<SDValue> &Results) {
1323 SDValue Result, Overflow;
1324 TLI.expandSADDSUBO(Node, Result, Overflow, DAG);
1325 Results.push_back(Result);
1326 Results.push_back(Overflow);
1327 }
1328
ExpandMULO(SDNode * Node,SmallVectorImpl<SDValue> & Results)1329 void VectorLegalizer::ExpandMULO(SDNode *Node,
1330 SmallVectorImpl<SDValue> &Results) {
1331 SDValue Result, Overflow;
1332 if (!TLI.expandMULO(Node, Result, Overflow, DAG))
1333 std::tie(Result, Overflow) = DAG.UnrollVectorOverflowOp(Node);
1334
1335 Results.push_back(Result);
1336 Results.push_back(Overflow);
1337 }
1338
ExpandFixedPointDiv(SDNode * Node,SmallVectorImpl<SDValue> & Results)1339 void VectorLegalizer::ExpandFixedPointDiv(SDNode *Node,
1340 SmallVectorImpl<SDValue> &Results) {
1341 SDNode *N = Node;
1342 if (SDValue Expanded = TLI.expandFixedPointDiv(N->getOpcode(), SDLoc(N),
1343 N->getOperand(0), N->getOperand(1), N->getConstantOperandVal(2), DAG))
1344 Results.push_back(Expanded);
1345 }
1346
ExpandStrictFPOp(SDNode * Node,SmallVectorImpl<SDValue> & Results)1347 void VectorLegalizer::ExpandStrictFPOp(SDNode *Node,
1348 SmallVectorImpl<SDValue> &Results) {
1349 if (Node->getOpcode() == ISD::STRICT_UINT_TO_FP) {
1350 ExpandUINT_TO_FLOAT(Node, Results);
1351 return;
1352 }
1353 if (Node->getOpcode() == ISD::STRICT_FP_TO_UINT) {
1354 ExpandFP_TO_UINT(Node, Results);
1355 return;
1356 }
1357
1358 UnrollStrictFPOp(Node, Results);
1359 }
1360
ExpandREM(SDNode * Node,SmallVectorImpl<SDValue> & Results)1361 void VectorLegalizer::ExpandREM(SDNode *Node,
1362 SmallVectorImpl<SDValue> &Results) {
1363 assert((Node->getOpcode() == ISD::SREM || Node->getOpcode() == ISD::UREM) &&
1364 "Expected REM node");
1365
1366 SDValue Result;
1367 if (!TLI.expandREM(Node, Result, DAG))
1368 Result = DAG.UnrollVectorOp(Node);
1369 Results.push_back(Result);
1370 }
1371
// Scalarize a strict (chained) FP vector op: emit one scalar strict node per
// element, all reading the incoming chain, then rebuild the vector result and
// merge the per-element chains with a TokenFactor.
void VectorLegalizer::UnrollStrictFPOp(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  EVT VT = Node->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumOpers = Node->getNumOperands();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // STRICT_FSETCC/FSETCCS produce a boolean per element whose scalar type
  // comes from getSetCCResultType, not from the result vector's element type.
  EVT TmpEltVT = EltVT;
  if (Node->getOpcode() == ISD::STRICT_FSETCC ||
      Node->getOpcode() == ISD::STRICT_FSETCCS)
    TmpEltVT = TLI.getSetCCResultType(DAG.getDataLayout(),
                                      *DAG.getContext(), TmpEltVT);

  EVT ValueVTs[] = {TmpEltVT, MVT::Other};
  SDValue Chain = Node->getOperand(0);
  SDLoc dl(Node);

  SmallVector<SDValue, 32> OpValues; // Per-element scalar results.
  SmallVector<SDValue, 32> OpChains; // Per-element output chains.
  for (unsigned i = 0; i < NumElems; ++i) {
    SmallVector<SDValue, 4> Opers;
    SDValue Idx = DAG.getVectorIdxConstant(i, dl);

    // The Chain is the first operand.
    Opers.push_back(Chain);

    // Now process the remaining operands.
    for (unsigned j = 1; j < NumOpers; ++j) {
      SDValue Oper = Node->getOperand(j);
      EVT OperVT = Oper.getValueType();

      // Extract element i from vector operands; non-vector operands (e.g. a
      // setcc condition code) are passed through unchanged.
      if (OperVT.isVector())
        Oper = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                           OperVT.getVectorElementType(), Oper, Idx);

      Opers.push_back(Oper);
    }

    SDValue ScalarOp = DAG.getNode(Node->getOpcode(), dl, ValueVTs, Opers);
    SDValue ScalarResult = ScalarOp.getValue(0);
    SDValue ScalarChain = ScalarOp.getValue(1);

    // For the setcc opcodes, widen the scalar boolean to an all-ones /
    // all-zeros element of EltVT so the rebuilt vector has mask semantics.
    if (Node->getOpcode() == ISD::STRICT_FSETCC ||
        Node->getOpcode() == ISD::STRICT_FSETCCS)
      ScalarResult = DAG.getSelect(dl, EltVT, ScalarResult,
                                   DAG.getConstant(APInt::getAllOnesValue
                                                   (EltVT.getSizeInBits()), dl, EltVT),
                                   DAG.getConstant(0, dl, EltVT));

    OpValues.push_back(ScalarResult);
    OpChains.push_back(ScalarChain);
  }

  // Reassemble the vector result and merge every scalar op's chain.
  SDValue Result = DAG.getBuildVector(VT, dl, OpValues);
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OpChains);

  Results.push_back(Result);
  Results.push_back(NewChain);
}
1432
UnrollVSETCC(SDNode * Node)1433 SDValue VectorLegalizer::UnrollVSETCC(SDNode *Node) {
1434 EVT VT = Node->getValueType(0);
1435 unsigned NumElems = VT.getVectorNumElements();
1436 EVT EltVT = VT.getVectorElementType();
1437 SDValue LHS = Node->getOperand(0);
1438 SDValue RHS = Node->getOperand(1);
1439 SDValue CC = Node->getOperand(2);
1440 EVT TmpEltVT = LHS.getValueType().getVectorElementType();
1441 SDLoc dl(Node);
1442 SmallVector<SDValue, 8> Ops(NumElems);
1443 for (unsigned i = 0; i < NumElems; ++i) {
1444 SDValue LHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
1445 DAG.getVectorIdxConstant(i, dl));
1446 SDValue RHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
1447 DAG.getVectorIdxConstant(i, dl));
1448 Ops[i] = DAG.getNode(ISD::SETCC, dl,
1449 TLI.getSetCCResultType(DAG.getDataLayout(),
1450 *DAG.getContext(), TmpEltVT),
1451 LHSElem, RHSElem, CC);
1452 Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
1453 DAG.getConstant(APInt::getAllOnesValue
1454 (EltVT.getSizeInBits()), dl, EltVT),
1455 DAG.getConstant(0, dl, EltVT));
1456 }
1457 return DAG.getBuildVector(VT, dl, Ops);
1458 }
1459
LegalizeVectors()1460 bool SelectionDAG::LegalizeVectors() {
1461 return VectorLegalizer(*this).Run();
1462 }
1463