//===- SwitchLoweringUtils.cpp - Switch Lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains switch inst lowering optimizations and utilities for
// codegen, so that it can be used for both SelectionDAG and GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;
using namespace SwitchCG;

uint64_t SwitchCG::getJumpTableRange(const CaseClusterVector &Clusters,
                                     unsigned First, unsigned Last) {
  assert(Last >= First);
  const APInt &LowCase = Clusters[First].Low->getValue();
  const APInt &HighCase = Clusters[Last].High->getValue();
  assert(LowCase.getBitWidth() == HighCase.getBitWidth());

  // FIXME: A range of consecutive cases has 100% density, but only requires one
  // comparison to lower. We should discriminate against such consecutive ranges
  // in jump tables.
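  // The result is clamped so that callers can scale it by 100 (as the
  // percent-based density checks in isSuitableForJumpTable do) without
  // overflowing uint64_t. E.g. clusters covering the cases {0, 3, 10} give
  // HighCase - LowCase = 10, i.e. 11 potential table entries.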
  return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
}

uint64_t
SwitchCG::getJumpTableNumCases(const SmallVectorImpl<unsigned> &TotalCases,
                               unsigned First, unsigned Last) {
  assert(Last >= First);
  assert(TotalCases[Last] >= TotalCases[First]);
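  // TotalCases is a prefix sum: TotalCases[i] holds the number of individual
  // case values in Clusters[0..i]. E.g. for cluster sizes {2, 1, 3} it is
  // {2, 3, 6}, and the number of cases in Clusters[1..2] is 6 - 2 = 4.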
  uint64_t NumCases =
      TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
  return NumCases;
}

void SwitchCG::SwitchLowering::findJumpTables(CaseClusterVector &Clusters,
                                              const SwitchInst *SI,
                                              MachineBasicBlock *DefaultMBB,
                                              ProfileSummaryInfo *PSI,
                                              BlockFrequencyInfo *BFI) {
#ifndef NDEBUG
  // Clusters must be non-empty, sorted, and only contain Range clusters.
  assert(!Clusters.empty());
  for (CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range);
  for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
    assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  assert(TLI && "TLI not set!");
  if (!TLI->areJTsAllowed(SI->getParent()->getParent()))
    return;

  const unsigned MinJumpTableEntries = TLI->getMinimumJumpTableEntries();
  const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;

  // Bail if not enough cases.
  const int64_t N = Clusters.size();
  if (N < 2 || N < MinJumpTableEntries)
    return;

  // Accumulated number of cases in each cluster and those prior to it.
  SmallVector<unsigned, 8> TotalCases(N);
  for (unsigned i = 0; i < N; ++i) {
    const APInt &Hi = Clusters[i].High->getValue();
    const APInt &Lo = Clusters[i].Low->getValue();
    TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
    if (i != 0)
      TotalCases[i] += TotalCases[i - 1];
  }

  uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
  uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
  assert(NumCases < UINT64_MAX / 100);
  assert(Range >= NumCases);

  // Cheap case: the whole range may be suitable for a jump table.
  if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
    CaseCluster JTCluster;
    if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
      Clusters[0] = JTCluster;
      Clusters.resize(1);
      return;
    }
  }

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOpt::None)
    return;

  // Split Clusters into minimum number of dense partitions. The algorithm uses
  // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
  // for the Case Statement'" (1994), but builds the MinPartitions array in
  // reverse order to make it easier to reconstruct the partitions in ascending
  // order. In the choice between two optimal partitionings, it picks the one
  // which yields more jump tables.
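  //
  // For example, with MinJumpTableEntries == 4 and the clusters
  // {0, 1, 2, 3, 100}, the whole range 0..100 is too sparse for a single
  // jump table, but the partitioning can put 0..3 into one table and keep
  // 100 as a plain range cluster.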

  // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);
  // PartitionsScore[i] is used to break ties when choosing between two
  // partitionings resulting in the same number of partitions.
  SmallVector<unsigned, 8> PartitionsScore(N);
  // For PartitionsScore, a small number of comparisons is considered as good as
  // a jump table and a single comparison is considered better than a jump
  // table.
  enum PartitionScores : unsigned {
    NoTable = 0,
    Table = 1,
    FewCases = 1,
    SingleCase = 2
  };
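
  // E.g. between two partitionings with the same number of partitions,
  // {jump table, single case} scores Table + SingleCase = 3 and is preferred
  // over {jump table, few cases} at Table + FewCases = 2.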

  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;
  PartitionsScore[N - 1] = PartitionScores::SingleCase;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; i--) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;
    PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;

    // Search for a solution that results in fewer partitions.
    for (int64_t j = N - 1; j > i; j--) {
      // Try building a partition from Clusters[i..j].
      Range = getJumpTableRange(Clusters, i, j);
      NumCases = getJumpTableNumCases(TotalCases, i, j);
      assert(NumCases < UINT64_MAX / 100);
      assert(Range >= NumCases);

      if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
        unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
        unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
        int64_t NumEntries = j - i + 1;

        if (NumEntries == 1)
          Score += PartitionScores::SingleCase;
        else if (NumEntries <= SmallNumberOfEntries)
          Score += PartitionScores::FewCases;
        else if (NumEntries >= MinJumpTableEntries)
          Score += PartitionScores::Table;

        // If this leads to fewer partitions, or to the same number of
        // partitions with better score, it is a better partitioning.
        if (NumPartitions < MinPartitions[i] ||
            (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
          MinPartitions[i] = NumPartitions;
          LastElement[i] = j;
          PartitionsScore[i] = Score;
        }
      }
    }
  }

  // Iterate over the partitions, replacing some with jump tables in-place.
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(Last >= First);
    assert(DstIndex <= First);
    unsigned NumClusters = Last - First + 1;

    CaseCluster JTCluster;
    if (NumClusters >= MinJumpTableEntries &&
        buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
      Clusters[DstIndex++] = JTCluster;
    } else {
      for (unsigned I = First; I <= Last; ++I)
        std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildJumpTable(const CaseClusterVector &Clusters,
                                              unsigned First, unsigned Last,
                                              const SwitchInst *SI,
                                              MachineBasicBlock *DefaultMBB,
                                              CaseCluster &JTCluster) {
  assert(First <= Last);

  auto Prob = BranchProbability::getZero();
  unsigned NumCmps = 0;
  std::vector<MachineBasicBlock*> Table;
  DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;

  // Initialize probabilities in JTProbs.
  for (unsigned I = First; I <= Last; ++I)
    JTProbs[Clusters[I].MBB] = BranchProbability::getZero();

  for (unsigned I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Prob += Clusters[I].Prob;
    const APInt &Low = Clusters[I].Low->getValue();
    const APInt &High = Clusters[I].High->getValue();
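    // A single-value cluster lowers to one equality comparison; a true range
    // needs two bound checks.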
    NumCmps += (Low == High) ? 1 : 2;
    if (I != First) {
      // Fill the gap between this and the previous cluster.
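      // E.g. clusters [1, 2] and [5, 6] leave table entries for 3 and 4
      // pointing at DefaultMBB.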
      const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
      assert(PreviousHigh.slt(Low));
      uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
      for (uint64_t J = 0; J < Gap; J++)
        Table.push_back(DefaultMBB);
    }
    uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
    for (uint64_t J = 0; J < ClusterSize; ++J)
      Table.push_back(Clusters[I].MBB);
    JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
  }

  unsigned NumDests = JTProbs.size();
  if (TLI->isSuitableForBitTests(NumDests, NumCmps,
                                 Clusters[First].Low->getValue(),
                                 Clusters[Last].High->getValue(), *DL)) {
    // Clusters[First..Last] should be lowered as bit tests instead.
    return false;
  }

  // Create the MBB that will load from and jump through the table.
  // Note: We create it here, but it's not inserted into the function yet.
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *JumpTableMBB =
      CurMF->CreateMachineBasicBlock(SI->getParent());

  // Add successors. Note: use table order for determinism.
  SmallPtrSet<MachineBasicBlock *, 8> Done;
  for (MachineBasicBlock *Succ : Table) {
    if (Done.count(Succ))
      continue;
    addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
    Done.insert(Succ);
  }
  JumpTableMBB->normalizeSuccProbs();

  unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI->getJumpTableEncoding())
                     ->createJumpTableIndex(Table);

  // Set up the jump table info.
  JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
  JumpTableHeader JTH(Clusters[First].Low->getValue(),
                      Clusters[Last].High->getValue(), SI->getCondition(),
                      nullptr, false);
  JTCases.emplace_back(std::move(JTH), std::move(JT));

  JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
                                     JTCases.size() - 1, Prob);
  return true;
}

void SwitchCG::SwitchLowering::findBitTestClusters(CaseClusterVector &Clusters,
                                                   const SwitchInst *SI) {
  // Partition Clusters into as few subsets as possible, where each subset has a
  // range that fits in a machine word and has <= 3 unique destinations.
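  // E.g. the cases {0, 3, 5}, all branching to the same block, can then be
  // lowered to a single test along the lines of ((1 << X) & 0b101001) != 0
  // instead of three separate comparisons.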

#ifndef NDEBUG
  // Clusters must be sorted and contain Range or JumpTable clusters.
  assert(!Clusters.empty());
  assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
  for (const CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
  for (unsigned i = 1; i < Clusters.size(); ++i)
    assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOpt::None)
    return;

  // If the target does not have a legal shift left, do not emit bit tests.
  EVT PTy = TLI->getPointerTy(*DL);
  if (!TLI->isOperationLegal(ISD::SHL, PTy))
    return;

  int BitWidth = PTy.getSizeInBits();
  const int64_t N = Clusters.size();

  // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);

  // FIXME: This might not be the best algorithm for finding bit test clusters.

  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; --i) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;

    // Search for a solution that results in fewer partitions.
    // Note: the search is limited by BitWidth, reducing time complexity.
    for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
      // Try building a partition from Clusters[i..j].

      // Check the range.
      if (!TLI->rangeFitsInWord(Clusters[i].Low->getValue(),
                                Clusters[j].High->getValue(), *DL))
        continue;

      // Check the number of destinations and cluster types.
      // FIXME: This works, but doesn't seem very efficient.
      bool RangesOnly = true;
      BitVector Dests(FuncInfo.MF->getNumBlockIDs());
      for (int64_t k = i; k <= j; k++) {
        if (Clusters[k].Kind != CC_Range) {
          RangesOnly = false;
          break;
        }
        Dests.set(Clusters[k].MBB->getNumber());
      }
      if (!RangesOnly || Dests.count() > 3)
        break;

      // Check if it's a better partition.
      unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
      if (NumPartitions < MinPartitions[i]) {
        // Found a better partition.
        MinPartitions[i] = NumPartitions;
        LastElement[i] = j;
      }
    }
  }

  // Iterate over the partitions, replacing with bit-test clusters in-place.
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(First <= Last);
    assert(DstIndex <= First);

    CaseCluster BitTestCluster;
    if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
      Clusters[DstIndex++] = BitTestCluster;
    } else {
      size_t NumClusters = Last - First + 1;
      std::memmove(&Clusters[DstIndex], &Clusters[First],
                   sizeof(Clusters[0]) * NumClusters);
      DstIndex += NumClusters;
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildBitTests(CaseClusterVector &Clusters,
                                             unsigned First, unsigned Last,
                                             const SwitchInst *SI,
                                             CaseCluster &BTCluster) {
  assert(First <= Last);
  if (First == Last)
    return false;

  BitVector Dests(FuncInfo.MF->getNumBlockIDs());
  unsigned NumCmps = 0;
  for (int64_t I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Dests.set(Clusters[I].MBB->getNumber());
    NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
  }
  unsigned NumDests = Dests.count();

  APInt Low = Clusters[First].Low->getValue();
  APInt High = Clusters[Last].High->getValue();
  assert(Low.slt(High));

  if (!TLI->isSuitableForBitTests(NumDests, NumCmps, Low, High, *DL))
    return false;

  APInt LowBound;
  APInt CmpRange;

  const int BitWidth = TLI->getPointerTy(*DL).getSizeInBits();
  assert(TLI->rangeFitsInWord(Low, High, *DL) &&
         "Case range must fit in bit mask!");

  // Check if the clusters cover a contiguous range such that no value in the
  // range will jump to the default statement.
  bool ContiguousRange = true;
  for (int64_t I = First + 1; I <= Last; ++I) {
    if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
      ContiguousRange = false;
      break;
    }
  }
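
  // When the range is contiguous, every in-range value hits some case, which
  // may let later lowering skip the explicit range check (e.g. when the
  // default destination is unreachable).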

  if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
    // Optimize the case where all the case values fit in a word without having
    // to subtract minValue. In this case, we can optimize away the subtraction.
    LowBound = APInt::getNullValue(Low.getBitWidth());
    CmpRange = High;
    ContiguousRange = false;
  } else {
    LowBound = Low;
    CmpRange = High - Low;
  }
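
  // E.g. for cases spanning [100, 140], LowBound is 100 and the emitted
  // header is expected to test (X - 100) against CmpRange = 40 before the
  // shift-and-mask test.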

  CaseBitsVector CBV;
  auto TotalProb = BranchProbability::getZero();
  for (unsigned i = First; i <= Last; ++i) {
    // Find the CaseBits for this destination.
    unsigned j;
    for (j = 0; j < CBV.size(); ++j)
      if (CBV[j].BB == Clusters[i].MBB)
        break;
    if (j == CBV.size())
      CBV.push_back(
          CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
    CaseBits *CB = &CBV[j];

    // Update Mask, Bits and ExtraProb.
    uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
    uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
    assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
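    // Set bits Lo..Hi of the mask. E.g. Lo = 4 and Hi = 6: (-1ULL >> 61) is
    // 0b111; shifted left by 4 it gives mask 0b1110000.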
    CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
    CB->Bits += Hi - Lo + 1;
    CB->ExtraProb += Clusters[i].Prob;
    TotalProb += Clusters[i].Prob;
  }

  BitTestInfo BTI;
  llvm::sort(CBV, [](const CaseBits &a, const CaseBits &b) {
    // Sort by probability first, number of bits second, bit mask third.
    if (a.ExtraProb != b.ExtraProb)
      return a.ExtraProb > b.ExtraProb;
    if (a.Bits != b.Bits)
      return a.Bits > b.Bits;
    return a.Mask < b.Mask;
  });
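
  // Testing the most probable destination first keeps the expected number of
  // executed bit tests low; the mask comparison is just a deterministic
  // tie-breaker.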

  for (auto &CB : CBV) {
    MachineBasicBlock *BitTestBB =
        FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
    BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
  }
  BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
                            SI->getCondition(), -1U, MVT::Other, false,
                            ContiguousRange, nullptr, nullptr, std::move(BTI),
                            TotalProb);

  BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
                                    BitTestCases.size() - 1, TotalProb);
  return true;
}

void SwitchCG::sortAndRangeify(CaseClusterVector &Clusters) {
#ifndef NDEBUG
  for (const CaseCluster &CC : Clusters)
    assert(CC.Low == CC.High && "Input clusters must be single-case");
#endif

  llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
    return a.Low->getValue().slt(b.Low->getValue());
  });

  // Merge adjacent clusters with the same destination.
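  // E.g. the single cases 1, 2 and 3, all branching to the same block, merge
  // into one CC_Range cluster covering [1, 3].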
  const unsigned N = Clusters.size();
  unsigned DstIndex = 0;
  for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
    CaseCluster &CC = Clusters[SrcIndex];
    const ConstantInt *CaseVal = CC.Low;
    MachineBasicBlock *Succ = CC.MBB;

    if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
        (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
      // If this case has the same successor and is a neighbour, merge it into
      // the previous cluster.
      Clusters[DstIndex - 1].High = CaseVal;
      Clusters[DstIndex - 1].Prob += CC.Prob;
    } else {
      std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
                   sizeof(Clusters[SrcIndex]));
    }
  }
  Clusters.resize(DstIndex);
}