//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC) {
  if (!HasRVC)
    return Res.size();

  int Cost = 0;
  for (auto Instr : Res) {
    bool Compressed;
    switch (Instr.Opc) {
    default: llvm_unreachable("Unexpected opcode");
    case RISCV::SLLI:
    case RISCV::SRLI:
      Compressed = true;
      break;
    case RISCV::ADDI:
    case RISCV::ADDIW:
    case RISCV::LUI:
      Compressed = isInt<6>(Instr.Imm);
      break;
    case RISCV::ADDUW:
      Compressed = false;
      break;
    }
    // Two RVC instructions take the same space as one RVI instruction, but
    // can take longer to execute than the single RVI instruction. Thus, we
    // consider that two RVC instructions are slightly more costly than one
    // RVI instruction. For longer sequences of RVC instructions the space
    // savings can be worth it, though. The costs below try to model that.
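    // With these weights, two compressed instructions (2 * 70 = 140) cost
    // more than one uncompressed instruction (100), while four compressed
    // instructions (280) cost less than three uncompressed ones (300).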
    if (!Compressed)
      Cost += 100; // Baseline cost of one RVI instruction: 100%.
    else
      Cost += 70; // 70% cost of baseline.
  }
  return Cost;
}

// Recursively generate a sequence for materializing an integer.
static void generateInstSeqImpl(int64_t Val,
                                const FeatureBitset &ActiveFeatures,
                                RISCVMatInt::InstSeq &Res) {
  bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];

  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate Value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
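    //
    // For example, v = 0x12345678 yields LUI 0x12345 + ADDI(W) 0x678, while
    // v = 0xFFF yields LUI 1 + ADDI(W) -1 (i.e. 0x1000 - 1): the +0x800
    // rounding below compensates for the sign extension of the low 12 bits.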
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.push_back(RISCVMatInt::Inst(RISCV::LUI, Hi20));

    if (Lo12 || Hi20 == 0) {
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.push_back(RISCVMatInt::Inst(AddiOpc, Lo12));
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, this seems implementable by simply emitting the most
  // significant 32 bits (LUI+ADDIW) followed by as many left shifts (SLLI) and
  // immediate additions (ADDI) as needed. However, because ADDI performs a
  // sign-extended addition, that approach only works when at most 11 bits of
  // each ADDI immediate are used. Using all 12 bits of the ADDI immediates, as
  // GAS does, requires processing the constant starting from the least
  // significant bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeqImpl. In each recursion, first the lowest 12 bits are
  // removed from the constant and the optimal shift amount, which can be
  // greater than 12 bits if the constant is sparse, is determined. Then, the
  // shifted remaining constant is processed recursively and gets emitted as
  // soon as it fits into 32 bits. The emission of the shifts and additions is
  // subsequently performed when the recursion returns.
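  //
  // For example, Val = 0x100000001 splits into Lo12 = 1 and Hi52 = 0x100000;
  // the single set bit of Hi52 gives ShiftAmount = 32 and a remaining
  // constant of 1, so the recursion emits ADDI 1, and SLLI 32 + ADDI 1 are
  // appended on the way back out, materializing (1 << 32) + 1.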

  int64_t Lo12 = SignExtend64<12>(Val);
  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
  int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
  Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);

  // If the remaining bits don't fit in 12 bits, we might be able to reduce the
  // shift amount in order to use LUI which will zero the lower 12 bits.
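  // For example, Val = 0x123450000000 would otherwise need LUI + ADDIW to
  // build 0x12345 before an SLLI 28; shifting 12 bits less and recursing on
  // 0x12345000 instead needs only a single LUI before an SLLI 16.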
  if (ShiftAmount > 12 && !isInt<12>(Hi52) && isInt<32>((uint64_t)Hi52 << 12)) {
    // Reduce the shift amount and add zeros to the LSBs so it will match LUI.
    ShiftAmount -= 12;
    Hi52 = (uint64_t)Hi52 << 12;
  }

  generateInstSeqImpl(Hi52, ActiveFeatures, Res);

  Res.push_back(RISCVMatInt::Inst(RISCV::SLLI, ShiftAmount));
  if (Lo12)
    Res.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
}

namespace llvm {
namespace RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
  RISCVMatInt::InstSeq Res;
  generateInstSeqImpl(Val, ActiveFeatures, Res);

  // If the constant is positive we might be able to generate a shifted constant
  // with no leading zeros and use a final SRLI to restore them.
  if (Val > 0 && Res.size() > 2) {
    assert(ActiveFeatures[RISCV::Feature64Bit] &&
           "Expected RV32 to only need 2 instructions");
    unsigned LeadingZeros = countLeadingZeros((uint64_t)Val);
    uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
    // Fill in the bits that will be shifted out with 1s. An example where this
    // helps is trailing one masks with 32 or more ones. This will generate
    // ADDI -1 and an SRLI.
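    // Concretely, Val = 0xFFFFFFFF shifts to all-ones (-1), which
    // materializes as a single ADDI -1 followed by SRLI 32, two instructions
    // instead of the three-instruction ADDI + SLLI + ADDI sequence.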
    ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);

    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
    TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, LeadingZeros));

    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size()) {
      Res = TmpSeq;
      // A 2 instruction sequence is the best we can do.
      if (Res.size() <= 2)
        return Res;
    }

    // Some cases can benefit from filling the lower bits with zeros instead.
    ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
    TmpSeq.clear();
    generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
    TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, LeadingZeros));

    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size()) {
      Res = TmpSeq;
      // A 2 instruction sequence is the best we can do.
      if (Res.size() <= 2)
        return Res;
    }

    // If we have exactly 32 leading zeros and Zba, we can try using zext.w at
    // the end of the sequence.
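    // For example, Val = 0xFFFFFFFE becomes -2 once the upper bits are set,
    // so ADDI -2 followed by ADDUW (zext.w) materializes it in two
    // instructions rather than three.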
    if (LeadingZeros == 32 && ActiveFeatures[RISCV::FeatureStdExtZba]) {
      // Try replacing upper bits with 1.
      uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
      TmpSeq.clear();
      generateInstSeqImpl(LeadingOnesVal, ActiveFeatures, TmpSeq);
      TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADDUW, 0));

      // Keep the new sequence if it is an improvement.
      if (TmpSeq.size() < Res.size()) {
        Res = TmpSeq;
        // A 2 instruction sequence is the best we can do.
        if (Res.size() <= 2)
          return Res;
      }
    }
  }

  return Res;
}

int getIntMatCost(const APInt &Val, unsigned Size,
                  const FeatureBitset &ActiveFeatures,
                  bool CompressionCost) {
  bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
  bool HasRVC = CompressionCost && ActiveFeatures[RISCV::FeatureStdExtC];
  int PlatRegSize = IsRV64 ? 64 : 32;

  // Split the constant into platform register sized chunks, and calculate cost
  // of each chunk.
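  // For example, a 128-bit constant on RV64 is costed as two sign-extended
  // 64-bit chunks, each materialized independently.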
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), ActiveFeatures);
    Cost += getInstSeqCost(MatSeq, HasRVC);
  }
  return std::max(1, Cost);
}
} // namespace RISCVMatInt
} // namespace llvm