1 //===- HexagonConstExtenders.cpp ------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "HexagonInstrInfo.h"
10 #include "HexagonRegisterInfo.h"
11 #include "HexagonSubtarget.h"
12 #include "llvm/ADT/SmallVector.h"
13 #include "llvm/CodeGen/MachineDominators.h"
14 #include "llvm/CodeGen/MachineFunctionPass.h"
15 #include "llvm/CodeGen/MachineInstrBuilder.h"
16 #include "llvm/CodeGen/MachineRegisterInfo.h"
17 #include "llvm/CodeGen/Register.h"
18 #include "llvm/InitializePasses.h"
19 #include "llvm/Pass.h"
20 #include "llvm/Support/CommandLine.h"
21 #include "llvm/Support/raw_ostream.h"
22 #include <map>
23 #include <set>
24 #include <utility>
25 #include <vector>
26
27 #define DEBUG_TYPE "hexagon-cext-opt"
28
29 using namespace llvm;
30
// Minimum number of uses of the same extended value required before the
// pass considers replacing the extenders with a common register.
static cl::opt<unsigned> CountThreshold("hexagon-cext-threshold",
  cl::init(3), cl::Hidden, cl::ZeroOrMore,
  cl::desc("Minimum number of extenders to trigger replacement"));

// Cap on the total number of replacements performed (tracked in the
// file-level ReplaceCounter). NOTE(review): the handling of the default
// value 0 is outside this chunk -- presumably it means "no limit"; confirm.
static cl::opt<unsigned> ReplaceLimit("hexagon-cext-limit", cl::init(0),
  cl::Hidden, cl::ZeroOrMore, cl::desc("Maximum number of replacements"));

namespace llvm {
  // Declared here (rather than in a header) for the pass registration below.
  void initializeHexagonConstExtendersPass(PassRegistry&);
  FunctionPass *createHexagonConstExtenders();
}
42
// Round V up to the smallest value of the form A*N + O that is >= V.
// A must be a power of 2.
static int32_t adjustUp(int32_t V, uint8_t A, uint8_t O) {
  assert(A != 0 && (A & (A - 1)) == 0 && "Alignment must be a power of 2");
  int32_t Aligned = (V & -A) + O;
  return Aligned >= V ? Aligned : Aligned + A;
}
48
// Round V down to the largest value of the form A*N + O that is <= V.
// A must be a power of 2.
static int32_t adjustDown(int32_t V, uint8_t A, uint8_t O) {
  assert(A != 0 && (A & (A - 1)) == 0 && "Alignment must be a power of 2");
  int32_t Aligned = (V & -A) + O;
  return Aligned <= V ? Aligned : Aligned - A;
}
54
55 namespace {
// A clipped arithmetic progression: the integers of the form Align*N+Offset
// that fall within [Min, Max].
struct OffsetRange {
  // The range of values between Min and Max that are of form Align*N+Offset,
  // for some integer N. Min and Max are required to be of that form as well,
  // except in the case of an empty range.
  int32_t Min = INT_MIN, Max = INT_MAX;
  uint8_t Align = 1;
  uint8_t Offset = 0;

  OffsetRange() = default;
  OffsetRange(int32_t L, int32_t H, uint8_t A, uint8_t O = 0)
    : Min(L), Max(H), Align(A), Offset(O) {}
  // Intersect *this with A in place. The swap normalizes the pair so that
  // this range has the larger (or equal) alignment of the two.
  OffsetRange &intersect(OffsetRange A) {
    if (Align < A.Align)
      std::swap(*this, A);

    // Align >= A.Align.
    // Non-empty only if this residue class is contained in A's, i.e.
    // Offset is congruent to A.Offset modulo A.Align.
    // NOTE(review): the test also requires Offset >= A.Offset -- relies on
    // canonical offsets (Offset < Align); confirm against callers.
    if (Offset >= A.Offset && (Offset - A.Offset) % A.Align == 0) {
      Min = adjustUp(std::max(Min, A.Min), Align, Offset);
      Max = adjustDown(std::min(Max, A.Max), Align, Offset);
    } else {
      // Make an empty range.
      Min = 0;
      Max = -1;
    }
    // Canonicalize empty ranges.
    if (Min > Max)
      std::tie(Min, Max, Align) = std::make_tuple(0, -1, 1);
    return *this;
  }
  // Translate the whole range by S; the residue Offset moves along with it.
  OffsetRange &shift(int32_t S) {
    Min += S;
    Max += S;
    Offset = (Offset+S) % Align;
    return *this;
  }
  // Widen the range by D on one side (Min if D < 0, Max otherwise),
  // saturating at INT_MIN/INT_MAX instead of overflowing.
  OffsetRange &extendBy(int32_t D) {
    // If D < 0, extend Min, otherwise extend Max.
    assert(D % Align == 0);
    if (D < 0)
      Min = (INT_MIN-D < Min) ? Min+D : INT_MIN;
    else
      Max = (INT_MAX-D > Max) ? Max+D : INT_MAX;
    return *this;
  }
  bool empty() const {
    return Min > Max;
  }
  // True if V lies in [Min, Max] and has the required residue modulo Align.
  bool contains(int32_t V) const {
    return Min <= V && V <= Max && (V-Offset) % Align == 0;
  }
  // NOTE(review): equality and ordering compare only Min/Max/Align --
  // ranges differing solely in Offset compare equal here.
  bool operator==(const OffsetRange &R) const {
    return Min == R.Min && Max == R.Max && Align == R.Align;
  }
  bool operator!=(const OffsetRange &R) const {
    return !operator==(R);
  }
  // Lexicographic order on (Min, Max, Align); used as the RangeTree key.
  bool operator<(const OffsetRange &R) const {
    if (Min != R.Min)
      return Min < R.Min;
    if (Max != R.Max)
      return Max < R.Max;
    return Align < R.Align;
  }
  static OffsetRange zero() { return {0, 0, 1}; }
};
121
// An AVL tree of OffsetRanges keyed by OffsetRange::operator<, augmented
// with the maximum range end (MaxEnd) per subtree so that all ranges
// containing a given point can be collected without a full traversal.
struct RangeTree {
  struct Node {
    Node(const OffsetRange &R) : MaxEnd(R.Max), Range(R) {}
    unsigned Height = 1;   // AVL height of the subtree rooted here.
    unsigned Count = 1;    // How many identical ranges were folded in.
    int32_t MaxEnd;        // Max Range.Max over the whole subtree.
    // NOTE(review): a reference, not a copy -- the OffsetRange passed to
    // add() must outlive this node.
    const OffsetRange &Range;
    Node *Left = nullptr, *Right = nullptr;
  };

  Node *Root = nullptr;

  // Insert R; an identical range bumps Count on the existing node.
  void add(const OffsetRange &R) {
    Root = add(Root, R);
  }
  // Unlink N from the tree and free it.
  void erase(const Node *N) {
    Root = remove(Root, N);
    delete N;
  }
  // Append all nodes to Seq in key order.
  void order(SmallVectorImpl<Node*> &Seq) const {
    order(Root, Seq);
  }
  // Collect nodes whose range contains P. With CheckAlign the range's
  // alignment/offset is honored, otherwise only the [Min,Max] interval.
  SmallVector<Node*,8> nodesWith(int32_t P, bool CheckAlign = true) {
    SmallVector<Node*,8> Nodes;
    nodesWith(Root, P, CheckAlign, Nodes);
    return Nodes;
  }
  void dump() const;
  ~RangeTree() {
    SmallVector<Node*,8> Nodes;
    order(Nodes);
    for (Node *N : Nodes)
      delete N;
  }

private:
  void dump(const Node *N) const;
  void order(Node *N, SmallVectorImpl<Node*> &Seq) const;
  void nodesWith(Node *N, int32_t P, bool CheckA,
        SmallVectorImpl<Node*> &Seq) const;

  Node *add(Node *N, const OffsetRange &R);
  Node *remove(Node *N, const Node *D);
  Node *rotateLeft(Node *Lower, Node *Higher);
  Node *rotateRight(Node *Lower, Node *Higher);
  unsigned height(Node *N) {
    return N != nullptr ? N->Height : 0;
  }
  // Recompute N's Height and MaxEnd from its children. MaxEnd is only ever
  // raised here, never lowered, so after removals it remains a (possibly
  // loose) upper bound -- sufficient for the pruning in nodesWith.
  Node *update(Node *N) {
    assert(N != nullptr);
    N->Height = 1 + std::max(height(N->Left), height(N->Right));
    if (N->Left)
      N->MaxEnd = std::max(N->MaxEnd, N->Left->MaxEnd);
    if (N->Right)
      N->MaxEnd = std::max(N->MaxEnd, N->Right->MaxEnd);
    return N;
  }
  // Restore the AVL balance invariant at N via a single/double rotation.
  Node *rebalance(Node *N) {
    assert(N != nullptr);
    int32_t Balance = height(N->Right) - height(N->Left);
    if (Balance < -1)
      return rotateRight(N->Left, N);
    if (Balance > 1)
      return rotateLeft(N->Right, N);
    return N;
  }
};
189
190 struct Loc {
191 MachineBasicBlock *Block = nullptr;
192 MachineBasicBlock::iterator At;
193
Loc__anonb58a747e0111::Loc194 Loc(MachineBasicBlock *B, MachineBasicBlock::iterator It)
195 : Block(B), At(It) {
196 if (B->end() == It) {
197 Pos = -1;
198 } else {
199 assert(It->getParent() == B);
200 Pos = std::distance(B->begin(), It);
201 }
202 }
operator <__anonb58a747e0111::Loc203 bool operator<(Loc A) const {
204 if (Block != A.Block)
205 return Block->getNumber() < A.Block->getNumber();
206 if (A.Pos == -1)
207 return Pos != A.Pos;
208 return Pos != -1 && Pos < A.Pos;
209 }
210 private:
211 int Pos = 0;
212 };
213
// The pass itself: finds constant-extended instructions that share the same
// extended value, materializes that value (or a simple expression over it)
// into a register once, and rewrites the users to reference the register.
struct HexagonConstExtenders : public MachineFunctionPass {
  static char ID;
  HexagonConstExtenders() : MachineFunctionPass(ID) {}

  // Dominator information is used to place the shared initializers and is
  // kept valid, so it can be both required and preserved.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return "Hexagon constant-extender optimization";
  }
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  // A virtual register or a stack slot, with an optional subregister.
  // Stack slots are encoded into Reg via llvm::Register::index2StackSlot.
  struct Register {
    Register() = default;
    Register(unsigned R, unsigned S) : Reg(R), Sub(S) {}
    Register(const MachineOperand &Op)
      : Reg(Op.getReg()), Sub(Op.getSubReg()) {}
    Register &operator=(const MachineOperand &Op) {
      if (Op.isReg()) {
        Reg = Op.getReg();
        Sub = Op.getSubReg();
      } else if (Op.isFI()) {
        // Frame indexes are stored as stack-slot registers.
        Reg = llvm::Register::index2StackSlot(Op.getIndex());
      }
      // Any other operand kind leaves *this unchanged.
      return *this;
    }
    bool isVReg() const {
      return Reg != 0 && !Reg.isStack() && Reg.isVirtual();
    }
    bool isSlot() const { return Reg != 0 && Reg.isStack(); }
    // Convert back into an operand: a use of a virtual register, or a
    // frame index for stack slots.
    operator MachineOperand() const {
      if (isVReg())
        return MachineOperand::CreateReg(Reg, /*Def*/false, /*Imp*/false,
            /*Kill*/false, /*Dead*/false, /*Undef*/false,
            /*EarlyClobber*/false, Sub);
      if (Reg.isStack()) {
        int FI = llvm::Register::stackSlot2Index(Reg);
        return MachineOperand::CreateFI(FI);
      }
      llvm_unreachable("Cannot create MachineOperand");
    }
    bool operator==(Register R) const { return Reg == R.Reg && Sub == R.Sub; }
    bool operator!=(Register R) const { return !operator==(R); }
    bool operator<(Register R) const {
      // For std::map.
      return Reg < R.Reg || (Reg == R.Reg && Sub < R.Sub);
    }
    llvm::Register Reg;
    unsigned Sub = 0;
  };

  struct ExtExpr {
    // A subexpression in which the extender is used. In general, this
    // represents an expression where adding D to the extender will be
    // equivalent to adding D to the expression as a whole. In other
    // words, expr(add(##V,D) = add(expr(##V),D).

    // The original motivation for this are the io/ur addressing modes,
    // where the offset is extended. Consider the io example:
    // In memw(Rs+##V), the ##V could be replaced by a register Rt to
    // form the rr mode: memw(Rt+Rs<<0). In such case, however, the
    // register Rt must have exactly the value of ##V. If there was
    // another instruction memw(Rs+##V+4), it would need a different Rt.
    // Now, if Rt was initialized as "##V+Rs<<0", both of these
    // instructions could use the same Rt, just with different offsets.
    // Here it's clear that "initializer+4" should be the same as if
    // the offset 4 was added to the ##V in the initializer.

    // The only kinds of expressions that support the requirement of
    // commuting with addition are addition and subtraction from ##V.
    // Include shifting the Rs to account for the ur addressing mode:
    //   ##Val + Rs << S
    //   ##Val - Rs
    Register Rs;
    unsigned S = 0;
    bool Neg = false;

    ExtExpr() = default;
    ExtExpr(Register RS, bool NG, unsigned SH) : Rs(RS), S(SH), Neg(NG) {}
    // Expression is trivial if it does not modify the extender.
    bool trivial() const {
      return Rs.Reg == 0;
    }
    bool operator==(const ExtExpr &Ex) const {
      return Rs == Ex.Rs && S == Ex.S && Neg == Ex.Neg;
    }
    bool operator!=(const ExtExpr &Ex) const {
      return !operator==(Ex);
    }
    // Lexicographic order on (Rs, S, Neg); used in map keys.
    bool operator<(const ExtExpr &Ex) const {
      if (Rs != Ex.Rs)
        return Rs < Ex.Rs;
      if (S != Ex.S)
        return S < Ex.S;
      return !Neg && Ex.Neg;
    }
  };

  // One occurrence of a constant extender: the instruction and operand
  // that use it, plus the expression context it appears in.
  struct ExtDesc {
    MachineInstr *UseMI = nullptr;
    unsigned OpNum = -1u;
    // The subexpression in which the extender is used (e.g. address
    // computation).
    ExtExpr Expr;
    // Optional register that is assigned the value of Expr.
    Register Rd;
    // Def means that the output of the instruction may differ from the
    // original by a constant c, and that the difference can be corrected
    // by adding/subtracting c in all users of the defined register.
    bool IsDef = false;

    MachineOperand &getOp() {
      return UseMI->getOperand(OpNum);
    }
    const MachineOperand &getOp() const {
      return UseMI->getOperand(OpNum);
    }
  };

  // The "root" of an extended value: the symbolic part with any constant
  // offset stripped (the offset lives in ExtValue below).
  struct ExtRoot {
    union {
      const ConstantFP *CFP;  // MO_FPImmediate
      const char *SymbolName; // MO_ExternalSymbol
      const GlobalValue *GV;  // MO_GlobalAddress
      const BlockAddress *BA; // MO_BlockAddress
      int64_t ImmVal;         // MO_Immediate, MO_TargetIndex,
                              // and MO_ConstantPoolIndex
    } V;
    unsigned Kind;            // Same as in MachineOperand.
    unsigned char TF;         // TargetFlags.

    ExtRoot(const MachineOperand &Op);
    // Comparing V.ImmVal covers every union member bitwise.
    // NOTE(review): TF is not part of equality -- roots differing only in
    // target flags compare equal; confirm this is intended.
    bool operator==(const ExtRoot &ER) const {
      return Kind == ER.Kind && V.ImmVal == ER.V.ImmVal;
    }
    bool operator!=(const ExtRoot &ER) const {
      return !operator==(ER);
    }
    bool operator<(const ExtRoot &ER) const;
  };

  // A concrete extended value: a root plus a constant offset.
  struct ExtValue : public ExtRoot {
    int32_t Offset;

    ExtValue(const MachineOperand &Op);
    ExtValue(const ExtDesc &ED) : ExtValue(ED.getOp()) {}
    ExtValue(const ExtRoot &ER, int32_t Off) : ExtRoot(ER), Offset(Off) {}
    bool operator<(const ExtValue &EV) const;
    bool operator==(const ExtValue &EV) const {
      return ExtRoot(*this) == ExtRoot(EV) && Offset == EV.Offset;
    }
    bool operator!=(const ExtValue &EV) const {
      return !operator==(EV);
    }
    explicit operator MachineOperand() const;
  };

  // IndexList holds indexes into the Extenders vector below.
  using IndexList = SetVector<unsigned>;
  using ExtenderInit = std::pair<ExtValue, ExtExpr>;
  using AssignmentMap = std::map<ExtenderInit, IndexList>;
  using LocDefList = std::vector<std::pair<Loc, IndexList>>;

  const HexagonSubtarget *HST = nullptr;
  const HexagonInstrInfo *HII = nullptr;
  const HexagonRegisterInfo *HRI = nullptr;
  MachineDominatorTree *MDT = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  std::vector<ExtDesc> Extenders;   // All extender occurrences collected.
  std::vector<unsigned> NewRegs;    // Registers created by this pass.

  bool isStoreImmediate(unsigned Opc) const;
  bool isRegOffOpcode(unsigned ExtOpc) const ;
  unsigned getRegOffOpcode(unsigned ExtOpc) const;
  unsigned getDirectRegReplacement(unsigned ExtOpc) const;
  OffsetRange getOffsetRange(Register R, const MachineInstr &MI) const;
  OffsetRange getOffsetRange(const ExtDesc &ED) const;
  OffsetRange getOffsetRange(Register Rd) const;

  // Collection phase: gather all extenders in the function.
  void recordExtender(MachineInstr &MI, unsigned OpNum);
  void collectInstr(MachineInstr &MI);
  void collect(MachineFunction &MF);
  // Assignment/placement phase: group extenders by initializer and pick
  // dominating locations for the shared definitions.
  void assignInits(const ExtRoot &ER, unsigned Begin, unsigned End,
        AssignmentMap &IMap);
  void calculatePlacement(const ExtenderInit &ExtI, const IndexList &Refs,
        LocDefList &Defs);
  // Rewrite phase: emit initializers and replace the users.
  Register insertInitializer(Loc DefL, const ExtenderInit &ExtI);
  bool replaceInstrExact(const ExtDesc &ED, Register ExtR);
  bool replaceInstrExpr(const ExtDesc &ED, const ExtenderInit &ExtI,
        Register ExtR, int32_t &Diff);
  bool replaceInstr(unsigned Idx, Register ExtR, const ExtenderInit &ExtI);
  bool replaceExtenders(const AssignmentMap &IMap);

  // Operand-inspection helpers.
  unsigned getOperandIndex(const MachineInstr &MI,
        const MachineOperand &Op) const;
  const MachineOperand &getPredicateOp(const MachineInstr &MI) const;
  const MachineOperand &getLoadResultOp(const MachineInstr &MI) const;
  const MachineOperand &getStoredValueOp(const MachineInstr &MI) const;

  friend struct PrintRegister;
  friend struct PrintExpr;
  friend struct PrintInit;
  friend struct PrintIMap;
  friend raw_ostream &operator<< (raw_ostream &OS,
        const struct PrintRegister &P);
  friend raw_ostream &operator<< (raw_ostream &OS, const struct PrintExpr &P);
  friend raw_ostream &operator<< (raw_ostream &OS, const struct PrintInit &P);
  friend raw_ostream &operator<< (raw_ostream &OS, const ExtDesc &ED);
  friend raw_ostream &operator<< (raw_ostream &OS, const ExtRoot &ER);
  friend raw_ostream &operator<< (raw_ostream &OS, const ExtValue &EV);
  friend raw_ostream &operator<< (raw_ostream &OS, const OffsetRange &OR);
  friend raw_ostream &operator<< (raw_ostream &OS, const struct PrintIMap &P);
};
430
431 using HCE = HexagonConstExtenders;
432
433 LLVM_ATTRIBUTE_UNUSED
operator <<(raw_ostream & OS,const OffsetRange & OR)434 raw_ostream &operator<< (raw_ostream &OS, const OffsetRange &OR) {
435 if (OR.Min > OR.Max)
436 OS << '!';
437 OS << '[' << OR.Min << ',' << OR.Max << "]a" << unsigned(OR.Align)
438 << '+' << unsigned(OR.Offset);
439 return OS;
440 }
441
442 struct PrintRegister {
PrintRegister__anonb58a747e0111::PrintRegister443 PrintRegister(HCE::Register R, const HexagonRegisterInfo &I)
444 : Rs(R), HRI(I) {}
445 HCE::Register Rs;
446 const HexagonRegisterInfo &HRI;
447 };
448
449 LLVM_ATTRIBUTE_UNUSED
operator <<(raw_ostream & OS,const PrintRegister & P)450 raw_ostream &operator<< (raw_ostream &OS, const PrintRegister &P) {
451 if (P.Rs.Reg != 0)
452 OS << printReg(P.Rs.Reg, &P.HRI, P.Rs.Sub);
453 else
454 OS << "noreg";
455 return OS;
456 }
457
458 struct PrintExpr {
PrintExpr__anonb58a747e0111::PrintExpr459 PrintExpr(const HCE::ExtExpr &E, const HexagonRegisterInfo &I)
460 : Ex(E), HRI(I) {}
461 const HCE::ExtExpr &Ex;
462 const HexagonRegisterInfo &HRI;
463 };
464
465 LLVM_ATTRIBUTE_UNUSED
operator <<(raw_ostream & OS,const PrintExpr & P)466 raw_ostream &operator<< (raw_ostream &OS, const PrintExpr &P) {
467 OS << "## " << (P.Ex.Neg ? "- " : "+ ");
468 if (P.Ex.Rs.Reg != 0)
469 OS << printReg(P.Ex.Rs.Reg, &P.HRI, P.Ex.Rs.Sub);
470 else
471 OS << "__";
472 OS << " << " << P.Ex.S;
473 return OS;
474 }
475
476 struct PrintInit {
PrintInit__anonb58a747e0111::PrintInit477 PrintInit(const HCE::ExtenderInit &EI, const HexagonRegisterInfo &I)
478 : ExtI(EI), HRI(I) {}
479 const HCE::ExtenderInit &ExtI;
480 const HexagonRegisterInfo &HRI;
481 };
482
483 LLVM_ATTRIBUTE_UNUSED
operator <<(raw_ostream & OS,const PrintInit & P)484 raw_ostream &operator<< (raw_ostream &OS, const PrintInit &P) {
485 OS << '[' << P.ExtI.first << ", "
486 << PrintExpr(P.ExtI.second, P.HRI) << ']';
487 return OS;
488 }
489
490 LLVM_ATTRIBUTE_UNUSED
operator <<(raw_ostream & OS,const HCE::ExtDesc & ED)491 raw_ostream &operator<< (raw_ostream &OS, const HCE::ExtDesc &ED) {
492 assert(ED.OpNum != -1u);
493 const MachineBasicBlock &MBB = *ED.getOp().getParent()->getParent();
494 const MachineFunction &MF = *MBB.getParent();
495 const auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
496 OS << "bb#" << MBB.getNumber() << ": ";
497 if (ED.Rd.Reg != 0)
498 OS << printReg(ED.Rd.Reg, &HRI, ED.Rd.Sub);
499 else
500 OS << "__";
501 OS << " = " << PrintExpr(ED.Expr, HRI);
502 if (ED.IsDef)
503 OS << ", def";
504 return OS;
505 }
506
507 LLVM_ATTRIBUTE_UNUSED
operator <<(raw_ostream & OS,const HCE::ExtRoot & ER)508 raw_ostream &operator<< (raw_ostream &OS, const HCE::ExtRoot &ER) {
509 switch (ER.Kind) {
510 case MachineOperand::MO_Immediate:
511 OS << "imm:" << ER.V.ImmVal;
512 break;
513 case MachineOperand::MO_FPImmediate:
514 OS << "fpi:" << *ER.V.CFP;
515 break;
516 case MachineOperand::MO_ExternalSymbol:
517 OS << "sym:" << *ER.V.SymbolName;
518 break;
519 case MachineOperand::MO_GlobalAddress:
520 OS << "gad:" << ER.V.GV->getName();
521 break;
522 case MachineOperand::MO_BlockAddress:
523 OS << "blk:" << *ER.V.BA;
524 break;
525 case MachineOperand::MO_TargetIndex:
526 OS << "tgi:" << ER.V.ImmVal;
527 break;
528 case MachineOperand::MO_ConstantPoolIndex:
529 OS << "cpi:" << ER.V.ImmVal;
530 break;
531 case MachineOperand::MO_JumpTableIndex:
532 OS << "jti:" << ER.V.ImmVal;
533 break;
534 default:
535 OS << "???:" << ER.V.ImmVal;
536 break;
537 }
538 return OS;
539 }
540
541 LLVM_ATTRIBUTE_UNUSED
operator <<(raw_ostream & OS,const HCE::ExtValue & EV)542 raw_ostream &operator<< (raw_ostream &OS, const HCE::ExtValue &EV) {
543 OS << HCE::ExtRoot(EV) << " off:" << EV.Offset;
544 return OS;
545 }
546
547 struct PrintIMap {
PrintIMap__anonb58a747e0111::PrintIMap548 PrintIMap(const HCE::AssignmentMap &M, const HexagonRegisterInfo &I)
549 : IMap(M), HRI(I) {}
550 const HCE::AssignmentMap &IMap;
551 const HexagonRegisterInfo &HRI;
552 };
553
554 LLVM_ATTRIBUTE_UNUSED
operator <<(raw_ostream & OS,const PrintIMap & P)555 raw_ostream &operator<< (raw_ostream &OS, const PrintIMap &P) {
556 OS << "{\n";
557 for (const std::pair<const HCE::ExtenderInit, HCE::IndexList> &Q : P.IMap) {
558 OS << " " << PrintInit(Q.first, P.HRI) << " -> {";
559 for (unsigned I : Q.second)
560 OS << ' ' << I;
561 OS << " }\n";
562 }
563 OS << "}\n";
564 return OS;
565 }
566 }
567
// Pass registration; depends on the dominator tree required above.
INITIALIZE_PASS_BEGIN(HexagonConstExtenders, "hexagon-cext-opt",
      "Hexagon constant-extender optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(HexagonConstExtenders, "hexagon-cext-opt",
      "Hexagon constant-extender optimization", false, false)

// Running count of replacements across the whole compilation.
// NOTE(review): presumably checked against -hexagon-cext-limit; the uses
// are outside this chunk -- confirm.
static unsigned ReplaceCounter = 0;

char HCE::ID = 0;
577
#ifndef NDEBUG
// Dump the whole tree starting at the root (debug builds only).
LLVM_DUMP_METHOD void RangeTree::dump() const {
  dbgs() << "Root: " << Root << '\n';
  if (Root)
    dump(Root);
}

// Print N's bookkeeping (pointers, AVL data, range), then recurse into
// both children.
LLVM_DUMP_METHOD void RangeTree::dump(const Node *N) const {
  dbgs() << "Node: " << N << '\n';
  dbgs() << "  Height: " << N->Height << '\n';
  dbgs() << "  Count: " << N->Count << '\n';
  dbgs() << "  MaxEnd: " << N->MaxEnd << '\n';
  dbgs() << "  Range: " << N->Range << '\n';
  dbgs() << "  Left: " << N->Left << '\n';
  dbgs() << "  Right: " << N->Right << "\n\n";

  if (N->Left)
    dump(N->Left);
  if (N->Right)
    dump(N->Right);
}
#endif
600
order(Node * N,SmallVectorImpl<Node * > & Seq) const601 void RangeTree::order(Node *N, SmallVectorImpl<Node*> &Seq) const {
602 if (N == nullptr)
603 return;
604 order(N->Left, Seq);
605 Seq.push_back(N);
606 order(N->Right, Seq);
607 }
608
// Append to Seq every node in N's subtree whose range contains the point P.
// MaxEnd (the max Range.Max in a subtree) and the BST ordering (by Min
// first) allow whole subtrees to be pruned.
void RangeTree::nodesWith(Node *N, int32_t P, bool CheckA,
      SmallVectorImpl<Node*> &Seq) const {
  // Every range in this subtree ends before P: nothing here can contain P.
  if (N == nullptr || N->MaxEnd < P)
    return;
  nodesWith(N->Left, P, CheckA, Seq);
  // If N->Range.Min > P, all ranges in the right subtree start at or after
  // N's (they compare greater), so none of them can contain P either.
  if (N->Range.Min <= P) {
    // With CheckA, membership includes the alignment/offset test;
    // otherwise only the [Min,Max] interval is checked.
    if ((CheckA && N->Range.contains(P)) || (!CheckA && P <= N->Range.Max))
      Seq.push_back(N);
    nodesWith(N->Right, P, CheckA, Seq);
  }
}
620
add(Node * N,const OffsetRange & R)621 RangeTree::Node *RangeTree::add(Node *N, const OffsetRange &R) {
622 if (N == nullptr)
623 return new Node(R);
624
625 if (N->Range == R) {
626 N->Count++;
627 return N;
628 }
629
630 if (R < N->Range)
631 N->Left = add(N->Left, R);
632 else
633 N->Right = add(N->Right, R);
634 return rebalance(update(N));
635 }
636
// AVL removal of the specific node D from the subtree rooted at N; returns
// the new root of that subtree. The caller (erase) frees D afterwards.
RangeTree::Node *RangeTree::remove(Node *N, const Node *D) {
  assert(N != nullptr);

  // Descend towards D. Equal ranges are folded into one node in add(),
  // so distinct nodes are guaranteed to have distinct ranges.
  if (N != D) {
    assert(N->Range != D->Range && "N and D should not be equal");
    if (D->Range < N->Range)
      N->Left = remove(N->Left, D);
    else
      N->Right = remove(N->Right, D);
    return rebalance(update(N));
  }

  // We got to the node we need to remove. If any of its children are
  // missing, simply replace it with the other child.
  if (N->Left == nullptr || N->Right == nullptr)
    return (N->Left == nullptr) ? N->Right : N->Left;

  // Find the rightmost child of N->Left, remove it and plug it in place
  // of N.
  Node *M = N->Left;
  while (M->Right)
    M = M->Right;
  M->Left = remove(N->Left, M);
  M->Right = N->Right;
  return rebalance(update(M));
}
663
// Left rotation: Lower (Higher's right child) becomes the subtree root and
// Higher becomes its left child. A right-left double rotation is performed
// first when Lower leans left. Returns the new subtree root.
RangeTree::Node *RangeTree::rotateLeft(Node *Lower, Node *Higher) {
  assert(Higher->Right == Lower);
  // The Lower node is on the right from Higher. Make sure that Lower's
  // balance is greater to the right. Otherwise the rotation will create
  // an unbalanced tree again.
  if (height(Lower->Left) > height(Lower->Right))
    Lower = rotateRight(Lower->Left, Lower);
  assert(height(Lower->Left) <= height(Lower->Right));
  // Re-link, updating Higher before it becomes Lower's child -- update()
  // derives a node's data from its (already updated) children.
  Higher->Right = Lower->Left;
  update(Higher);
  Lower->Left = Higher;
  update(Lower);
  return Lower;
}
678
// Right rotation: mirror image of rotateLeft. Lower (Higher's left child)
// becomes the subtree root with Higher as its right child; a left-right
// double rotation is performed first when Lower leans right.
RangeTree::Node *RangeTree::rotateRight(Node *Lower, Node *Higher) {
  assert(Higher->Left == Lower);
  // The Lower node is on the left from Higher. Make sure that Lower's
  // balance is greater to the left. Otherwise the rotation will create
  // an unbalanced tree again.
  if (height(Lower->Left) < height(Lower->Right))
    Lower = rotateLeft(Lower->Right, Lower);
  assert(height(Lower->Left) >= height(Lower->Right));
  // Re-link, updating Higher before it becomes Lower's child.
  Higher->Left = Lower->Right;
  update(Higher);
  Lower->Right = Higher;
  update(Lower);
  return Lower;
}
693
694
ExtRoot(const MachineOperand & Op)695 HCE::ExtRoot::ExtRoot(const MachineOperand &Op) {
696 // Always store ImmVal, since it's the field used for comparisons.
697 V.ImmVal = 0;
698 if (Op.isImm())
699 ; // Keep 0. Do not use Op.getImm() for value here (treat 0 as the root).
700 else if (Op.isFPImm())
701 V.CFP = Op.getFPImm();
702 else if (Op.isSymbol())
703 V.SymbolName = Op.getSymbolName();
704 else if (Op.isGlobal())
705 V.GV = Op.getGlobal();
706 else if (Op.isBlockAddress())
707 V.BA = Op.getBlockAddress();
708 else if (Op.isCPI() || Op.isTargetIndex() || Op.isJTI())
709 V.ImmVal = Op.getIndex();
710 else
711 llvm_unreachable("Unexpected operand type");
712
713 Kind = Op.getType();
714 TF = Op.getTargetFlags();
715 }
716
// Strict weak ordering of roots: first by operand kind, then by a
// kind-specific, build-deterministic comparison of the payload.
bool HCE::ExtRoot::operator< (const HCE::ExtRoot &ER) const {
  if (Kind != ER.Kind)
    return Kind < ER.Kind;
  switch (Kind) {
    case MachineOperand::MO_Immediate:
    case MachineOperand::MO_TargetIndex:
    case MachineOperand::MO_ConstantPoolIndex:
    case MachineOperand::MO_JumpTableIndex:
      return V.ImmVal < ER.V.ImmVal;
    case MachineOperand::MO_FPImmediate: {
      // Compare bit patterns, which also gives a total order over NaNs.
      const APFloat &ThisF = V.CFP->getValueAPF();
      const APFloat &OtherF = ER.V.CFP->getValueAPF();
      return ThisF.bitcastToAPInt().ult(OtherF.bitcastToAPInt());
    }
    case MachineOperand::MO_ExternalSymbol:
      return StringRef(V.SymbolName) < StringRef(ER.V.SymbolName);
    case MachineOperand::MO_GlobalAddress:
      // Do not use GUIDs, since they depend on the source path. Moving the
      // source file to a different directory could cause different GUID
      // values for a pair of given symbols. These symbols could then compare
      // "less" in one directory, but "greater" in another.
      assert(!V.GV->getName().empty() && !ER.V.GV->getName().empty());
      return V.GV->getName() < ER.V.GV->getName();
    case MachineOperand::MO_BlockAddress: {
      // Order block addresses by the position of their block in the
      // (common) parent function -- again deterministic across builds.
      const BasicBlock *ThisB = V.BA->getBasicBlock();
      const BasicBlock *OtherB = ER.V.BA->getBasicBlock();
      assert(ThisB->getParent() == OtherB->getParent());
      const Function &F = *ThisB->getParent();
      return std::distance(F.begin(), ThisB->getIterator()) <
             std::distance(F.begin(), OtherB->getIterator());
    }
  }
  // Fallback for kinds not handled above.
  return V.ImmVal < ER.V.ImmVal;
}
751
ExtValue(const MachineOperand & Op)752 HCE::ExtValue::ExtValue(const MachineOperand &Op) : ExtRoot(Op) {
753 if (Op.isImm())
754 Offset = Op.getImm();
755 else if (Op.isFPImm() || Op.isJTI())
756 Offset = 0;
757 else if (Op.isSymbol() || Op.isGlobal() || Op.isBlockAddress() ||
758 Op.isCPI() || Op.isTargetIndex())
759 Offset = Op.getOffset();
760 else
761 llvm_unreachable("Unexpected operand type");
762 }
763
operator <(const HCE::ExtValue & EV) const764 bool HCE::ExtValue::operator< (const HCE::ExtValue &EV) const {
765 const ExtRoot &ER = *this;
766 if (!(ER == ExtRoot(EV)))
767 return ER < EV;
768 return Offset < EV.Offset;
769 }
770
// Materialize this value back into a MachineOperand. Kinds that cannot
// carry an offset in a MachineOperand assert that the offset is zero.
HCE::ExtValue::operator MachineOperand() const {
  switch (Kind) {
    case MachineOperand::MO_Immediate:
      // The root ImmVal is always 0 for immediates (see ExtRoot's ctor),
      // so this reconstitutes the original immediate.
      return MachineOperand::CreateImm(V.ImmVal + Offset);
    case MachineOperand::MO_FPImmediate:
      assert(Offset == 0);
      return MachineOperand::CreateFPImm(V.CFP);
    case MachineOperand::MO_ExternalSymbol:
      assert(Offset == 0);
      return MachineOperand::CreateES(V.SymbolName, TF);
    case MachineOperand::MO_GlobalAddress:
      return MachineOperand::CreateGA(V.GV, Offset, TF);
    case MachineOperand::MO_BlockAddress:
      return MachineOperand::CreateBA(V.BA, Offset, TF);
    case MachineOperand::MO_TargetIndex:
      return MachineOperand::CreateTargetIndex(V.ImmVal, Offset, TF);
    case MachineOperand::MO_ConstantPoolIndex:
      return MachineOperand::CreateCPI(V.ImmVal, Offset, TF);
    case MachineOperand::MO_JumpTableIndex:
      assert(Offset == 0);
      return MachineOperand::CreateJTI(V.ImmVal, TF);
    default:
      llvm_unreachable("Unhandled kind");
  }
}
796
// Return true if Opc is one of the S4 store-immediate opcodes (store of an
// immediate value to memory), predicated or unpredicated.
bool HCE::isStoreImmediate(unsigned Opc) const {
  switch (Opc) {
    // Predicated (true/false) byte, half, word stores.
    case Hexagon::S4_storeirbt_io:
    case Hexagon::S4_storeirbf_io:
    case Hexagon::S4_storeirht_io:
    case Hexagon::S4_storeirhf_io:
    case Hexagon::S4_storeirit_io:
    case Hexagon::S4_storeirif_io:
    // Unpredicated byte, half, word stores.
    case Hexagon::S4_storeirb_io:
    case Hexagon::S4_storeirh_io:
    case Hexagon::S4_storeiri_io:
      return true;
    default:
      break;
  }
  return false;
}
814
// Return true if Opc takes a base register plus an immediate offset
// (the "io" addressing mode), or is A2_addi (reg + imm).
bool HCE::isRegOffOpcode(unsigned Opc) const {
  switch (Opc) {
    // Unpredicated loads.
    case Hexagon::L2_loadrub_io:
    case Hexagon::L2_loadrb_io:
    case Hexagon::L2_loadruh_io:
    case Hexagon::L2_loadrh_io:
    case Hexagon::L2_loadri_io:
    case Hexagon::L2_loadrd_io:
    case Hexagon::L2_loadbzw2_io:
    case Hexagon::L2_loadbzw4_io:
    case Hexagon::L2_loadbsw2_io:
    case Hexagon::L2_loadbsw4_io:
    case Hexagon::L2_loadalignh_io:
    case Hexagon::L2_loadalignb_io:
    // Predicated (true/false) loads.
    case Hexagon::L2_ploadrubt_io:
    case Hexagon::L2_ploadrubf_io:
    case Hexagon::L2_ploadrbt_io:
    case Hexagon::L2_ploadrbf_io:
    case Hexagon::L2_ploadruht_io:
    case Hexagon::L2_ploadruhf_io:
    case Hexagon::L2_ploadrht_io:
    case Hexagon::L2_ploadrhf_io:
    case Hexagon::L2_ploadrit_io:
    case Hexagon::L2_ploadrif_io:
    case Hexagon::L2_ploadrdt_io:
    case Hexagon::L2_ploadrdf_io:
    // Unpredicated stores.
    case Hexagon::S2_storerb_io:
    case Hexagon::S2_storerh_io:
    case Hexagon::S2_storerf_io:
    case Hexagon::S2_storeri_io:
    case Hexagon::S2_storerd_io:
    // Predicated (true/false) stores.
    case Hexagon::S2_pstorerbt_io:
    case Hexagon::S2_pstorerbf_io:
    case Hexagon::S2_pstorerht_io:
    case Hexagon::S2_pstorerhf_io:
    case Hexagon::S2_pstorerft_io:
    case Hexagon::S2_pstorerff_io:
    case Hexagon::S2_pstorerit_io:
    case Hexagon::S2_pstorerif_io:
    case Hexagon::S2_pstorerdt_io:
    case Hexagon::S2_pstorerdf_io:
    // Register plus immediate addition.
    case Hexagon::A2_addi:
      return true;
    default:
      break;
  }
  return false;
}
863
// Return the "register + immediate offset" (_io) opcode that performs the
// same operation as the given extender opcode ExtOpc, or 0 if no such
// instruction exists. The result is later used to replace an extended
// instruction with one that addresses through a register holding the
// extender's value.
unsigned HCE::getRegOffOpcode(unsigned ExtOpc) const {
  // If there exists an instruction that takes a register and offset,
  // that corresponds to the ExtOpc, return it, otherwise return 0.
  using namespace Hexagon;
  // A2_tfrsi (transfer-immediate) is the one non-memory special case:
  // its reg+offset form is A2_addi.
  switch (ExtOpc) {
    case A2_tfrsi:
      return A2_addi;
    default:
      break;
  }
  const MCInstrDesc &D = HII->get(ExtOpc);
  if (D.mayLoad() || D.mayStore()) {
    uint64_t F = D.TSFlags;
    // Decode the addressing mode from the target-specific flags.
    unsigned AM = (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
    switch (AM) {
      case HexagonII::Absolute:
      case HexagonII::AbsoluteSet:
      case HexagonII::BaseLongOffset:
        // Map each absolute (abs), absolute-set (ap), and base+long-offset
        // (ur) memory opcode to its base+immediate-offset (io) counterpart.
        switch (ExtOpc) {
          case PS_loadrubabs:
          case L4_loadrub_ap:
          case L4_loadrub_ur:     return L2_loadrub_io;
          case PS_loadrbabs:
          case L4_loadrb_ap:
          case L4_loadrb_ur:      return L2_loadrb_io;
          case PS_loadruhabs:
          case L4_loadruh_ap:
          case L4_loadruh_ur:     return L2_loadruh_io;
          case PS_loadrhabs:
          case L4_loadrh_ap:
          case L4_loadrh_ur:      return L2_loadrh_io;
          case PS_loadriabs:
          case L4_loadri_ap:
          case L4_loadri_ur:      return L2_loadri_io;
          case PS_loadrdabs:
          case L4_loadrd_ap:
          case L4_loadrd_ur:      return L2_loadrd_io;
          case L4_loadbzw2_ap:
          case L4_loadbzw2_ur:    return L2_loadbzw2_io;
          case L4_loadbzw4_ap:
          case L4_loadbzw4_ur:    return L2_loadbzw4_io;
          case L4_loadbsw2_ap:
          case L4_loadbsw2_ur:    return L2_loadbsw2_io;
          case L4_loadbsw4_ap:
          case L4_loadbsw4_ur:    return L2_loadbsw4_io;
          case L4_loadalignh_ap:
          case L4_loadalignh_ur:  return L2_loadalignh_io;
          case L4_loadalignb_ap:
          case L4_loadalignb_ur:  return L2_loadalignb_io;
          // Predicated loads only have absolute extended forms.
          case L4_ploadrubt_abs:  return L2_ploadrubt_io;
          case L4_ploadrubf_abs:  return L2_ploadrubf_io;
          case L4_ploadrbt_abs:   return L2_ploadrbt_io;
          case L4_ploadrbf_abs:   return L2_ploadrbf_io;
          case L4_ploadruht_abs:  return L2_ploadruht_io;
          case L4_ploadruhf_abs:  return L2_ploadruhf_io;
          case L4_ploadrht_abs:   return L2_ploadrht_io;
          case L4_ploadrhf_abs:   return L2_ploadrhf_io;
          case L4_ploadrit_abs:   return L2_ploadrit_io;
          case L4_ploadrif_abs:   return L2_ploadrif_io;
          case L4_ploadrdt_abs:   return L2_ploadrdt_io;
          case L4_ploadrdf_abs:   return L2_ploadrdf_io;
          case PS_storerbabs:
          case S4_storerb_ap:
          case S4_storerb_ur:     return S2_storerb_io;
          case PS_storerhabs:
          case S4_storerh_ap:
          case S4_storerh_ur:     return S2_storerh_io;
          case PS_storerfabs:
          case S4_storerf_ap:
          case S4_storerf_ur:     return S2_storerf_io;
          case PS_storeriabs:
          case S4_storeri_ap:
          case S4_storeri_ur:     return S2_storeri_io;
          case PS_storerdabs:
          case S4_storerd_ap:
          case S4_storerd_ur:     return S2_storerd_io;
          // Predicated stores, absolute forms only.
          case S4_pstorerbt_abs:  return S2_pstorerbt_io;
          case S4_pstorerbf_abs:  return S2_pstorerbf_io;
          case S4_pstorerht_abs:  return S2_pstorerht_io;
          case S4_pstorerhf_abs:  return S2_pstorerhf_io;
          case S4_pstorerft_abs:  return S2_pstorerft_io;
          case S4_pstorerff_abs:  return S2_pstorerff_io;
          case S4_pstorerit_abs:  return S2_pstorerit_io;
          case S4_pstorerif_abs:  return S2_pstorerif_io;
          case S4_pstorerdt_abs:  return S2_pstorerdt_io;
          case S4_pstorerdf_abs:  return S2_pstorerdf_io;
          default:
            break;
        }
        break;
      case HexagonII::BaseImmOffset:
        // Already base+imm-offset. Keep the opcode, except for
        // store-immediates, where the extender is the stored value,
        // not part of the address.
        if (!isStoreImmediate(ExtOpc))
          return ExtOpc;
        break;
      default:
        break;
    }
  }
  return 0;
}
963
// Return the opcode of the register-form instruction that can directly
// replace the extended instruction ExtOpc once the extender's value is
// available in a register, or 0 if there is none. A few replacements are
// generic target opcodes (COPY, REG_SEQUENCE) that the caller handles
// specially. The "T -> T" markers appear to track operand-tying between
// the immediate and register forms -- TODO confirm against the .td files.
unsigned HCE::getDirectRegReplacement(unsigned ExtOpc) const {
  switch (ExtOpc) {
    case Hexagon::A2_addi:          return Hexagon::A2_add;
    case Hexagon::A2_andir:         return Hexagon::A2_and;
    case Hexagon::A2_combineii:     return Hexagon::A4_combineri;
    case Hexagon::A2_orir:          return Hexagon::A2_or;
    case Hexagon::A2_paddif:        return Hexagon::A2_paddf;
    case Hexagon::A2_paddit:        return Hexagon::A2_paddt;
    case Hexagon::A2_subri:         return Hexagon::A2_sub;
    // Transfer-immediate becomes a plain register copy.
    case Hexagon::A2_tfrsi:         return TargetOpcode::COPY;
    case Hexagon::A4_cmpbeqi:       return Hexagon::A4_cmpbeq;
    case Hexagon::A4_cmpbgti:       return Hexagon::A4_cmpbgt;
    case Hexagon::A4_cmpbgtui:      return Hexagon::A4_cmpbgtu;
    case Hexagon::A4_cmpheqi:       return Hexagon::A4_cmpheq;
    case Hexagon::A4_cmphgti:       return Hexagon::A4_cmphgt;
    case Hexagon::A4_cmphgtui:      return Hexagon::A4_cmphgtu;
    case Hexagon::A4_combineii:     return Hexagon::A4_combineir;
    // The combine reg/imm forms become REG_SEQUENCE; the caller builds
    // the operand list explicitly (see replaceInstrExact).
    case Hexagon::A4_combineir:     return TargetOpcode::REG_SEQUENCE;
    case Hexagon::A4_combineri:     return TargetOpcode::REG_SEQUENCE;
    case Hexagon::A4_rcmpeqi:       return Hexagon::A4_rcmpeq;
    case Hexagon::A4_rcmpneqi:      return Hexagon::A4_rcmpneq;
    case Hexagon::C2_cmoveif:       return Hexagon::A2_tfrpf;
    case Hexagon::C2_cmoveit:       return Hexagon::A2_tfrpt;
    case Hexagon::C2_cmpeqi:        return Hexagon::C2_cmpeq;
    case Hexagon::C2_cmpgti:        return Hexagon::C2_cmpgt;
    case Hexagon::C2_cmpgtui:       return Hexagon::C2_cmpgtu;
    case Hexagon::C2_muxii:         return Hexagon::C2_muxir;
    case Hexagon::C2_muxir:         return Hexagon::C2_mux;
    case Hexagon::C2_muxri:         return Hexagon::C2_mux;
    case Hexagon::C4_cmpltei:       return Hexagon::C4_cmplte;
    case Hexagon::C4_cmplteui:      return Hexagon::C4_cmplteu;
    case Hexagon::C4_cmpneqi:       return Hexagon::C4_cmpneq;
    case Hexagon::M2_accii:         return Hexagon::M2_acci;        // T -> T
    /* No M2_macsin */
    case Hexagon::M2_macsip:        return Hexagon::M2_maci;        // T -> T
    case Hexagon::M2_mpysin:        return Hexagon::M2_mpyi;
    case Hexagon::M2_mpysip:        return Hexagon::M2_mpyi;
    case Hexagon::M2_mpysmi:        return Hexagon::M2_mpyi;
    case Hexagon::M2_naccii:        return Hexagon::M2_nacci;       // T -> T
    case Hexagon::M4_mpyri_addi:    return Hexagon::M4_mpyri_addr;
    case Hexagon::M4_mpyri_addr:    return Hexagon::M4_mpyrr_addr;  // _ -> T
    case Hexagon::M4_mpyrr_addi:    return Hexagon::M4_mpyrr_addr;  // _ -> T
    case Hexagon::S4_addaddi:       return Hexagon::M2_acci;        // _ -> T
    case Hexagon::S4_addi_asl_ri:   return Hexagon::S2_asl_i_r_acc; // T -> T
    case Hexagon::S4_addi_lsr_ri:   return Hexagon::S2_lsr_i_r_acc; // T -> T
    case Hexagon::S4_andi_asl_ri:   return Hexagon::S2_asl_i_r_and; // T -> T
    case Hexagon::S4_andi_lsr_ri:   return Hexagon::S2_lsr_i_r_and; // T -> T
    case Hexagon::S4_ori_asl_ri:    return Hexagon::S2_asl_i_r_or;  // T -> T
    case Hexagon::S4_ori_lsr_ri:    return Hexagon::S2_lsr_i_r_or;  // T -> T
    case Hexagon::S4_subaddi:       return Hexagon::M2_subacc;      // _ -> T
    case Hexagon::S4_subi_asl_ri:   return Hexagon::S2_asl_i_r_nac; // T -> T
    case Hexagon::S4_subi_lsr_ri:   return Hexagon::S2_lsr_i_r_nac; // T -> T

    // Store-immediates: the extender is the stored value, so the register
    // form is the corresponding store-register instruction.
    case Hexagon::S4_storeirbf_io:  return Hexagon::S2_pstorerbf_io;
    case Hexagon::S4_storeirb_io:   return Hexagon::S2_storerb_io;
    case Hexagon::S4_storeirbt_io:  return Hexagon::S2_pstorerbt_io;
    case Hexagon::S4_storeirhf_io:  return Hexagon::S2_pstorerhf_io;
    case Hexagon::S4_storeirh_io:   return Hexagon::S2_storerh_io;
    case Hexagon::S4_storeirht_io:  return Hexagon::S2_pstorerht_io;
    case Hexagon::S4_storeirif_io:  return Hexagon::S2_pstorerif_io;
    case Hexagon::S4_storeiri_io:   return Hexagon::S2_storeri_io;
    case Hexagon::S4_storeirit_io:  return Hexagon::S2_pstorerit_io;

    default:
      break;
  }
  return 0;
}
1033
1034 // Return the allowable deviation from the current value of Rb (i.e. the
1035 // range of values that can be added to the current value) which the
1036 // instruction MI can accommodate.
1037 // The instruction MI is a user of register Rb, which is defined via an
1038 // extender. It may be possible for MI to be tweaked to work for a register
1039 // defined with a slightly different value. For example
1040 // ... = L2_loadrub_io Rb, 1
1041 // can be modifed to be
1042 // ... = L2_loadrub_io Rb', 0
1043 // if Rb' = Rb+1.
1044 // The range for Rb would be [Min+1, Max+1], where [Min, Max] is a range
1045 // for L2_loadrub with offset 0. That means that Rb could be replaced with
1046 // Rc, where Rc-Rb belongs to [Min+1, Max+1].
getOffsetRange(Register Rb,const MachineInstr & MI) const1047 OffsetRange HCE::getOffsetRange(Register Rb, const MachineInstr &MI) const {
1048 unsigned Opc = MI.getOpcode();
1049 // Instructions that are constant-extended may be replaced with something
1050 // else that no longer offers the same range as the original.
1051 if (!isRegOffOpcode(Opc) || HII->isConstExtended(MI))
1052 return OffsetRange::zero();
1053
1054 if (Opc == Hexagon::A2_addi) {
1055 const MachineOperand &Op1 = MI.getOperand(1), &Op2 = MI.getOperand(2);
1056 if (Rb != Register(Op1) || !Op2.isImm())
1057 return OffsetRange::zero();
1058 OffsetRange R = { -(1<<15)+1, (1<<15)-1, 1 };
1059 return R.shift(Op2.getImm());
1060 }
1061
1062 // HII::getBaseAndOffsetPosition returns the increment position as "offset".
1063 if (HII->isPostIncrement(MI))
1064 return OffsetRange::zero();
1065
1066 const MCInstrDesc &D = HII->get(Opc);
1067 assert(D.mayLoad() || D.mayStore());
1068
1069 unsigned BaseP, OffP;
1070 if (!HII->getBaseAndOffsetPosition(MI, BaseP, OffP) ||
1071 Rb != Register(MI.getOperand(BaseP)) ||
1072 !MI.getOperand(OffP).isImm())
1073 return OffsetRange::zero();
1074
1075 uint64_t F = (D.TSFlags >> HexagonII::MemAccessSizePos) &
1076 HexagonII::MemAccesSizeMask;
1077 uint8_t A = HexagonII::getMemAccessSizeInBytes(HexagonII::MemAccessSize(F));
1078 unsigned L = Log2_32(A);
1079 unsigned S = 10+L; // sint11_L
1080 int32_t Min = -alignDown((1<<S)-1, A);
1081
1082 // The range will be shifted by Off. To prefer non-negative offsets,
1083 // adjust Max accordingly.
1084 int32_t Off = MI.getOperand(OffP).getImm();
1085 int32_t Max = Off >= 0 ? 0 : -Off;
1086
1087 OffsetRange R = { Min, Max, A };
1088 return R.shift(Off);
1089 }
1090
1091 // Return the allowable deviation from the current value of the extender ED,
1092 // for which the instruction corresponding to ED can be modified without
1093 // using an extender.
1094 // The instruction uses the extender directly. It will be replaced with
1095 // another instruction, say MJ, where the extender will be replaced with a
1096 // register. MJ can allow some variability with respect to the value of
1097 // that register, as is the case with indexed memory instructions.
getOffsetRange(const ExtDesc & ED) const1098 OffsetRange HCE::getOffsetRange(const ExtDesc &ED) const {
1099 // The only way that there can be a non-zero range available is if
1100 // the instruction using ED will be converted to an indexed memory
1101 // instruction.
1102 unsigned IdxOpc = getRegOffOpcode(ED.UseMI->getOpcode());
1103 switch (IdxOpc) {
1104 case 0:
1105 return OffsetRange::zero();
1106 case Hexagon::A2_addi: // s16
1107 return { -32767, 32767, 1 };
1108 case Hexagon::A2_subri: // s10
1109 return { -511, 511, 1 };
1110 }
1111
1112 if (!ED.UseMI->mayLoad() && !ED.UseMI->mayStore())
1113 return OffsetRange::zero();
1114 const MCInstrDesc &D = HII->get(IdxOpc);
1115 uint64_t F = (D.TSFlags >> HexagonII::MemAccessSizePos) &
1116 HexagonII::MemAccesSizeMask;
1117 uint8_t A = HexagonII::getMemAccessSizeInBytes(HexagonII::MemAccessSize(F));
1118 unsigned L = Log2_32(A);
1119 unsigned S = 10+L; // sint11_L
1120 int32_t Min = -alignDown((1<<S)-1, A);
1121 int32_t Max = 0; // Force non-negative offsets.
1122 return { Min, Max, A };
1123 }
1124
1125 // Get the allowable deviation from the current value of Rd by checking
1126 // all uses of Rd.
getOffsetRange(Register Rd) const1127 OffsetRange HCE::getOffsetRange(Register Rd) const {
1128 OffsetRange Range;
1129 for (const MachineOperand &Op : MRI->use_operands(Rd.Reg)) {
1130 // Make sure that the register being used by this operand is identical
1131 // to the register that was defined: using a different subregister
1132 // precludes any non-trivial range.
1133 if (Rd != Register(Op))
1134 return OffsetRange::zero();
1135 Range.intersect(getOffsetRange(Rd, *Op.getParent()));
1136 }
1137 return Range;
1138 }
1139
// Build an ExtDesc for the constant-extended operand OpNum of MI and
// append it to Extenders. The descriptor records which operand is the
// extender, whether the instruction defines a register from it (Rd/IsDef),
// and the register subexpression (Expr) it participates in. Extenders
// rooted at fixed stack slots or unnamed globals are not recorded.
void HCE::recordExtender(MachineInstr &MI, unsigned OpNum) {
  unsigned Opc = MI.getOpcode();
  ExtDesc ED;
  ED.OpNum = OpNum;

  bool IsLoad = MI.mayLoad();
  bool IsStore = MI.mayStore();

  // Fixed stack slots have negative indexes, and they cannot be used
  // with TRI::stackSlot2Index and TRI::index2StackSlot. This is somewhat
  // unfortunate, but should not be a frequent thing.
  for (MachineOperand &Op : MI.operands())
    if (Op.isFI() && Op.getIndex() < 0)
      return;

  if (IsLoad || IsStore) {
    unsigned AM = HII->getAddrMode(MI);
    // The operand layouts below (positions relative to OpNum) follow the
    // addressing-mode encodings; the "(Re: ##Off + Rb<<S)" notation shows
    // which parts of the address are present in each mode.
    switch (AM) {
      // (Re: ##Off + Rb<<S) = Rd: ##Val
      case HexagonII::Absolute:                 // (__: ## + __<<_)
        break;
      case HexagonII::AbsoluteSet:              // (Rd: ## + __<<_)
        // Absolute-set also writes the effective address to a register.
        ED.Rd = MI.getOperand(OpNum-1);
        ED.IsDef = true;
        break;
      case HexagonII::BaseImmOffset:            // (__: ## + Rs<<0)
        // Store-immediates are treated as non-memory operations, since
        // it's the value being stored that is extended (as opposed to
        // a part of the address).
        if (!isStoreImmediate(Opc))
          ED.Expr.Rs = MI.getOperand(OpNum-1);
        break;
      case HexagonII::BaseLongOffset:           // (__: ## + Rs<<S)
        ED.Expr.Rs = MI.getOperand(OpNum-2);
        ED.Expr.S = MI.getOperand(OpNum-1).getImm();
        break;
      default:
        llvm_unreachable("Unhandled memory instruction");
    }
  } else {
    // Non-memory instructions: classify by opcode.
    switch (Opc) {
      case Hexagon::A2_tfrsi:                   // (Rd: ## + __<<_)
        ED.Rd = MI.getOperand(0);
        ED.IsDef = true;
        break;
      case Hexagon::A2_combineii:               // (Rd: ## + __<<_)
      case Hexagon::A4_combineir:
        // The extender lands in the high half of the 64-bit result.
        ED.Rd = { MI.getOperand(0).getReg(), Hexagon::isub_hi };
        ED.IsDef = true;
        break;
      case Hexagon::A4_combineri:               // (Rd: ## + __<<_)
        // The extender lands in the low half of the 64-bit result.
        ED.Rd = { MI.getOperand(0).getReg(), Hexagon::isub_lo };
        ED.IsDef = true;
        break;
      case Hexagon::A2_addi:                    // (Rd: ## + Rs<<0)
        ED.Rd = MI.getOperand(0);
        ED.Expr.Rs = MI.getOperand(OpNum-1);
        break;
      case Hexagon::M2_accii:                   // (__: ## + Rs<<0)
      case Hexagon::M2_naccii:
      case Hexagon::S4_addaddi:
        ED.Expr.Rs = MI.getOperand(OpNum-1);
        break;
      case Hexagon::A2_subri:                   // (Rd: ## - Rs<<0)
        ED.Rd = MI.getOperand(0);
        ED.Expr.Rs = MI.getOperand(OpNum+1);
        ED.Expr.Neg = true;
        break;
      case Hexagon::S4_subaddi:                 // (__: ## - Rs<<0)
        ED.Expr.Rs = MI.getOperand(OpNum+1);
        ED.Expr.Neg = true;
        break;
      default:                                  // (__: ## + __<<_)
        break;
    }
  }

  ED.UseMI = &MI;

  // Ignore unnamed globals.
  ExtRoot ER(ED.getOp());
  if (ER.Kind == MachineOperand::MO_GlobalAddress)
    if (ER.V.GV->getName().empty())
      return;
  Extenders.push_back(ED);
}
1226
collectInstr(MachineInstr & MI)1227 void HCE::collectInstr(MachineInstr &MI) {
1228 if (!HII->isConstExtended(MI))
1229 return;
1230
1231 // Skip some non-convertible instructions.
1232 unsigned Opc = MI.getOpcode();
1233 switch (Opc) {
1234 case Hexagon::M2_macsin: // There is no Rx -= mpyi(Rs,Rt).
1235 case Hexagon::C4_addipc:
1236 case Hexagon::S4_or_andi:
1237 case Hexagon::S4_or_andix:
1238 case Hexagon::S4_or_ori:
1239 return;
1240 }
1241 recordExtender(MI, HII->getCExtOpNum(MI));
1242 }
1243
collect(MachineFunction & MF)1244 void HCE::collect(MachineFunction &MF) {
1245 Extenders.clear();
1246 for (MachineBasicBlock &MBB : MF) {
1247 // Skip unreachable blocks.
1248 if (MBB.getNumber() == -1)
1249 continue;
1250 for (MachineInstr &MI : MBB)
1251 collectInstr(MI);
1252 }
1253 }
1254
// For the extenders [Begin..End), all sharing the same root ER, choose a
// set of initializer values (registers holding ER+offset) and record in
// IMap which extenders each initializer will serve. The selection greedily
// picks values that cover the maximum number of extenders.
void HCE::assignInits(const ExtRoot &ER, unsigned Begin, unsigned End,
      AssignmentMap &IMap) {
  // Sanity check: make sure that all extenders in the range [Begin..End)
  // share the same root ER.
  for (unsigned I = Begin; I != End; ++I)
    assert(ER == ExtRoot(Extenders[I].getOp()));

  // Construct the list of ranges, such that for each P in Ranges[I],
  // a register Reg = ER+P can be used in place of Extender[I]. If the
  // instruction allows, uses in the form of Reg+Off are considered
  // (here, Off = required_value - P).
  std::vector<OffsetRange> Ranges(End-Begin);

  // For each extender that is a def, visit all uses of the defined register,
  // and produce an offset range that works for all uses. The def doesn't
  // have to be checked, because it can become dead if all uses can be updated
  // to use a different reg/offset.
  for (unsigned I = Begin; I != End; ++I) {
    const ExtDesc &ED = Extenders[I];
    if (!ED.IsDef)
      continue;
    ExtValue EV(ED);
    LLVM_DEBUG(dbgs() << " =" << I << ". " << EV << " " << ED << '\n');
    assert(ED.Rd.Reg != 0);
    Ranges[I-Begin] = getOffsetRange(ED.Rd).shift(EV.Offset);
    // A2_tfrsi is a special case: it will be replaced with A2_addi, which
    // has a 16-bit signed offset. This means that A2_tfrsi not only has a
    // range coming from its uses, but also from the fact that its replacement
    // has a range as well.
    if (ED.UseMI->getOpcode() == Hexagon::A2_tfrsi) {
      int32_t D = alignDown(32767, Ranges[I-Begin].Align); // XXX hardcoded
      Ranges[I-Begin].extendBy(-D).extendBy(D);
    }
  }

  // Visit all non-def extenders. For each one, determine the offset range
  // available for it.
  for (unsigned I = Begin; I != End; ++I) {
    const ExtDesc &ED = Extenders[I];
    if (ED.IsDef)
      continue;
    ExtValue EV(ED);
    LLVM_DEBUG(dbgs() << " " << I << ". " << EV << " " << ED << '\n');
    OffsetRange Dev = getOffsetRange(ED);
    Ranges[I-Begin].intersect(Dev.shift(EV.Offset));
  }

  // Here for each I there is a corresponding Range[I]. Construct the
  // inverse map, that to each range will assign the set of indexes in
  // [Begin..End) that this range corresponds to.
  std::map<OffsetRange, IndexList> RangeMap;
  for (unsigned I = Begin; I != End; ++I)
    RangeMap[Ranges[I-Begin]].insert(I);

  LLVM_DEBUG({
    dbgs() << "Ranges\n";
    for (unsigned I = Begin; I != End; ++I)
      dbgs() << " " << I << ". " << Ranges[I-Begin] << '\n';
    dbgs() << "RangeMap\n";
    for (auto &P : RangeMap) {
      dbgs() << " " << P.first << " ->";
      for (unsigned I : P.second)
        dbgs() << ' ' << I;
      dbgs() << '\n';
    }
  });

  // Select the definition points, and generate the assignment between
  // these points and the uses.

  // For each candidate offset, keep a pair CandData consisting of
  // the total number of ranges containing that candidate, and the
  // vector of corresponding RangeTree nodes.
  using CandData = std::pair<unsigned, SmallVector<RangeTree::Node*,8>>;
  std::map<int32_t, CandData> CandMap;

  RangeTree Tree;
  for (const OffsetRange &R : Ranges)
    Tree.add(R);
  SmallVector<RangeTree::Node*,8> Nodes;
  Tree.order(Nodes);

  // Among the given nodes, find the strictest (alignment, offset) pair
  // that is compatible with the starting (Align, Offset): i.e. a higher
  // alignment whose offset is congruent to the current one.
  auto MaxAlign = [](const SmallVectorImpl<RangeTree::Node*> &Nodes,
        uint8_t Align, uint8_t Offset) {
    for (RangeTree::Node *N : Nodes) {
      if (N->Range.Align <= Align || N->Range.Offset < Offset)
        continue;
      if ((N->Range.Offset - Offset) % Align != 0)
        continue;
      Align = N->Range.Align;
      Offset = N->Range.Offset;
    }
    return std::make_pair(Align, Offset);
  };

  // Construct the set of all potential definition points from the endpoints
  // of the ranges. If a given endpoint also belongs to a different range,
  // but with a higher alignment, also consider the more-highly-aligned
  // value of this endpoint.
  std::set<int32_t> CandSet;
  for (RangeTree::Node *N : Nodes) {
    const OffsetRange &R = N->Range;
    auto P0 = MaxAlign(Tree.nodesWith(R.Min, false), R.Align, R.Offset);
    CandSet.insert(R.Min);
    if (R.Align < P0.first)
      CandSet.insert(adjustUp(R.Min, P0.first, P0.second));
    auto P1 = MaxAlign(Tree.nodesWith(R.Max, false), R.Align, R.Offset);
    CandSet.insert(R.Max);
    if (R.Align < P1.first)
      CandSet.insert(adjustDown(R.Max, P1.first, P1.second));
  }

  // Build the assignment map: candidate C -> { list of extender indexes }.
  // This has to be done iteratively:
  // - pick the candidate that covers the maximum number of extenders,
  // - add the candidate to the map,
  // - remove the extenders from the pool.
  while (true) {
    using CMap = std::map<int32_t,unsigned>;
    CMap Counts;
    // Count, for each remaining candidate, how many extenders its ranges
    // still cover; drop candidates that no longer cover anything.
    for (auto It = CandSet.begin(), Et = CandSet.end(); It != Et; ) {
      auto &&V = Tree.nodesWith(*It);
      unsigned N = std::accumulate(V.begin(), V.end(), 0u,
                    [](unsigned Acc, const RangeTree::Node *N) {
                      return Acc + N->Count;
                    });
      if (N != 0)
        Counts.insert({*It, N});
      It = (N != 0) ? std::next(It) : CandSet.erase(It);
    }
    if (Counts.empty())
      break;

    // Find the best candidate with respect to the number of extenders covered.
    // Ties are broken by the candidate value itself (map ordering).
    auto BestIt = std::max_element(Counts.begin(), Counts.end(),
                    [](const CMap::value_type &A, const CMap::value_type &B) {
                      return A.second < B.second ||
                             (A.second == B.second && A < B);
                    });
    int32_t Best = BestIt->first;
    ExtValue BestV(ER, Best);
    // Assign every extender covered by Best to this initializer and remove
    // its nodes from the tree so later iterations don't recount them.
    for (RangeTree::Node *N : Tree.nodesWith(Best)) {
      for (unsigned I : RangeMap[N->Range])
        IMap[{BestV,Extenders[I].Expr}].insert(I);
      Tree.erase(N);
    }
  }

  LLVM_DEBUG(dbgs() << "IMap (before fixup) = " << PrintIMap(IMap, *HRI));

  // There is some ambiguity in what initializer should be used, if the
  // descriptor's subexpression is non-trivial: it can be the entire
  // subexpression (which is what has been done so far), or it can be
  // the extender's value itself, if all corresponding extenders have the
  // exact value of the initializer (i.e. require offset of 0).

  // To reduce the number of initializers, merge such special cases.
  for (std::pair<const ExtenderInit,IndexList> &P : IMap) {
    // Skip trivial initializers.
    if (P.first.second.trivial())
      continue;
    // If the corresponding trivial initializer does not exist, skip this
    // entry.
    const ExtValue &EV = P.first.first;
    AssignmentMap::iterator F = IMap.find({EV, ExtExpr()});
    if (F == IMap.end())
      continue;

    // Finally, check if all extenders have the same value as the initializer.
    // Make sure that extenders that are a part of a stack address are not
    // merged with those that aren't. Stack addresses need an offset field
    // (to be used by frame index elimination), while non-stack expressions
    // can be replaced with forms (such as rr) that do not have such a field.
    // Example:
    //
    // Collected 3 extenders
    //  =2. imm:0  off:32968  bb#2: %7 = ## + __ << 0, def
    //   0. imm:0  off:267  bb#0: __ = ## + SS#1 << 0
    //   1. imm:0  off:267  bb#1: __ = ## + SS#1 << 0
    // Ranges
    //   0. [-756,267]a1+0
    //   1. [-756,267]a1+0
    //   2. [201,65735]a1+0
    // RangeMap
    //   [-756,267]a1+0 -> 0 1
    //   [201,65735]a1+0 -> 2
    // IMap (before fixup) = {
    //   [imm:0  off:267, ## + __ << 0]  ->  { 2 }
    //   [imm:0  off:267, ## + SS#1 << 0]  ->  { 0 1 }
    // }
    // IMap (after fixup) = {
    //   [imm:0  off:267, ## + __ << 0]  ->  { 2 0 1 }
    //   [imm:0  off:267, ## + SS#1 << 0]  ->  { }
    // }
    // Inserted def in bb#0 for initializer: [imm:0  off:267, ## + __ << 0]
    //   %12:intregs = A2_tfrsi 267
    //
    // The result was
    //   %12:intregs = A2_tfrsi 267
    //   S4_pstorerbt_rr %3, %12, %stack.1, 0, killed %4
    // Which became
    //   r0 = #267
    //   if (p0.new) memb(r0+r29<<#4) = r2

    bool IsStack = any_of(F->second, [this](unsigned I) {
                      return Extenders[I].Expr.Rs.isSlot();
                   });
    auto SameValue = [&EV,this,IsStack](unsigned I) {
      const ExtDesc &ED = Extenders[I];
      return ED.Expr.Rs.isSlot() == IsStack &&
             ExtValue(ED).Offset == EV.Offset;
    };
    if (all_of(P.second, SameValue)) {
      F->second.insert(P.second.begin(), P.second.end());
      P.second.clear();
    }
  }

  LLVM_DEBUG(dbgs() << "IMap (after fixup) = " << PrintIMap(IMap, *HRI));
}
1475
calculatePlacement(const ExtenderInit & ExtI,const IndexList & Refs,LocDefList & Defs)1476 void HCE::calculatePlacement(const ExtenderInit &ExtI, const IndexList &Refs,
1477 LocDefList &Defs) {
1478 if (Refs.empty())
1479 return;
1480
1481 // The placement calculation is somewhat simple right now: it finds a
1482 // single location for the def that dominates all refs. Since this may
1483 // place the def far from the uses, producing several locations for
1484 // defs that collectively dominate all refs could be better.
1485 // For now only do the single one.
1486 DenseSet<MachineBasicBlock*> Blocks;
1487 DenseSet<MachineInstr*> RefMIs;
1488 const ExtDesc &ED0 = Extenders[Refs[0]];
1489 MachineBasicBlock *DomB = ED0.UseMI->getParent();
1490 RefMIs.insert(ED0.UseMI);
1491 Blocks.insert(DomB);
1492 for (unsigned i = 1, e = Refs.size(); i != e; ++i) {
1493 const ExtDesc &ED = Extenders[Refs[i]];
1494 MachineBasicBlock *MBB = ED.UseMI->getParent();
1495 RefMIs.insert(ED.UseMI);
1496 DomB = MDT->findNearestCommonDominator(DomB, MBB);
1497 Blocks.insert(MBB);
1498 }
1499
1500 #ifndef NDEBUG
1501 // The block DomB should be dominated by the def of each register used
1502 // in the initializer.
1503 Register Rs = ExtI.second.Rs; // Only one reg allowed now.
1504 const MachineInstr *DefI = Rs.isVReg() ? MRI->getVRegDef(Rs.Reg) : nullptr;
1505
1506 // This should be guaranteed given that the entire expression is used
1507 // at each instruction in Refs. Add an assertion just in case.
1508 assert(!DefI || MDT->dominates(DefI->getParent(), DomB));
1509 #endif
1510
1511 MachineBasicBlock::iterator It;
1512 if (Blocks.count(DomB)) {
1513 // Try to find the latest possible location for the def.
1514 MachineBasicBlock::iterator End = DomB->end();
1515 for (It = DomB->begin(); It != End; ++It)
1516 if (RefMIs.count(&*It))
1517 break;
1518 assert(It != End && "Should have found a ref in DomB");
1519 } else {
1520 // DomB does not contain any refs.
1521 It = DomB->getFirstTerminator();
1522 }
1523 Loc DefLoc(DomB, It);
1524 Defs.emplace_back(DefLoc, Refs);
1525 }
1526
// Materialize the initializer ExtI (value EV combined with subexpression
// Ex) into a fresh virtual register at location DefL, and return that
// register (with subregister 0). Depending on the expression form this
// emits PS_fi, A2_tfrsi, A2_addi/A2_subri, a compound add/sub-with-shift,
// or a two-instruction shift+add/sub sequence.
HCE::Register HCE::insertInitializer(Loc DefL, const ExtenderInit &ExtI) {
  llvm::Register DefR = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass);
  MachineBasicBlock &MBB = *DefL.Block;
  MachineBasicBlock::iterator At = DefL.At;
  DebugLoc dl = DefL.Block->findDebugLoc(DefL.At);
  const ExtValue &EV = ExtI.first;
  MachineOperand ExtOp(EV);

  const ExtExpr &Ex = ExtI.second;
  const MachineInstr *InitI = nullptr;

  if (Ex.Rs.isSlot()) {
    // Stack-slot base: use the frame-index pseudo; frame-index elimination
    // will later fold the offset.
    assert(Ex.S == 0 && "Cannot have a shift of a stack slot");
    assert(!Ex.Neg && "Cannot subtract a stack slot");
    // DefR = PS_fi Rb,##EV
    InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::PS_fi), DefR)
              .add(MachineOperand(Ex.Rs))
              .add(ExtOp);
  } else {
    assert((Ex.Rs.Reg == 0 || Ex.Rs.isVReg()) && "Expecting virtual register");
    if (Ex.trivial()) {
      // DefR = ##EV
      InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_tfrsi), DefR)
                .add(ExtOp);
    } else if (Ex.S == 0) {
      if (Ex.Neg) {
        // DefR = sub(##EV,Rb)
        InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_subri), DefR)
                  .add(ExtOp)
                  .add(MachineOperand(Ex.Rs));
      } else {
        // DefR = add(Rb,##EV)
        InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_addi), DefR)
                  .add(MachineOperand(Ex.Rs))
                  .add(ExtOp);
      }
    } else {
      // Shifted base register (S != 0).
      if (HST->useCompound()) {
        unsigned NewOpc = Ex.Neg ? Hexagon::S4_subi_asl_ri
                                 : Hexagon::S4_addi_asl_ri;
        // DefR = add(##EV,asl(Rb,S))
        InitI = BuildMI(MBB, At, dl, HII->get(NewOpc), DefR)
                  .add(ExtOp)
                  .add(MachineOperand(Ex.Rs))
                  .addImm(Ex.S);
      } else {
        // No compounds are available. It is not clear whether we should
        // even process such extenders where the initializer cannot be
        // a single instruction, but do it for now.
        unsigned TmpR = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass);
        // TmpR = asl(Rb,S), then DefR = TmpR +/- ##EV.
        BuildMI(MBB, At, dl, HII->get(Hexagon::S2_asl_i_r), TmpR)
          .add(MachineOperand(Ex.Rs))
          .addImm(Ex.S);
        if (Ex.Neg)
          InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_subri), DefR)
                    .add(ExtOp)
                    .add(MachineOperand(Register(TmpR, 0)));
        else
          InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_addi), DefR)
                    .add(MachineOperand(Register(TmpR, 0)))
                    .add(ExtOp);
      }
    }
  }

  assert(InitI);
  (void)InitI;
  LLVM_DEBUG(dbgs() << "Inserted def in bb#" << MBB.getNumber()
                    << " for initializer: " << PrintInit(ExtI, *HRI) << "\n "
                    << *InitI);
  return { DefR, 0 };
}
1599
1600 // Replace the extender at index Idx with the register ExtR.
// Replace the extender at index Idx with the register ExtR.
// The register ExtR holds the exact value of the extender, so the
// instruction can be rewritten to use ExtR in place of the extended
// operand (possibly with a different opcode and operand order).
// Returns true on success; the original instruction is erased.
bool HCE::replaceInstrExact(const ExtDesc &ED, Register ExtR) {
  MachineInstr &MI = *ED.UseMI;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineBasicBlock::iterator At = MI.getIterator();
  DebugLoc dl = MI.getDebugLoc();
  unsigned ExtOpc = MI.getOpcode();

  // With a few exceptions, direct replacement amounts to creating an
  // instruction with a corresponding register opcode, with all operands
  // the same, except for the register used in place of the extender.
  unsigned RegOpc = getDirectRegReplacement(ExtOpc);

  if (RegOpc == TargetOpcode::REG_SEQUENCE) {
    // The combine reg/imm forms become a REG_SEQUENCE with ExtR placed in
    // the half that held the immediate.
    if (ExtOpc == Hexagon::A4_combineri)
      BuildMI(MBB, At, dl, HII->get(RegOpc))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .addImm(Hexagon::isub_hi)
        .add(MachineOperand(ExtR))
        .addImm(Hexagon::isub_lo);
    else if (ExtOpc == Hexagon::A4_combineir)
      BuildMI(MBB, At, dl, HII->get(RegOpc))
        .add(MI.getOperand(0))
        .add(MachineOperand(ExtR))
        .addImm(Hexagon::isub_hi)
        .add(MI.getOperand(2))
        .addImm(Hexagon::isub_lo);
    else
      llvm_unreachable("Unexpected opcode became REG_SEQUENCE");
    MBB.erase(MI);
    return true;
  }
  if (ExtOpc == Hexagon::C2_cmpgei || ExtOpc == Hexagon::C2_cmpgeui) {
    // cmp.ge(Rs,##) is rewritten as the swapped-operand cmp.lt(##reg,Rs).
    unsigned NewOpc = ExtOpc == Hexagon::C2_cmpgei ? Hexagon::C2_cmplt
                                                   : Hexagon::C2_cmpltu;
    BuildMI(MBB, At, dl, HII->get(NewOpc))
      .add(MI.getOperand(0))
      .add(MachineOperand(ExtR))
      .add(MI.getOperand(1));
    MBB.erase(MI);
    return true;
  }

  if (RegOpc != 0) {
    MachineInstrBuilder MIB = BuildMI(MBB, At, dl, HII->get(RegOpc));
    unsigned RegN = ED.OpNum;
    // Copy all operands except the one that has the extender.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (i != RegN)
        MIB.add(MI.getOperand(i));
      else
        MIB.add(MachineOperand(ExtR));
    }
    MIB.cloneMemRefs(MI);
    MBB.erase(MI);
    return true;
  }

  if (MI.mayLoadOrStore() && !isStoreImmediate(ExtOpc)) {
    // For memory instructions, there is an asymmetry in the addressing
    // modes. Addressing modes allowing extenders can be replaced with
    // addressing modes that use registers, but the order of operands
    // (or even their number) may be different.
    // Replacements:
    //   BaseImmOffset (io)  -> BaseRegOffset (rr)
    //   BaseLongOffset (ur) -> BaseRegOffset (rr)
    unsigned RegOpc, Shift;
    unsigned AM = HII->getAddrMode(MI);
    if (AM == HexagonII::BaseImmOffset) {
      RegOpc = HII->changeAddrMode_io_rr(ExtOpc);
      Shift = 0;
    } else if (AM == HexagonII::BaseLongOffset) {
      // Loads:  Rd = L4_loadri_ur Rs, S, ##
      // Stores: S4_storeri_ur Rs, S, ##, Rt
      RegOpc = HII->changeAddrMode_ur_rr(ExtOpc);
      Shift = MI.getOperand(MI.mayLoad() ? 2 : 1).getImm();
    } else {
      llvm_unreachable("Unexpected addressing mode");
    }
#ifndef NDEBUG
    if (RegOpc == -1u) {
      dbgs() << "\nExtOpc: " << HII->getName(ExtOpc) << " has no rr version\n";
      llvm_unreachable("No corresponding rr instruction");
    }
#endif

    unsigned BaseP, OffP;
    HII->getBaseAndOffsetPosition(MI, BaseP, OffP);

    // Build an rr instruction: (RegOff + RegBase<<0)
    MachineInstrBuilder MIB = BuildMI(MBB, At, dl, HII->get(RegOpc));
    // First, add the def for loads.
    if (MI.mayLoad())
      MIB.add(getLoadResultOp(MI));
    // Handle possible predication.
    if (HII->isPredicated(MI))
      MIB.add(getPredicateOp(MI));
    // Build the address.
    MIB.add(MachineOperand(ExtR));      // RegOff
    MIB.add(MI.getOperand(BaseP));      // RegBase
    MIB.addImm(Shift);                  // << Shift
    // Add the stored value for stores.
    if (MI.mayStore())
      MIB.add(getStoredValueOp(MI));
    MIB.cloneMemRefs(MI);
    MBB.erase(MI);
    return true;
  }

#ifndef NDEBUG
  dbgs() << '\n' << MI;
#endif
  llvm_unreachable("Unhandled exact replacement");
  return false;
}
1716
// Replace the extender ED with a form corresponding to the initializer ExtI.
// ExtR is the register holding the initializer's value. Diff is the offset
// difference between the extended value used by ED and the initializer's
// value; it is passed by reference because on the A2_tfrsi path only a part
// of Diff may be absorbed here, and the residue must then be applied by the
// callers to the users of the defined register.
// Returns true if the instruction was replaced (the original is erased).
bool HCE::replaceInstrExpr(const ExtDesc &ED, const ExtenderInit &ExtI,
      Register ExtR, int32_t &Diff) {
  MachineInstr &MI = *ED.UseMI;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineBasicBlock::iterator At = MI.getIterator();
  DebugLoc dl = MI.getDebugLoc();
  unsigned ExtOpc = MI.getOpcode();

  if (ExtOpc == Hexagon::A2_tfrsi) {
    // A2_tfrsi is a special case: it's replaced with A2_addi, which introduces
    // another range. One range is the one that's common to all tfrsi's uses,
    // this one is the range of immediates in A2_addi. When calculating ranges,
    // the addi's 16-bit argument was included, so now we need to make it such
    // that the produced value is in the range for the uses alone.
    // Most of the time, simply adding Diff will make the addi produce exact
    // result, but if Diff is outside of the 16-bit range, some adjustment
    // will be needed.
    unsigned IdxOpc = getRegOffOpcode(ExtOpc);
    assert(IdxOpc == Hexagon::A2_addi);

    // Clamp Diff to the 16 bit range.
    int32_t D = isInt<16>(Diff) ? Diff : (Diff > 0 ? 32767 : -32768);
    if (Diff > 32767) {
      // Split Diff into two values: one that is close to min/max int16,
      // and the other being the rest, and such that both have the same
      // "alignment" as Diff.
      uint32_t UD = Diff;
      OffsetRange R = getOffsetRange(MI.getOperand(0));
      uint32_t A = std::min<uint32_t>(R.Align, 1u << countTrailingZeros(UD));
      D &= ~(A-1);
    }
    // Produce: Rd = A2_addi ExtR, D. The residual Diff-D (if any) is left
    // in Diff for the caller to fold into the users of Rd.
    BuildMI(MBB, At, dl, HII->get(IdxOpc))
      .add(MI.getOperand(0))
      .add(MachineOperand(ExtR))
      .addImm(D);
    Diff -= D;
#ifndef NDEBUG
    // Make sure the output is within allowable range for uses.
    // "Diff" is a difference in the "opposite direction", i.e. Ext - DefV,
    // not DefV - Ext, as the getOffsetRange would calculate.
    OffsetRange Uses = getOffsetRange(MI.getOperand(0));
    if (!Uses.contains(-Diff))
      dbgs() << "Diff: " << -Diff << " out of range " << Uses
             << " for " << MI;
    assert(Uses.contains(-Diff));
#endif
    MBB.erase(MI);
    return true;
  }

  const ExtValue &EV = ExtI.first; (void)EV;
  const ExtExpr &Ex = ExtI.second; (void)Ex;

  if (ExtOpc == Hexagon::A2_addi || ExtOpc == Hexagon::A2_subri) {
    // If addi/subri are replaced with the exactly matching initializer,
    // they amount to COPY.
    // Check that the initializer is an exact match (for simplicity).
#ifndef NDEBUG
    bool IsAddi = ExtOpc == Hexagon::A2_addi;
    const MachineOperand &RegOp = MI.getOperand(IsAddi ? 1 : 2);
    const MachineOperand &ImmOp = MI.getOperand(IsAddi ? 2 : 1);
    assert(Ex.Rs == RegOp && EV == ImmOp && Ex.Neg != IsAddi &&
           "Initializer mismatch");
#endif
    BuildMI(MBB, At, dl, HII->get(TargetOpcode::COPY))
      .add(MI.getOperand(0))
      .add(MachineOperand(ExtR));
    Diff = 0;
    MBB.erase(MI);
    return true;
  }
  if (ExtOpc == Hexagon::M2_accii || ExtOpc == Hexagon::M2_naccii ||
      ExtOpc == Hexagon::S4_addaddi || ExtOpc == Hexagon::S4_subaddi) {
    // M2_accii: add(Rt,add(Rs,V)) (tied)
    // M2_naccii: sub(Rt,add(Rs,V))
    // S4_addaddi: add(Rt,add(Rs,V))
    // S4_subaddi: add(Rt,sub(V,Rs))
    // Check that Rs and V match the initializer expression. The Rs+V is the
    // combination that is considered "subexpression" for V, although Rx+V
    // would also be valid.
#ifndef NDEBUG
    bool IsSub = ExtOpc == Hexagon::S4_subaddi;
    Register Rs = MI.getOperand(IsSub ? 3 : 2);
    ExtValue V = MI.getOperand(IsSub ? 2 : 3);
    assert(EV == V && Rs == Ex.Rs && IsSub == Ex.Neg && "Initializer mismatch");
#endif
    // The add(Rs,V) / sub(V,Rs) part is now ExtR, so the whole operation
    // collapses to a plain register add/sub.
    unsigned NewOpc = ExtOpc == Hexagon::M2_naccii ? Hexagon::A2_sub
                                                   : Hexagon::A2_add;
    BuildMI(MBB, At, dl, HII->get(NewOpc))
      .add(MI.getOperand(0))
      .add(MI.getOperand(1))
      .add(MachineOperand(ExtR));
    MBB.erase(MI);
    return true;
  }

  if (MI.mayLoadOrStore()) {
    unsigned IdxOpc = getRegOffOpcode(ExtOpc);
    assert(IdxOpc && "Expecting indexed opcode");
    MachineInstrBuilder MIB = BuildMI(MBB, At, dl, HII->get(IdxOpc));
    // Construct the new indexed instruction.
    // First, add the def for loads.
    if (MI.mayLoad())
      MIB.add(getLoadResultOp(MI));
    // Handle possible predication.
    if (HII->isPredicated(MI))
      MIB.add(getPredicateOp(MI));
    // Build the address: (ExtR + Diff), with Diff as the immediate offset.
    MIB.add(MachineOperand(ExtR));
    MIB.addImm(Diff);
    // Add the stored value for stores.
    if (MI.mayStore())
      MIB.add(getStoredValueOp(MI));
    MIB.cloneMemRefs(MI);
    MBB.erase(MI);
    return true;
  }

#ifndef NDEBUG
  dbgs() << '\n' << PrintInit(ExtI, *HRI) << " " << MI;
#endif
  llvm_unreachable("Unhandled expr replacement");
  return false;
}
1842
// Replace the extender described by Extenders[Idx] with a use of the
// register ExtR, which holds the value of the initializer ExtI.
// Returns true if the extender was actually replaced.
bool HCE::replaceInstr(unsigned Idx, Register ExtR, const ExtenderInit &ExtI) {
  // Debugging aid: honor the -hexagon-cext-limit cap on the number of
  // replacements, if it was given on the command line.
  if (ReplaceLimit.getNumOccurrences()) {
    if (ReplaceLimit <= ReplaceCounter)
      return false;
    ++ReplaceCounter;
  }
  const ExtDesc &ED = Extenders[Idx];
  assert((!ED.IsDef || ED.Rd.Reg != 0) && "Missing Rd for def");
  const ExtValue &DefV = ExtI.first;
  assert(ExtRoot(ExtValue(ED)) == ExtRoot(DefV) && "Extender root mismatch");
  const ExtExpr &DefEx = ExtI.second;

  // Diff = (value used by the extender) - (value produced by the initializer).
  ExtValue EV(ED);
  int32_t Diff = EV.Offset - DefV.Offset;
  const MachineInstr &MI = *ED.UseMI;
  LLVM_DEBUG(dbgs() << __func__ << " Idx:" << Idx << " ExtR:"
                    << PrintRegister(ExtR, *HRI) << " Diff:" << Diff << '\n');

  // These two addressing modes must be converted into indexed forms
  // regardless of what the initializer looks like.
  bool IsAbs = false, IsAbsSet = false;
  if (MI.mayLoadOrStore()) {
    unsigned AM = HII->getAddrMode(MI);
    IsAbs = AM == HexagonII::Absolute;
    IsAbsSet = AM == HexagonII::AbsoluteSet;
  }

  // If it's a def, remember all operands that need to be updated.
  // If ED is a def, and Diff is not 0, then all uses of the register Rd
  // defined by ED must be in the form (Rd, imm), i.e. the immediate offset
  // must follow the Rd in the operand list.
  // The list is captured before the replacement, since the replacement may
  // rewrite or erase operands of the using instructions.
  std::vector<std::pair<MachineInstr*,unsigned>> RegOps;
  if (ED.IsDef && Diff != 0) {
    for (MachineOperand &Op : MRI->use_operands(ED.Rd.Reg)) {
      MachineInstr &UI = *Op.getParent();
      RegOps.push_back({&UI, getOperandIndex(UI, Op)});
    }
  }

  // Replace the instruction. The exact form only applies when the
  // initializer's value matches exactly (Diff == 0) and is trivial.
  bool Replaced = false;
  if (Diff == 0 && DefEx.trivial() && !IsAbs && !IsAbsSet)
    Replaced = replaceInstrExact(ED, ExtR);
  else
    Replaced = replaceInstrExpr(ED, ExtI, ExtR, Diff);

  // Note: replaceInstrExpr may have updated Diff to the residual offset
  // that still needs to be folded into the users of Rd.
  if (Diff != 0 && Replaced && ED.IsDef) {
    // Update offsets of the def's uses.
    for (std::pair<MachineInstr*,unsigned> P : RegOps) {
      unsigned J = P.second;
      assert(P.first->getNumOperands() > J+1 &&
             P.first->getOperand(J+1).isImm());
      MachineOperand &ImmOp = P.first->getOperand(J+1);
      ImmOp.setImm(ImmOp.getImm() + Diff);
    }
    // If it was an absolute-set instruction, the "set" part has been removed.
    // ExtR will now be the register with the extended value, and since all
    // users of Rd have been updated, all that needs to be done is to replace
    // Rd with ExtR.
    if (IsAbsSet) {
      assert(ED.Rd.Sub == 0 && ExtR.Sub == 0);
      MRI->replaceRegWith(ED.Rd.Reg, ExtR.Reg);
    }
  }

  return Replaced;
}
1910
replaceExtenders(const AssignmentMap & IMap)1911 bool HCE::replaceExtenders(const AssignmentMap &IMap) {
1912 LocDefList Defs;
1913 bool Changed = false;
1914
1915 for (const std::pair<const ExtenderInit, IndexList> &P : IMap) {
1916 const IndexList &Idxs = P.second;
1917 if (Idxs.size() < CountThreshold)
1918 continue;
1919
1920 Defs.clear();
1921 calculatePlacement(P.first, Idxs, Defs);
1922 for (const std::pair<Loc,IndexList> &Q : Defs) {
1923 Register DefR = insertInitializer(Q.first, P.first);
1924 NewRegs.push_back(DefR.Reg);
1925 for (unsigned I : Q.second)
1926 Changed |= replaceInstr(I, DefR, P.first);
1927 }
1928 }
1929 return Changed;
1930 }
1931
getOperandIndex(const MachineInstr & MI,const MachineOperand & Op) const1932 unsigned HCE::getOperandIndex(const MachineInstr &MI,
1933 const MachineOperand &Op) const {
1934 for (unsigned i = 0, n = MI.getNumOperands(); i != n; ++i)
1935 if (&MI.getOperand(i) == &Op)
1936 return i;
1937 llvm_unreachable("Not an operand of MI");
1938 }
1939
getPredicateOp(const MachineInstr & MI) const1940 const MachineOperand &HCE::getPredicateOp(const MachineInstr &MI) const {
1941 assert(HII->isPredicated(MI));
1942 for (const MachineOperand &Op : MI.operands()) {
1943 if (!Op.isReg() || !Op.isUse() ||
1944 MRI->getRegClass(Op.getReg()) != &Hexagon::PredRegsRegClass)
1945 continue;
1946 assert(Op.getSubReg() == 0 && "Predicate register with a subregister");
1947 return Op;
1948 }
1949 llvm_unreachable("Predicate operand not found");
1950 }
1951
getLoadResultOp(const MachineInstr & MI) const1952 const MachineOperand &HCE::getLoadResultOp(const MachineInstr &MI) const {
1953 assert(MI.mayLoad());
1954 return MI.getOperand(0);
1955 }
1956
getStoredValueOp(const MachineInstr & MI) const1957 const MachineOperand &HCE::getStoredValueOp(const MachineInstr &MI) const {
1958 assert(MI.mayStore());
1959 return MI.getOperand(MI.getNumExplicitOperands()-1);
1960 }
1961
runOnMachineFunction(MachineFunction & MF)1962 bool HCE::runOnMachineFunction(MachineFunction &MF) {
1963 if (skipFunction(MF.getFunction()))
1964 return false;
1965 if (MF.getFunction().hasPersonalityFn()) {
1966 LLVM_DEBUG(dbgs() << getPassName() << ": skipping " << MF.getName()
1967 << " due to exception handling\n");
1968 return false;
1969 }
1970 LLVM_DEBUG(MF.print(dbgs() << "Before " << getPassName() << '\n', nullptr));
1971
1972 HST = &MF.getSubtarget<HexagonSubtarget>();
1973 HII = HST->getInstrInfo();
1974 HRI = HST->getRegisterInfo();
1975 MDT = &getAnalysis<MachineDominatorTree>();
1976 MRI = &MF.getRegInfo();
1977 AssignmentMap IMap;
1978
1979 collect(MF);
1980 llvm::sort(Extenders, [this](const ExtDesc &A, const ExtDesc &B) {
1981 ExtValue VA(A), VB(B);
1982 if (VA != VB)
1983 return VA < VB;
1984 const MachineInstr *MA = A.UseMI;
1985 const MachineInstr *MB = B.UseMI;
1986 if (MA == MB) {
1987 // If it's the same instruction, compare operand numbers.
1988 return A.OpNum < B.OpNum;
1989 }
1990
1991 const MachineBasicBlock *BA = MA->getParent();
1992 const MachineBasicBlock *BB = MB->getParent();
1993 assert(BA->getNumber() != -1 && BB->getNumber() != -1);
1994 if (BA != BB)
1995 return BA->getNumber() < BB->getNumber();
1996 return MDT->dominates(MA, MB);
1997 });
1998
1999 bool Changed = false;
2000 LLVM_DEBUG(dbgs() << "Collected " << Extenders.size() << " extenders\n");
2001 for (unsigned I = 0, E = Extenders.size(); I != E; ) {
2002 unsigned B = I;
2003 const ExtRoot &T = Extenders[B].getOp();
2004 while (I != E && ExtRoot(Extenders[I].getOp()) == T)
2005 ++I;
2006
2007 IMap.clear();
2008 assignInits(T, B, I, IMap);
2009 Changed |= replaceExtenders(IMap);
2010 }
2011
2012 LLVM_DEBUG({
2013 if (Changed)
2014 MF.print(dbgs() << "After " << getPassName() << '\n', nullptr);
2015 else
2016 dbgs() << "No changes\n";
2017 });
2018 return Changed;
2019 }
2020
// Factory function used by the Hexagon target to create an instance of this
// pass for the pass pipeline.
FunctionPass *llvm::createHexagonConstExtenders() {
  return new HexagonConstExtenders();
}
2024