//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"

namespace llvm {

namespace RISCVMatInt {
void generateInstSeq(int64_t Val, bool IsRV64, InstSeq &Res) {
  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate Value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
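    //
    // A worked example (illustrative only): for v = 0x12345678, Hi20 is 0x12345
    // and Lo12 is 0x678, giving LUI 0x12345 followed by ADDI(W) 0x678. The
    // +0x800 rounding below compensates for ADDI's sign extension: for
    // v = 0xFFF, Hi20 is 1 and Lo12 is -1, giving LUI 1 followed by ADDI(W) -1.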
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.push_back(Inst(RISCV::LUI, Hi20));

    if (Lo12 || Hi20 == 0) {
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.push_back(Inst(AddiOpc, Lo12));
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, it might seem possible to simply emit the most significant
  // 32 bits (LUI+ADDIW) followed by as many left shifts (SLLI) and immediate
  // additions (ADDI) as needed. However, because ADDI sign-extends its 12-bit
  // immediate, that approach only works if at most 11 bits of each ADDI
  // immediate are used. Using all 12 bits of the ADDI immediates, as GAS does,
  // requires that the constant is processed starting with the least
  // significant bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
  // from the constant and the optimal shift amount, which can be greater than
  // 12 bits if the constant is sparse, is determined. Then, the shifted
  // remaining constant is processed recursively and gets emitted as soon as it
  // fits into 32 bits. The emission of the shifts and additions is subsequently
  // performed when the recursion returns.
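  //
  // A worked example (illustrative only): for Val = 0x100000001, Lo12 is 1 and
  // Hi52 is 0x100000. Its single set bit allows a shift of 12 + 20 = 32,
  // leaving Hi52 = 1 for the recursive call, so the emitted sequence is
  // ADDI 1, SLLI 32, ADDI 1.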

  int64_t Lo12 = SignExtend64<12>(Val);
  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
  int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
  Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);

  generateInstSeq(Hi52, IsRV64, Res);

  Res.push_back(Inst(RISCV::SLLI, ShiftAmount));
  if (Lo12)
    Res.push_back(Inst(RISCV::ADDI, Lo12));
}

int getIntMatCost(const APInt &Val, unsigned Size, bool IsRV64) {
  int PlatRegSize = IsRV64 ? 64 : 32;

  // Split the constant into platform register sized chunks, and calculate cost
  // of each chunk.
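  //
  // For example (illustrative only), a 128-bit all-ones value on RV64 splits
  // into two 64-bit chunks of -1, each materialised with a single ADDI, for a
  // total cost of 2.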
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    InstSeq MatSeq;
    generateInstSeq(Chunk.getSExtValue(), IsRV64, MatSeq);
    Cost += MatSeq.size();
  }
  return std::max(1, Cost);
}
} // namespace RISCVMatInt
} // namespace llvm