// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/register-configuration.h"
#include "src/base/lazy-instance.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/register-arch.h"
#include "src/common/globals.h"

namespace v8 {
namespace internal {

namespace {

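// REGISTER_COUNT expands to "1 +" for every register in a register list, so
// applying it to a list and terminating the expansion with 0 counts the
// list's entries at compile time.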
#define REGISTER_COUNT(R) 1 +
static const int kMaxAllocatableGeneralRegisterCount =
    ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
static const int kMaxAllocatableDoubleRegisterCount =
    ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0;

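// Expand the allocatable register lists into arrays of register codes. On
// Arm, a reduced double-register list is also provided for CPUs without
// VFP32DREGS.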
static const int kAllocatableGeneralCodes[] = {
#define REGISTER_CODE(R) kRegCode_##R,
    ALLOCATABLE_GENERAL_REGISTERS(REGISTER_CODE)};
#undef REGISTER_CODE

#define REGISTER_CODE(R) kDoubleCode_##R,
static const int kAllocatableDoubleCodes[] = {
    ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_CODE)};
#if V8_TARGET_ARCH_ARM
static const int kAllocatableNoVFP32DoubleCodes[] = {
    ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_CODE)};
#endif  // V8_TARGET_ARCH_ARM
#undef REGISTER_CODE

STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
              Register::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
              FloatRegister::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
              DoubleRegister::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
              Simd128Register::kNumRegisters);

// Callers on architectures other than Arm expect this to be constant between
// build and runtime. Avoid adding variability on other platforms.
static int get_num_allocatable_double_registers() {
  return
#if V8_TARGET_ARCH_IA32
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_X64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_ARM
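      // Without VFP32DREGS only d0-d15 are usable, so count the reduced
      // allocatable double-register list instead.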
      CpuFeatures::IsSupported(VFP32DREGS)
          ? kMaxAllocatableDoubleRegisterCount
          : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0);
#elif V8_TARGET_ARCH_ARM64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_LOONG64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_S390
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_RISCV64
      kMaxAllocatableDoubleRegisterCount;
#else
#error Unsupported target architecture.
#endif
}

#undef REGISTER_COUNT

// Callers on architectures other than Arm expect this to be constant between
// build and runtime. Avoid adding variability on other platforms.
static const int* get_allocatable_double_codes() {
  return
#if V8_TARGET_ARCH_ARM
      CpuFeatures::IsSupported(VFP32DREGS) ? kAllocatableDoubleCodes
                                           : kAllocatableNoVFP32DoubleCodes;
#else
      kAllocatableDoubleCodes;
#endif
}

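// The default configuration, covering all registers the target architecture
// makes allocatable. It is created lazily and shared through
// RegisterConfiguration::Default().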
class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
 public:
  ArchDefaultRegisterConfiguration()
      : RegisterConfiguration(
            Register::kNumRegisters, DoubleRegister::kNumRegisters,
            kMaxAllocatableGeneralRegisterCount,
            get_num_allocatable_double_registers(), kAllocatableGeneralCodes,
            get_allocatable_double_codes(),
            kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
  }
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
                                GetDefaultRegisterConfiguration)

// RestrictedRegisterConfiguration uses a subset of the allocatable general
// registers that the architecture supports, so the generated assembly uses
// fewer registers. Currently it is only used by the RecordWrite code stub.
class RestrictedRegisterConfiguration : public RegisterConfiguration {
 public:
  RestrictedRegisterConfiguration(
      int num_allocatable_general_registers,
      std::unique_ptr<int[]> allocatable_general_register_codes,
      std::unique_ptr<char const*[]> allocatable_general_register_names)
      : RegisterConfiguration(
            Register::kNumRegisters, DoubleRegister::kNumRegisters,
            num_allocatable_general_registers,
            get_num_allocatable_double_registers(),
            allocatable_general_register_codes.get(),
            get_allocatable_double_codes(),
            kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE),
        allocatable_general_register_codes_(
            std::move(allocatable_general_register_codes)),
        allocatable_general_register_names_(
            std::move(allocatable_general_register_names)) {
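    // The restricted set must be a subset of the default allocatable set.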
    for (int i = 0; i < num_allocatable_general_registers; ++i) {
      DCHECK(
          IsAllocatableGeneralRegister(allocatable_general_register_codes_[i]));
    }
  }

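  // Returns true if |code| is allocatable in the default configuration.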
  bool IsAllocatableGeneralRegister(int code) {
    for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
      if (code == kAllocatableGeneralCodes[i]) {
        return true;
      }
    }
    return false;
  }

 private:
  std::unique_ptr<int[]> allocatable_general_register_codes_;
  std::unique_ptr<char const*[]> allocatable_general_register_names_;
};

}  // namespace

const RegisterConfiguration* RegisterConfiguration::Default() {
  return GetDefaultRegisterConfiguration();
}

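// Returns a newly allocated configuration whose allocatable general registers
// are limited to those in |registers|; the double registers are unchanged.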
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
    RegList registers) {
  int num = NumRegs(registers);
  std::unique_ptr<int[]> codes{new int[num]};
  std::unique_ptr<char const* []> names { new char const*[num] };
  int counter = 0;
  for (int i = 0; i < Default()->num_allocatable_general_registers(); ++i) {
    auto reg = Register::from_code(Default()->GetAllocatableGeneralCode(i));
    if (reg.bit() & registers) {
      DCHECK(counter < num);
      codes[counter] = reg.code();
      names[counter] = RegisterName(reg);
      counter++;
    }
  }

  return new RestrictedRegisterConfiguration(num, std::move(codes),
                                             std::move(names));
}

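// The general and double register sets are taken as given; the float and
// SIMD register sets are derived in the constructor body from the aliasing
// kind.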
RegisterConfiguration::RegisterConfiguration(
    int num_general_registers, int num_double_registers,
    int num_allocatable_general_registers, int num_allocatable_double_registers,
    const int* allocatable_general_codes, const int* allocatable_double_codes,
    AliasingKind fp_aliasing_kind)
    : num_general_registers_(num_general_registers),
      num_float_registers_(0),
      num_double_registers_(num_double_registers),
      num_simd128_registers_(0),
      num_allocatable_general_registers_(num_allocatable_general_registers),
      num_allocatable_float_registers_(0),
      num_allocatable_double_registers_(num_allocatable_double_registers),
      num_allocatable_simd128_registers_(0),
      allocatable_general_codes_mask_(0),
      allocatable_float_codes_mask_(0),
      allocatable_double_codes_mask_(0),
      allocatable_simd128_codes_mask_(0),
      allocatable_general_codes_(allocatable_general_codes),
      allocatable_double_codes_(allocatable_double_codes),
      fp_aliasing_kind_(fp_aliasing_kind) {
  DCHECK_LE(num_general_registers_,
            RegisterConfiguration::kMaxGeneralRegisters);
  DCHECK_LE(num_double_registers_, RegisterConfiguration::kMaxFPRegisters);
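  // Precompute bit masks of the allocatable general and double codes.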
  for (int i = 0; i < num_allocatable_general_registers_; ++i) {
    allocatable_general_codes_mask_ |= (1 << allocatable_general_codes_[i]);
  }
  for (int i = 0; i < num_allocatable_double_registers_; ++i) {
    allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
  }

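  // With COMBINE aliasing (as on Arm), double register d<n> aliases float
  // registers s<2n> and s<2n+1>, and SIMD register q<n> aliases double
  // registers d<2n> and d<2n+1>. Derive the float codes from the double
  // codes, and treat a SIMD register as allocatable only when both of its
  // double halves are allocatable. For example, allocatable double codes
  // {0, 1, 4} yield float codes {0, 1, 2, 3, 8, 9} and SIMD codes {0}.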
  if (fp_aliasing_kind_ == COMBINE) {
    num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
                               ? num_double_registers_ * 2
                               : kMaxFPRegisters;
    num_allocatable_float_registers_ = 0;
    for (int i = 0; i < num_allocatable_double_registers_; i++) {
      int base_code = allocatable_double_codes_[i] * 2;
      if (base_code >= kMaxFPRegisters) continue;
      allocatable_float_codes_[num_allocatable_float_registers_++] = base_code;
      allocatable_float_codes_[num_allocatable_float_registers_++] =
          base_code + 1;
      allocatable_float_codes_mask_ |= (0x3 << base_code);
    }
    num_simd128_registers_ = num_double_registers_ / 2;
    num_allocatable_simd128_registers_ = 0;
    int last_simd128_code = allocatable_double_codes_[0] / 2;
    for (int i = 1; i < num_allocatable_double_registers_; i++) {
      int next_simd128_code = allocatable_double_codes_[i] / 2;
      // This scheme assumes allocatable_double_codes_ are strictly increasing.
      DCHECK_GE(next_simd128_code, last_simd128_code);
      if (last_simd128_code == next_simd128_code) {
        allocatable_simd128_codes_[num_allocatable_simd128_registers_++] =
            next_simd128_code;
        allocatable_simd128_codes_mask_ |= (0x1 << next_simd128_code);
      }
      last_simd128_code = next_simd128_code;
    }
  } else {
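    // With OVERLAP aliasing, float, double, and SIMD registers share the same
    // codes, so the allocatable sets are identical.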
    DCHECK(fp_aliasing_kind_ == OVERLAP);
    num_float_registers_ = num_simd128_registers_ = num_double_registers_;
    num_allocatable_float_registers_ = num_allocatable_simd128_registers_ =
        num_allocatable_double_registers_;
    for (int i = 0; i < num_allocatable_float_registers_; ++i) {
      allocatable_float_codes_[i] = allocatable_simd128_codes_[i] =
          allocatable_double_codes_[i];
    }
    allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ =
        allocatable_double_codes_mask_;
  }
}

// Assert that kFloat32, kFloat64, and kSimd128 are consecutive values.
STATIC_ASSERT(static_cast<int>(MachineRepresentation::kSimd128) ==
              static_cast<int>(MachineRepresentation::kFloat64) + 1);
STATIC_ASSERT(static_cast<int>(MachineRepresentation::kFloat64) ==
              static_cast<int>(MachineRepresentation::kFloat32) + 1);

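// Returns how many registers of |other_rep| alias register |index| of |rep|
// under COMBINE aliasing, storing the first aliased code in
// |alias_base_index|. A wider register aliases 2^shift narrower registers
// (e.g. double register 3 aliases float registers 6 and 7), while a narrower
// register lies within exactly one wider register. Returns 0 (without writing
// |alias_base_index|) if the aliases would fall outside the FP register
// range.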
int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
                                      MachineRepresentation other_rep,
                                      int* alias_base_index) const {
  DCHECK(fp_aliasing_kind_ == COMBINE);
  DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
  if (rep == other_rep) {
    *alias_base_index = index;
    return 1;
  }
  int rep_int = static_cast<int>(rep);
  int other_rep_int = static_cast<int>(other_rep);
  if (rep_int > other_rep_int) {
    int shift = rep_int - other_rep_int;
    int base_index = index << shift;
    if (base_index >= kMaxFPRegisters) {
      // Alias indices would be out of FP register range.
      return 0;
    }
    *alias_base_index = base_index;
    return 1 << shift;
  }
  int shift = other_rep_int - rep_int;
  *alias_base_index = index >> shift;
  return 1;
}

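// Returns true if register |index| of |rep| and register |other_index| of
// |other_rep| occupy overlapping bits under COMBINE aliasing.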
bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
                                       MachineRepresentation other_rep,
                                       int other_index) const {
  DCHECK(fp_aliasing_kind_ == COMBINE);
  DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
  if (rep == other_rep) {
    return index == other_index;
  }
  int rep_int = static_cast<int>(rep);
  int other_rep_int = static_cast<int>(other_rep);
  if (rep_int > other_rep_int) {
    int shift = rep_int - other_rep_int;
    return index == other_index >> shift;
  }
  int shift = other_rep_int - rep_int;
  return index >> shift == other_index;
}

}  // namespace internal
}  // namespace v8