// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_
#define V8_COMPILER_BACKEND_CODE_GENERATOR_H_

#include <memory>

#include "src/base/optional.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/source-position-table.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/unwinding-info-writer.h"
#include "src/compiler/osr.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/code-kind.h"
#include "src/trap-handler/trap-handler.h"

namespace v8 {
namespace internal {

namespace compiler {

// Forward declarations.
class DeoptimizationExit;
class FrameAccessState;
class Linkage;
class OutOfLineCode;

struct BranchInfo {
  FlagsCondition condition;
  Label* true_label;
  Label* false_label;
  bool fallthru;
};

class InstructionOperandIterator {
 public:
  InstructionOperandIterator(Instruction* instr, size_t pos)
      : instr_(instr), pos_(pos) {}

  Instruction* instruction() const { return instr_; }
  InstructionOperand* Advance() { return instr_->InputAt(pos_++); }

 private:
  Instruction* instr_;
  size_t pos_;
};
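
// For illustration, a hypothetical walk over an instruction's inputs with the
// iterator above (its real consumers are the translation-building methods of
// CodeGenerator declared further below; {instr} is an assumed local):
//
//   InstructionOperandIterator it(instr, 0);
//   InstructionOperand* input0 = it.Advance();  // instr->InputAt(0)
//   InstructionOperand* input1 = it.Advance();  // instr->InputAt(1)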

enum class DeoptimizationLiteralKind { kObject, kNumber, kString, kInvalid };

// Either a non-null Handle<Object>, a double or a StringConstantBase.
class DeoptimizationLiteral {
 public:
  DeoptimizationLiteral()
      : kind_(DeoptimizationLiteralKind::kInvalid),
        object_(),
        number_(0),
        string_(nullptr) {}
  explicit DeoptimizationLiteral(Handle<Object> object)
      : kind_(DeoptimizationLiteralKind::kObject), object_(object) {
    CHECK(!object_.is_null());
  }
  explicit DeoptimizationLiteral(double number)
      : kind_(DeoptimizationLiteralKind::kNumber), number_(number) {}
  explicit DeoptimizationLiteral(const StringConstantBase* string)
      : kind_(DeoptimizationLiteralKind::kString), string_(string) {}

  Handle<Object> object() const { return object_; }
  const StringConstantBase* string() const { return string_; }

  bool operator==(const DeoptimizationLiteral& other) const {
    return kind_ == other.kind_ && object_.equals(other.object_) &&
           bit_cast<uint64_t>(number_) == bit_cast<uint64_t>(other.number_) &&
           bit_cast<intptr_t>(string_) == bit_cast<intptr_t>(other.string_);
  }

  Handle<Object> Reify(Isolate* isolate) const;

  void Validate() const {
    CHECK_NE(kind_, DeoptimizationLiteralKind::kInvalid);
  }

  DeoptimizationLiteralKind kind() const {
    Validate();
    return kind_;
  }

 private:
  DeoptimizationLiteralKind kind_;

  Handle<Object> object_;
  double number_ = 0;
  const StringConstantBase* string_ = nullptr;
};
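
// A minimal sketch of how a literal is built and later reified (hypothetical
// values; literals are normally registered via DefineDeoptimizationLiteral in
// CodeGenerator below):
//
//   DeoptimizationLiteral lit(0.5);            // kind() == kNumber
//   Handle<Object> obj = lit.Reify(isolate);   // materialized on demand
//   DeoptimizationLiteral invalid;             // calling kind() would CHECK-fail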

// These structs hold pc offsets for generated instructions and are only used
// when tracing for turbolizer is enabled.
struct TurbolizerCodeOffsetsInfo {
  int code_start_register_check = -1;
  int deopt_check = -1;
  int blocks_start = -1;
  int out_of_line_code = -1;
  int deoptimization_exits = -1;
  int pools = -1;
  int jump_tables = -1;
};

struct TurbolizerInstructionStartInfo {
  int gap_pc_offset = -1;
  int arch_instr_pc_offset = -1;
  int condition_pc_offset = -1;
};

// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
 public:
  explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
                         InstructionSequence* instructions,
                         OptimizedCompilationInfo* info, Isolate* isolate,
                         base::Optional<OsrHelper> osr_helper,
                         int start_source_position,
                         JumpOptimizationInfo* jump_opt,
                         const AssemblerOptions& options, Builtin builtin,
                         size_t max_unoptimized_frame_height,
                         size_t max_pushed_argument_count,
                         const char* debug_name = nullptr);

  // Generate native code. After calling AssembleCode, call FinalizeCode to
  // produce the actual code object. If an error occurs during either phase,
  // FinalizeCode returns an empty MaybeHandle.
  void AssembleCode();  // Does not need to run on main thread.
  MaybeHandle<Code> FinalizeCode();
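
  // A minimal driver sketch (assumes the surrounding pipeline has already
  // built the Frame, Linkage, InstructionSequence, and
  // OptimizedCompilationInfo; the real call sites live in the compiler
  // pipeline):
  //
  //   CodeGenerator gen(codegen_zone, frame, linkage, instructions, info,
  //                     isolate, osr_helper, start_source_position, jump_opt,
  //                     options, builtin, max_unoptimized_frame_height,
  //                     max_pushed_argument_count);
  //   gen.AssembleCode();                           // may run off-thread
  //   MaybeHandle<Code> code = gen.FinalizeCode();  // main thread only
  //   if (code.is_null()) { /* abort optimization */ }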

  base::OwnedVector<byte> GetSourcePositionTable();
  base::OwnedVector<byte> GetProtectedInstructionsData();

  InstructionSequence* instructions() const { return instructions_; }
  FrameAccessState* frame_access_state() const { return frame_access_state_; }
  const Frame* frame() const { return frame_access_state_->frame(); }
  Isolate* isolate() const { return isolate_; }
  Linkage* linkage() const { return linkage_; }

  Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }

  void AddProtectedInstructionLanding(uint32_t instr_offset,
                                      uint32_t landing_offset);

  bool wasm_runtime_exception_support() const;

  SourcePosition start_source_position() const {
    return start_source_position_;
  }

  void AssembleSourcePosition(Instruction* instr);
  void AssembleSourcePosition(SourcePosition source_position);

  // Record a safepoint with the given pointer map.
  void RecordSafepoint(ReferenceMap* references);

  Zone* zone() const { return zone_; }
  TurboAssembler* tasm() { return &tasm_; }
  SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
  size_t GetSafepointTableOffset() const { return safepoints_.GetCodeOffset(); }
  size_t GetHandlerTableOffset() const { return handler_table_offset_; }

  const ZoneVector<int>& block_starts() const { return block_starts_; }
  const ZoneVector<TurbolizerInstructionStartInfo>& instr_starts() const {
    return instr_starts_;
  }

  const TurbolizerCodeOffsetsInfo& offsets_info() const {
    return offsets_info_;
  }

  static constexpr int kBinarySearchSwitchMinimalCases = 4;

  // Returns true if an offset should be applied to the given stack check.
  // There are two reasons that this could happen:
  // 1. The optimized frame is smaller than the corresponding deoptimized
  //    frames and an offset must be applied in order to be able to deopt
  //    safely.
  // 2. The current function pushes a large number of arguments to the stack.
  //    These are not accounted for by the initial frame setup.
  bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset);
  uint32_t GetStackCheckOffset();
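
  // A sketch of how a backend might consume this (hypothetical; each
  // architecture-specific code generator handles its stack-check instruction
  // in its own way):
  //
  //   uint32_t offset = 0;
  //   if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
  //     // Check the stack limit with {offset} of extra headroom, so that
  //     // deoptimizing or pushing the arguments cannot overflow the stack.
  //   }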

  CodeKind code_kind() const { return info_->code_kind(); }

 private:
  GapResolver* resolver() { return &resolver_; }
  SafepointTableBuilder* safepoints() { return &safepoints_; }
  OptimizedCompilationInfo* info() const { return info_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }

  // Create the FrameAccessState object. The Frame is immutable from here on.
  void CreateFrameAccessState(Frame* frame);

  // Architecture-specific frame finalization.
  void FinishFrame(Frame* frame);

  // Checks if {block} will appear directly after {current_block_} when
  // assembling code, in which case a fall-through can be used.
  bool IsNextInAssemblyOrder(RpoNumber block) const;

  // Checks if a heap object can be materialized by loading from a heap root,
  // which is cheaper on some platforms than materializing the actual heap
  // object constant.
  bool IsMaterializableFromRoot(Handle<HeapObject> object,
                                RootIndex* index_return);

  enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

  // Assemble instructions for the specified block.
  CodeGenResult AssembleBlock(const InstructionBlock* block);

  // Assemble code for the specified instruction.
  CodeGenResult AssembleInstruction(int instruction_index,
                                    const InstructionBlock* block);
  void AssembleGaps(Instruction* instr);

  // Computes branch info from the given instruction. If the branch is
  // redundant, returns a valid RPO number pointing to the target basic block;
  // otherwise returns an invalid RPO number.
  RpoNumber ComputeBranchInfo(BranchInfo* branch, Instruction* instr);

  // Returns true if an instruction is a tail call that needs to adjust the
  // stack pointer before execution. The stack slot index of the empty slot
  // above the adjusted stack pointer is returned in |slot|.
  bool GetSlotAboveSPBeforeTailCall(Instruction* instr, int* slot);
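
  // A sketch of the intended use (hypothetical; architecture backends consult
  // this while assembling a tail call):
  //
  //   int slot = -1;
  //   if (GetSlotAboveSPBeforeTailCall(instr, &slot)) {
  //     // Adjust sp so that {slot} is the empty slot directly above it.
  //   }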

  // Determines how to call helper stubs depending on the code kind.
  StubCallMode DetermineStubCallMode() const;

  CodeGenResult AssembleDeoptimizerCall(DeoptimizationExit* exit);

  void AssembleDeoptImmediateArgs(
      const ZoneVector<ImmediateOperand*>* immediate_args, Label* deopt_exit);

  // ===========================================================================
  // ============= Architecture-specific code generation methods. ==============
  // ===========================================================================

  CodeGenResult AssembleArchInstruction(Instruction* instr);
  void AssembleArchJump(RpoNumber target);
  void AssembleArchBranch(Instruction* instr, BranchInfo* branch);

  // Generates a special branch for a deoptimization condition.
  void AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch);

  void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
  void AssembleArchSelect(Instruction* instr, FlagsCondition condition);
#if V8_ENABLE_WEBASSEMBLY
  void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
#endif  // V8_ENABLE_WEBASSEMBLY
  void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
                                           std::pair<int32_t, Label*>* begin,
                                           std::pair<int32_t, Label*>* end);
  void AssembleArchBinarySearchSwitch(Instruction* instr);
  void AssembleArchTableSwitch(Instruction* instr);

  // Generates code that checks whether the {kJavaScriptCallCodeStartRegister}
  // contains the expected pointer to the start of the instruction stream.
  void AssembleCodeStartRegisterCheck();

  // When entering code that is marked for deoptimization, rather than
  // continuing with its execution, we jump to lazily compiled code. We need to
  // do this because the code has already been deoptimized and needs to be
  // unlinked from the JS functions referring to it.
  void BailoutIfDeoptimized();

  // Generates an architecture-specific, descriptor-specific prologue
  // to set up a stack frame.
  void AssembleConstructFrame();

  // Generates an architecture-specific, descriptor-specific return sequence
  // to tear down a stack frame.
  void AssembleReturn(InstructionOperand* pop);

  void AssembleDeconstructFrame();

  // Generates code to manipulate the stack in preparation for a tail call.
  void AssemblePrepareTailCall();

  enum PushTypeFlag {
    kImmediatePush = 0x1,
    kRegisterPush = 0x2,
    kStackSlotPush = 0x4,
    kScalarPush = kRegisterPush | kStackSlotPush
  };

  using PushTypeFlags = base::Flags<PushTypeFlag>;

  static bool IsValidPush(InstructionOperand source, PushTypeFlags push_type);

  // Generates a list of moves from an instruction that are candidates to be
  // turned into push instructions on platforms that support them. In general,
  // the push candidates are moves to a set of contiguous destination
  // InstructionOperand locations on the stack that don't clobber values that
  // are needed to resolve the gap or use values generated by the gap, i.e.
  // moves that can be hoisted together before the actual gap and assembled
  // together.
  static void GetPushCompatibleMoves(Instruction* instr,
                                     PushTypeFlags push_type,
                                     ZoneVector<MoveOperands*>* pushes);
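
  // A sketch of the intended use (hypothetical; platforms that turn gap moves
  // into pushes would call this before assembling a tail call's gap moves):
  //
  //   ZoneVector<MoveOperands*> pushes(zone());
  //   GetPushCompatibleMoves(instr, kScalarPush, &pushes);
  //   // Emit one push per entry, then skip these moves during gap resolution.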

  class MoveType {
   public:
    enum Type {
      kRegisterToRegister,
      kRegisterToStack,
      kStackToRegister,
      kStackToStack,
      kConstantToRegister,
      kConstantToStack
    };

    // Detect what type of move or swap needs to be performed. Note that these
    // functions do not take into account the representation (Tagged, FP,
    // etc.).

    static Type InferMove(InstructionOperand* source,
                          InstructionOperand* destination);
    static Type InferSwap(InstructionOperand* source,
                          InstructionOperand* destination);
  };
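
  // A sketch of the dispatch this enables (hypothetical; each architecture's
  // AssembleMove implements its own version of this switch):
  //
  //   switch (MoveType::InferMove(source, destination)) {
  //     case MoveType::kRegisterToRegister:  /* emit a register move  */ break;
  //     case MoveType::kConstantToStack:     /* materialize and store */ break;
  //     // ... remaining cases ...
  //   }
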
  // Called before a tail call |instr|'s gap moves are assembled and allows
  // gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
  // need it before gap moves or conversion of certain gap moves into pushes.
  void AssembleTailCallBeforeGap(Instruction* instr,
                                 int first_unused_stack_slot);
  // Called after a tail call |instr|'s gap moves are assembled and allows
  // gap-specific post-processing, e.g. adjustment of the sp for tail calls that
  // need it after gap moves.
  void AssembleTailCallAfterGap(Instruction* instr,
                                int first_unused_stack_slot);

  void FinishCode();
  void MaybeEmitOutOfLineConstantPool();

  void IncrementStackAccessCounter(InstructionOperand* source,
                                   InstructionOperand* destination);

  // ===========================================================================
  // ============== Architecture-specific gap resolver methods. ================
  // ===========================================================================

  // Interface used by the gap resolver to emit moves and swaps.
  void AssembleMove(InstructionOperand* source,
                    InstructionOperand* destination) final;
  void AssembleSwap(InstructionOperand* source,
                    InstructionOperand* destination) final;

  // ===========================================================================
  // =================== Jump table construction methods. ======================
  // ===========================================================================

  class JumpTable;
  // Adds a jump table that is emitted after the actual code. Returns a label
  // pointing to the beginning of the table. {targets} is assumed to be static
  // or zone allocated.
  Label* AddJumpTable(Label** targets, size_t target_count);
  // Emits a jump table.
  void AssembleJumpTable(Label** targets, size_t target_count);
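
  // A sketch of the intended pairing (hypothetical; the architecture-specific
  // table-switch code is the real caller):
  //
  //   Label* table = AddJumpTable(targets, count);  // registered for emission
  //   // ... emit an indexed branch through {table} ...
  //   // The table bytes are written out later via AssembleJumpTable.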

  // ===========================================================================
  // ================== Deoptimization table construction. =====================
  // ===========================================================================

  void RecordCallPosition(Instruction* instr);
  Handle<DeoptimizationData> GenerateDeoptimizationData();
  int DefineDeoptimizationLiteral(DeoptimizationLiteral literal);
  DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
                                                    size_t frame_state_offset);
  DeoptimizationExit* BuildTranslation(Instruction* instr, int pc_offset,
                                       size_t frame_state_offset,
                                       size_t immediate_args_count,
                                       OutputFrameStateCombine state_combine);
  void BuildTranslationForFrameStateDescriptor(
      FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
      OutputFrameStateCombine state_combine);
  void TranslateStateValueDescriptor(StateValueDescriptor* desc,
                                     StateValueList* nested,
                                     InstructionOperandIterator* iter);
  void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
                                             InstructionOperandIterator* iter);
  void AddTranslationForOperand(Instruction* instr, InstructionOperand* op,
                                MachineType type);
  void MarkLazyDeoptSite();

  void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
  DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
                                            size_t frame_state_offset,
                                            size_t immediate_args_count);

  // ===========================================================================

  struct HandlerInfo {
    Label* handler;
    int pc_offset;
  };

  friend class OutOfLineCode;
  friend class CodeGeneratorTester;

  Zone* zone_;
  Isolate* isolate_;
  FrameAccessState* frame_access_state_;
  Linkage* const linkage_;
  InstructionSequence* const instructions_;
  UnwindingInfoWriter unwinding_info_writer_;
  OptimizedCompilationInfo* const info_;
  Label* const labels_;
  Label return_label_;
  RpoNumber current_block_;
  SourcePosition start_source_position_;
  SourcePosition current_source_position_;
  TurboAssembler tasm_;
  GapResolver resolver_;
  SafepointTableBuilder safepoints_;
  ZoneVector<HandlerInfo> handlers_;
  int next_deoptimization_id_ = 0;
  int deopt_exit_start_offset_ = 0;
  int eager_soft_and_bailout_deopt_count_ = 0;
  int lazy_deopt_count_ = 0;
  ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
  ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
  size_t inlined_function_count_ = 0;
  TranslationArrayBuilder translations_;
  int handler_table_offset_ = 0;
  int last_lazy_deopt_pc_ = 0;

  // Deoptimization exits must be as small as possible, since their count grows
  // with function size. {jump_deoptimization_entry_labels_} is an optimization
  // to that effect, which extracts the (potentially large) instruction
  // sequence for the final jump to the deoptimization entry into a single spot
  // per Code object. All deopt exits can then near-call to this label. Note:
  // not used on all architectures.
  Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
  Label jump_deoptimization_or_resume_entry_labels_[kDeoptimizeReasonCount];

  // The maximal combined height of all frames produced upon deoptimization,
  // and the maximal number of pushed arguments for function calls. Applied as
  // an offset to the first stack check of an optimized function.
  const size_t max_unoptimized_frame_height_;
  const size_t max_pushed_argument_count_;

  // kArchCallCFunction could be reached either:
  //   kArchCallCFunction;
  // or:
  //   kArchSaveCallerRegisters;
  //   kArchCallCFunction;
  //   kArchRestoreCallerRegisters;
  // The boolean is used to distinguish the two cases. In the latter case, we
  // also need to decide if FP registers need to be saved, which is controlled
  // by fp_mode_.
  bool caller_registers_saved_;
  SaveFPRegsMode fp_mode_;

  JumpTable* jump_tables_;
  OutOfLineCode* ools_;
  base::Optional<OsrHelper> osr_helper_;
  int osr_pc_offset_;
  int optimized_out_literal_id_;
  SourcePositionTableBuilder source_position_table_builder_;
  ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
  CodeGenResult result_;
  ZoneVector<int> block_starts_;
  TurbolizerCodeOffsetsInfo offsets_info_;
  ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;

  const char* debug_name_ = nullptr;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_CODE_GENERATOR_H_