// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/pipeline.h"

#include <fstream>  // NOLINT(readability/streams)
#include <iostream>
#include <memory>
#include <sstream>

#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/builtins/profile-data-reader.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
#include "src/compiler/add-type-assertions-reducer.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/jump-threading.h"
#include "src/compiler/backend/mid-tier-register-allocator.h"
#include "src/compiler/backend/move-optimizer.h"
#include "src/compiler/backend/register-allocator-verifier.h"
#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/csa-load-elimination.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/decompression-optimizer.h"
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduled-machine-lowering.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/serializer-for-background-compilation.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/wasm-compiler.h"
#include "src/compiler/zone-stats.h"
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/disassembler.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/shared-function-info.h"
#include "src/parsing/parse-info.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"

namespace v8 {
namespace internal {
namespace compiler {

static constexpr char kCodegenZoneName[] = "codegen-zone";
static constexpr char kGraphZoneName[] = "graph-zone";
static constexpr char kInstructionZoneName[] = "instruction-zone";
static constexpr char kMachineGraphVerifierZoneName[] =
    "machine-graph-verifier-zone";
static constexpr char kPipelineCompilationJobZoneName[] =
    "pipeline-compilation-job-zone";
static constexpr char kRegisterAllocationZoneName[] =
    "register-allocation-zone";
static constexpr char kRegisterAllocatorVerifierZoneName[] =
    "register-allocator-verifier-zone";
namespace {

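// Walks the context chain of {closure} and returns the closest enclosing
// module context together with its distance from the function's context, or
// Nothing if no module context is found before the native context.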
Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
  Context current = closure->context();
  size_t distance = 0;
  while (!current.IsNativeContext()) {
    if (current.IsModuleContext()) {
      return Just(
          OuterContext(handle(current, current.GetIsolate()), distance));
    }
    current = current.previous();
    distance++;
  }
  return Nothing<OuterContext>();
}

}  // anonymous namespace

class PipelineData {
 public:
  // For main entry point.
  PipelineData(ZoneStats* zone_stats, Isolate* isolate,
               OptimizedCompilationInfo* info,
               PipelineStatistics* pipeline_statistics,
               bool is_concurrent_inlining)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        may_have_unverifiable_graph_(false),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        roots_relative_addressing_enabled_(
            !isolate->serializer_enabled() &&
            !isolate->IsGeneratingEmbeddedBuiltins()),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        broker_(new JSHeapBroker(isolate_, info_->zone(),
                                 info_->trace_heap_broker(),
                                 is_concurrent_inlining, info->code_kind())),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {
    PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData");
    graph_ = graph_zone_->New<Graph>(graph_zone_);
    source_positions_ = graph_zone_->New<SourcePositionTable>(graph_);
    node_origins_ = info->trace_turbo_json()
                        ? graph_zone_->New<NodeOriginTable>(graph_)
                        : nullptr;
    simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
    machine_ = graph_zone_->New<MachineOperatorBuilder>(
        graph_zone_, MachineType::PointerRepresentation(),
        InstructionSelector::SupportedMachineOperatorFlags(),
        InstructionSelector::AlignmentRequirements());
    common_ = graph_zone_->New<CommonOperatorBuilder>(graph_zone_);
    javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
    jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_, javascript_,
                                         simplified_, machine_);
    dependencies_ =
        info_->zone()->New<CompilationDependencies>(broker_, info_->zone());
  }

  // For WebAssembly compile entry point.
  PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine,
               OptimizedCompilationInfo* info, MachineGraph* mcgraph,
               PipelineStatistics* pipeline_statistics,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins,
               const AssemblerOptions& assembler_options)
      : isolate_(nullptr),
        wasm_engine_(wasm_engine),
        allocator_(wasm_engine->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        may_have_unverifiable_graph_(false),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        graph_(mcgraph->graph()),
        source_positions_(source_positions),
        node_origins_(node_origins),
        machine_(mcgraph->machine()),
        common_(mcgraph->common()),
        mcgraph_(mcgraph),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(assembler_options) {}

  // For CodeStubAssembler and machine graph testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, AccountingAllocator* allocator, Graph* graph,
               JSGraph* jsgraph, Schedule* schedule,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
               const AssemblerOptions& assembler_options,
               const ProfileDataFromFile* profile_data)
      : isolate_(isolate),
        wasm_engine_(isolate_->wasm_engine()),
        allocator_(allocator),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        graph_(graph),
        source_positions_(source_positions),
        node_origins_(node_origins),
        schedule_(schedule),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        jump_optimization_info_(jump_opt),
        assembler_options_(assembler_options),
        profile_data_(profile_data) {
    if (jsgraph) {
      jsgraph_ = jsgraph;
      simplified_ = jsgraph->simplified();
      machine_ = jsgraph->machine();
      common_ = jsgraph->common();
      javascript_ = jsgraph->javascript();
    } else {
      simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
      machine_ = graph_zone_->New<MachineOperatorBuilder>(
          graph_zone_, MachineType::PointerRepresentation(),
          InstructionSelector::SupportedMachineOperatorFlags(),
          InstructionSelector::AlignmentRequirements());
      common_ = graph_zone_->New<CommonOperatorBuilder>(graph_zone_);
      javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
      jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_,
                                           javascript_, simplified_, machine_);
    }
  }

  // For register allocation testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, InstructionSequence* sequence)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(sequence->zone()),
        sequence_(sequence),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {}

  ~PipelineData() {
    // Must happen before zones are destroyed.
    delete code_generator_;
    code_generator_ = nullptr;
    DeleteTyper();
    DeleteRegisterAllocationZone();
    DeleteInstructionZone();
    DeleteCodegenZone();
    DeleteGraphZone();
  }

  PipelineData(const PipelineData&) = delete;
  PipelineData& operator=(const PipelineData&) = delete;

  Isolate* isolate() const { return isolate_; }
  AccountingAllocator* allocator() const { return allocator_; }
  OptimizedCompilationInfo* info() const { return info_; }
  ZoneStats* zone_stats() const { return zone_stats_; }
  CompilationDependencies* dependencies() const { return dependencies_; }
  PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }
  bool compilation_failed() const { return compilation_failed_; }
  void set_compilation_failed() { compilation_failed_ = true; }

  bool verify_graph() const { return verify_graph_; }
  void set_verify_graph(bool value) { verify_graph_ = value; }

  MaybeHandle<Code> code() { return code_; }
  void set_code(MaybeHandle<Code> code) {
    DCHECK(code_.is_null());
    code_ = code;
  }

  CodeGenerator* code_generator() const { return code_generator_; }

  // RawMachineAssembler generally produces graphs which cannot be verified.
  bool MayHaveUnverifiableGraph() const { return may_have_unverifiable_graph_; }

  Zone* graph_zone() const { return graph_zone_; }
  Graph* graph() const { return graph_; }
  SourcePositionTable* source_positions() const { return source_positions_; }
  NodeOriginTable* node_origins() const { return node_origins_; }
  MachineOperatorBuilder* machine() const { return machine_; }
  CommonOperatorBuilder* common() const { return common_; }
  JSOperatorBuilder* javascript() const { return javascript_; }
  JSGraph* jsgraph() const { return jsgraph_; }
  MachineGraph* mcgraph() const { return mcgraph_; }
  Handle<NativeContext> native_context() const {
    return handle(info()->native_context(), isolate());
  }
  Handle<JSGlobalObject> global_object() const {
    return handle(info()->global_object(), isolate());
  }

  JSHeapBroker* broker() const { return broker_; }
  std::unique_ptr<JSHeapBroker> ReleaseBroker() {
    std::unique_ptr<JSHeapBroker> broker(broker_);
    broker_ = nullptr;
    return broker;
  }

  Schedule* schedule() const { return schedule_; }
  void set_schedule(Schedule* schedule) {
    DCHECK(!schedule_);
    schedule_ = schedule;
  }
  void reset_schedule() { schedule_ = nullptr; }

  Zone* instruction_zone() const { return instruction_zone_; }
  Zone* codegen_zone() const { return codegen_zone_; }
  InstructionSequence* sequence() const { return sequence_; }
  Frame* frame() const { return frame_; }

  Zone* register_allocation_zone() const { return register_allocation_zone_; }

  RegisterAllocationData* register_allocation_data() const {
    return register_allocation_data_;
  }
  TopTierRegisterAllocationData* top_tier_register_allocation_data() const {
    return TopTierRegisterAllocationData::cast(register_allocation_data_);
  }
  MidTierRegisterAllocationData* mid_tier_register_allocator_data() const {
    return MidTierRegisterAllocationData::cast(register_allocation_data_);
  }

  std::string const& source_position_output() const {
    return source_position_output_;
  }
  void set_source_position_output(std::string const& source_position_output) {
    source_position_output_ = source_position_output;
  }

  JumpOptimizationInfo* jump_optimization_info() const {
    return jump_optimization_info_;
  }

  const AssemblerOptions& assembler_options() const {
    return assembler_options_;
  }

  void ChooseSpecializationContext() {
    if (info()->function_context_specializing()) {
      DCHECK(info()->has_context());
      specialization_context_ =
          Just(OuterContext(handle(info()->context(), isolate()), 0));
    } else {
      specialization_context_ = GetModuleContext(info()->closure());
    }
  }

  Maybe<OuterContext> specialization_context() const {
    return specialization_context_;
  }

  size_t* address_of_max_unoptimized_frame_height() {
    return &max_unoptimized_frame_height_;
  }
  size_t max_unoptimized_frame_height() const {
    return max_unoptimized_frame_height_;
  }
  size_t* address_of_max_pushed_argument_count() {
    return &max_pushed_argument_count_;
  }
  size_t max_pushed_argument_count() const {
    return max_pushed_argument_count_;
  }

  CodeTracer* GetCodeTracer() const {
    return wasm_engine_ == nullptr ? isolate_->GetCodeTracer()
                                   : wasm_engine_->GetCodeTracer();
  }

  Typer* CreateTyper() {
    DCHECK_NULL(typer_);
    typer_ =
        new Typer(broker(), typer_flags_, graph(), &info()->tick_counter());
    return typer_;
  }

  void AddTyperFlag(Typer::Flag flag) {
    DCHECK_NULL(typer_);
    typer_flags_ |= flag;
  }

  void DeleteTyper() {
    delete typer_;
    typer_ = nullptr;
  }

  void DeleteGraphZone() {
    if (graph_zone_ == nullptr) return;
    graph_zone_scope_.Destroy();
    graph_zone_ = nullptr;
    graph_ = nullptr;
    source_positions_ = nullptr;
    node_origins_ = nullptr;
    simplified_ = nullptr;
    machine_ = nullptr;
    common_ = nullptr;
    javascript_ = nullptr;
    jsgraph_ = nullptr;
    mcgraph_ = nullptr;
    schedule_ = nullptr;
  }

  void DeleteInstructionZone() {
    if (instruction_zone_ == nullptr) return;
    instruction_zone_scope_.Destroy();
    instruction_zone_ = nullptr;
    sequence_ = nullptr;
  }

  void DeleteCodegenZone() {
    if (codegen_zone_ == nullptr) return;
    codegen_zone_scope_.Destroy();
    codegen_zone_ = nullptr;
    dependencies_ = nullptr;
    delete broker_;
    broker_ = nullptr;
    frame_ = nullptr;
  }

  void DeleteRegisterAllocationZone() {
    if (register_allocation_zone_ == nullptr) return;
    register_allocation_zone_scope_.Destroy();
    register_allocation_zone_ = nullptr;
    register_allocation_data_ = nullptr;
  }

  void InitializeInstructionSequence(const CallDescriptor* call_descriptor) {
    DCHECK_NULL(sequence_);
    InstructionBlocks* instruction_blocks =
        InstructionSequence::InstructionBlocksFor(instruction_zone(),
                                                  schedule());
    sequence_ = instruction_zone()->New<InstructionSequence>(
        isolate(), instruction_zone(), instruction_blocks);
    if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) {
      sequence_->instruction_blocks()[0]->mark_needs_frame();
    } else {
      DCHECK_EQ(0u, call_descriptor->CalleeSavedFPRegisters());
      DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters());
    }
  }

  void InitializeFrameData(CallDescriptor* call_descriptor) {
    DCHECK_NULL(frame_);
    int fixed_frame_size = 0;
    if (call_descriptor != nullptr) {
      fixed_frame_size =
          call_descriptor->CalculateFixedFrameSize(info()->code_kind());
    }
    frame_ = codegen_zone()->New<Frame>(fixed_frame_size);
    if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame());
  }

  void InitializeTopTierRegisterAllocationData(
      const RegisterConfiguration* config, CallDescriptor* call_descriptor,
      RegisterAllocationFlags flags) {
    DCHECK_NULL(register_allocation_data_);
    register_allocation_data_ =
        register_allocation_zone()->New<TopTierRegisterAllocationData>(
            config, register_allocation_zone(), frame(), sequence(), flags,
            &info()->tick_counter(), debug_name());
  }

  void InitializeMidTierRegisterAllocationData(
      const RegisterConfiguration* config, CallDescriptor* call_descriptor) {
    DCHECK_NULL(register_allocation_data_);
    register_allocation_data_ =
        register_allocation_zone()->New<MidTierRegisterAllocationData>(
            config, register_allocation_zone(), frame(), sequence(),
            &info()->tick_counter(), debug_name());
  }

  void InitializeOsrHelper() {
    DCHECK(!osr_helper_.has_value());
    osr_helper_.emplace(info());
  }

  void set_start_source_position(int position) {
    DCHECK_EQ(start_source_position_, kNoSourcePosition);
    start_source_position_ = position;
  }

  void InitializeCodeGenerator(Linkage* linkage,
                               std::unique_ptr<AssemblerBuffer> buffer) {
    DCHECK_NULL(code_generator_);
    code_generator_ = new CodeGenerator(
        codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
        osr_helper_, start_source_position_, jump_optimization_info_,
        info()->GetPoisoningMitigationLevel(), assembler_options_,
        info_->builtin_index(), max_unoptimized_frame_height(),
        max_pushed_argument_count(), std::move(buffer),
        FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr);
  }

  void BeginPhaseKind(const char* phase_kind_name) {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->BeginPhaseKind(phase_kind_name);
    }
  }

  void EndPhaseKind() {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->EndPhaseKind();
    }
  }

  const char* debug_name() const { return debug_name_.get(); }

  bool roots_relative_addressing_enabled() {
    return roots_relative_addressing_enabled_;
  }

  const ProfileDataFromFile* profile_data() const { return profile_data_; }
  void set_profile_data(const ProfileDataFromFile* profile_data) {
    profile_data_ = profile_data;
  }

  // RuntimeCallStats that is only available during job execution but not
  // finalization.
  // TODO(delphick): Currently even during execution this can be nullptr, due to
  // JSToWasmWrapperCompilationUnit::Execute. Once a table can be extracted
  // there, this method can DCHECK that it is never nullptr.
  RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
  void set_runtime_call_stats(RuntimeCallStats* stats) {
    runtime_call_stats_ = stats;
  }

 private:
  Isolate* const isolate_;
  wasm::WasmEngine* const wasm_engine_ = nullptr;
  AccountingAllocator* const allocator_;
  OptimizedCompilationInfo* const info_;
  std::unique_ptr<char[]> debug_name_;
  bool may_have_unverifiable_graph_ = true;
  ZoneStats* const zone_stats_;
  PipelineStatistics* pipeline_statistics_ = nullptr;
  bool compilation_failed_ = false;
  bool verify_graph_ = false;
  int start_source_position_ = kNoSourcePosition;
  base::Optional<OsrHelper> osr_helper_;
  MaybeHandle<Code> code_;
  CodeGenerator* code_generator_ = nullptr;
  Typer* typer_ = nullptr;
  Typer::Flags typer_flags_ = Typer::kNoFlags;
  bool roots_relative_addressing_enabled_ = false;

  // All objects in the following group of fields are allocated in graph_zone_.
  // They are all set to nullptr when the graph_zone_ is destroyed.
  ZoneStats::Scope graph_zone_scope_;
  Zone* graph_zone_ = nullptr;
  Graph* graph_ = nullptr;
  SourcePositionTable* source_positions_ = nullptr;
  NodeOriginTable* node_origins_ = nullptr;
  SimplifiedOperatorBuilder* simplified_ = nullptr;
  MachineOperatorBuilder* machine_ = nullptr;
  CommonOperatorBuilder* common_ = nullptr;
  JSOperatorBuilder* javascript_ = nullptr;
  JSGraph* jsgraph_ = nullptr;
  MachineGraph* mcgraph_ = nullptr;
  Schedule* schedule_ = nullptr;

  // All objects in the following group of fields are allocated in
  // instruction_zone_. They are all set to nullptr when the instruction_zone_
  // is destroyed.
  ZoneStats::Scope instruction_zone_scope_;
  Zone* instruction_zone_;
  InstructionSequence* sequence_ = nullptr;

  // All objects in the following group of fields are allocated in
  // codegen_zone_. They are all set to nullptr when the codegen_zone_
  // is destroyed.
  ZoneStats::Scope codegen_zone_scope_;
  Zone* codegen_zone_;
  CompilationDependencies* dependencies_ = nullptr;
  JSHeapBroker* broker_ = nullptr;
  Frame* frame_ = nullptr;

  // All objects in the following group of fields are allocated in
  // register_allocation_zone_. They are all set to nullptr when the zone is
  // destroyed.
  ZoneStats::Scope register_allocation_zone_scope_;
  Zone* register_allocation_zone_;
  RegisterAllocationData* register_allocation_data_ = nullptr;

  // Source position output for --trace-turbo.
  std::string source_position_output_;

  JumpOptimizationInfo* jump_optimization_info_ = nullptr;
  AssemblerOptions assembler_options_;
  Maybe<OuterContext> specialization_context_ = Nothing<OuterContext>();

  // The maximal combined height of all inlined frames in their unoptimized
  // state, and the maximal number of arguments pushed during function calls.
  // Calculated during instruction selection, applied during code generation.
  size_t max_unoptimized_frame_height_ = 0;
  size_t max_pushed_argument_count_ = 0;

  RuntimeCallStats* runtime_call_stats_ = nullptr;
  const ProfileDataFromFile* profile_data_ = nullptr;
};

class PipelineImpl final {
 public:
  explicit PipelineImpl(PipelineData* data) : data_(data) {}

  // Helpers for executing pipeline phases.
  template <typename Phase, typename... Args>
  void Run(Args&&... args);

  // Step A.1. Serialize the data needed for the compilation front-end.
  void Serialize();

  // Step A.2. Run the graph creation and initial optimization passes.
  bool CreateGraph();

  // Step B. Run the concurrent optimization passes.
  bool OptimizeGraph(Linkage* linkage);

  // Alternative step B. Run minimal concurrent optimization passes for
  // mid-tier.
  bool OptimizeGraphForMidTier(Linkage* linkage);

  // Substep B.1. Produce a scheduled graph.
  void ComputeScheduledGraph();

  // Substep B.2. Select instructions from a scheduled graph.
  bool SelectInstructions(Linkage* linkage);

  // Step C. Run the code assembly pass.
  void AssembleCode(Linkage* linkage,
                    std::unique_ptr<AssemblerBuffer> buffer = {});

  // Step D. Run the code finalization pass.
  MaybeHandle<Code> FinalizeCode(bool retire_broker = true);

  // Step E. Install any code dependencies.
  bool CommitDependencies(Handle<Code> code);

  void VerifyGeneratedCodeIsIdempotent();
  void RunPrintAndVerify(const char* phase, bool untyped = false);
  bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor);
  MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
  void AllocateRegistersForTopTier(const RegisterConfiguration* config,
                                   CallDescriptor* call_descriptor,
                                   bool run_verifier);
  void AllocateRegistersForMidTier(const RegisterConfiguration* config,
                                   CallDescriptor* call_descriptor,
                                   bool run_verifier);

  OptimizedCompilationInfo* info() const;
  Isolate* isolate() const;
  CodeGenerator* code_generator() const;

 private:
  PipelineData* const data_;
};
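
// A full TurboFan job drives these steps in order; see PipelineCompilationJob
// below: Serialize() and (when not inlining concurrently) CreateGraph() run in
// PrepareJobImpl, OptimizeGraph()/OptimizeGraphForMidTier() and AssembleCode()
// run in ExecuteJobImpl, and FinalizeCode() plus CommitDependencies() run in
// FinalizeJobImpl.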

namespace {

class SourcePositionWrapper final : public Reducer {
 public:
  SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
      : reducer_(reducer), table_(table) {}
  ~SourcePositionWrapper() final = default;
  SourcePositionWrapper(const SourcePositionWrapper&) = delete;
  SourcePositionWrapper& operator=(const SourcePositionWrapper&) = delete;

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    SourcePosition const pos = table_->GetSourcePosition(node);
    SourcePositionTable::Scope position(table_, pos);
    return reducer_->Reduce(node);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  SourcePositionTable* const table_;
};

class NodeOriginsWrapper final : public Reducer {
 public:
  NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
      : reducer_(reducer), table_(table) {}
  ~NodeOriginsWrapper() final = default;
  NodeOriginsWrapper(const NodeOriginsWrapper&) = delete;
  NodeOriginsWrapper& operator=(const NodeOriginsWrapper&) = delete;

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    NodeOriginTable::Scope position(table_, reducer_name(), node);
    return reducer_->Reduce(node);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  NodeOriginTable* const table_;
};

class PipelineRunScope {
 public:
  PipelineRunScope(
      PipelineData* data, const char* phase_name,
      RuntimeCallCounterId runtime_call_counter_id,
      RuntimeCallStats::CounterMode counter_mode = RuntimeCallStats::kExact)
      : phase_scope_(data->pipeline_statistics(), phase_name),
        zone_scope_(data->zone_stats(), phase_name),
        origin_scope_(data->node_origins(), phase_name),
        runtime_call_timer_scope(data->runtime_call_stats(),
                                 runtime_call_counter_id, counter_mode) {
    DCHECK_NOT_NULL(phase_name);
  }

  Zone* zone() { return zone_scope_.zone(); }

 private:
  PhaseScope phase_scope_;
  ZoneStats::Scope zone_scope_;
  NodeOriginTable::PhaseScope origin_scope_;
  RuntimeCallTimerScope runtime_call_timer_scope;
};
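
// Each pipeline phase runs inside a PipelineRunScope. As a rough sketch
// (illustrative only; the actual definition of PipelineImpl::Run<Phase> lives
// further down in this file, and the phase structs are assumed to supply
// phase_name(), kRuntimeCallCounterId and kCounterMode):
//
//   template <typename Phase, typename... Args>
//   void PipelineImpl::Run(Args&&... args) {
//     PipelineRunScope scope(this->data_, Phase::phase_name(),
//                            Phase::kRuntimeCallCounterId,
//                            Phase::kCounterMode);
//     Phase phase;
//     phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
//   }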

// LocalIsolateScope encapsulates the phase where persistent handles are
// attached to the LocalHeap inside {local_isolate}.
class LocalIsolateScope {
 public:
  explicit LocalIsolateScope(JSHeapBroker* broker,
                             OptimizedCompilationInfo* info,
                             LocalIsolate* local_isolate)
      : broker_(broker), info_(info) {
    broker_->AttachLocalIsolate(info_, local_isolate);
    info_->tick_counter().AttachLocalHeap(local_isolate->heap());
  }

  ~LocalIsolateScope() {
    info_->tick_counter().DetachLocalHeap();
    broker_->DetachLocalIsolate(info_);
  }

 private:
  JSHeapBroker* broker_;
  OptimizedCompilationInfo* info_;
};

void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
                         int source_id, Handle<SharedFunctionInfo> shared) {
  if (!shared->script().IsUndefined(isolate)) {
    Handle<Script> script(Script::cast(shared->script()), isolate);

    if (!script->source().IsUndefined(isolate)) {
      CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
      Object source_name = script->name();
      auto& os = tracing_scope.stream();
      os << "--- FUNCTION SOURCE (";
      if (source_name.IsString()) {
        os << String::cast(source_name).ToCString().get() << ":";
      }
      os << shared->DebugName().ToCString().get() << ") id{";
      os << info->optimization_id() << "," << source_id << "} start{";
      os << shared->StartPosition() << "} ---\n";
      {
        DisallowHeapAllocation no_allocation;
        int start = shared->StartPosition();
        int len = shared->EndPosition() - start;
        SubStringRange source(String::cast(script->source()), no_allocation,
                              start, len);
        for (const auto& c : source) {
          os << AsReversiblyEscapedUC16(c);
        }
      }

      os << "\n--- END ---\n";
    }
  }
}

// Print information for the given inlining: which function was inlined and
// where the inlining occurred.
void PrintInlinedFunctionInfo(
    OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
    int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
  CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
  auto& os = tracing_scope.stream();
  os << "INLINE (" << h.shared_info->DebugName().ToCString().get() << ") id{"
     << info->optimization_id() << "," << source_id << "} AS " << inlining_id
     << " AT ";
  const SourcePosition position = h.position.position;
  if (position.IsKnown()) {
    os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
  } else {
    os << "<?>";
  }
  os << std::endl;
}

// Print the source of all functions that participated in this optimizing
// compilation. For inlined functions, print the source position of their
// inlining.
void PrintParticipatingSource(OptimizedCompilationInfo* info,
                              Isolate* isolate) {
  SourceIdAssigner id_assigner(info->inlined_functions().size());
  PrintFunctionSource(info, isolate, -1, info->shared_info());
  const auto& inlined = info->inlined_functions();
  for (unsigned id = 0; id < inlined.size(); id++) {
    const int source_id = id_assigner.GetIdFor(inlined[id].shared_info);
    PrintFunctionSource(info, isolate, source_id, inlined[id].shared_info);
    PrintInlinedFunctionInfo(info, isolate, source_id, id, inlined[id]);
  }
}

// Print the code after compiling it.
void PrintCode(Isolate* isolate, Handle<Code> code,
               OptimizedCompilationInfo* info) {
  if (FLAG_print_opt_source && info->IsOptimizing()) {
    PrintParticipatingSource(info, isolate);
  }

#ifdef ENABLE_DISASSEMBLER
  const bool print_code =
      FLAG_print_code ||
      (info->IsOptimizing() && FLAG_print_opt_code &&
       info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)) ||
      (info->IsNativeContextIndependent() && FLAG_print_nci_code);
  if (print_code) {
    std::unique_ptr<char[]> debug_name = info->GetDebugName();
    CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
    auto& os = tracing_scope.stream();

    // Print the source code if available.
    const bool print_source = info->IsOptimizing();
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      if (shared->script().IsScript() &&
          !Script::cast(shared->script()).source().IsUndefined(isolate)) {
        os << "--- Raw source ---\n";
        StringCharacterStream stream(
            String::cast(Script::cast(shared->script()).source()),
            shared->StartPosition());
        // fun->end_position() points to the last character in the stream. We
        // need to compensate by adding one to calculate the length.
        int source_len = shared->EndPosition() - shared->StartPosition() + 1;
        for (int i = 0; i < source_len; i++) {
          if (stream.HasMore()) {
            os << AsReversiblyEscapedUC16(stream.GetNext());
          }
        }
        os << "\n\n";
      }
    }
    if (info->IsOptimizing()) {
      os << "--- Optimized code ---\n"
         << "optimization_id = " << info->optimization_id() << "\n";
    } else {
      os << "--- Code ---\n";
    }
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      os << "source_position = " << shared->StartPosition() << "\n";
    }
    code->Disassemble(debug_name.get(), os, isolate);
    os << "--- End code ---\n";
  }
#endif  // ENABLE_DISASSEMBLER
}

void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
                            Schedule* schedule, const char* phase_name) {
  if (info->trace_turbo_json()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    TurboJsonFile json_of(info, std::ios_base::app);
    json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"schedule\""
            << ",\"data\":\"";
    std::stringstream schedule_stream;
    schedule_stream << *schedule;
    std::string schedule_string(schedule_stream.str());
    for (const auto& c : schedule_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\"},\n";
  }
  if (info->trace_turbo_graph() || FLAG_trace_turbo_scheduler) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream()
        << "-- Schedule --------------------------------------\n"
        << *schedule;
  }

  if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
}

void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
                Reducer* reducer) {
  if (data->info()->source_positions()) {
    SourcePositionWrapper* const wrapper =
        data->graph_zone()->New<SourcePositionWrapper>(
            reducer, data->source_positions());
    reducer = wrapper;
  }
  if (data->info()->trace_turbo_json()) {
    NodeOriginsWrapper* const wrapper =
        data->graph_zone()->New<NodeOriginsWrapper>(reducer,
                                                    data->node_origins());
    reducer = wrapper;
  }

  graph_reducer->AddReducer(reducer);
}

PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
                                             OptimizedCompilationInfo* info,
                                             Isolate* isolate,
                                             ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  bool tracing_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.turbofan"),
                                     &tracing_enabled);
  if (tracing_enabled || FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics =
        new PipelineStatistics(info, isolate->GetTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("V8.TFInitializing");
  }

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    json_of << "{\"function\" : ";
    JsonPrintFunctionSource(json_of, -1, info->GetDebugName(), script, isolate,
                            info->shared_info());
    json_of << ",\n\"phases\":[";
  }

  return pipeline_statistics;
}

PipelineStatistics* CreatePipelineStatistics(
    wasm::WasmEngine* wasm_engine, wasm::FunctionBody function_body,
    const wasm::WasmModule* wasm_module, OptimizedCompilationInfo* info,
    ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  bool tracing_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"), &tracing_enabled);
  if (tracing_enabled || FLAG_turbo_stats_wasm) {
    pipeline_statistics = new PipelineStatistics(
        info, wasm_engine->GetOrCreateTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("V8.WasmInitializing");
  }

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    std::unique_ptr<char[]> function_name = info->GetDebugName();
    json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
    AccountingAllocator allocator;
    std::ostringstream disassembly;
    std::vector<int> source_positions;
    wasm::PrintRawWasmCode(&allocator, function_body, wasm_module,
                           wasm::kPrintLocals, disassembly, &source_positions);
    for (const auto& c : disassembly.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\",\n\"sourceLineToBytecodePosition\" : [";
    bool insert_comma = false;
    for (auto val : source_positions) {
      if (insert_comma) {
        json_of << ", ";
      }
      json_of << val;
      insert_comma = true;
    }
    json_of << "],\n\"phases\":[";
  }

  return pipeline_statistics;
}

}  // namespace

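// A TurboFan compilation job proceeds in three phases: PrepareJobImpl on the
// main thread, ExecuteJobImpl (potentially on a background thread), and
// FinalizeJobImpl back on the main thread.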
class PipelineCompilationJob final : public OptimizedCompilationJob {
 public:
  PipelineCompilationJob(Isolate* isolate,
                         Handle<SharedFunctionInfo> shared_info,
                         Handle<JSFunction> function, BailoutId osr_offset,
                         JavaScriptFrame* osr_frame, CodeKind code_kind);
  ~PipelineCompilationJob() final;
  PipelineCompilationJob(const PipelineCompilationJob&) = delete;
  PipelineCompilationJob& operator=(const PipelineCompilationJob&) = delete;

 protected:
  Status PrepareJobImpl(Isolate* isolate) final;
  Status ExecuteJobImpl(RuntimeCallStats* stats,
                        LocalIsolate* local_isolate) final;
  Status FinalizeJobImpl(Isolate* isolate) final;

  // Registers weak objects embedded in the optimized code.
  void RegisterWeakObjectsInOptimizedCode(Isolate* isolate,
                                          Handle<NativeContext> context,
                                          Handle<Code> code);

 private:
  Zone zone_;
  ZoneStats zone_stats_;
  OptimizedCompilationInfo compilation_info_;
  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage* linkage_;
};

PipelineCompilationJob::PipelineCompilationJob(
    Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
    Handle<JSFunction> function, BailoutId osr_offset,
    JavaScriptFrame* osr_frame, CodeKind code_kind)
    // Note that the OptimizedCompilationInfo is not initialized at the time
    // we pass it to the CompilationJob constructor, but it is not
    // dereferenced there.
    : OptimizedCompilationJob(&compilation_info_, "TurboFan"),
      zone_(function->GetIsolate()->allocator(),
            kPipelineCompilationJobZoneName),
      zone_stats_(function->GetIsolate()->allocator()),
      compilation_info_(&zone_, function->GetIsolate(), shared_info, function,
                        code_kind),
      pipeline_statistics_(CreatePipelineStatistics(
          handle(Script::cast(shared_info->script()), isolate),
          compilation_info(), function->GetIsolate(), &zone_stats_)),
      data_(&zone_stats_, function->GetIsolate(), compilation_info(),
            pipeline_statistics_.get(),
            FLAG_concurrent_inlining && osr_offset.IsNone()),
      pipeline_(&data_),
      linkage_(nullptr) {
  compilation_info_.SetOptimizingForOsr(osr_offset, osr_frame);
}

PipelineCompilationJob::~PipelineCompilationJob() = default;

namespace {
// Ensure that the RuntimeCallStats table is set on the PipelineData for the
// duration of the job phase and unset immediately afterwards. Each job
// needs to set the correct RuntimeCallStats table depending on whether it
// is running on a background or foreground thread.
class PipelineJobScope {
 public:
  PipelineJobScope(PipelineData* data, RuntimeCallStats* stats) : data_(data) {
    data_->set_runtime_call_stats(stats);
  }

  ~PipelineJobScope() { data_->set_runtime_call_stats(nullptr); }

 private:
  PipelineData* data_;
};
}  // namespace

PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
    Isolate* isolate) {
  // Ensure that the RuntimeCallStats table of the main thread is available for
  // phases happening during PrepareJob.
  PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());

  if (compilation_info()->bytecode_array()->length() >
      FLAG_max_optimized_bytecode_size) {
    return AbortOptimization(BailoutReason::kFunctionTooBig);
  }

  if (!FLAG_always_opt && !compilation_info()->IsNativeContextIndependent()) {
    compilation_info()->set_bailout_on_uninitialized();
  }
  if (FLAG_turbo_loop_peeling) {
    compilation_info()->set_loop_peeling();
  }
  if (FLAG_turbo_inlining && !compilation_info()->IsTurboprop() &&
      !compilation_info()->IsNativeContextIndependent()) {
    compilation_info()->set_inlining();
  }

  // This is the bottleneck for computing and setting poisoning level in the
  // optimizing compiler.
  PoisoningMitigationLevel load_poisoning =
      PoisoningMitigationLevel::kDontPoison;
  if (FLAG_untrusted_code_mitigations) {
    // For full mitigations, this can be changed to
    // PoisoningMitigationLevel::kPoisonAll.
    load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly;
  }
  compilation_info()->SetPoisoningMitigationLevel(load_poisoning);

  if (FLAG_turbo_allocation_folding) {
    compilation_info()->set_allocation_folding();
  }

  // Determine whether to specialize the code for the function's context.
  // We can't do this in the case of OSR, because we want to cache the
  // generated code on the native context keyed on SharedFunctionInfo.
  // We also can't do this for native context independent code (yet).
  // TODO(mythria): Check if it is better to key the OSR cache on JSFunction and
  // allow context specialization for OSR code.
  if (compilation_info()->closure()->raw_feedback_cell().map() ==
          ReadOnlyRoots(isolate).one_closure_cell_map() &&
      !compilation_info()->is_osr() &&
      !compilation_info()->IsNativeContextIndependent() &&
      !compilation_info()->IsTurboprop()) {
    compilation_info()->set_function_context_specializing();
    data_.ChooseSpecializationContext();
  }

  if (compilation_info()->source_positions()) {
    SharedFunctionInfo::EnsureSourcePositionsAvailable(
        isolate, compilation_info()->shared_info());
  }

  data_.set_start_source_position(
      compilation_info()->shared_info()->StartPosition());

  linkage_ = compilation_info()->zone()->New<Linkage>(
      Linkage::ComputeIncoming(compilation_info()->zone(), compilation_info()));

  if (compilation_info()->is_osr()) data_.InitializeOsrHelper();

  pipeline_.Serialize();

  if (!data_.broker()->is_concurrent_inlining()) {
    if (!pipeline_.CreateGraph()) {
      CHECK(!isolate->has_pending_exception());
      return AbortOptimization(BailoutReason::kGraphBuildingFailed);
    }
  }

  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
    RuntimeCallStats* stats, LocalIsolate* local_isolate) {
  // Ensure that the RuntimeCallStats table is only available during execution
  // and not during finalization as that might be on a different thread.
  PipelineJobScope scope(&data_, stats);
  LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
                                        local_isolate);

  if (data_.broker()->is_concurrent_inlining()) {
    if (!pipeline_.CreateGraph()) {
      return AbortOptimization(BailoutReason::kGraphBuildingFailed);
    }
  }

  // We selectively Unpark inside OptimizeGraph*.
  bool success;
  if (compilation_info_.code_kind() == CodeKind::TURBOPROP) {
    success = pipeline_.OptimizeGraphForMidTier(linkage_);
  } else {
    success = pipeline_.OptimizeGraph(linkage_);
  }
  if (!success) return FAILED;

  pipeline_.AssembleCode(linkage_);

  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
    Isolate* isolate) {
  // Ensure that the RuntimeCallStats table of the main thread is available for
  // phases happening during FinalizeJob.
1212   PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
1213   RuntimeCallTimerScope runtimeTimer(
1214       isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
1215   MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
1216   Handle<Code> code;
1217   if (!maybe_code.ToHandle(&code)) {
1218     if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
1219       return AbortOptimization(BailoutReason::kCodeGenerationFailed);
1220     }
1221     return FAILED;
1222   }
1223   if (!pipeline_.CommitDependencies(code)) {
1224     return RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
1225   }
1226 
1227   compilation_info()->SetCode(code);
1228   Handle<NativeContext> context(compilation_info()->native_context(), isolate);
1229   if (CodeKindCanDeoptimize(code->kind())) context->AddOptimizedCode(*code);
1230   RegisterWeakObjectsInOptimizedCode(isolate, context, code);
1231   return SUCCEEDED;
1232 }
1233 
1234 void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
1235     Isolate* isolate, Handle<NativeContext> context, Handle<Code> code) {
1236   std::vector<Handle<Map>> maps;
1237   DCHECK(code->is_optimized_code());
1238   {
1239     DisallowHeapAllocation no_gc;
1240     int const mode_mask = RelocInfo::EmbeddedObjectModeMask();
1241     for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
1242       DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
1243       if (code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
1244         Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
1245                                   isolate);
1246         if (object->IsMap()) {
1247           maps.push_back(Handle<Map>::cast(object));
1248         }
1249       }
1250     }
1251   }
1252   for (Handle<Map> map : maps) {
1253     isolate->heap()->AddRetainedMap(context, map);
1254   }
1255   code->set_can_have_weak_objects(true);
1256 }
1257 
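// Compilation job for wasm-related heap stubs (such as wrappers between JS and
// wasm). The graph is already fully built when the job is created, so the job
// starts in the kReadyToExecute state, PrepareJobImpl is never reached, and
// execution goes straight to scheduling, instruction selection, and assembly.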
1258 class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
1259  public:
1260   WasmHeapStubCompilationJob(Isolate* isolate, wasm::WasmEngine* wasm_engine,
1261                              CallDescriptor* call_descriptor,
1262                              std::unique_ptr<Zone> zone, Graph* graph,
1263                              CodeKind kind, std::unique_ptr<char[]> debug_name,
1264                              const AssemblerOptions& options,
1265                              SourcePositionTable* source_positions)
1266       // Note that the OptimizedCompilationInfo is not initialized at the time
1267       // we pass it to the CompilationJob constructor, but it is not
1268       // dereferenced there.
1269       : OptimizedCompilationJob(&info_, "TurboFan",
1270                                 CompilationJob::State::kReadyToExecute),
1271         debug_name_(std::move(debug_name)),
1272         info_(CStrVector(debug_name_.get()), graph->zone(), kind),
1273         call_descriptor_(call_descriptor),
1274         zone_stats_(zone->allocator()),
1275         zone_(std::move(zone)),
1276         graph_(graph),
1277         data_(&zone_stats_, &info_, isolate, wasm_engine->allocator(), graph_,
1278               nullptr, nullptr, source_positions,
1279               zone_->New<NodeOriginTable>(graph_), nullptr, options, nullptr),
1280         pipeline_(&data_),
1281         wasm_engine_(wasm_engine) {}
1282 
1283   WasmHeapStubCompilationJob(const WasmHeapStubCompilationJob&) = delete;
1284   WasmHeapStubCompilationJob& operator=(const WasmHeapStubCompilationJob&) =
1285       delete;
1286 
1287  protected:
1288   Status PrepareJobImpl(Isolate* isolate) final;
1289   Status ExecuteJobImpl(RuntimeCallStats* stats,
1290                         LocalIsolate* local_isolate) final;
1291   Status FinalizeJobImpl(Isolate* isolate) final;
1292 
1293  private:
1294   std::unique_ptr<char[]> debug_name_;
1295   OptimizedCompilationInfo info_;
1296   CallDescriptor* call_descriptor_;
1297   ZoneStats zone_stats_;
1298   std::unique_ptr<Zone> zone_;
1299   Graph* graph_;
1300   PipelineData data_;
1301   PipelineImpl pipeline_;
1302   wasm::WasmEngine* wasm_engine_;
1303 };
1304 
1305 // static
1306 std::unique_ptr<OptimizedCompilationJob>
1307 Pipeline::NewWasmHeapStubCompilationJob(
1308     Isolate* isolate, wasm::WasmEngine* wasm_engine,
1309     CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
1310     CodeKind kind, std::unique_ptr<char[]> debug_name,
1311     const AssemblerOptions& options, SourcePositionTable* source_positions) {
1312   return std::make_unique<WasmHeapStubCompilationJob>(
1313       isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind,
1314       std::move(debug_name), options, source_positions);
1315 }
1316 
1317 CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
1318     Isolate* isolate) {
1319   UNREACHABLE();
1320 }
1321 
1322 CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
1323     RuntimeCallStats* stats, LocalIsolate* local_isolate) {
1324   std::unique_ptr<PipelineStatistics> pipeline_statistics;
1325   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
1326     pipeline_statistics.reset(new PipelineStatistics(
1327         &info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_));
1328     pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
1329   }
1330   if (info_.trace_turbo_json() || info_.trace_turbo_graph()) {
1331     CodeTracer::StreamScope tracing_scope(data_.GetCodeTracer());
1332     tracing_scope.stream()
1333         << "---------------------------------------------------\n"
1334         << "Begin compiling method " << info_.GetDebugName().get()
1335         << " using TurboFan" << std::endl;
1336   }
1337   if (info_.trace_turbo_graph()) {  // Simple textual RPO.
1338     StdoutStream{} << "-- wasm stub " << CodeKindToString(info_.code_kind())
1339                    << " graph -- " << std::endl
1340                    << AsRPO(*data_.graph());
1341   }
1342 
1343   if (info_.trace_turbo_json()) {
1344     TurboJsonFile json_of(&info_, std::ios_base::trunc);
1345     json_of << "{\"function\":\"" << info_.GetDebugName().get()
1346             << "\", \"source\":\"\",\n\"phases\":[";
1347   }
1348   pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
1349   pipeline_.ComputeScheduledGraph();
1350   if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
1351     return CompilationJob::SUCCEEDED;
1352   }
1353   return CompilationJob::FAILED;
1354 }
1355 
1356 CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
1357     Isolate* isolate) {
1358   Handle<Code> code;
1359   if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
1360     V8::FatalProcessOutOfMemory(isolate,
1361                                 "WasmHeapStubCompilationJob::FinalizeJobImpl");
1362   }
1363   if (pipeline_.CommitDependencies(code)) {
1364     info_.SetCode(code);
1365 #ifdef ENABLE_DISASSEMBLER
1366     if (FLAG_print_opt_code) {
1367       CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
1368       code->Disassemble(compilation_info()->GetDebugName().get(),
1369                         tracing_scope.stream(), isolate);
1370     }
1371 #endif
1372     return SUCCEEDED;
1373   }
1374   return FAILED;
1375 }
1376 
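// Helper that runs a single pipeline phase: it opens a PipelineRunScope (which
// supplies the phase's temporary zone and scopes the RuntimeCallStats counter
// declared by the phase) and forwards any extra arguments to Phase::Run.
// For example, Run<TyperPhase>(&typer) would forward &typer as the Typer*
// parameter of TyperPhase::Run defined below.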
1377 template <typename Phase, typename... Args>
1378 void PipelineImpl::Run(Args&&... args) {
1379   PipelineRunScope scope(this->data_, Phase::phase_name(),
1380                          Phase::kRuntimeCallCounterId, Phase::kCounterMode);
1381   Phase phase;
1382   phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
1383 }
1384 
1385 #define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode)        \
1386   static const char* phase_name() { return "V8.TF" #Name; }     \
1387   static constexpr RuntimeCallCounterId kRuntimeCallCounterId = \
1388       RuntimeCallCounterId::kOptimize##Name;                    \
1389   static constexpr RuntimeCallStats::CounterMode kCounterMode = Mode;
1390 
1391 #define DECL_PIPELINE_PHASE_CONSTANTS(Name) \
1392   DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kThreadSpecific)
1393 
1394 #define DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Name) \
1395   DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kExact)
1396 
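// Each phase struct below uses one of the DECL_* macros above to declare its
// tracing name ("V8.TF<Name>") and its RuntimeCallStats counter. Phases that
// may run on a background thread use the thread-specific counter mode; phases
// that must run on the main thread use the exact mode.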
1397 struct GraphBuilderPhase {
1398   DECL_PIPELINE_PHASE_CONSTANTS(BytecodeGraphBuilder)
1399 
1400   void Run(PipelineData* data, Zone* temp_zone) {
1401     BytecodeGraphBuilderFlags flags;
1402     if (data->info()->analyze_environment_liveness()) {
1403       flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
1404     }
1405     if (data->info()->bailout_on_uninitialized()) {
1406       flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
1407     }
1408 
1409     JSFunctionRef closure(data->broker(), data->info()->closure());
1410     CallFrequency frequency(1.0f);
1411     BuildGraphFromBytecode(
1412         data->broker(), temp_zone, closure.shared(),
1413         closure.raw_feedback_cell(), data->info()->osr_offset(),
1414         data->jsgraph(), frequency, data->source_positions(),
1415         SourcePosition::kNotInlined, data->info()->code_kind(), flags,
1416         &data->info()->tick_counter());
1417   }
1418 };
1419 
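// The inlining phase builds one GraphReducer fixpoint combining dead-code and
// checkpoint elimination, JSCallReducer, context and native-context
// specialization (skipped for native-context-independent code), intrinsic
// lowering, and the JSInliningHeuristic when inlining is enabled.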
1420 struct InliningPhase {
1421   DECL_PIPELINE_PHASE_CONSTANTS(Inlining)
1422 
1423   void Run(PipelineData* data, Zone* temp_zone) {
1424     OptimizedCompilationInfo* info = data->info();
1425     GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
1426                                data->broker(), data->jsgraph()->Dead());
1427     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1428                                               data->common(), temp_zone);
1429     CheckpointElimination checkpoint_elimination(&graph_reducer);
1430     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1431                                          data->broker(), data->common(),
1432                                          data->machine(), temp_zone);
1433     JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
1434     if (data->info()->bailout_on_uninitialized()) {
1435       call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
1436     }
1437     JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
1438                                temp_zone, call_reducer_flags,
1439                                data->dependencies());
1440     JSContextSpecialization context_specialization(
1441         &graph_reducer, data->jsgraph(), data->broker(),
1442         data->specialization_context(),
1443         data->info()->function_context_specializing()
1444             ? data->info()->closure()
1445             : MaybeHandle<JSFunction>());
1446     JSNativeContextSpecialization::Flags flags =
1447         JSNativeContextSpecialization::kNoFlags;
1448     if (data->info()->bailout_on_uninitialized()) {
1449       flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
1450     }
1451     // Passing the OptimizedCompilationInfo's shared zone here as
1452     // JSNativeContextSpecialization allocates out-of-heap objects
1453     // that need to live until code generation.
1454     JSNativeContextSpecialization native_context_specialization(
1455         &graph_reducer, data->jsgraph(), data->broker(), flags,
1456         data->dependencies(), temp_zone, info->zone());
1457     JSInliningHeuristic inlining(&graph_reducer,
1458                                  temp_zone, data->info(), data->jsgraph(),
1459                                  data->broker(), data->source_positions());
1460 
1461     JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
1462                                            data->broker());
1463     AddReducer(data, &graph_reducer, &dead_code_elimination);
1464     AddReducer(data, &graph_reducer, &checkpoint_elimination);
1465     AddReducer(data, &graph_reducer, &common_reducer);
1466     if (!data->info()->IsNativeContextIndependent()) {
1467       AddReducer(data, &graph_reducer, &native_context_specialization);
1468       AddReducer(data, &graph_reducer, &context_specialization);
1469     }
1470     AddReducer(data, &graph_reducer, &intrinsic_lowering);
1471     AddReducer(data, &graph_reducer, &call_reducer);
1472     if (data->info()->inlining()) {
1473       AddReducer(data, &graph_reducer, &inlining);
1474     }
1475     graph_reducer.ReduceGraph();
1476     info->set_inlined_bytecode_size(inlining.total_inlined_bytecode_size());
1477   }
1478 };
1479 
1480 
1481 struct TyperPhase {
1482   DECL_PIPELINE_PHASE_CONSTANTS(Typer)
1483 
1484   void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
1485     NodeVector roots(temp_zone);
1486     data->jsgraph()->GetCachedNodes(&roots);
1487 
1488     // Make sure we always type True and False. Needed for escape analysis.
1489     roots.push_back(data->jsgraph()->TrueConstant());
1490     roots.push_back(data->jsgraph()->FalseConstant());
1491 
1492     LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
1493                                          data->common(), temp_zone);
1494     if (FLAG_turbo_loop_variable) induction_vars.Run();
1495 
1496     // The typer inspects heap objects, so we need to unpark the local heap.
1497     UnparkedScopeIfNeeded scope(data->broker());
1498     typer->Run(roots, &induction_vars);
1499   }
1500 };
1501 
1502 struct UntyperPhase {
1503   DECL_PIPELINE_PHASE_CONSTANTS(Untyper)
1504 
1505   void Run(PipelineData* data, Zone* temp_zone) {
1506     class RemoveTypeReducer final : public Reducer {
1507      public:
1508       const char* reducer_name() const override { return "RemoveTypeReducer"; }
1509       Reduction Reduce(Node* node) final {
1510         if (NodeProperties::IsTyped(node)) {
1511           NodeProperties::RemoveType(node);
1512           return Changed(node);
1513         }
1514         return NoChange();
1515       }
1516     };
1517 
1518     NodeVector roots(temp_zone);
1519     data->jsgraph()->GetCachedNodes(&roots);
1520     for (Node* node : roots) {
1521       NodeProperties::RemoveType(node);
1522     }
1523 
1524     GraphReducer graph_reducer(temp_zone, data->graph(),
1525                                &data->info()->tick_counter(), data->broker(),
1526                                data->jsgraph()->Dead());
1527     RemoveTypeReducer remove_type_reducer;
1528     AddReducer(data, &graph_reducer, &remove_type_reducer);
1529     graph_reducer.ReduceGraph();
1530   }
1531 };
1532 
1533 struct HeapBrokerInitializationPhase {
1534   DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(HeapBrokerInitialization)
1535 
1536   void Run(PipelineData* data, Zone* temp_zone) {
1537     data->broker()->InitializeAndStartSerializing(data->native_context());
1538   }
1539 };
1540 
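// Walks the graph (and any cached JSGraph nodes) with JSHeapCopyReducer so
// that heap data referenced by the graph is copied into the broker on the
// main thread before the remaining phases run concurrently.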
1541 struct CopyMetadataForConcurrentCompilePhase {
1542   DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(SerializeMetadata)
1543 
1544   void Run(PipelineData* data, Zone* temp_zone) {
1545     GraphReducer graph_reducer(temp_zone, data->graph(),
1546                                &data->info()->tick_counter(), data->broker(),
1547                                data->jsgraph()->Dead());
1548     JSHeapCopyReducer heap_copy_reducer(data->broker());
1549     AddReducer(data, &graph_reducer, &heap_copy_reducer);
1550     graph_reducer.ReduceGraph();
1551 
1552     // Some nodes that are no longer in the graph might still be in the cache.
1553     NodeVector cached_nodes(temp_zone);
1554     data->jsgraph()->GetCachedNodes(&cached_nodes);
1555     for (Node* const node : cached_nodes) graph_reducer.ReduceNode(node);
1556   }
1557 };
1558 
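// Runs the serializer for background compilation over the closure's bytecode
// (typically when concurrent inlining is enabled), so the broker has the data
// it needs without further main-thread heap access later in the pipeline.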
1559 struct SerializationPhase {
1560   DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Serialization)
1561 
1562   void Run(PipelineData* data, Zone* temp_zone) {
1563     SerializerForBackgroundCompilationFlags flags;
1564     if (data->info()->bailout_on_uninitialized()) {
1565       flags |= SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized;
1566     }
1567     if (data->info()->source_positions()) {
1568       flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
1569     }
1570     if (data->info()->analyze_environment_liveness()) {
1571       flags |=
1572           SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
1573     }
1574     if (data->info()->inlining()) {
1575       flags |= SerializerForBackgroundCompilationFlag::kEnableTurboInlining;
1576     }
1577     RunSerializerForBackgroundCompilation(
1578         data->zone_stats(), data->broker(), data->dependencies(),
1579         data->info()->closure(), flags, data->info()->osr_offset());
1580     if (data->specialization_context().IsJust()) {
1581       ContextRef(data->broker(),
1582                  data->specialization_context().FromJust().context);
1583     }
1584   }
1585 };
1586 
1587 struct TypedLoweringPhase {
1588   DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)
1589 
1590   void Run(PipelineData* data, Zone* temp_zone) {
1591     GraphReducer graph_reducer(temp_zone, data->graph(),
1592                                &data->info()->tick_counter(), data->broker(),
1593                                data->jsgraph()->Dead());
1594     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1595                                               data->common(), temp_zone);
1596     JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
1597                                      data->jsgraph(), data->broker(),
1598                                      temp_zone);
1599     JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
1600                                    data->broker(), temp_zone);
1601     ConstantFoldingReducer constant_folding_reducer(
1602         &graph_reducer, data->jsgraph(), data->broker());
1603     TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
1604                                          data->jsgraph(), data->broker());
1605     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
1606                                              data->broker());
1607     CheckpointElimination checkpoint_elimination(&graph_reducer);
1608     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1609                                          data->broker(), data->common(),
1610                                          data->machine(), temp_zone);
1611     AddReducer(data, &graph_reducer, &dead_code_elimination);
1612 
1613     if (!data->info()->IsNativeContextIndependent()) {
1614       AddReducer(data, &graph_reducer, &create_lowering);
1615     }
1616     AddReducer(data, &graph_reducer, &constant_folding_reducer);
1617     AddReducer(data, &graph_reducer, &typed_lowering);
1618     AddReducer(data, &graph_reducer, &typed_optimization);
1619     AddReducer(data, &graph_reducer, &simple_reducer);
1620     AddReducer(data, &graph_reducer, &checkpoint_elimination);
1621     AddReducer(data, &graph_reducer, &common_reducer);
1622 
1623     // ConstantFoldingReducer, JSCreateLowering, JSTypedLowering, and
1624     // TypedOptimization access the heap.
1625     UnparkedScopeIfNeeded scope(data->broker());
1626 
1627     graph_reducer.ReduceGraph();
1628   }
1629 };
1630 
1631 
1632 struct EscapeAnalysisPhase {
1633   DECL_PIPELINE_PHASE_CONSTANTS(EscapeAnalysis)
1634 
1635   void Run(PipelineData* data, Zone* temp_zone) {
1636     EscapeAnalysis escape_analysis(data->jsgraph(),
1637                                    &data->info()->tick_counter(), temp_zone);
1638     escape_analysis.ReduceGraph();
1639 
1640     GraphReducer reducer(temp_zone, data->graph(),
1641                          &data->info()->tick_counter(), data->broker(),
1642                          data->jsgraph()->Dead());
1643     EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
1644                                          escape_analysis.analysis_result(),
1645                                          temp_zone);
1646 
1647     AddReducer(data, &reducer, &escape_reducer);
1648 
1649     // EscapeAnalysisReducer accesses the heap.
1650     UnparkedScopeIfNeeded scope(data->broker());
1651 
1652     reducer.ReduceGraph();
1653     // TODO(tebbi): Turn this into a debug mode check once we have confidence.
1654     escape_reducer.VerifyReplacement();
1655   }
1656 };
1657 
1658 struct TypeAssertionsPhase {
1659   DECL_PIPELINE_PHASE_CONSTANTS(TypeAssertions)
1660 
1661   void Run(PipelineData* data, Zone* temp_zone) {
1662     GraphReducer graph_reducer(temp_zone, data->graph(),
1663                                &data->info()->tick_counter(), data->broker(),
1664                                data->jsgraph()->Dead());
1665     AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
1666                                              temp_zone);
1667     AddReducer(data, &graph_reducer, &type_assertions);
1668     graph_reducer.ReduceGraph();
1669   }
1670 };
1671 
1672 struct SimplifiedLoweringPhase {
1673   DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
1674 
1675   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
1676     SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
1677                                 data->source_positions(), data->node_origins(),
1678                                 data->info()->GetPoisoningMitigationLevel(),
1679                                 &data->info()->tick_counter(), linkage);
1680 
1681     // RepresentationChanger accesses the heap.
1682     UnparkedScopeIfNeeded scope(data->broker());
1683 
1684     lowering.LowerAllNodes();
1685   }
1686 };
1687 
1688 struct LoopPeelingPhase {
1689   DECL_PIPELINE_PHASE_CONSTANTS(LoopPeeling)
1690 
1691   void Run(PipelineData* data, Zone* temp_zone) {
1692     GraphTrimmer trimmer(temp_zone, data->graph());
1693     NodeVector roots(temp_zone);
1694     data->jsgraph()->GetCachedNodes(&roots);
1695     trimmer.TrimGraph(roots.begin(), roots.end());
1696 
1697     LoopTree* loop_tree = LoopFinder::BuildLoopTree(
1698         data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
1699     // We call the typer inside of PeelInnerLoopsOfTree which inspects heap
1700     // objects, so we need to unpark the local heap.
1701     UnparkedScopeIfNeeded scope(data->broker());
1702     LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
1703                data->source_positions(), data->node_origins())
1704         .PeelInnerLoopsOfTree();
1705   }
1706 };
1707 
1708 struct LoopExitEliminationPhase {
1709   DECL_PIPELINE_PHASE_CONSTANTS(LoopExitElimination)
1710 
1711   void Run(PipelineData* data, Zone* temp_zone) {
1712     LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
1713   }
1714 };
1715 
1716 struct GenericLoweringPhase {
1717   DECL_PIPELINE_PHASE_CONSTANTS(GenericLowering)
1718 
1719   void Run(PipelineData* data, Zone* temp_zone) {
1720     GraphReducer graph_reducer(temp_zone, data->graph(),
1721                                &data->info()->tick_counter(), data->broker(),
1722                                data->jsgraph()->Dead());
1723     JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer,
1724                                        data->broker());
1725     AddReducer(data, &graph_reducer, &generic_lowering);
1726     graph_reducer.ReduceGraph();
1727   }
1728 };
1729 
1730 struct EarlyOptimizationPhase {
1731   DECL_PIPELINE_PHASE_CONSTANTS(EarlyOptimization)
1732 
1733   void Run(PipelineData* data, Zone* temp_zone) {
1734     GraphReducer graph_reducer(temp_zone, data->graph(),
1735                                &data->info()->tick_counter(), data->broker(),
1736                                data->jsgraph()->Dead());
1737     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1738                                               data->common(), temp_zone);
1739     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
1740                                              data->broker());
1741     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
1742     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1743     MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
1744     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1745                                          data->broker(), data->common(),
1746                                          data->machine(), temp_zone);
1747     AddReducer(data, &graph_reducer, &dead_code_elimination);
1748     AddReducer(data, &graph_reducer, &simple_reducer);
1749     AddReducer(data, &graph_reducer, &redundancy_elimination);
1750     AddReducer(data, &graph_reducer, &machine_reducer);
1751     AddReducer(data, &graph_reducer, &common_reducer);
1752     AddReducer(data, &graph_reducer, &value_numbering);
1753     graph_reducer.ReduceGraph();
1754   }
1755 };
1756 
1757 struct ControlFlowOptimizationPhase {
1758   DECL_PIPELINE_PHASE_CONSTANTS(ControlFlowOptimization)
1759 
1760   void Run(PipelineData* data, Zone* temp_zone) {
1761     ControlFlowOptimizer optimizer(data->graph(), data->common(),
1762                                    data->machine(),
1763                                    &data->info()->tick_counter(), temp_zone);
1764     optimizer.Optimize();
1765   }
1766 };
1767 
1768 struct EffectControlLinearizationPhase {
1769   DECL_PIPELINE_PHASE_CONSTANTS(EffectLinearization)
1770 
1771   void Run(PipelineData* data, Zone* temp_zone) {
1772     {
1773       // The scheduler requires the graphs to be trimmed, so trim now.
1774       // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
1775       // graphs.
1776       GraphTrimmer trimmer(temp_zone, data->graph());
1777       NodeVector roots(temp_zone);
1778       data->jsgraph()->GetCachedNodes(&roots);
1779       trimmer.TrimGraph(roots.begin(), roots.end());
1780 
1781       // Schedule the graph without node splitting so that we can
1782       // fix the effect and control flow for nodes with low-level side
1783       // effects (such as changing representation to tagged or
1784       //   'floating' allocation regions).
1785       Schedule* schedule = Scheduler::ComputeSchedule(
1786           temp_zone, data->graph(), Scheduler::kTempSchedule,
1787           &data->info()->tick_counter(), data->profile_data());
1788       TraceScheduleAndVerify(data->info(), data, schedule,
1789                              "effect linearization schedule");
1790 
1791       MaskArrayIndexEnable mask_array_index =
1792           (data->info()->GetPoisoningMitigationLevel() !=
1793            PoisoningMitigationLevel::kDontPoison)
1794               ? MaskArrayIndexEnable::kMaskArrayIndex
1795               : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
1796       // Post-pass for wiring the control/effects
1797       // - connect allocating representation changes into the control&effect
1798       //   chains and lower them,
1799       // - get rid of the region markers,
1800       // - introduce effect phis and rewire effects to get SSA again.
1801       LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
1802                              data->source_positions(), data->node_origins(),
1803                              mask_array_index, MaintainSchedule::kDiscard,
1804                              data->broker());
1805     }
1806     {
1807       // The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
1808       // run {DeadCodeElimination} to prune these parts of the graph.
1809       // Also, the following store-store elimination phase greatly benefits from
1810       // doing a common operator reducer and dead code elimination just before
1811       // it, to eliminate conditional deopts with a constant condition.
1812       GraphReducer graph_reducer(temp_zone, data->graph(),
1813                                  &data->info()->tick_counter(), data->broker(),
1814                                  data->jsgraph()->Dead());
1815       DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1816                                                 data->common(), temp_zone);
1817       CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1818                                            data->broker(), data->common(),
1819                                            data->machine(), temp_zone);
1820       AddReducer(data, &graph_reducer, &dead_code_elimination);
1821       AddReducer(data, &graph_reducer, &common_reducer);
1822       graph_reducer.ReduceGraph();
1823     }
1824   }
1825 };
1826 
1827 struct StoreStoreEliminationPhase {
1828   DECL_PIPELINE_PHASE_CONSTANTS(StoreStoreElimination)
1829 
1830   void Run(PipelineData* data, Zone* temp_zone) {
1831     GraphTrimmer trimmer(temp_zone, data->graph());
1832     NodeVector roots(temp_zone);
1833     data->jsgraph()->GetCachedNodes(&roots);
1834     trimmer.TrimGraph(roots.begin(), roots.end());
1835 
1836     StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
1837                                temp_zone);
1838   }
1839 };
1840 
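// Load elimination runs in a single GraphReducer fixpoint together with early
// branch elimination, dead-code and redundancy elimination, type narrowing,
// constant folding, typed optimization, checkpoint elimination, and value
// numbering.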
1841 struct LoadEliminationPhase {
1842   DECL_PIPELINE_PHASE_CONSTANTS(LoadElimination)
1843 
1844   void Run(PipelineData* data, Zone* temp_zone) {
1845     GraphReducer graph_reducer(temp_zone, data->graph(),
1846                                &data->info()->tick_counter(), data->broker(),
1847                                data->jsgraph()->Dead());
1848     BranchElimination branch_condition_elimination(&graph_reducer,
1849                                                    data->jsgraph(), temp_zone,
1850                                                    BranchElimination::kEARLY);
1851     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1852                                               data->common(), temp_zone);
1853     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
1854     LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
1855                                      temp_zone);
1856     CheckpointElimination checkpoint_elimination(&graph_reducer);
1857     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1858     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1859                                          data->broker(), data->common(),
1860                                          data->machine(), temp_zone);
1861     TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
1862                                          data->jsgraph(), data->broker());
1863     ConstantFoldingReducer constant_folding_reducer(
1864         &graph_reducer, data->jsgraph(), data->broker());
1865     TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
1866                                                 data->broker());
1867 
1868     AddReducer(data, &graph_reducer, &branch_condition_elimination);
1869     AddReducer(data, &graph_reducer, &dead_code_elimination);
1870     AddReducer(data, &graph_reducer, &redundancy_elimination);
1871     AddReducer(data, &graph_reducer, &load_elimination);
1872     AddReducer(data, &graph_reducer, &type_narrowing_reducer);
1873     AddReducer(data, &graph_reducer, &constant_folding_reducer);
1874     AddReducer(data, &graph_reducer, &typed_optimization);
1875     AddReducer(data, &graph_reducer, &checkpoint_elimination);
1876     AddReducer(data, &graph_reducer, &common_reducer);
1877     AddReducer(data, &graph_reducer, &value_numbering);
1878 
1879     // ConstantFoldingReducer and TypedOptimization access the heap.
1880     UnparkedScopeIfNeeded scope(data->broker());
1881 
1882     graph_reducer.ReduceGraph();
1883   }
1884 };
1885 
1886 struct MemoryOptimizationPhase {
1887   DECL_PIPELINE_PHASE_CONSTANTS(MemoryOptimization)
1888 
1889   void Run(PipelineData* data, Zone* temp_zone) {
1890     // The memory optimizer requires the graphs to be trimmed, so trim now.
1891     GraphTrimmer trimmer(temp_zone, data->graph());
1892     NodeVector roots(temp_zone);
1893     data->jsgraph()->GetCachedNodes(&roots);
1894     trimmer.TrimGraph(roots.begin(), roots.end());
1895 
1896     // Optimize allocations and load/store operations.
1897     MemoryOptimizer optimizer(
1898         data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
1899         data->info()->allocation_folding()
1900             ? MemoryLowering::AllocationFolding::kDoAllocationFolding
1901             : MemoryLowering::AllocationFolding::kDontAllocationFolding,
1902         data->debug_name(), &data->info()->tick_counter());
1903     optimizer.Optimize();
1904   }
1905 };
1906 
1907 struct LateOptimizationPhase {
1908   DECL_PIPELINE_PHASE_CONSTANTS(LateOptimization)
1909 
1910   void Run(PipelineData* data, Zone* temp_zone) {
1911     GraphReducer graph_reducer(temp_zone, data->graph(),
1912                                &data->info()->tick_counter(), data->broker(),
1913                                data->jsgraph()->Dead());
1914     BranchElimination branch_condition_elimination(&graph_reducer,
1915                                                    data->jsgraph(), temp_zone);
1916     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1917                                               data->common(), temp_zone);
1918     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1919     MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
1920     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1921                                          data->broker(), data->common(),
1922                                          data->machine(), temp_zone);
1923     JSGraphAssembler graph_assembler(data->jsgraph(), temp_zone);
1924     SelectLowering select_lowering(&graph_assembler, data->graph());
1925     AddReducer(data, &graph_reducer, &branch_condition_elimination);
1926     AddReducer(data, &graph_reducer, &dead_code_elimination);
1927     AddReducer(data, &graph_reducer, &machine_reducer);
1928     AddReducer(data, &graph_reducer, &common_reducer);
1929     AddReducer(data, &graph_reducer, &select_lowering);
1930     AddReducer(data, &graph_reducer, &value_numbering);
1931     graph_reducer.ReduceGraph();
1932   }
1933 };
1934 
1935 struct MachineOperatorOptimizationPhase {
1936   DECL_PIPELINE_PHASE_CONSTANTS(MachineOperatorOptimization)
1937 
1938   void Run(PipelineData* data, Zone* temp_zone) {
1939     GraphReducer graph_reducer(temp_zone, data->graph(),
1940                                &data->info()->tick_counter(), data->broker(),
1941                                data->jsgraph()->Dead());
1942     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1943     MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
1944 
1945     AddReducer(data, &graph_reducer, &machine_reducer);
1946     AddReducer(data, &graph_reducer, &value_numbering);
1947     graph_reducer.ReduceGraph();
1948   }
1949 };
1950 
1951 struct DecompressionOptimizationPhase {
1952   DECL_PIPELINE_PHASE_CONSTANTS(DecompressionOptimization)
1953 
1954   void Run(PipelineData* data, Zone* temp_zone) {
1955     if (COMPRESS_POINTERS_BOOL) {
1956       DecompressionOptimizer decompression_optimizer(
1957           temp_zone, data->graph(), data->common(), data->machine());
1958       decompression_optimizer.Reduce();
1959     }
1960   }
1961 };
1962 
1963 struct ScheduledEffectControlLinearizationPhase {
1964   DECL_PIPELINE_PHASE_CONSTANTS(ScheduledEffectControlLinearization)
1965 
1966   void Run(PipelineData* data, Zone* temp_zone) {
1967     MaskArrayIndexEnable mask_array_index =
1968         (data->info()->GetPoisoningMitigationLevel() !=
1969          PoisoningMitigationLevel::kDontPoison)
1970             ? MaskArrayIndexEnable::kMaskArrayIndex
1971             : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
1972     // Post-pass for wiring the control/effects
1973     // - connect allocating representation changes into the control&effect
1974     //   chains and lower them,
1975     // - get rid of the region markers,
1976     // - introduce effect phis and rewire effects to get SSA again.
1977     LinearizeEffectControl(data->jsgraph(), data->schedule(), temp_zone,
1978                            data->source_positions(), data->node_origins(),
1979                            mask_array_index, MaintainSchedule::kMaintain,
1980                            data->broker());
1981 
1982     // TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
1983     Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
1984     if (FLAG_turbo_verify) Scheduler::GenerateDominatorTree(data->schedule());
1985     TraceScheduleAndVerify(data->info(), data, data->schedule(),
1986                            "effect linearization schedule");
1987   }
1988 };
1989 
1990 struct ScheduledMachineLoweringPhase {
1991   DECL_PIPELINE_PHASE_CONSTANTS(ScheduledMachineLowering)
1992 
1993   void Run(PipelineData* data, Zone* temp_zone) {
1994     ScheduledMachineLowering machine_lowering(
1995         data->jsgraph(), data->schedule(), temp_zone, data->source_positions(),
1996         data->node_origins(), data->info()->GetPoisoningMitigationLevel());
1997     machine_lowering.Run();
1998 
1999     // TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
2000     Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
2001     Scheduler::GenerateDominatorTree(data->schedule());
2002     TraceScheduleAndVerify(data->info(), data, data->schedule(),
2003                            "machine lowered schedule");
2004   }
2005 };
2006 
2007 struct CsaEarlyOptimizationPhase {
2008   DECL_PIPELINE_PHASE_CONSTANTS(CSAEarlyOptimization)
2009 
2010   void Run(PipelineData* data, Zone* temp_zone) {
2011     GraphReducer graph_reducer(temp_zone, data->graph(),
2012                                &data->info()->tick_counter(), data->broker(),
2013                                data->jsgraph()->Dead());
2014     MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
2015     BranchElimination branch_condition_elimination(&graph_reducer,
2016                                                    data->jsgraph(), temp_zone);
2017     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
2018                                               data->common(), temp_zone);
2019     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
2020                                          data->broker(), data->common(),
2021                                          data->machine(), temp_zone);
2022     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
2023     CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
2024                                         temp_zone);
2025     AddReducer(data, &graph_reducer, &machine_reducer);
2026     AddReducer(data, &graph_reducer, &branch_condition_elimination);
2027     AddReducer(data, &graph_reducer, &dead_code_elimination);
2028     AddReducer(data, &graph_reducer, &common_reducer);
2029     AddReducer(data, &graph_reducer, &value_numbering);
2030     AddReducer(data, &graph_reducer, &load_elimination);
2031     graph_reducer.ReduceGraph();
2032   }
2033 };
2034 
2035 struct CsaOptimizationPhase {
2036   DECL_PIPELINE_PHASE_CONSTANTS(CSAOptimization)
2037 
2038   void Run(PipelineData* data, Zone* temp_zone) {
2039     GraphReducer graph_reducer(temp_zone, data->graph(),
2040                                &data->info()->tick_counter(), data->broker(),
2041                                data->jsgraph()->Dead());
2042     BranchElimination branch_condition_elimination(&graph_reducer,
2043                                                    data->jsgraph(), temp_zone);
2044     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
2045                                               data->common(), temp_zone);
2046     MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
2047     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
2048                                          data->broker(), data->common(),
2049                                          data->machine(), temp_zone);
2050     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
2051     AddReducer(data, &graph_reducer, &branch_condition_elimination);
2052     AddReducer(data, &graph_reducer, &dead_code_elimination);
2053     AddReducer(data, &graph_reducer, &machine_reducer);
2054     AddReducer(data, &graph_reducer, &common_reducer);
2055     AddReducer(data, &graph_reducer, &value_numbering);
2056     graph_reducer.ReduceGraph();
2057   }
2058 };
2059 
2060 struct EarlyGraphTrimmingPhase {
2061   DECL_PIPELINE_PHASE_CONSTANTS(EarlyTrimming)
2062 
2063   void Run(PipelineData* data, Zone* temp_zone) {
2064     GraphTrimmer trimmer(temp_zone, data->graph());
2065     NodeVector roots(temp_zone);
2066     data->jsgraph()->GetCachedNodes(&roots);
2067     trimmer.TrimGraph(roots.begin(), roots.end());
2068   }
2069 };
2070 
2071 
2072 struct LateGraphTrimmingPhase {
2073   DECL_PIPELINE_PHASE_CONSTANTS(LateGraphTrimming)
2074 
2075   void Run(PipelineData* data, Zone* temp_zone) {
2076     GraphTrimmer trimmer(temp_zone, data->graph());
2077     NodeVector roots(temp_zone);
2078     if (data->jsgraph()) {
2079       data->jsgraph()->GetCachedNodes(&roots);
2080     }
2081     trimmer.TrimGraph(roots.begin(), roots.end());
2082   }
2083 };
2084 
2085 
2086 struct ComputeSchedulePhase {
2087   DECL_PIPELINE_PHASE_CONSTANTS(Scheduling)
2088 
2089   void Run(PipelineData* data, Zone* temp_zone) {
2090     Schedule* schedule = Scheduler::ComputeSchedule(
2091         temp_zone, data->graph(),
2092         data->info()->splitting() ? Scheduler::kSplitNodes
2093                                   : Scheduler::kNoFlags,
2094         &data->info()->tick_counter(), data->profile_data());
2095     data->set_schedule(schedule);
2096   }
2097 };
2098 
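// Helper for the trace_turbo_json output: prints, for each node id and each
// basic block, the range of instruction indices that instruction selection
// produced for it.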
2099 struct InstructionRangesAsJSON {
2100   const InstructionSequence* sequence;
2101   const ZoneVector<std::pair<int, int>>* instr_origins;
2102 };
2103 
2104 std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
2105   const int max = static_cast<int>(s.sequence->LastInstructionIndex());
2106 
2107   out << ", \"nodeIdToInstructionRange\": {";
2108   bool need_comma = false;
2109   for (size_t i = 0; i < s.instr_origins->size(); ++i) {
2110     std::pair<int, int> offset = (*s.instr_origins)[i];
2111     if (offset.first == -1) continue;
2112     const int first = max - offset.first + 1;
2113     const int second = max - offset.second + 1;
2114     if (need_comma) out << ", ";
2115     out << "\"" << i << "\": [" << first << ", " << second << "]";
2116     need_comma = true;
2117   }
2118   out << "}";
2119   out << ", \"blockIdtoInstructionRange\": {";
2120   need_comma = false;
2121   for (auto block : s.sequence->instruction_blocks()) {
2122     if (need_comma) out << ", ";
2123     out << "\"" << block->rpo_number() << "\": [" << block->code_start() << ", "
2124         << block->code_end() << "]";
2125     need_comma = true;
2126   }
2127   out << "}";
2128   return out;
2129 }
2130 
2131 struct InstructionSelectionPhase {
2132   DECL_PIPELINE_PHASE_CONSTANTS(SelectInstructions)
2133 
2134   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
2135     InstructionSelector selector(
2136         temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
2137         data->schedule(), data->source_positions(), data->frame(),
2138         data->info()->switch_jump_table()
2139             ? InstructionSelector::kEnableSwitchJumpTable
2140             : InstructionSelector::kDisableSwitchJumpTable,
2141         &data->info()->tick_counter(), data->broker(),
2142         data->address_of_max_unoptimized_frame_height(),
2143         data->address_of_max_pushed_argument_count(),
2144         data->info()->source_positions()
2145             ? InstructionSelector::kAllSourcePositions
2146             : InstructionSelector::kCallSourcePositions,
2147         InstructionSelector::SupportedFeatures(),
2148         FLAG_turbo_instruction_scheduling
2149             ? InstructionSelector::kEnableScheduling
2150             : InstructionSelector::kDisableScheduling,
2151         data->roots_relative_addressing_enabled()
2152             ? InstructionSelector::kEnableRootsRelativeAddressing
2153             : InstructionSelector::kDisableRootsRelativeAddressing,
2154         data->info()->GetPoisoningMitigationLevel(),
2155         data->info()->trace_turbo_json()
2156             ? InstructionSelector::kEnableTraceTurboJson
2157             : InstructionSelector::kDisableTraceTurboJson);
2158     if (!selector.SelectInstructions()) {
2159       data->set_compilation_failed();
2160     }
2161     if (data->info()->trace_turbo_json()) {
2162       TurboJsonFile json_of(data->info(), std::ios_base::app);
2163       json_of << "{\"name\":\"" << phase_name()
2164               << "\",\"type\":\"instructions\""
2165               << InstructionRangesAsJSON{data->sequence(),
2166                                          &selector.instr_origins()}
2167               << "},\n";
2168     }
2169   }
2170 };
2171 
2172 
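// The phases from here through ResolveControlFlowPhase make up the top-tier
// register allocator (operating on top_tier_register_allocation_data); the
// MidTier* phases further below drive the faster mid-tier allocator used on
// the mid-tier (e.g. Turboprop) compilation path.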
2173 struct MeetRegisterConstraintsPhase {
2174   DECL_PIPELINE_PHASE_CONSTANTS(MeetRegisterConstraints)
2175   void Run(PipelineData* data, Zone* temp_zone) {
2176     ConstraintBuilder builder(data->top_tier_register_allocation_data());
2177     builder.MeetRegisterConstraints();
2178   }
2179 };
2180 
2181 
2182 struct ResolvePhisPhase {
2183   DECL_PIPELINE_PHASE_CONSTANTS(ResolvePhis)
2184 
2185   void Run(PipelineData* data, Zone* temp_zone) {
2186     ConstraintBuilder builder(data->top_tier_register_allocation_data());
2187     builder.ResolvePhis();
2188   }
2189 };
2190 
2191 
2192 struct BuildLiveRangesPhase {
2193   DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRanges)
2194 
2195   void Run(PipelineData* data, Zone* temp_zone) {
2196     LiveRangeBuilder builder(data->top_tier_register_allocation_data(),
2197                              temp_zone);
2198     builder.BuildLiveRanges();
2199   }
2200 };
2201 
2202 struct BuildBundlesPhase {
2203   DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRangeBundles)
2204 
2205   void Run(PipelineData* data, Zone* temp_zone) {
2206     BundleBuilder builder(data->top_tier_register_allocation_data());
2207     builder.BuildBundles();
2208   }
2209 };
2210 
2211 template <typename RegAllocator>
2212 struct AllocateGeneralRegistersPhase {
2213   DECL_PIPELINE_PHASE_CONSTANTS(AllocateGeneralRegisters)
2214 
2215   void Run(PipelineData* data, Zone* temp_zone) {
2216     RegAllocator allocator(data->top_tier_register_allocation_data(),
2217                            RegisterKind::kGeneral, temp_zone);
2218     allocator.AllocateRegisters();
2219   }
2220 };
2221 
2222 template <typename RegAllocator>
2223 struct AllocateFPRegistersPhase {
2224   DECL_PIPELINE_PHASE_CONSTANTS(AllocateFPRegisters)
2225 
2226   void Run(PipelineData* data, Zone* temp_zone) {
2227     RegAllocator allocator(data->top_tier_register_allocation_data(),
2228                            RegisterKind::kDouble, temp_zone);
2229     allocator.AllocateRegisters();
2230   }
2231 };
2232 
2233 struct DecideSpillingModePhase {
2234   DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
2235 
2236   void Run(PipelineData* data, Zone* temp_zone) {
2237     OperandAssigner assigner(data->top_tier_register_allocation_data());
2238     assigner.DecideSpillingMode();
2239   }
2240 };
2241 
2242 struct AssignSpillSlotsPhase {
2243   DECL_PIPELINE_PHASE_CONSTANTS(AssignSpillSlots)
2244 
2245   void Run(PipelineData* data, Zone* temp_zone) {
2246     OperandAssigner assigner(data->top_tier_register_allocation_data());
2247     assigner.AssignSpillSlots();
2248   }
2249 };
2250 
2251 
2252 struct CommitAssignmentPhase {
2253   DECL_PIPELINE_PHASE_CONSTANTS(CommitAssignment)
2254 
2255   void Run(PipelineData* data, Zone* temp_zone) {
2256     OperandAssigner assigner(data->top_tier_register_allocation_data());
2257     assigner.CommitAssignment();
2258   }
2259 };
2260 
2261 
2262 struct PopulateReferenceMapsPhase {
2263   DECL_PIPELINE_PHASE_CONSTANTS(PopulatePointerMaps)
2264 
2265   void Run(PipelineData* data, Zone* temp_zone) {
2266     ReferenceMapPopulator populator(data->top_tier_register_allocation_data());
2267     populator.PopulateReferenceMaps();
2268   }
2269 };
2270 
2271 
2272 struct ConnectRangesPhase {
2273   DECL_PIPELINE_PHASE_CONSTANTS(ConnectRanges)
2274 
2275   void Run(PipelineData* data, Zone* temp_zone) {
2276     LiveRangeConnector connector(data->top_tier_register_allocation_data());
2277     connector.ConnectRanges(temp_zone);
2278   }
2279 };
2280 
2281 
2282 struct ResolveControlFlowPhase {
2283   DECL_PIPELINE_PHASE_CONSTANTS(ResolveControlFlow)
2284 
2285   void Run(PipelineData* data, Zone* temp_zone) {
2286     LiveRangeConnector connector(data->top_tier_register_allocation_data());
2287     connector.ResolveControlFlow(temp_zone);
2288   }
2289 };
2290 
2291 struct MidTierRegisterOutputDefinitionPhase {
2292   DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)
2293 
2294   void Run(PipelineData* data, Zone* temp_zone) {
2295     DefineOutputs(data->mid_tier_register_allocator_data());
2296   }
2297 };
2298 
2299 struct MidTierRegisterAllocatorPhase {
2300   DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)
2301 
2302   void Run(PipelineData* data, Zone* temp_zone) {
2303     AllocateRegisters(data->mid_tier_register_allocator_data());
2304   }
2305 };
2306 
2307 struct MidTierSpillSlotAllocatorPhase {
2308   DECL_PIPELINE_PHASE_CONSTANTS(MidTierSpillSlotAllocator)
2309 
2310   void Run(PipelineData* data, Zone* temp_zone) {
2311     AllocateSpillSlots(data->mid_tier_register_allocator_data());
2312   }
2313 };
2314 
2315 struct MidTierPopulateReferenceMapsPhase {
2316   DECL_PIPELINE_PHASE_CONSTANTS(MidTierPopulateReferenceMaps)
2317 
2318   void Run(PipelineData* data, Zone* temp_zone) {
2319     PopulateReferenceMaps(data->mid_tier_register_allocator_data());
2320   }
2321 };
2322 
2323 struct OptimizeMovesPhase {
2324   DECL_PIPELINE_PHASE_CONSTANTS(OptimizeMoves)
2325 
2326   void Run(PipelineData* data, Zone* temp_zone) {
2327     MoveOptimizer move_optimizer(temp_zone, data->sequence());
2328     move_optimizer.Run();
2329   }
2330 };
2331 
2332 struct FrameElisionPhase {
2333   DECL_PIPELINE_PHASE_CONSTANTS(FrameElision)
2334 
2335   void Run(PipelineData* data, Zone* temp_zone) {
2336     FrameElider(data->sequence()).Run();
2337   }
2338 };
2339 
2340 struct JumpThreadingPhase {
2341   DECL_PIPELINE_PHASE_CONSTANTS(JumpThreading)
2342 
2343   void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
2344     ZoneVector<RpoNumber> result(temp_zone);
2345     if (JumpThreading::ComputeForwarding(temp_zone, &result, data->sequence(),
2346                                          frame_at_start)) {
2347       JumpThreading::ApplyForwarding(temp_zone, result, data->sequence());
2348     }
2349   }
2350 };
2351 
2352 struct AssembleCodePhase {
2353   DECL_PIPELINE_PHASE_CONSTANTS(AssembleCode)
2354 
2355   void Run(PipelineData* data, Zone* temp_zone) {
2356     data->code_generator()->AssembleCode();
2357   }
2358 };
2359 
2360 struct FinalizeCodePhase {
2361   DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(FinalizeCode)
2362 
2363   void Run(PipelineData* data, Zone* temp_zone) {
2364     data->set_code(data->code_generator()->FinalizeCode());
2365   }
2366 };
2367 
2368 
2369 struct PrintGraphPhase {
2370   DECL_PIPELINE_PHASE_CONSTANTS(PrintGraph)
2371 
2372   void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
2373     OptimizedCompilationInfo* info = data->info();
2374     Graph* graph = data->graph();
2375 
2376     if (info->trace_turbo_json()) {  // Print JSON.
2377       UnparkedScopeIfNeeded scope(data->broker());
2378       AllowHandleDereference allow_deref;
2379 
2380       TurboJsonFile json_of(info, std::ios_base::app);
2381       json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
2382               << AsJSON(*graph, data->source_positions(), data->node_origins())
2383               << "},\n";
2384     }
2385 
2386     if (info->trace_turbo_scheduled()) {
2387       AccountingAllocator allocator;
2388       Schedule* schedule = data->schedule();
2389       if (schedule == nullptr) {
2390         schedule = Scheduler::ComputeSchedule(
2391             temp_zone, data->graph(), Scheduler::kNoFlags,
2392             &info->tick_counter(), data->profile_data());
2393       }
2394 
2395       UnparkedScopeIfNeeded scope(data->broker());
2396       AllowHandleDereference allow_deref;
2397       CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2398       tracing_scope.stream()
2399           << "-- Graph after " << phase << " -- " << std::endl
2400           << AsScheduledGraph(schedule);
2401     } else if (info->trace_turbo_graph()) {  // Simple textual RPO.
2402       UnparkedScopeIfNeeded scope(data->broker());
2403       AllowHandleDereference allow_deref;
2404       CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2405       tracing_scope.stream()
2406           << "-- Graph after " << phase << " -- " << std::endl
2407           << AsRPO(*graph);
2408     }
2409   }
2410 };
2411 
2412 
2413 struct VerifyGraphPhase {
2414   DECL_PIPELINE_PHASE_CONSTANTS(VerifyGraph)
2415 
2416   void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
2417            bool values_only = false) {
2418     Verifier::CodeType code_type;
2419     switch (data->info()->code_kind()) {
2420       case CodeKind::WASM_FUNCTION:
2421       case CodeKind::WASM_TO_CAPI_FUNCTION:
2422       case CodeKind::WASM_TO_JS_FUNCTION:
2423       case CodeKind::JS_TO_WASM_FUNCTION:
2424       case CodeKind::C_WASM_ENTRY:
2425         code_type = Verifier::kWasm;
2426         break;
2427       default:
2428         code_type = Verifier::kDefault;
2429     }
2430     Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
2431                   values_only ? Verifier::kValuesOnly : Verifier::kAll,
2432                   code_type);
2433   }
2434 };
2435 
2436 #undef DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS
2437 #undef DECL_PIPELINE_PHASE_CONSTANTS
2438 #undef DECL_PIPELINE_PHASE_CONSTANTS_HELPER
2439 
2440 void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
2441   if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
2442     Run<PrintGraphPhase>(phase);
2443   }
2444   if (FLAG_turbo_verify) {
2445     Run<VerifyGraphPhase>(untyped);
2446   }
2447 }
2448 
2449 void PipelineImpl::Serialize() {
2450   PipelineData* data = this->data_;
2451 
2452   data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
2453 
2454   if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
2455     CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2456     tracing_scope.stream()
2457         << "---------------------------------------------------\n"
2458         << "Begin compiling method " << info()->GetDebugName().get()
2459         << " using TurboFan" << std::endl;
2460   }
2461   if (info()->trace_turbo_json()) {
2462     TurboCfgFile tcf(isolate());
2463     tcf << AsC1VCompilation(info());
2464   }
2465 
2466   data->source_positions()->AddDecorator();
2467   if (data->info()->trace_turbo_json()) {
2468     data->node_origins()->AddDecorator();
2469   }
2470 
2471   data->broker()->SetTargetNativeContextRef(data->native_context());
2472   if (data->broker()->is_concurrent_inlining()) {
2473     Run<HeapBrokerInitializationPhase>();
2474     Run<SerializationPhase>();
2475     data->broker()->StopSerializing();
2476   }
2477   data->EndPhaseKind();
2478 }
2479 
2480 bool PipelineImpl::CreateGraph() {
2481   PipelineData* data = this->data_;
2482   UnparkedScopeIfNeeded unparked_scope(data->broker());
2483 
2484   data->BeginPhaseKind("V8.TFGraphCreation");
2485 
2486   Run<GraphBuilderPhase>();
2487   RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);
2488 
2489   // Perform function context specialization and inlining (if enabled).
2490   Run<InliningPhase>();
2491   RunPrintAndVerify(InliningPhase::phase_name(), true);
2492 
2493   // Remove dead->live edges from the graph.
2494   Run<EarlyGraphTrimmingPhase>();
2495   RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);
2496 
2497   // Determine the Typer operation flags.
2498   {
2499     SharedFunctionInfoRef shared_info(data->broker(), info()->shared_info());
2500     if (is_sloppy(shared_info.language_mode()) &&
2501         shared_info.IsUserJavaScript()) {
2502       // Sloppy mode functions always have an Object for this.
2503       data->AddTyperFlag(Typer::kThisIsReceiver);
2504     }
2505     if (IsClassConstructor(shared_info.kind())) {
2506       // Class constructors cannot be [[Call]]ed.
2507       data->AddTyperFlag(Typer::kNewTargetIsReceiver);
2508     }
2509   }
2510 
2511   // Run the type-sensitive lowerings and optimizations on the graph.
2512   {
2513     if (!data->broker()->is_concurrent_inlining()) {
2514       Run<HeapBrokerInitializationPhase>();
2515       Run<CopyMetadataForConcurrentCompilePhase>();
2516       data->broker()->StopSerializing();
2517     }
2518   }
2519 
2520   data->EndPhaseKind();
2521 
2522   return true;
2523 }
2524 
2525 bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
2526   PipelineData* data = this->data_;
2527 
2528   data->BeginPhaseKind("V8.TFLowering");
2529 
2530   // Type the graph and keep the Typer running such that new nodes get
2531   // automatically typed when they are created.
2532   Run<TyperPhase>(data->CreateTyper());
2533   RunPrintAndVerify(TyperPhase::phase_name());
2534 
2535   Run<TypedLoweringPhase>();
2536   RunPrintAndVerify(TypedLoweringPhase::phase_name());
2537 
2538   if (data->info()->loop_peeling()) {
2539     Run<LoopPeelingPhase>();
2540     RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
2541   } else {
2542     Run<LoopExitEliminationPhase>();
2543     RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
2544   }
2545 
2546   if (FLAG_turbo_load_elimination) {
2547     Run<LoadEliminationPhase>();
2548     RunPrintAndVerify(LoadEliminationPhase::phase_name());
2549   }
2550   data->DeleteTyper();
2551 
2552   if (FLAG_turbo_escape) {
2553     Run<EscapeAnalysisPhase>();
2554     if (data->compilation_failed()) {
2555       info()->AbortOptimization(
2556           BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
2557       data->EndPhaseKind();
2558       return false;
2559     }
2560     RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
2561   }
2562 
2563   if (FLAG_assert_types) {
2564     Run<TypeAssertionsPhase>();
2565     RunPrintAndVerify(TypeAssertionsPhase::phase_name());
2566   }
2567 
2568   // Perform simplified lowering. This has to run w/o the Typer decorator,
2569   // because we cannot compute meaningful types anyway, and the computed types
2570   // might even conflict with the representation/truncation logic.
2571   Run<SimplifiedLoweringPhase>(linkage);
2572   RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
2573 
2574   // From now on it is invalid to look at types on the nodes, because the types
2575   // on the nodes might not make sense after representation selection due to the
2576   // way we handle truncations; if we wanted to look at types afterwards, we
2577   // would essentially need to re-type (large portions of) the graph.
2578 
2579   // In order to catch bugs related to type access after this point, we now
2580   // remove the types from the nodes (currently only in Debug builds).
2581 #ifdef DEBUG
2582   Run<UntyperPhase>();
2583   RunPrintAndVerify(UntyperPhase::phase_name(), true);
2584 #endif
2585 
2586   // Run generic lowering pass.
2587   Run<GenericLoweringPhase>();
2588   RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);
2589 
2590   data->BeginPhaseKind("V8.TFBlockBuilding");
2591 
2592   data->InitializeFrameData(linkage->GetIncomingDescriptor());
2593 
2594   // Run early optimization pass.
2595   Run<EarlyOptimizationPhase>();
2596   RunPrintAndVerify(EarlyOptimizationPhase::phase_name(), true);
2597 
2598   Run<EffectControlLinearizationPhase>();
2599   RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);
2600 
2601   if (FLAG_turbo_store_elimination) {
2602     Run<StoreStoreEliminationPhase>();
2603     RunPrintAndVerify(StoreStoreEliminationPhase::phase_name(), true);
2604   }
2605 
2606   // Optimize control flow.
2607   if (FLAG_turbo_cf_optimization) {
2608     Run<ControlFlowOptimizationPhase>();
2609     RunPrintAndVerify(ControlFlowOptimizationPhase::phase_name(), true);
2610   }
2611 
2612   Run<LateOptimizationPhase>();
2613   RunPrintAndVerify(LateOptimizationPhase::phase_name(), true);
2614 
2615   // Optimize memory access and allocation operations.
2616   Run<MemoryOptimizationPhase>();
2617   RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
2618 
2619   // Run value numbering and machine operator reducer to optimize load/store
2620   // address computation (in particular, reuse the address computation whenever
2621   // possible).
2622   Run<MachineOperatorOptimizationPhase>();
2623   RunPrintAndVerify(MachineOperatorOptimizationPhase::phase_name(), true);
2624 
2625   Run<DecompressionOptimizationPhase>();
2626   RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);
2627 
2628   data->source_positions()->RemoveDecorator();
2629   if (data->info()->trace_turbo_json()) {
2630     data->node_origins()->RemoveDecorator();
2631   }
2632 
2633   ComputeScheduledGraph();
2634 
2635   return SelectInstructions(linkage);
2636 }
2637 
2638 bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
2639   PipelineData* data = this->data_;
2640 
2641   data->BeginPhaseKind("V8.TFLowering");
2642 
2643   // Type the graph and keep the Typer running such that new nodes get
2644   // automatically typed when they are created.
2645   Run<TyperPhase>(data->CreateTyper());
2646   RunPrintAndVerify(TyperPhase::phase_name());
2647 
2648   Run<TypedLoweringPhase>();
2649   RunPrintAndVerify(TypedLoweringPhase::phase_name());
2650 
2651   // TODO(9684): Consider rolling this into the preceding phase or not creating
2652   // LoopExit nodes at all.
2653   Run<LoopExitEliminationPhase>();
2654   RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
2655 
2656   data->DeleteTyper();
2657 
2658   if (FLAG_assert_types) {
2659     Run<TypeAssertionsPhase>();
2660     RunPrintAndVerify(TypeAssertionsPhase::phase_name());
2661   }
2662 
2663   // Perform simplified lowering. This has to run w/o the Typer decorator,
2664   // because we cannot compute meaningful types anyway, and the computed types
2665   // might even conflict with the representation/truncation logic.
2666   Run<SimplifiedLoweringPhase>(linkage);
2667   RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
2668 
2669   // From now on it is invalid to look at types on the nodes, because the types
2670   // on the nodes might not make sense after representation selection due to the
2671   // way we handle truncations; if we wanted to look at types afterwards, we
2672   // would essentially need to re-type (large portions of) the graph.
2673 
2674   // In order to catch bugs related to type access after this point, we now
2675   // remove the types from the nodes (currently only in Debug builds).
2676 #ifdef DEBUG
2677   Run<UntyperPhase>();
2678   RunPrintAndVerify(UntyperPhase::phase_name(), true);
2679 #endif
2680 
2681   // Run generic lowering pass.
2682   Run<GenericLoweringPhase>();
2683   RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);
2684 
2685   data->BeginPhaseKind("V8.TFBlockBuilding");
2686 
2687   data->InitializeFrameData(linkage->GetIncomingDescriptor());
2688 
2689   ComputeScheduledGraph();
2690 
2691   Run<ScheduledEffectControlLinearizationPhase>();
2692   RunPrintAndVerify(ScheduledEffectControlLinearizationPhase::phase_name(),
2693                     true);
2694 
2695   Run<ScheduledMachineLoweringPhase>();
2696   RunPrintAndVerify(ScheduledMachineLoweringPhase::phase_name(), true);
2697 
2698   // The DecompressionOptimizationPhase updates nodes' operations but does not
2699   // otherwise rewrite the graph, so it is safe to run on a scheduled graph.
2700   Run<DecompressionOptimizationPhase>();
2701   RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);
2702 
2703   data->source_positions()->RemoveDecorator();
2704   if (data->info()->trace_turbo_json()) {
2705     data->node_origins()->RemoveDecorator();
2706   }
2707 
2708   return SelectInstructions(linkage);
2709 }
2710 
2711 namespace {
2712 
2713 // Compute a hash of the given graph, in a way that should provide the same
2714 // result in multiple runs of mksnapshot, meaning the hash cannot depend on any
2715 // external pointer values or uncompressed heap constants. This hash can be used
2716 // to reject profiling data if the builtin's current code doesn't match the
2717 // version that was profiled. Hash collisions are not catastrophic; in the worst
2718 // case, we just defer some blocks that ideally shouldn't be deferred. The
2719 // result value is in the valid Smi range.
2720 int HashGraphForPGO(Graph* graph) {
2721   AccountingAllocator allocator;
2722   Zone local_zone(&allocator, ZONE_NAME);
2723 
2724   constexpr NodeId kUnassigned = static_cast<NodeId>(-1);
2725 
2726   constexpr byte kUnvisited = 0;
2727   constexpr byte kOnStack = 1;
2728   constexpr byte kVisited = 2;
2729 
2730   // Do a depth-first post-order traversal of the graph. For every node, hash:
2731   //
2732   //   - the node's traversal number
2733   //   - the opcode
2734   //   - the number of inputs
2735   //   - each input node's traversal number
2736   //
2737   // What's a traversal number? We can't use node IDs because they're not stable
2738   // build-to-build, so we assign a new number for each node as it is visited.
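  //
  // Illustrative example (added for clarity; the node names are hypothetical):
  // for an End node with inputs {A, B}, where A and B each have the single
  // input C, the traversal below assigns End=0, A=1, C=2, B=3 and hashes the
  // nodes in post-order C, A, B, End, each combined with its opcode, its input
  // count, and its inputs' traversal numbers.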
2739 
2740   ZoneVector<byte> state(graph->NodeCount(), kUnvisited, &local_zone);
2741   ZoneVector<NodeId> traversal_numbers(graph->NodeCount(), kUnassigned,
2742                                        &local_zone);
2743   ZoneStack<Node*> stack(&local_zone);
2744 
2745   NodeId visited_count = 0;
2746   size_t hash = 0;
2747 
2748   stack.push(graph->end());
2749   state[graph->end()->id()] = kOnStack;
2750   traversal_numbers[graph->end()->id()] = visited_count++;
2751   while (!stack.empty()) {
2752     Node* n = stack.top();
2753     bool pop = true;
2754     for (Node* const i : n->inputs()) {
2755       if (state[i->id()] == kUnvisited) {
2756         state[i->id()] = kOnStack;
2757         traversal_numbers[i->id()] = visited_count++;
2758         stack.push(i);
2759         pop = false;
2760         break;
2761       }
2762     }
2763     if (pop) {
2764       state[n->id()] = kVisited;
2765       stack.pop();
2766       hash = base::hash_combine(hash, traversal_numbers[n->id()], n->opcode(),
2767                                 n->InputCount());
2768       for (Node* const i : n->inputs()) {
2769         DCHECK(traversal_numbers[i->id()] != kUnassigned);
2770         hash = base::hash_combine(hash, traversal_numbers[i->id()]);
2771       }
2772     }
2773   }
2774   return Smi(IntToSmi(static_cast<int>(hash))).value();
2775 }
2776 
2777 }  // namespace
2778 
2779 MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
2780     Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
2781     JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
2782     const char* debug_name, int32_t builtin_index,
2783     PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
2784     const ProfileDataFromFile* profile_data) {
2785   OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
2786   info.set_builtin_index(builtin_index);
2787 
2788   if (poisoning_level != PoisoningMitigationLevel::kDontPoison) {
2789     info.SetPoisoningMitigationLevel(poisoning_level);
2790   }
2791 
2792   // Construct a pipeline for scheduling and code generation.
2793   ZoneStats zone_stats(isolate->allocator());
2794   NodeOriginTable node_origins(graph);
2795   JumpOptimizationInfo jump_opt;
2796   bool should_optimize_jumps = isolate->serializer_enabled() &&
2797                                FLAG_turbo_rewrite_far_jumps &&
2798                                !FLAG_turbo_profiling;
2799   PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
2800                     jsgraph, nullptr, source_positions, &node_origins,
2801                     should_optimize_jumps ? &jump_opt : nullptr, options,
2802                     profile_data);
2803   PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
2804   RuntimeCallTimerScope timer_scope(isolate,
2805                                     RuntimeCallCounterId::kOptimizeCode);
2806   data.set_verify_graph(FLAG_verify_csa);
2807   std::unique_ptr<PipelineStatistics> pipeline_statistics;
2808   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
2809     pipeline_statistics.reset(new PipelineStatistics(
2810         &info, isolate->GetTurboStatistics(), &zone_stats));
2811     pipeline_statistics->BeginPhaseKind("V8.TFStubCodegen");
2812   }
2813 
2814   PipelineImpl pipeline(&data);
2815 
2816   if (info.trace_turbo_json() || info.trace_turbo_graph()) {
2817     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
2818     tracing_scope.stream()
2819         << "---------------------------------------------------\n"
2820         << "Begin compiling " << debug_name << " using TurboFan" << std::endl;
2821     if (info.trace_turbo_json()) {
2822       TurboJsonFile json_of(&info, std::ios_base::trunc);
2823       json_of << "{\"function\" : ";
2824       JsonPrintFunctionSource(json_of, -1, info.GetDebugName(),
2825                               Handle<Script>(), isolate,
2826                               Handle<SharedFunctionInfo>());
2827       json_of << ",\n\"phases\":[";
2828     }
2829     pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
2830   }
2831 
2832   pipeline.Run<CsaEarlyOptimizationPhase>();
2833   pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
2834 
2835   // Optimize memory access and allocation operations.
2836   pipeline.Run<MemoryOptimizationPhase>();
2837   pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
2838 
2839   pipeline.Run<CsaOptimizationPhase>();
2840   pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);
2841 
2842   pipeline.Run<DecompressionOptimizationPhase>();
2843   pipeline.RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(),
2844                              true);
2845 
2846   pipeline.Run<VerifyGraphPhase>(true);
2847 
2848   int graph_hash_before_scheduling = 0;
2849   if (FLAG_turbo_profiling || profile_data != nullptr) {
2850     graph_hash_before_scheduling = HashGraphForPGO(data.graph());
2851   }
2852 
2853   if (profile_data != nullptr &&
2854       profile_data->hash() != graph_hash_before_scheduling) {
2855     PrintF("Rejected profile data for %s due to function change\n", debug_name);
2856     profile_data = nullptr;
2857     data.set_profile_data(profile_data);
2858   }
2859 
2860   pipeline.ComputeScheduledGraph();
2861   DCHECK_NOT_NULL(data.schedule());
2862 
2863   // First run code generation on a copy of the pipeline, in order to be able to
2864   // repeat it for jump optimization. The first run has to happen on a temporary
2865   // pipeline to avoid deletion of zones on the main pipeline.
2866   PipelineData second_data(&zone_stats, &info, isolate, isolate->allocator(),
2867                            data.graph(), data.jsgraph(), data.schedule(),
2868                            data.source_positions(), data.node_origins(),
2869                            data.jump_optimization_info(), options,
2870                            profile_data);
2871   PipelineJobScope second_scope(&second_data,
2872                                 isolate->counters()->runtime_call_stats());
2873   second_data.set_verify_graph(FLAG_verify_csa);
2874   PipelineImpl second_pipeline(&second_data);
2875   second_pipeline.SelectInstructionsAndAssemble(call_descriptor);
2876 
2877   if (FLAG_turbo_profiling) {
2878     info.profiler_data()->SetHash(graph_hash_before_scheduling);
2879   }
2880 
2881   if (jump_opt.is_optimizable()) {
2882     jump_opt.set_optimizing();
2883     return pipeline.GenerateCode(call_descriptor);
2884   } else {
2885     return second_pipeline.FinalizeCode();
2886   }
2887 }
2888 
2889 struct BlockStartsAsJSON {
2890   const ZoneVector<int>* block_starts;
2891 };
2892 
2893 std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
2894   out << ", \"blockIdToOffset\": {";
2895   bool need_comma = false;
2896   for (size_t i = 0; i < s.block_starts->size(); ++i) {
2897     if (need_comma) out << ", ";
2898     int offset = (*s.block_starts)[i];
2899     out << "\"" << i << "\":" << offset;
2900     need_comma = true;
2901   }
2902   out << "},";
2903   return out;
2904 }
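// For illustration (hypothetical offsets): a sequence with three blocks that
// start at byte offsets 0, 12 and 40 would be emitted by the operator above as
//   , "blockIdToOffset": {"0":0, "1":12, "2":40},
// which the callers below splice into the enclosing "disassembly" trace record.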
2905 
2906 // static
2907 wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
2908     wasm::WasmEngine* wasm_engine, CallDescriptor* call_descriptor,
2909     MachineGraph* mcgraph, CodeKind kind, int wasm_kind, const char* debug_name,
2910     const AssemblerOptions& options, SourcePositionTable* source_positions) {
2911   Graph* graph = mcgraph->graph();
2912   OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
2913   // Construct a pipeline for scheduling and code generation.
2914   ZoneStats zone_stats(wasm_engine->allocator());
2915   NodeOriginTable* node_positions = graph->zone()->New<NodeOriginTable>(graph);
2916   // {instruction_buffer} must live longer than {PipelineData}, since
2917   // {PipelineData} will reference the {instruction_buffer} via the
2918   // {AssemblerBuffer} of the {Assembler} contained in the {CodeGenerator}.
2919   std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
2920       wasm::WasmInstructionBuffer::New();
2921   PipelineData data(&zone_stats, wasm_engine, &info, mcgraph, nullptr,
2922                     source_positions, node_positions, options);
2923   std::unique_ptr<PipelineStatistics> pipeline_statistics;
2924   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
2925     pipeline_statistics.reset(new PipelineStatistics(
2926         &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats));
2927     pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
2928   }
2929 
2930   PipelineImpl pipeline(&data);
2931 
2932   if (info.trace_turbo_json() || info.trace_turbo_graph()) {
2933     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
2934     tracing_scope.stream()
2935         << "---------------------------------------------------\n"
2936         << "Begin compiling method " << info.GetDebugName().get()
2937         << " using TurboFan" << std::endl;
2938   }
2939 
2940   if (info.trace_turbo_graph()) {  // Simple textual RPO.
2941     StdoutStream{} << "-- wasm stub " << CodeKindToString(kind) << " graph -- "
2942                    << std::endl
2943                    << AsRPO(*graph);
2944   }
2945 
2946   if (info.trace_turbo_json()) {
2947     TurboJsonFile json_of(&info, std::ios_base::trunc);
2948     json_of << "{\"function\":\"" << info.GetDebugName().get()
2949             << "\", \"source\":\"\",\n\"phases\":[";
2950   }
2951 
2952   pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true);
2953   pipeline.ComputeScheduledGraph();
2954 
2955   Linkage linkage(call_descriptor);
2956   CHECK(pipeline.SelectInstructions(&linkage));
2957   pipeline.AssembleCode(&linkage, instruction_buffer->CreateView());
2958 
2959   CodeGenerator* code_generator = pipeline.code_generator();
2960   wasm::WasmCompilationResult result;
2961   code_generator->tasm()->GetCode(
2962       nullptr, &result.code_desc, code_generator->safepoint_table_builder(),
2963       static_cast<int>(code_generator->GetHandlerTableOffset()));
2964   result.instr_buffer = instruction_buffer->ReleaseBuffer();
2965   result.source_positions = code_generator->GetSourcePositionTable();
2966   result.protected_instructions_data =
2967       code_generator->GetProtectedInstructionsData();
2968   result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
2969   result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
2970   result.result_tier = wasm::ExecutionTier::kTurbofan;
2971 
2972   DCHECK(result.succeeded());
2973 
2974   if (info.trace_turbo_json()) {
2975     TurboJsonFile json_of(&info, std::ios_base::app);
2976     json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
2977             << BlockStartsAsJSON{&code_generator->block_starts()}
2978             << "\"data\":\"";
2979 #ifdef ENABLE_DISASSEMBLER
2980     std::stringstream disassembler_stream;
2981     Disassembler::Decode(
2982         nullptr, &disassembler_stream, result.code_desc.buffer,
2983         result.code_desc.buffer + result.code_desc.safepoint_table_offset,
2984         CodeReference(&result.code_desc));
2985     for (auto const c : disassembler_stream.str()) {
2986       json_of << AsEscapedUC16ForJSON(c);
2987     }
2988 #endif  // ENABLE_DISASSEMBLER
2989     json_of << "\"}\n]";
2990     json_of << "\n}";
2991   }
2992 
2993   if (info.trace_turbo_json() || info.trace_turbo_graph()) {
2994     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
2995     tracing_scope.stream()
2996         << "---------------------------------------------------\n"
2997         << "Finished compiling method " << info.GetDebugName().get()
2998         << " using TurboFan" << std::endl;
2999   }
3000 
3001   return result;
3002 }
3003 
3004 // static
3005 MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
3006     OptimizedCompilationInfo* info, Isolate* isolate,
3007     std::unique_ptr<JSHeapBroker>* out_broker) {
3008   ZoneStats zone_stats(isolate->allocator());
3009   std::unique_ptr<PipelineStatistics> pipeline_statistics(
3010       CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
3011                                &zone_stats));
3012 
3013   PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get(),
3014                     i::FLAG_concurrent_inlining);
3015   PipelineImpl pipeline(&data);
3016 
3017   Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
3018 
3019   {
3020     CompilationHandleScope compilation_scope(isolate, info);
3021     CanonicalHandleScope canonical(isolate, info);
3022     info->ReopenHandlesInNewHandleScope(isolate);
3023     pipeline.Serialize();
3024     // Emulating the proper pipeline, we call CreateGraph in different places
3025     // (i.e. before or after creating a LocalIsolateScope) depending on
3026     // is_concurrent_inlining.
3027     if (!data.broker()->is_concurrent_inlining()) {
3028       if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
3029     }
3030   }
3031 
3032   {
3033     LocalIsolate local_isolate(isolate, ThreadKind::kMain);
3034     LocalIsolateScope local_isolate_scope(data.broker(), info, &local_isolate);
3035     if (data.broker()->is_concurrent_inlining()) {
3036       if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
3037     }
3038     // We selectively Unpark inside OptimizeGraph.
3039     if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
3040 
3041     pipeline.AssembleCode(&linkage);
3042   }
3043 
3044   const bool will_retire_broker = out_broker == nullptr;
3045   if (!will_retire_broker) {
3046     // If the broker is going to be kept alive, pass the persistent and the
3047     // canonical handles containers back to the JSHeapBroker since it will
3048     // outlive the OptimizedCompilationInfo.
3049     data.broker()->SetPersistentAndCopyCanonicalHandlesForTesting(
3050         info->DetachPersistentHandles(), info->DetachCanonicalHandles());
3051   }
3052 
3053   Handle<Code> code;
3054   if (pipeline.FinalizeCode(will_retire_broker).ToHandle(&code) &&
3055       pipeline.CommitDependencies(code)) {
3056     if (!will_retire_broker) *out_broker = data.ReleaseBroker();
3057     return code;
3058   }
3059   return MaybeHandle<Code>();
3060 }
3061 
3062 // static
3063 MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
3064     OptimizedCompilationInfo* info, Isolate* isolate,
3065     CallDescriptor* call_descriptor, Graph* graph,
3066     const AssemblerOptions& options, Schedule* schedule) {
3067   // Construct a pipeline for scheduling and code generation.
3068   ZoneStats zone_stats(isolate->allocator());
3069   NodeOriginTable* node_positions = info->zone()->New<NodeOriginTable>(graph);
3070   PipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph,
3071                     nullptr, schedule, nullptr, node_positions, nullptr,
3072                     options, nullptr);
3073   std::unique_ptr<PipelineStatistics> pipeline_statistics;
3074   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
3075     pipeline_statistics.reset(new PipelineStatistics(
3076         info, isolate->GetTurboStatistics(), &zone_stats));
3077     pipeline_statistics->BeginPhaseKind("V8.TFTestCodegen");
3078   }
3079 
3080   PipelineImpl pipeline(&data);
3081 
3082   if (info->trace_turbo_json()) {
3083     TurboJsonFile json_of(info, std::ios_base::trunc);
3084     json_of << "{\"function\":\"" << info->GetDebugName().get()
3085             << "\", \"source\":\"\",\n\"phases\":[";
3086   }
3087   // TODO(rossberg): Should this really be untyped?
3088   pipeline.RunPrintAndVerify("V8.TFMachineCode", true);
3089 
3090   // Ensure we have a schedule.
3091   if (data.schedule() == nullptr) {
3092     pipeline.ComputeScheduledGraph();
3093   }
3094 
3095   Handle<Code> code;
3096   if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
3097       pipeline.CommitDependencies(code)) {
3098     return code;
3099   }
3100   return MaybeHandle<Code>();
3101 }
3102 
3103 // static
3104 std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
3105     Isolate* isolate, Handle<JSFunction> function, CodeKind code_kind,
3106     bool has_script, BailoutId osr_offset, JavaScriptFrame* osr_frame) {
3107   Handle<SharedFunctionInfo> shared =
3108       handle(function->shared(), function->GetIsolate());
3109   return std::make_unique<PipelineCompilationJob>(
3110       isolate, shared, function, osr_offset, osr_frame, code_kind);
3111 }
3112 
3113 // static
3114 void Pipeline::GenerateCodeForWasmFunction(
3115     OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
3116     MachineGraph* mcgraph, CallDescriptor* call_descriptor,
3117     SourcePositionTable* source_positions, NodeOriginTable* node_origins,
3118     wasm::FunctionBody function_body, const wasm::WasmModule* module,
3119     int function_index) {
3120   ZoneStats zone_stats(wasm_engine->allocator());
3121   std::unique_ptr<PipelineStatistics> pipeline_statistics(
3122       CreatePipelineStatistics(wasm_engine, function_body, module, info,
3123                                &zone_stats));
3124   // {instruction_buffer} must live longer than {PipelineData}, since
3125   // {PipelineData} will reference the {instruction_buffer} via the
3126   // {AssemblerBuffer} of the {Assembler} contained in the {CodeGenerator}.
3127   std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
3128       wasm::WasmInstructionBuffer::New();
3129   PipelineData data(&zone_stats, wasm_engine, info, mcgraph,
3130                     pipeline_statistics.get(), source_positions, node_origins,
3131                     WasmAssemblerOptions());
3132 
3133   PipelineImpl pipeline(&data);
3134 
3135   if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
3136     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
3137     tracing_scope.stream()
3138         << "---------------------------------------------------\n"
3139         << "Begin compiling method " << data.info()->GetDebugName().get()
3140         << " using TurboFan" << std::endl;
3141   }
3142 
3143   pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
3144 
3145   data.BeginPhaseKind("V8.WasmOptimization");
3146   const bool is_asm_js = is_asmjs_module(module);
3147   if (FLAG_turbo_splitting && !is_asm_js) {
3148     data.info()->set_splitting();
3149   }
3150   if (FLAG_wasm_opt || is_asm_js) {
3151     PipelineRunScope scope(&data, "V8.WasmFullOptimization",
3152                            RuntimeCallCounterId::kOptimizeWasmFullOptimization);
3153     GraphReducer graph_reducer(scope.zone(), data.graph(),
3154                                &data.info()->tick_counter(), data.broker(),
3155                                data.mcgraph()->Dead());
3156     DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
3157                                               data.common(), scope.zone());
3158     ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
3159     const bool allow_signalling_nan = is_asm_js;
3160     MachineOperatorReducer machine_reducer(&graph_reducer, data.mcgraph(),
3161                                            allow_signalling_nan);
3162     CommonOperatorReducer common_reducer(&graph_reducer, data.graph(),
3163                                          data.broker(), data.common(),
3164                                          data.machine(), scope.zone());
3165     AddReducer(&data, &graph_reducer, &dead_code_elimination);
3166     AddReducer(&data, &graph_reducer, &machine_reducer);
3167     AddReducer(&data, &graph_reducer, &common_reducer);
3168     AddReducer(&data, &graph_reducer, &value_numbering);
3169     graph_reducer.ReduceGraph();
3170   } else {
3171     PipelineRunScope scope(&data, "V8.OptimizeWasmBaseOptimization",
3172                            RuntimeCallCounterId::kOptimizeWasmBaseOptimization);
3173     GraphReducer graph_reducer(scope.zone(), data.graph(),
3174                                &data.info()->tick_counter(), data.broker(),
3175                                data.mcgraph()->Dead());
3176     ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
3177     AddReducer(&data, &graph_reducer, &value_numbering);
3178     graph_reducer.ReduceGraph();
3179   }
3180   pipeline.RunPrintAndVerify("V8.WasmOptimization", true);
3181 
3182   if (data.node_origins()) {
3183     data.node_origins()->RemoveDecorator();
3184   }
3185 
3186   pipeline.ComputeScheduledGraph();
3187 
3188   Linkage linkage(call_descriptor);
3189   if (!pipeline.SelectInstructions(&linkage)) return;
3190   pipeline.AssembleCode(&linkage, instruction_buffer->CreateView());
3191 
3192   auto result = std::make_unique<wasm::WasmCompilationResult>();
3193   CodeGenerator* code_generator = pipeline.code_generator();
3194   code_generator->tasm()->GetCode(
3195       nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
3196       static_cast<int>(code_generator->GetHandlerTableOffset()));
3197 
3198   result->instr_buffer = instruction_buffer->ReleaseBuffer();
3199   result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
3200   result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
3201   result->source_positions = code_generator->GetSourcePositionTable();
3202   result->protected_instructions_data =
3203       code_generator->GetProtectedInstructionsData();
3204   result->result_tier = wasm::ExecutionTier::kTurbofan;
3205 
3206   if (data.info()->trace_turbo_json()) {
3207     TurboJsonFile json_of(data.info(), std::ios_base::app);
3208     json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
3209             << BlockStartsAsJSON{&code_generator->block_starts()}
3210             << "\"data\":\"";
3211 #ifdef ENABLE_DISASSEMBLER
3212     std::stringstream disassembler_stream;
3213     Disassembler::Decode(
3214         nullptr, &disassembler_stream, result->code_desc.buffer,
3215         result->code_desc.buffer + result->code_desc.safepoint_table_offset,
3216         CodeReference(&result->code_desc));
3217     for (auto const c : disassembler_stream.str()) {
3218       json_of << AsEscapedUC16ForJSON(c);
3219     }
3220 #endif  // ENABLE_DISASSEMBLER
3221     json_of << "\"}\n]";
3222     json_of << "\n}";
3223   }
3224 
3225   if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
3226     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
3227     tracing_scope.stream()
3228         << "---------------------------------------------------\n"
3229         << "Finished compiling method " << data.info()->GetDebugName().get()
3230         << " using TurboFan" << std::endl;
3231   }
3232 
3233   DCHECK(result->succeeded());
3234   info->SetWasmCompilationResult(std::move(result));
3235 }
3236 
3237 bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
3238                                            InstructionSequence* sequence,
3239                                            bool use_mid_tier_register_allocator,
3240                                            bool run_verifier) {
3241   OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
3242                                 CodeKind::FOR_TESTING);
3243   ZoneStats zone_stats(sequence->isolate()->allocator());
3244   PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
3245   data.InitializeFrameData(nullptr);
3246 
3247   if (info.trace_turbo_json()) {
3248     TurboJsonFile json_of(&info, std::ios_base::trunc);
3249     json_of << "{\"function\":\"" << info.GetDebugName().get()
3250             << "\", \"source\":\"\",\n\"phases\":[";
3251   }
3252 
3253   PipelineImpl pipeline(&data);
3254   if (use_mid_tier_register_allocator) {
3255     pipeline.AllocateRegistersForMidTier(config, nullptr, run_verifier);
3256   } else {
3257     pipeline.AllocateRegistersForTopTier(config, nullptr, run_verifier);
3258   }
3259 
3260   return !data.compilation_failed();
3261 }
3262 
3263 void PipelineImpl::ComputeScheduledGraph() {
3264   PipelineData* data = this->data_;
3265 
3266   // We should only schedule the graph if it is not scheduled yet.
3267   DCHECK_NULL(data->schedule());
3268 
3269   Run<LateGraphTrimmingPhase>();
3270   RunPrintAndVerify(LateGraphTrimmingPhase::phase_name(), true);
3271 
3272   Run<ComputeSchedulePhase>();
3273   TraceScheduleAndVerify(data->info(), data, data->schedule(), "schedule");
3274 }
3275 
3276 bool PipelineImpl::SelectInstructions(Linkage* linkage) {
3277   auto call_descriptor = linkage->GetIncomingDescriptor();
3278   PipelineData* data = this->data_;
3279 
3280   // We should have a scheduled graph.
3281   DCHECK_NOT_NULL(data->graph());
3282   DCHECK_NOT_NULL(data->schedule());
3283 
3284   if (FLAG_turbo_profiling) {
3285     data->info()->set_profiler_data(BasicBlockInstrumentor::Instrument(
3286         info(), data->graph(), data->schedule(), data->isolate()));
3287   }
3288 
3289   bool verify_stub_graph =
3290       data->verify_graph() ||
3291       (FLAG_turbo_verify_machine_graph != nullptr &&
3292        (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
3293         !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())));
3294   // Jump optimization runs instruction selection twice, but the instruction
3295   // selector mutates nodes like swapping the inputs of a load, which can
3296   // violate the machine graph verification rules. So we skip the second
3297   // verification on a graph that has already been verified.
3298   auto jump_opt = data->jump_optimization_info();
3299   if (jump_opt && jump_opt->is_optimizing()) {
3300     verify_stub_graph = false;
3301   }
3302   if (verify_stub_graph) {
3303     if (FLAG_trace_verify_csa) {
3304       UnparkedScopeIfNeeded scope(data->broker());
3305       AllowHandleDereference allow_deref;
3306       CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
3307       tracing_scope.stream()
3308           << "--------------------------------------------------\n"
3309           << "--- Verifying " << data->debug_name()
3310           << " generated by TurboFan\n"
3311           << "--------------------------------------------------\n"
3312           << *data->schedule()
3313           << "--------------------------------------------------\n"
3314           << "--- End of " << data->debug_name() << " generated by TurboFan\n"
3315           << "--------------------------------------------------\n";
3316     }
3317     // TODO(jgruber): The parameter is called is_stub but actually contains
3318     // something different. Update either the name or its contents.
3319     const bool is_stub =
3320         !data->info()->IsOptimizing() && !data->info()->IsWasm();
3321     Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName);
3322     MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage, is_stub,
3323                               data->debug_name(), &temp_zone);
3324   }
3325 
3326   data->InitializeInstructionSequence(call_descriptor);
3327 
3328   // Depending on which code path led us to this function, the frame may or
3329   // may not have been initialized. If it hasn't yet, initialize it now.
3330   if (!data->frame()) {
3331     data->InitializeFrameData(call_descriptor);
3332   }
3333   // Select and schedule instructions covering the scheduled graph.
3334   Run<InstructionSelectionPhase>(linkage);
3335   if (data->compilation_failed()) {
3336     info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
3337     data->EndPhaseKind();
3338     return false;
3339   }
3340 
3341   if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
3342     UnparkedScopeIfNeeded scope(data->broker());
3343     AllowHandleDereference allow_deref;
3344     TurboCfgFile tcf(isolate());
3345     tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
3346                  data->sequence());
3347   }
3348 
3349   if (info()->trace_turbo_json()) {
3350     std::ostringstream source_position_output;
3351     // Output source position information before the graph is deleted.
3352     if (data_->source_positions() != nullptr) {
3353       data_->source_positions()->PrintJson(source_position_output);
3354     } else {
3355       source_position_output << "{}";
3356     }
3357     source_position_output << ",\n\"NodeOrigins\" : ";
3358     data_->node_origins()->PrintJson(source_position_output);
3359     data_->set_source_position_output(source_position_output.str());
3360   }
3361 
3362   data->DeleteGraphZone();
3363 
3364   data->BeginPhaseKind("V8.TFRegisterAllocation");
3365 
3366   bool run_verifier = FLAG_turbo_verify_allocation;
3367 
3368   // Allocate registers.
3369   if (call_descriptor->HasRestrictedAllocatableRegisters()) {
3370     RegList registers = call_descriptor->AllocatableRegisters();
3371     DCHECK_LT(0, NumRegs(registers));
3372     std::unique_ptr<const RegisterConfiguration> config;
3373     config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
3374     AllocateRegistersForTopTier(config.get(), call_descriptor, run_verifier);
3375   } else {
3376     const RegisterConfiguration* config;
3377     if (data->info()->GetPoisoningMitigationLevel() !=
3378         PoisoningMitigationLevel::kDontPoison) {
3379 #ifdef V8_TARGET_ARCH_IA32
3380     FATAL("Poisoning is not supported on ia32.");
3381 #else
3382       config = RegisterConfiguration::Poisoning();
3383 #endif  // V8_TARGET_ARCH_IA32
3384     } else {
3385       config = RegisterConfiguration::Default();
3386     }
3387 
3388     if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) {
3389       AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
3390     } else {
3391       AllocateRegistersForTopTier(config, call_descriptor, run_verifier);
3392     }
3393   }
3394 
3395   // Verify the instruction sequence has the same hash in two stages.
3396   VerifyGeneratedCodeIsIdempotent();
3397 
3398   Run<FrameElisionPhase>();
3399   if (data->compilation_failed()) {
3400     info()->AbortOptimization(
3401         BailoutReason::kNotEnoughVirtualRegistersRegalloc);
3402     data->EndPhaseKind();
3403     return false;
3404   }
3405 
3406   // TODO(mtrofin): move this off to the register allocator.
3407   bool generate_frame_at_start =
3408       data_->sequence()->instruction_blocks().front()->must_construct_frame();
3409   // Optimize jumps.
3410   if (FLAG_turbo_jt) {
3411     Run<JumpThreadingPhase>(generate_frame_at_start);
3412   }
3413 
3414   data->EndPhaseKind();
3415 
3416   return true;
3417 }
3418 
3419 void PipelineImpl::VerifyGeneratedCodeIsIdempotent() {
3420   PipelineData* data = this->data_;
3421   JumpOptimizationInfo* jump_opt = data->jump_optimization_info();
3422   if (jump_opt == nullptr) return;
3423 
3424   InstructionSequence* code = data->sequence();
3425   int instruction_blocks = code->InstructionBlockCount();
3426   int virtual_registers = code->VirtualRegisterCount();
3427   size_t hash_code = base::hash_combine(instruction_blocks, virtual_registers);
3428   for (auto instr : *code) {
3429     hash_code = base::hash_combine(hash_code, instr->opcode(),
3430                                    instr->InputCount(), instr->OutputCount());
3431   }
3432   for (int i = 0; i < virtual_registers; i++) {
3433     hash_code = base::hash_combine(hash_code, code->GetRepresentation(i));
3434   }
3435   if (jump_opt->is_collecting()) {
3436     jump_opt->set_hash_code(hash_code);
3437   } else {
3438     CHECK_EQ(hash_code, jump_opt->hash_code());
3439   }
3440 }
3441 
3442 struct InstructionStartsAsJSON {
3443   const ZoneVector<TurbolizerInstructionStartInfo>* instr_starts;
3444 };
3445 
3446 std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
3447   out << ", \"instructionOffsetToPCOffset\": {";
3448   bool need_comma = false;
3449   for (size_t i = 0; i < s.instr_starts->size(); ++i) {
3450     if (need_comma) out << ", ";
3451     const TurbolizerInstructionStartInfo& info = (*s.instr_starts)[i];
3452     out << "\"" << i << "\": {";
3453     out << "\"gap\": " << info.gap_pc_offset;
3454     out << ", \"arch\": " << info.arch_instr_pc_offset;
3455     out << ", \"condition\": " << info.condition_pc_offset;
3456     out << "}";
3457     need_comma = true;
3458   }
3459   out << "}";
3460   return out;
3461 }
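// For illustration (hypothetical PC offsets): with two instructions, the
// operator above would emit
//   , "instructionOffsetToPCOffset": {"0": {"gap": 0, "arch": 4, "condition": 8}, "1": {"gap": 12, "arch": 16, "condition": 20}}
// i.e. one entry per instruction, keyed by its index in the sequence.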
3462 
3463 struct TurbolizerCodeOffsetsInfoAsJSON {
3464   const TurbolizerCodeOffsetsInfo* offsets_info;
3465 };
3466 
3467 std::ostream& operator<<(std::ostream& out,
3468                          const TurbolizerCodeOffsetsInfoAsJSON& s) {
3469   out << ", \"codeOffsetsInfo\": {";
3470   out << "\"codeStartRegisterCheck\": "
3471       << s.offsets_info->code_start_register_check << ", ";
3472   out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", ";
3473   out << "\"initPoison\": " << s.offsets_info->init_poison << ", ";
3474   out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", ";
3475   out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", ";
3476   out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits
3477       << ", ";
3478   out << "\"pools\": " << s.offsets_info->pools << ", ";
3479   out << "\"jumpTables\": " << s.offsets_info->jump_tables;
3480   out << "}";
3481   return out;
3482 }
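// For illustration (hypothetical offsets), the operator above produces a
// single object of the form
//   , "codeOffsetsInfo": {"codeStartRegisterCheck": 0, "deoptCheck": 4,
//     "initPoison": 8, "blocksStart": 12, "outOfLineCode": 96,
//     "deoptimizationExits": 120, "pools": 128, "jumpTables": 136}
// mirroring the fields of TurbolizerCodeOffsetsInfo.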
3483 
3484 void PipelineImpl::AssembleCode(Linkage* linkage,
3485                                 std::unique_ptr<AssemblerBuffer> buffer) {
3486   PipelineData* data = this->data_;
3487   data->BeginPhaseKind("V8.TFCodeGeneration");
3488   data->InitializeCodeGenerator(linkage, std::move(buffer));
3489 
3490   UnparkedScopeIfNeeded unparked_scope(data->broker(), FLAG_code_comments);
3491 
3492   Run<AssembleCodePhase>();
3493   if (data->info()->trace_turbo_json()) {
3494     TurboJsonFile json_of(data->info(), std::ios_base::app);
3495     json_of << "{\"name\":\"code generation\""
3496             << ", \"type\":\"instructions\""
3497             << InstructionStartsAsJSON{&data->code_generator()->instr_starts()}
3498             << TurbolizerCodeOffsetsInfoAsJSON{
3499                    &data->code_generator()->offsets_info()};
3500     json_of << "},\n";
3501   }
3502   data->DeleteInstructionZone();
3503   data->EndPhaseKind();
3504 }
3505 
3506 MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
3507   PipelineData* data = this->data_;
3508   data->BeginPhaseKind("V8.TFFinalizeCode");
3509   if (data->broker() && retire_broker) {
3510     data->broker()->Retire();
3511   }
3512   Run<FinalizeCodePhase>();
3513 
3514   MaybeHandle<Code> maybe_code = data->code();
3515   Handle<Code> code;
3516   if (!maybe_code.ToHandle(&code)) {
3517     return maybe_code;
3518   }
3519 
3520   info()->SetCode(code);
3521   PrintCode(isolate(), code, info());
3522 
3523   if (info()->trace_turbo_json()) {
3524     TurboJsonFile json_of(info(), std::ios_base::app);
3525 
3526     json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
3527             << BlockStartsAsJSON{&data->code_generator()->block_starts()}
3528             << "\"data\":\"";
3529 #ifdef ENABLE_DISASSEMBLER
3530     std::stringstream disassembly_stream;
3531     code->Disassemble(nullptr, disassembly_stream, isolate());
3532     std::string disassembly_string(disassembly_stream.str());
3533     for (const auto& c : disassembly_string) {
3534       json_of << AsEscapedUC16ForJSON(c);
3535     }
3536 #endif  // ENABLE_DISASSEMBLER
3537     json_of << "\"}\n],\n";
3538     json_of << "\"nodePositions\":";
3539     json_of << data->source_position_output() << ",\n";
3540     JsonPrintAllSourceWithPositions(json_of, data->info(), isolate());
3541     json_of << "\n}";
3542   }
3543   if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
3544     CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
3545     tracing_scope.stream()
3546         << "---------------------------------------------------\n"
3547         << "Finished compiling method " << info()->GetDebugName().get()
3548         << " using TurboFan" << std::endl;
3549   }
3550   data->EndPhaseKind();
3551   return code;
3552 }
3553 
3554 bool PipelineImpl::SelectInstructionsAndAssemble(
3555     CallDescriptor* call_descriptor) {
3556   Linkage linkage(call_descriptor);
3557 
3558   // Perform instruction selection and register allocation.
3559   if (!SelectInstructions(&linkage)) return false;
3560 
3561   // Generate the final machine code.
3562   AssembleCode(&linkage);
3563   return true;
3564 }
3565 
3566 MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
3567   if (!SelectInstructionsAndAssemble(call_descriptor)) {
3568     return MaybeHandle<Code>();
3569   }
3570   return FinalizeCode();
3571 }
3572 
3573 bool PipelineImpl::CommitDependencies(Handle<Code> code) {
3574   return data_->dependencies() == nullptr ||
3575          data_->dependencies()->Commit(code);
3576 }
3577 
3578 namespace {
3579 
3580 void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
3581                    const char* phase_name) {
3582   if (info->trace_turbo_json()) {
3583     UnparkedScopeIfNeeded scope(data->broker());
3584     AllowHandleDereference allow_deref;
3585     TurboJsonFile json_of(info, std::ios_base::app);
3586     json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\""
3587             << ",\"blocks\":" << InstructionSequenceAsJSON{data->sequence()}
3588             << ",\"register_allocation\":{"
3589             << RegisterAllocationDataAsJSON{*(data->register_allocation_data()),
3590                                             *(data->sequence())}
3591             << "}},\n";
3592   }
3593   if (info->trace_turbo_graph()) {
3594     UnparkedScopeIfNeeded scope(data->broker());
3595     AllowHandleDereference allow_deref;
3596     CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
3597     tracing_scope.stream() << "----- Instruction sequence " << phase_name
3598                            << " -----\n"
3599                            << *data->sequence();
3600   }
3601 }
3602 
3603 }  // namespace
3604 
3605 void PipelineImpl::AllocateRegistersForTopTier(
3606     const RegisterConfiguration* config, CallDescriptor* call_descriptor,
3607     bool run_verifier) {
3608   PipelineData* data = this->data_;
3609   // Don't track usage for this zone in compiler stats.
3610   std::unique_ptr<Zone> verifier_zone;
3611   RegisterAllocatorVerifier* verifier = nullptr;
3612   if (run_verifier) {
3613     verifier_zone.reset(
3614         new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
3615     verifier = verifier_zone->New<RegisterAllocatorVerifier>(
3616         verifier_zone.get(), config, data->sequence(), data->frame());
3617   }
3618 
3619 #ifdef DEBUG
3620   data_->sequence()->ValidateEdgeSplitForm();
3621   data_->sequence()->ValidateDeferredBlockEntryPaths();
3622   data_->sequence()->ValidateDeferredBlockExitPaths();
3623 #endif
3624 
3625   RegisterAllocationFlags flags;
3626   if (data->info()->trace_turbo_allocation()) {
3627     flags |= RegisterAllocationFlag::kTraceAllocation;
3628   }
3629   data->InitializeTopTierRegisterAllocationData(config, call_descriptor, flags);
3630 
  Run<MeetRegisterConstraintsPhase>();
  Run<ResolvePhisPhase>();
  Run<BuildLiveRangesPhase>();
  Run<BuildBundlesPhase>();

  TraceSequence(info(), data, "before register allocation");
  if (verifier != nullptr) {
    CHECK(!data->top_tier_register_allocation_data()
               ->ExistsUseWithoutDefinition());
    CHECK(data->top_tier_register_allocation_data()
              ->RangesDefinedInDeferredStayInDeferred());
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData(
        "PreAllocation", data->top_tier_register_allocation_data());
  }

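  // Allocate general purpose registers first; floating point registers are
  // only allocated if the sequence actually uses FP virtual registers.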
  Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();

  if (data->sequence()->HasFPVirtualRegisters()) {
    Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
  }

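  // Decide how live ranges get spilled, assign spill slots, and commit the
  // final assignment back into the instruction sequence.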
  Run<DecideSpillingModePhase>();
  Run<AssignSpillSlotsPhase>();
  Run<CommitAssignmentPhase>();

  // TODO(chromium:725559): remove this check once
  // we understand the cause of the bug. We keep just the
  // check at the end of the allocation.
  if (verifier != nullptr) {
    verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
  }

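  // Connect split live ranges with moves, resolve assignments across
  // control-flow edges, and record reference maps for the GC.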
  Run<ConnectRangesPhase>();

  Run<ResolveControlFlowPhase>();

  Run<PopulateReferenceMapsPhase>();

  if (FLAG_turbo_move_optimization) {
    Run<OptimizeMovesPhase>();
  }

  TraceSequence(info(), data, "after register allocation");

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData(
        "CodeGen", data->top_tier_register_allocation_data());
  }

  data->DeleteRegisterAllocationZone();
}

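// Mid-tier register allocation: runs the mid-tier allocator phases over the
// instruction sequence. When run_verifier is set, the resulting assignment is
// checked by the register allocator verifier.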
void PipelineImpl::AllocateRegistersForMidTier(
    const RegisterConfiguration* config, CallDescriptor* call_descriptor,
    bool run_verifier) {
  PipelineData* data = data_;
  // Don't track usage for this zone in compiler stats.
  std::unique_ptr<Zone> verifier_zone;
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    verifier_zone.reset(
        new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
    verifier = verifier_zone->New<RegisterAllocatorVerifier>(
        verifier_zone.get(), config, data->sequence(), data->frame());
  }

#ifdef DEBUG
  data->sequence()->ValidateEdgeSplitForm();
  data->sequence()->ValidateDeferredBlockEntryPaths();
  data->sequence()->ValidateDeferredBlockExitPaths();
#endif
  data->InitializeMidTierRegisterAllocationData(config, call_descriptor);

  TraceSequence(info(), data, "before register allocation");

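  // Define instruction outputs, allocate registers and spill slots, then
  // record reference maps for the GC.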
  Run<MidTierRegisterOutputDefinitionPhase>();

  Run<MidTierRegisterAllocatorPhase>();

  Run<MidTierSpillSlotAllocatorPhase>();

  Run<MidTierPopulateReferenceMapsPhase>();

  TraceSequence(info(), data, "after register allocation");

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  data->DeleteRegisterAllocationZone();
}

OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }

Isolate* PipelineImpl::isolate() const { return data_->isolate(); }

CodeGenerator* PipelineImpl::code_generator() const {
  return data_->code_generator();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8