// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/pipeline.h"

#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>

#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/builtins/profile-data-reader.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
#include "src/compiler/add-type-assertions-reducer.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/jump-threading.h"
#include "src/compiler/backend/mid-tier-register-allocator.h"
#include "src/compiler/backend/move-optimizer.h"
#include "src/compiler/backend/register-allocator-verifier.h"
#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/csa-load-elimination.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/decompression-optimizer.h"
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/loop-unrolling.h"
#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/node-observer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/zone-stats.h"
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/disassembler.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
#include "src/logging/code-events.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/shared-function-info.h"
#include "src/parsing/parse-info.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/compiler/wasm-compiler.h"
#include "src/compiler/wasm-inlining.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {
namespace compiler {

static constexpr char kCodegenZoneName[] = "codegen-zone";
static constexpr char kGraphZoneName[] = "graph-zone";
static constexpr char kInstructionZoneName[] = "instruction-zone";
static constexpr char kMachineGraphVerifierZoneName[] =
    "machine-graph-verifier-zone";
static constexpr char kPipelineCompilationJobZoneName[] =
    "pipeline-compilation-job-zone";
static constexpr char kRegisterAllocationZoneName[] =
    "register-allocation-zone";
static constexpr char kRegisterAllocatorVerifierZoneName[] =
    "register-allocator-verifier-zone";
namespace {

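// Walks the context chain outward from {closure}'s context and returns the
// first module context found, together with its distance from the closure's
// own context; returns Nothing if a native context is reached first.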
Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
  Context current = closure->context();
  size_t distance = 0;
  while (!current.IsNativeContext()) {
    if (current.IsModuleContext()) {
      return Just(
          OuterContext(handle(current, current.GetIsolate()), distance));
    }
    current = current.previous();
    distance++;
  }
  return Nothing<OuterContext>();
}

}  // anonymous namespace

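// PipelineData is the per-compilation bag of state threaded through all
// pipeline phases. It owns the graph, instruction, codegen, and register
// allocation zones (via ZoneStats scopes) and hands out the graph, operator
// builders, schedule, instruction sequence, and related tables that the
// phases share.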
class PipelineData {
 public:
  // For main entry point.
  PipelineData(ZoneStats* zone_stats, Isolate* isolate,
               OptimizedCompilationInfo* info,
               PipelineStatistics* pipeline_statistics)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        may_have_unverifiable_graph_(false),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        broker_(new JSHeapBroker(
            isolate_, info_->zone(), info_->trace_heap_broker(),
            info_->concurrent_inlining(), info->code_kind())),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {
    PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData");
    graph_ = graph_zone_->New<Graph>(graph_zone_);
    source_positions_ = graph_zone_->New<SourcePositionTable>(graph_);
    node_origins_ = info->trace_turbo_json()
                        ? graph_zone_->New<NodeOriginTable>(graph_)
                        : nullptr;
    simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
    machine_ = graph_zone_->New<MachineOperatorBuilder>(
        graph_zone_, MachineType::PointerRepresentation(),
        InstructionSelector::SupportedMachineOperatorFlags(),
        InstructionSelector::AlignmentRequirements());
    common_ = graph_zone_->New<CommonOperatorBuilder>(graph_zone_);
    javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
    jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_, javascript_,
                                         simplified_, machine_);
    observe_node_manager_ =
        info->node_observer()
            ? graph_zone_->New<ObserveNodeManager>(graph_zone_)
            : nullptr;
    dependencies_ =
        info_->zone()->New<CompilationDependencies>(broker_, info_->zone());
  }

#if V8_ENABLE_WEBASSEMBLY
  // For WebAssembly compile entry point.
  PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine,
               OptimizedCompilationInfo* info, MachineGraph* mcgraph,
               PipelineStatistics* pipeline_statistics,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins,
               const AssemblerOptions& assembler_options)
      : isolate_(nullptr),
        wasm_engine_(wasm_engine),
        allocator_(wasm_engine->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        may_have_unverifiable_graph_(false),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        graph_(mcgraph->graph()),
        source_positions_(source_positions),
        node_origins_(node_origins),
        machine_(mcgraph->machine()),
        common_(mcgraph->common()),
        mcgraph_(mcgraph),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(assembler_options) {
    simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
    javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
    jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_, javascript_,
                                         simplified_, machine_);
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  // For CodeStubAssembler and machine graph testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, AccountingAllocator* allocator, Graph* graph,
               JSGraph* jsgraph, Schedule* schedule,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
               const AssemblerOptions& assembler_options,
               const ProfileDataFromFile* profile_data)
      : isolate_(isolate),
#if V8_ENABLE_WEBASSEMBLY
        // TODO(clemensb): Remove this field, use GetWasmEngine directly
        // instead.
        wasm_engine_(wasm::GetWasmEngine()),
#endif  // V8_ENABLE_WEBASSEMBLY
        allocator_(allocator),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        graph_(graph),
        source_positions_(source_positions),
        node_origins_(node_origins),
        schedule_(schedule),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        jump_optimization_info_(jump_opt),
        assembler_options_(assembler_options),
        profile_data_(profile_data) {
    if (jsgraph) {
      jsgraph_ = jsgraph;
      simplified_ = jsgraph->simplified();
      machine_ = jsgraph->machine();
      common_ = jsgraph->common();
      javascript_ = jsgraph->javascript();
    } else {
      simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
      machine_ = graph_zone_->New<MachineOperatorBuilder>(
          graph_zone_, MachineType::PointerRepresentation(),
          InstructionSelector::SupportedMachineOperatorFlags(),
          InstructionSelector::AlignmentRequirements());
      common_ = graph_zone_->New<CommonOperatorBuilder>(graph_zone_);
      javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
      jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_,
                                           javascript_, simplified_, machine_);
    }
  }

  // For register allocation testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, InstructionSequence* sequence)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(sequence->zone()),
        sequence_(sequence),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {}

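  // Note: InitializeCodeGenerator below constructs the code generator with
  // pointers into zone-allocated state (frame, instruction sequence), which
  // is presumably why the destructor deletes it before tearing down any zone.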
  ~PipelineData() {
    // Must happen before zones are destroyed.
    delete code_generator_;
    code_generator_ = nullptr;
    DeleteTyper();
    DeleteRegisterAllocationZone();
    DeleteInstructionZone();
    DeleteCodegenZone();
    DeleteGraphZone();
  }

  PipelineData(const PipelineData&) = delete;
  PipelineData& operator=(const PipelineData&) = delete;

  Isolate* isolate() const { return isolate_; }
  AccountingAllocator* allocator() const { return allocator_; }
  OptimizedCompilationInfo* info() const { return info_; }
  ZoneStats* zone_stats() const { return zone_stats_; }
  CompilationDependencies* dependencies() const { return dependencies_; }
  PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }
  bool compilation_failed() const { return compilation_failed_; }
  void set_compilation_failed() { compilation_failed_ = true; }

  bool verify_graph() const { return verify_graph_; }
  void set_verify_graph(bool value) { verify_graph_ = value; }

  MaybeHandle<Code> code() { return code_; }
  void set_code(MaybeHandle<Code> code) {
    DCHECK(code_.is_null());
    code_ = code;
  }

  CodeGenerator* code_generator() const { return code_generator_; }

  // RawMachineAssembler generally produces graphs which cannot be verified.
  bool MayHaveUnverifiableGraph() const { return may_have_unverifiable_graph_; }

  Zone* graph_zone() const { return graph_zone_; }
  Graph* graph() const { return graph_; }
  SourcePositionTable* source_positions() const { return source_positions_; }
  NodeOriginTable* node_origins() const { return node_origins_; }
  MachineOperatorBuilder* machine() const { return machine_; }
  CommonOperatorBuilder* common() const { return common_; }
  JSOperatorBuilder* javascript() const { return javascript_; }
  JSGraph* jsgraph() const { return jsgraph_; }
  MachineGraph* mcgraph() const { return mcgraph_; }
  Handle<NativeContext> native_context() const {
    return handle(info()->native_context(), isolate());
  }
  Handle<JSGlobalObject> global_object() const {
    return handle(info()->global_object(), isolate());
  }

  JSHeapBroker* broker() const { return broker_; }
  std::unique_ptr<JSHeapBroker> ReleaseBroker() {
    std::unique_ptr<JSHeapBroker> broker(broker_);
    broker_ = nullptr;
    return broker;
  }

  Schedule* schedule() const { return schedule_; }
  void set_schedule(Schedule* schedule) {
    DCHECK(!schedule_);
    schedule_ = schedule;
  }
  void reset_schedule() { schedule_ = nullptr; }

  ObserveNodeManager* observe_node_manager() const {
    return observe_node_manager_;
  }

  Zone* instruction_zone() const { return instruction_zone_; }
  Zone* codegen_zone() const { return codegen_zone_; }
  InstructionSequence* sequence() const { return sequence_; }
  Frame* frame() const { return frame_; }

  Zone* register_allocation_zone() const { return register_allocation_zone_; }

  RegisterAllocationData* register_allocation_data() const {
    return register_allocation_data_;
  }
  TopTierRegisterAllocationData* top_tier_register_allocation_data() const {
    return TopTierRegisterAllocationData::cast(register_allocation_data_);
  }
  MidTierRegisterAllocationData* mid_tier_register_allocator_data() const {
    return MidTierRegisterAllocationData::cast(register_allocation_data_);
  }

  std::string const& source_position_output() const {
    return source_position_output_;
  }
  void set_source_position_output(std::string const& source_position_output) {
    source_position_output_ = source_position_output;
  }

  JumpOptimizationInfo* jump_optimization_info() const {
    return jump_optimization_info_;
  }

  const AssemblerOptions& assembler_options() const {
    return assembler_options_;
  }

  void ChooseSpecializationContext() {
    if (info()->function_context_specializing()) {
      DCHECK(info()->has_context());
      specialization_context_ =
          Just(OuterContext(handle(info()->context(), isolate()), 0));
    } else {
      specialization_context_ = GetModuleContext(info()->closure());
    }
  }

  Maybe<OuterContext> specialization_context() const {
    return specialization_context_;
  }

  size_t* address_of_max_unoptimized_frame_height() {
    return &max_unoptimized_frame_height_;
  }
  size_t max_unoptimized_frame_height() const {
    return max_unoptimized_frame_height_;
  }
  size_t* address_of_max_pushed_argument_count() {
    return &max_pushed_argument_count_;
  }
  size_t max_pushed_argument_count() const {
    return max_pushed_argument_count_;
  }

  CodeTracer* GetCodeTracer() const {
#if V8_ENABLE_WEBASSEMBLY
    if (wasm_engine_) return wasm_engine_->GetCodeTracer();
#endif  // V8_ENABLE_WEBASSEMBLY
    return isolate_->GetCodeTracer();
  }

  Typer* CreateTyper() {
    DCHECK_NULL(typer_);
    typer_ =
        new Typer(broker(), typer_flags_, graph(), &info()->tick_counter());
    return typer_;
  }

  void AddTyperFlag(Typer::Flag flag) {
    DCHECK_NULL(typer_);
    typer_flags_ |= flag;
  }

  void DeleteTyper() {
    delete typer_;
    typer_ = nullptr;
  }

  void DeleteGraphZone() {
    if (graph_zone_ == nullptr) return;
    graph_zone_scope_.Destroy();
    graph_zone_ = nullptr;
    graph_ = nullptr;
    source_positions_ = nullptr;
    node_origins_ = nullptr;
    simplified_ = nullptr;
    machine_ = nullptr;
    common_ = nullptr;
    javascript_ = nullptr;
    jsgraph_ = nullptr;
    mcgraph_ = nullptr;
    schedule_ = nullptr;
  }

  void DeleteInstructionZone() {
    if (instruction_zone_ == nullptr) return;
    instruction_zone_scope_.Destroy();
    instruction_zone_ = nullptr;
    sequence_ = nullptr;
  }

  void DeleteCodegenZone() {
    if (codegen_zone_ == nullptr) return;
    codegen_zone_scope_.Destroy();
    codegen_zone_ = nullptr;
    dependencies_ = nullptr;
    delete broker_;
    broker_ = nullptr;
    frame_ = nullptr;
  }

  void DeleteRegisterAllocationZone() {
    if (register_allocation_zone_ == nullptr) return;
    register_allocation_zone_scope_.Destroy();
    register_allocation_zone_ = nullptr;
    register_allocation_data_ = nullptr;
  }

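  // Builds an empty InstructionSequence over the schedule's basic blocks, for
  // instruction selection to fill in; the first block is marked as needing a
  // frame only when the incoming call descriptor requires one.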
  void InitializeInstructionSequence(const CallDescriptor* call_descriptor) {
    DCHECK_NULL(sequence_);
    InstructionBlocks* instruction_blocks =
        InstructionSequence::InstructionBlocksFor(instruction_zone(),
                                                  schedule());
    sequence_ = instruction_zone()->New<InstructionSequence>(
        isolate(), instruction_zone(), instruction_blocks);
    if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) {
      sequence_->instruction_blocks()[0]->mark_needs_frame();
    } else {
      DCHECK_EQ(0u, call_descriptor->CalleeSavedFPRegisters());
    }
  }

  void InitializeFrameData(CallDescriptor* call_descriptor) {
    DCHECK_NULL(frame_);
    int fixed_frame_size = 0;
    if (call_descriptor != nullptr) {
      fixed_frame_size =
          call_descriptor->CalculateFixedFrameSize(info()->code_kind());
    }
    frame_ = codegen_zone()->New<Frame>(fixed_frame_size);
    if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame());
  }

  void InitializeTopTierRegisterAllocationData(
      const RegisterConfiguration* config, CallDescriptor* call_descriptor,
      RegisterAllocationFlags flags) {
    DCHECK_NULL(register_allocation_data_);
    register_allocation_data_ =
        register_allocation_zone()->New<TopTierRegisterAllocationData>(
            config, register_allocation_zone(), frame(), sequence(), flags,
            &info()->tick_counter(), debug_name());
  }

  void InitializeMidTierRegisterAllocationData(
      const RegisterConfiguration* config, CallDescriptor* call_descriptor) {
    DCHECK_NULL(register_allocation_data_);
    register_allocation_data_ =
        register_allocation_zone()->New<MidTierRegisterAllocationData>(
            config, register_allocation_zone(), frame(), sequence(),
            &info()->tick_counter(), debug_name());
  }

  void InitializeOsrHelper() {
    DCHECK(!osr_helper_.has_value());
    osr_helper_.emplace(info());
  }

  void set_start_source_position(int position) {
    DCHECK_EQ(start_source_position_, kNoSourcePosition);
    start_source_position_ = position;
  }

  void InitializeCodeGenerator(Linkage* linkage) {
    DCHECK_NULL(code_generator_);
    code_generator_ = new CodeGenerator(
        codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
        osr_helper_, start_source_position_, jump_optimization_info_,
        assembler_options(), info_->builtin(), max_unoptimized_frame_height(),
        max_pushed_argument_count(),
        FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr);
  }

  void BeginPhaseKind(const char* phase_kind_name) {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->BeginPhaseKind(phase_kind_name);
    }
  }

  void EndPhaseKind() {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->EndPhaseKind();
    }
  }

  const char* debug_name() const { return debug_name_.get(); }

  const ProfileDataFromFile* profile_data() const { return profile_data_; }
  void set_profile_data(const ProfileDataFromFile* profile_data) {
    profile_data_ = profile_data;
  }

  // RuntimeCallStats that is only available during job execution but not
  // finalization.
  // TODO(delphick): Currently even during execution this can be nullptr, due to
  // JSToWasmWrapperCompilationUnit::Execute. Once a table can be extracted
  // there, this method can DCHECK that it is never nullptr.
  RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
  void set_runtime_call_stats(RuntimeCallStats* stats) {
    runtime_call_stats_ = stats;
  }

  // Used to skip the "wasm-inlining" phase when there are no JS-to-Wasm calls.
  bool has_js_wasm_calls() const { return has_js_wasm_calls_; }
  void set_has_js_wasm_calls(bool has_js_wasm_calls) {
    has_js_wasm_calls_ = has_js_wasm_calls;
  }

 private:
  Isolate* const isolate_;
#if V8_ENABLE_WEBASSEMBLY
  wasm::WasmEngine* const wasm_engine_ = nullptr;
#endif  // V8_ENABLE_WEBASSEMBLY
  AccountingAllocator* const allocator_;
  OptimizedCompilationInfo* const info_;
  std::unique_ptr<char[]> debug_name_;
  bool may_have_unverifiable_graph_ = true;
  ZoneStats* const zone_stats_;
  PipelineStatistics* pipeline_statistics_ = nullptr;
  bool compilation_failed_ = false;
  bool verify_graph_ = false;
  int start_source_position_ = kNoSourcePosition;
  base::Optional<OsrHelper> osr_helper_;
  MaybeHandle<Code> code_;
  CodeGenerator* code_generator_ = nullptr;
  Typer* typer_ = nullptr;
  Typer::Flags typer_flags_ = Typer::kNoFlags;

  // All objects in the following group of fields are allocated in graph_zone_.
  // They are all set to nullptr when the graph_zone_ is destroyed.
  ZoneStats::Scope graph_zone_scope_;
  Zone* graph_zone_ = nullptr;
  Graph* graph_ = nullptr;
  SourcePositionTable* source_positions_ = nullptr;
  NodeOriginTable* node_origins_ = nullptr;
  SimplifiedOperatorBuilder* simplified_ = nullptr;
  MachineOperatorBuilder* machine_ = nullptr;
  CommonOperatorBuilder* common_ = nullptr;
  JSOperatorBuilder* javascript_ = nullptr;
  JSGraph* jsgraph_ = nullptr;
  MachineGraph* mcgraph_ = nullptr;
  Schedule* schedule_ = nullptr;
  ObserveNodeManager* observe_node_manager_ = nullptr;

  // All objects in the following group of fields are allocated in
  // instruction_zone_. They are all set to nullptr when the instruction_zone_
  // is destroyed.
  ZoneStats::Scope instruction_zone_scope_;
  Zone* instruction_zone_;
  InstructionSequence* sequence_ = nullptr;

  // All objects in the following group of fields are allocated in
  // codegen_zone_. They are all set to nullptr when the codegen_zone_
  // is destroyed.
  ZoneStats::Scope codegen_zone_scope_;
  Zone* codegen_zone_;
  CompilationDependencies* dependencies_ = nullptr;
  JSHeapBroker* broker_ = nullptr;
  Frame* frame_ = nullptr;

  // All objects in the following group of fields are allocated in
  // register_allocation_zone_. They are all set to nullptr when the zone is
  // destroyed.
  ZoneStats::Scope register_allocation_zone_scope_;
  Zone* register_allocation_zone_;
  RegisterAllocationData* register_allocation_data_ = nullptr;

  // Source position output for --trace-turbo.
  std::string source_position_output_;

  JumpOptimizationInfo* jump_optimization_info_ = nullptr;
  AssemblerOptions assembler_options_;
  Maybe<OuterContext> specialization_context_ = Nothing<OuterContext>();

  // The maximal combined height of all inlined frames in their unoptimized
  // state, and the maximal number of arguments pushed during function calls.
  // Calculated during instruction selection, applied during code generation.
  size_t max_unoptimized_frame_height_ = 0;
  size_t max_pushed_argument_count_ = 0;

  RuntimeCallStats* runtime_call_stats_ = nullptr;
  const ProfileDataFromFile* profile_data_ = nullptr;

  bool has_js_wasm_calls_ = false;
};

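// PipelineImpl drives the individual phases over a PipelineData. A full
// top-tier compilation, as orchestrated by PipelineCompilationJob below,
// roughly runs the numbered steps in order:
//
//   pipeline.InitializeHeapBroker();    // A.1 (main thread)
//   pipeline.CreateGraph();             // A.2
//   pipeline.OptimizeGraph(linkage);    // B, covering substeps B.1 and B.2
//   pipeline.AssembleCode(linkage);     // C
//   pipeline.FinalizeCode();            // D (main thread)
//   pipeline.CommitDependencies(code);  // E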
class PipelineImpl final {
 public:
  explicit PipelineImpl(PipelineData* data) : data_(data) {}

  // Helpers for executing pipeline phases.
  template <typename Phase, typename... Args>
  void Run(Args&&... args);

  // Step A.1. Initialize the heap broker.
  void InitializeHeapBroker();

  // Step A.2. Run the graph creation and initial optimization passes.
  bool CreateGraph();

  // Step B. Run the concurrent optimization passes.
  bool OptimizeGraph(Linkage* linkage);

  // Alternative step B. Run minimal concurrent optimization passes for
  // mid-tier.
  bool OptimizeGraphForMidTier(Linkage* linkage);

  // Substep B.1. Produce a scheduled graph.
  void ComputeScheduledGraph();

  // Substep B.2. Select instructions from a scheduled graph.
  bool SelectInstructions(Linkage* linkage);

  // Step C. Run the code assembly pass.
  void AssembleCode(Linkage* linkage);

  // Step D. Run the code finalization pass.
  MaybeHandle<Code> FinalizeCode(bool retire_broker = true);

  // Step E. Install any code dependencies.
  bool CommitDependencies(Handle<Code> code);

  void VerifyGeneratedCodeIsIdempotent();
  void RunPrintAndVerify(const char* phase, bool untyped = false);
  bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor);
  MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
  void AllocateRegistersForTopTier(const RegisterConfiguration* config,
                                   CallDescriptor* call_descriptor,
                                   bool run_verifier);
  void AllocateRegistersForMidTier(const RegisterConfiguration* config,
                                   CallDescriptor* call_descriptor,
                                   bool run_verifier);

  OptimizedCompilationInfo* info() const;
  Isolate* isolate() const;
  CodeGenerator* code_generator() const;

  ObserveNodeManager* observe_node_manager() const;

 private:
  PipelineData* const data_;
};

namespace {

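// Wraps a reducer so that each reduction runs inside a
// SourcePositionTable::Scope keyed to the node being reduced; any nodes
// created during the reduction are thereby attributed to that node's source
// position.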
class SourcePositionWrapper final : public Reducer {
 public:
  SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
      : reducer_(reducer), table_(table) {}
  ~SourcePositionWrapper() final = default;
  SourcePositionWrapper(const SourcePositionWrapper&) = delete;
  SourcePositionWrapper& operator=(const SourcePositionWrapper&) = delete;

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    SourcePosition const pos = table_->GetSourcePosition(node);
    SourcePositionTable::Scope position(table_, pos);
    return reducer_->Reduce(node, nullptr);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  SourcePositionTable* const table_;
};

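// Analogous to SourcePositionWrapper: wraps a reducer so that nodes created
// during each reduction are recorded in the NodeOriginTable as originating
// from the node being reduced.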
class NodeOriginsWrapper final : public Reducer {
 public:
  NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
      : reducer_(reducer), table_(table) {}
  ~NodeOriginsWrapper() final = default;
  NodeOriginsWrapper(const NodeOriginsWrapper&) = delete;
  NodeOriginsWrapper& operator=(const NodeOriginsWrapper&) = delete;

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    NodeOriginTable::Scope position(table_, reducer_name(), node);
    return reducer_->Reduce(node, nullptr);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  NodeOriginTable* const table_;
};

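// Per-phase RAII bookkeeping: a statistics phase scope, a temporary zone for
// the phase, node-origin phase tagging, and (under V8_RUNTIME_CALL_STATS) a
// runtime-call timer.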
class V8_NODISCARD PipelineRunScope {
 public:
#ifdef V8_RUNTIME_CALL_STATS
  PipelineRunScope(
      PipelineData* data, const char* phase_name,
      RuntimeCallCounterId runtime_call_counter_id,
      RuntimeCallStats::CounterMode counter_mode = RuntimeCallStats::kExact)
      : phase_scope_(data->pipeline_statistics(), phase_name),
        zone_scope_(data->zone_stats(), phase_name),
        origin_scope_(data->node_origins(), phase_name),
        runtime_call_timer_scope(data->runtime_call_stats(),
                                 runtime_call_counter_id, counter_mode) {
    DCHECK_NOT_NULL(phase_name);
  }
#else   // V8_RUNTIME_CALL_STATS
  PipelineRunScope(PipelineData* data, const char* phase_name)
      : phase_scope_(data->pipeline_statistics(), phase_name),
        zone_scope_(data->zone_stats(), phase_name),
        origin_scope_(data->node_origins(), phase_name) {
    DCHECK_NOT_NULL(phase_name);
  }
#endif  // V8_RUNTIME_CALL_STATS

  Zone* zone() { return zone_scope_.zone(); }

 private:
  PhaseScope phase_scope_;
  ZoneStats::Scope zone_scope_;
  NodeOriginTable::PhaseScope origin_scope_;
#ifdef V8_RUNTIME_CALL_STATS
  RuntimeCallTimerScope runtime_call_timer_scope;
#endif  // V8_RUNTIME_CALL_STATS
};

// LocalIsolateScope encapsulates the phase where persistent handles are
// attached to the LocalHeap inside {local_isolate}.
class V8_NODISCARD LocalIsolateScope {
 public:
  explicit LocalIsolateScope(JSHeapBroker* broker,
                             OptimizedCompilationInfo* info,
                             LocalIsolate* local_isolate)
      : broker_(broker), info_(info) {
    broker_->AttachLocalIsolate(info_, local_isolate);
    info_->tick_counter().AttachLocalHeap(local_isolate->heap());
  }

  ~LocalIsolateScope() {
    info_->tick_counter().DetachLocalHeap();
    broker_->DetachLocalIsolate(info_);
  }

 private:
  JSHeapBroker* broker_;
  OptimizedCompilationInfo* info_;
};

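// Print the source of the given function to the code tracer, reversibly
// escaped and tagged with {source_id} so that it can be matched against the
// INLINE lines emitted by PrintInlinedFunctionInfo below.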
void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
                         int source_id, Handle<SharedFunctionInfo> shared) {
  if (!shared->script().IsUndefined(isolate)) {
    Handle<Script> script(Script::cast(shared->script()), isolate);

    if (!script->source().IsUndefined(isolate)) {
      CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
      Object source_name = script->name();
      auto& os = tracing_scope.stream();
      os << "--- FUNCTION SOURCE (";
      if (source_name.IsString()) {
        os << String::cast(source_name).ToCString().get() << ":";
      }
      os << shared->DebugNameCStr().get() << ") id{";
      os << info->optimization_id() << "," << source_id << "} start{";
      os << shared->StartPosition() << "} ---\n";
      {
        DisallowGarbageCollection no_gc;
        int start = shared->StartPosition();
        int len = shared->EndPosition() - start;
        SubStringRange source(String::cast(script->source()), no_gc, start,
                              len);
        for (auto c : source) {
          os << AsReversiblyEscapedUC16(c);
        }
      }

      os << "\n--- END ---\n";
    }
  }
}

// Print information for the given inlining: which function was inlined and
// where the inlining occurred.
void PrintInlinedFunctionInfo(
    OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
    int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
  CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
  auto& os = tracing_scope.stream();
  os << "INLINE (" << h.shared_info->DebugNameCStr().get() << ") id{"
     << info->optimization_id() << "," << source_id << "} AS " << inlining_id
     << " AT ";
  const SourcePosition position = h.position.position;
  if (position.IsKnown()) {
    os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
  } else {
    os << "<?>";
  }
  os << std::endl;
}

// Print the source of all functions that participated in this optimizing
// compilation. For inlined functions print source position of their inlining.
void PrintParticipatingSource(OptimizedCompilationInfo* info,
                              Isolate* isolate) {
  SourceIdAssigner id_assigner(info->inlined_functions().size());
  PrintFunctionSource(info, isolate, -1, info->shared_info());
  const auto& inlined = info->inlined_functions();
  for (unsigned id = 0; id < inlined.size(); id++) {
    const int source_id = id_assigner.GetIdFor(inlined[id].shared_info);
    PrintFunctionSource(info, isolate, source_id, inlined[id].shared_info);
    PrintInlinedFunctionInfo(info, isolate, source_id, id, inlined[id]);
  }
}

// Print the code after compiling it.
void PrintCode(Isolate* isolate, Handle<Code> code,
               OptimizedCompilationInfo* info) {
  if (FLAG_print_opt_source && info->IsOptimizing()) {
    PrintParticipatingSource(info, isolate);
  }

#ifdef ENABLE_DISASSEMBLER
  const bool print_code =
      FLAG_print_code ||
      (info->IsOptimizing() && FLAG_print_opt_code &&
       info->shared_info()->PassesFilter(FLAG_print_opt_code_filter));
  if (print_code) {
    std::unique_ptr<char[]> debug_name = info->GetDebugName();
    CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
    auto& os = tracing_scope.stream();

    // Print the source code if available.
    const bool print_source = info->IsOptimizing();
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      if (shared->script().IsScript() &&
          !Script::cast(shared->script()).source().IsUndefined(isolate)) {
        os << "--- Raw source ---\n";
        StringCharacterStream stream(
            String::cast(Script::cast(shared->script()).source()),
            shared->StartPosition());
        // fun->end_position() points to the last character in the stream. We
        // need to compensate by adding one to calculate the length.
        int source_len = shared->EndPosition() - shared->StartPosition() + 1;
        for (int i = 0; i < source_len; i++) {
          if (stream.HasMore()) {
            os << AsReversiblyEscapedUC16(stream.GetNext());
          }
        }
        os << "\n\n";
      }
    }
    if (info->IsOptimizing()) {
      os << "--- Optimized code ---\n"
         << "optimization_id = " << info->optimization_id() << "\n";
    } else {
      os << "--- Code ---\n";
    }
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      os << "source_position = " << shared->StartPosition() << "\n";
    }
    code->Disassemble(debug_name.get(), os, isolate);
    os << "--- End code ---\n";
  }
#endif  // ENABLE_DISASSEMBLER
}

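// Dump the schedule to the --trace-turbo JSON file and/or the code tracer,
// then run the schedule verifier if --turbo-verify is enabled.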
void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
                            Schedule* schedule, const char* phase_name) {
  RCS_SCOPE(data->runtime_call_stats(),
            RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
            RuntimeCallStats::kThreadSpecific);
  TRACE_EVENT0(PipelineStatistics::kTraceCategory, "V8.TraceScheduleAndVerify");
  if (info->trace_turbo_json()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    TurboJsonFile json_of(info, std::ios_base::app);
    json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"schedule\""
            << ",\"data\":\"";
    std::stringstream schedule_stream;
    schedule_stream << *schedule;
    std::string schedule_string(schedule_stream.str());
    for (const auto& c : schedule_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\"},\n";
  }
  if (info->trace_turbo_graph() || FLAG_trace_turbo_scheduler) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream()
        << "-- Schedule --------------------------------------\n"
        << *schedule;
  }

  if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
}

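// Register {reducer} with {graph_reducer}, first wrapping it so that source
// positions and node origins are recorded whenever the corresponding tables
// are enabled.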
void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
                Reducer* reducer) {
  if (data->info()->source_positions()) {
    SourcePositionWrapper* const wrapper =
        data->graph_zone()->New<SourcePositionWrapper>(
            reducer, data->source_positions());
    reducer = wrapper;
  }
  if (data->info()->trace_turbo_json()) {
    NodeOriginsWrapper* const wrapper =
        data->graph_zone()->New<NodeOriginsWrapper>(reducer,
                                                    data->node_origins());
    reducer = wrapper;
  }

  graph_reducer->AddReducer(reducer);
}

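// Create a PipelineStatistics instance when v8.turbofan tracing or
// --turbo-stats is active (the caller takes ownership; may return nullptr)
// and emit the opening of the --trace-turbo JSON file.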
PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
                                             OptimizedCompilationInfo* info,
                                             Isolate* isolate,
                                             ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  bool tracing_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.turbofan"),
                                     &tracing_enabled);
  if (tracing_enabled || FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics =
        new PipelineStatistics(info, isolate->GetTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("V8.TFInitializing");
  }

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    json_of << "{\"function\" : ";
    JsonPrintFunctionSource(json_of, -1, info->GetDebugName(), script, isolate,
                            info->shared_info());
    json_of << ",\n\"phases\":[";
  }

  return pipeline_statistics;
}

#if V8_ENABLE_WEBASSEMBLY
PipelineStatistics* CreatePipelineStatistics(
    wasm::FunctionBody function_body, const wasm::WasmModule* wasm_module,
    OptimizedCompilationInfo* info, ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  bool tracing_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan"), &tracing_enabled);
  if (tracing_enabled || FLAG_turbo_stats_wasm) {
    pipeline_statistics = new PipelineStatistics(
        info, wasm::GetWasmEngine()->GetOrCreateTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("V8.WasmInitializing");
  }

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    std::unique_ptr<char[]> function_name = info->GetDebugName();
    json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
    AccountingAllocator allocator;
    std::ostringstream disassembly;
    std::vector<int> source_positions;
    wasm::PrintRawWasmCode(&allocator, function_body, wasm_module,
                           wasm::kPrintLocals, disassembly, &source_positions);
    for (const auto& c : disassembly.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\",\n\"sourceLineToBytecodePosition\" : [";
    bool insert_comma = false;
    for (auto val : source_positions) {
      if (insert_comma) {
        json_of << ", ";
      }
      json_of << val;
      insert_comma = true;
    }
    json_of << "],\n\"phases\":[";
  }

  return pipeline_statistics;
}
#endif  // V8_ENABLE_WEBASSEMBLY

}  // namespace

class PipelineCompilationJob final : public OptimizedCompilationJob {
 public:
  PipelineCompilationJob(Isolate* isolate,
                         Handle<SharedFunctionInfo> shared_info,
                         Handle<JSFunction> function, BytecodeOffset osr_offset,
                         JavaScriptFrame* osr_frame, CodeKind code_kind);
  ~PipelineCompilationJob() final;
  PipelineCompilationJob(const PipelineCompilationJob&) = delete;
  PipelineCompilationJob& operator=(const PipelineCompilationJob&) = delete;

 protected:
  Status PrepareJobImpl(Isolate* isolate) final;
  Status ExecuteJobImpl(RuntimeCallStats* stats,
                        LocalIsolate* local_isolate) final;
  Status FinalizeJobImpl(Isolate* isolate) final;

  // Registers weak object to optimized code dependencies.
  void RegisterWeakObjectsInOptimizedCode(Isolate* isolate,
                                          Handle<NativeContext> context,
                                          Handle<Code> code);

 private:
  Zone zone_;
  ZoneStats zone_stats_;
  OptimizedCompilationInfo compilation_info_;
  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage* linkage_;
};

PipelineCompilationJob::PipelineCompilationJob(
    Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
    Handle<JSFunction> function, BytecodeOffset osr_offset,
    JavaScriptFrame* osr_frame, CodeKind code_kind)
    // Note that the OptimizedCompilationInfo is not initialized at the time
    // we pass it to the CompilationJob constructor, but it is not
    // dereferenced there.
    : OptimizedCompilationJob(&compilation_info_, "TurboFan"),
      zone_(function->GetIsolate()->allocator(),
            kPipelineCompilationJobZoneName),
      zone_stats_(function->GetIsolate()->allocator()),
      compilation_info_(&zone_, function->GetIsolate(), shared_info, function,
                        code_kind, osr_offset, osr_frame),
      pipeline_statistics_(CreatePipelineStatistics(
          handle(Script::cast(shared_info->script()), isolate),
          compilation_info(), function->GetIsolate(), &zone_stats_)),
      data_(&zone_stats_, function->GetIsolate(), compilation_info(),
            pipeline_statistics_.get()),
      pipeline_(&data_),
      linkage_(nullptr) {}

PipelineCompilationJob::~PipelineCompilationJob() = default;

namespace {
// Ensure that the RuntimeCallStats table is set on the PipelineData for the
// duration of the job phase and unset immediately afterwards. Each job needs
// to set the correct RuntimeCallStats table depending on whether it is
// running on a background or foreground thread.
class V8_NODISCARD PipelineJobScope {
 public:
  PipelineJobScope(PipelineData* data, RuntimeCallStats* stats) : data_(data) {
    data_->set_runtime_call_stats(stats);
  }

  ~PipelineJobScope() { data_->set_runtime_call_stats(nullptr); }

 private:
  PipelineData* data_;
};
}  // namespace

PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
    Isolate* isolate) {
  // Ensure that the RuntimeCallStats table of the main thread is available
  // for phases happening during PrepareJob.
  PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());

  if (compilation_info()->bytecode_array()->length() >
      FLAG_max_optimized_bytecode_size) {
    return AbortOptimization(BailoutReason::kFunctionTooBig);
  }

  if (!FLAG_always_opt) {
    compilation_info()->set_bailout_on_uninitialized();
  }
  if (FLAG_turbo_loop_peeling) {
    compilation_info()->set_loop_peeling();
  }
  if (FLAG_turbo_inlining) {
    compilation_info()->set_inlining();
  }
  if (FLAG_turbo_allocation_folding) {
    compilation_info()->set_allocation_folding();
  }

  // Determine whether to specialize the code for the function's context.
  // We can't do this in the case of OSR, because we want to cache the
  // generated code on the native context, keyed on SharedFunctionInfo.
  // TODO(mythria): Check if it is better to key the OSR cache on JSFunction
  // and allow context specialization for OSR code.
  if (compilation_info()->closure()->raw_feedback_cell().map() ==
          ReadOnlyRoots(isolate).one_closure_cell_map() &&
      !compilation_info()->is_osr() &&
      !compilation_info()->IsTurboprop()) {
    compilation_info()->set_function_context_specializing();
    data_.ChooseSpecializationContext();
  }

  if (compilation_info()->source_positions()) {
    SharedFunctionInfo::EnsureSourcePositionsAvailable(
        isolate, compilation_info()->shared_info());
  }

  data_.set_start_source_position(
      compilation_info()->shared_info()->StartPosition());

  linkage_ = compilation_info()->zone()->New<Linkage>(
      Linkage::ComputeIncoming(compilation_info()->zone(), compilation_info()));

  if (compilation_info()->is_osr()) data_.InitializeOsrHelper();

  // InitializeHeapBroker() and CreateGraph() may already use
  // IsPendingAllocation.
  isolate->heap()->PublishPendingAllocations();

  pipeline_.InitializeHeapBroker();

  if (!data_.broker()->is_concurrent_inlining()) {
    if (!pipeline_.CreateGraph()) {
      CHECK(!isolate->has_pending_exception());
      return AbortOptimization(BailoutReason::kGraphBuildingFailed);
    }
  }

  if (compilation_info()->concurrent_inlining()) {
    // Serialization may have allocated.
    isolate->heap()->PublishPendingAllocations();
  }

  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
    RuntimeCallStats* stats, LocalIsolate* local_isolate) {
  // Ensure that the RuntimeCallStats table is only available during execution
  // and not during finalization as that might be on a different thread.
  PipelineJobScope scope(&data_, stats);
  LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
                                        local_isolate);

  if (data_.broker()->is_concurrent_inlining()) {
    if (!pipeline_.CreateGraph()) {
      return AbortOptimization(BailoutReason::kGraphBuildingFailed);
    }
  }

  // We selectively Unpark inside OptimizeGraph*.
  bool success;
  if (compilation_info_.code_kind() == CodeKind::TURBOPROP) {
    success = pipeline_.OptimizeGraphForMidTier(linkage_);
  } else {
    success = pipeline_.OptimizeGraph(linkage_);
  }
  if (!success) return FAILED;

  pipeline_.AssembleCode(linkage_);

  return SUCCEEDED;
}

FinalizeJobImpl(Isolate * isolate)1241 PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
1242     Isolate* isolate) {
1243   // Ensure that the RuntimeCallStats table of main thread is available for
1244   // phases happening during PrepareJob.
1245   PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
1246   RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
1247   MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
1248   Handle<Code> code;
1249   if (!maybe_code.ToHandle(&code)) {
1250     if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
1251       return AbortOptimization(BailoutReason::kCodeGenerationFailed);
1252     }
1253     return FAILED;
1254   }
1255   if (!pipeline_.CommitDependencies(code)) {
1256     return RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
1257   }
1258 
1259   compilation_info()->SetCode(code);
1260   Handle<NativeContext> context(compilation_info()->native_context(), isolate);
1261   if (CodeKindCanDeoptimize(code->kind())) context->AddOptimizedCode(*code);
1262   RegisterWeakObjectsInOptimizedCode(isolate, context, code);
1263   return SUCCEEDED;
1264 }
1265 
RegisterWeakObjectsInOptimizedCode(Isolate * isolate,Handle<NativeContext> context,Handle<Code> code)1266 void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
1267     Isolate* isolate, Handle<NativeContext> context, Handle<Code> code) {
1268   std::vector<Handle<Map>> maps;
1269   DCHECK(code->is_optimized_code());
1270   {
1271     DisallowGarbageCollection no_gc;
1272     int const mode_mask = RelocInfo::EmbeddedObjectModeMask();
1273     for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
1274       DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
1275       if (code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
1276         Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
1277                                   isolate);
1278         if (object->IsMap()) {
1279           maps.push_back(Handle<Map>::cast(object));
1280         }
1281       }
1282     }
1283   }
1284   for (Handle<Map> map : maps) {
1285     isolate->heap()->AddRetainedMap(context, map);
1286   }
1287   code->set_can_have_weak_objects(true);
1288 }
1289 
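// Helper for running an individual pipeline phase. Each phase below is a
// plain struct providing a phase_name() (via the DECL_* macros) and a Run()
// method; Run<Phase>() wraps the invocation in a PipelineRunScope, which
// supplies a fresh temporary zone (and, with V8_RUNTIME_CALL_STATS enabled, a
// runtime-call-stats timer) for the duration of the phase.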
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
#ifdef V8_RUNTIME_CALL_STATS
  PipelineRunScope scope(this->data_, Phase::phase_name(),
                         Phase::kRuntimeCallCounterId, Phase::kCounterMode);
#else
  PipelineRunScope scope(this->data_, Phase::phase_name());
#endif
  Phase phase;
  phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
}

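// The DECL_PIPELINE_PHASE_CONSTANTS* macros declare the per-phase constants
// consumed by PipelineImpl::Run() above: the trace name ("V8.TF<Name>") and,
// with V8_RUNTIME_CALL_STATS enabled, the runtime-call counter and its mode.
// Phases that run on the main thread use the kExact counter mode via
// DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS; all others are kThreadSpecific.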
#ifdef V8_RUNTIME_CALL_STATS
#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode)        \
  static const char* phase_name() { return "V8.TF" #Name; }     \
  static constexpr RuntimeCallCounterId kRuntimeCallCounterId = \
      RuntimeCallCounterId::kOptimize##Name;                    \
  static constexpr RuntimeCallStats::CounterMode kCounterMode = Mode;
#else  // V8_RUNTIME_CALL_STATS
#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode) \
  static const char* phase_name() { return "V8.TF" #Name; }
#endif  // V8_RUNTIME_CALL_STATS

#define DECL_PIPELINE_PHASE_CONSTANTS(Name) \
  DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kThreadSpecific)

#define DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Name) \
  DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kExact)

struct GraphBuilderPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(BytecodeGraphBuilder)

  void Run(PipelineData* data, Zone* temp_zone) {
    BytecodeGraphBuilderFlags flags;
    if (data->info()->analyze_environment_liveness()) {
      flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
    }
    if (data->info()->bailout_on_uninitialized()) {
      flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
    }

    JSFunctionRef closure = MakeRef(data->broker(), data->info()->closure());
    CallFrequency frequency(1.0f);
    BuildGraphFromBytecode(
        data->broker(), temp_zone, closure.shared(),
        closure.raw_feedback_cell(data->dependencies()),
        data->info()->osr_offset(), data->jsgraph(), frequency,
        data->source_positions(), SourcePosition::kNotInlined,
        data->info()->code_kind(), flags, &data->info()->tick_counter(),
        ObserveNodeInfo{data->observe_node_manager(),
                        data->info()->node_observer()});
  }
};

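// Note: phases like the ones below typically instantiate a GraphReducer plus
// a set of reducers in the temporary zone and then call ReduceGraph(), which
// applies the registered reducers to the graph until a fixpoint is reached.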
struct InliningPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(Inlining)

  void Run(PipelineData* data, Zone* temp_zone) {
    OptimizedCompilationInfo* info = data->info();
    GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
                               data->broker(), data->jsgraph()->Dead(),
                               data->observe_node_manager());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
    if (data->info()->bailout_on_uninitialized()) {
      call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
    }
    if (data->info()->inline_js_wasm_calls() && data->info()->inlining()) {
      call_reducer_flags |= JSCallReducer::kInlineJSToWasmCalls;
    }
    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
                               temp_zone, call_reducer_flags);
    JSContextSpecialization context_specialization(
        &graph_reducer, data->jsgraph(), data->broker(),
        data->specialization_context(),
        data->info()->function_context_specializing()
            ? data->info()->closure()
            : MaybeHandle<JSFunction>());
    JSNativeContextSpecialization::Flags flags =
        JSNativeContextSpecialization::kNoFlags;
    if (data->info()->bailout_on_uninitialized()) {
      flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
    }
    // Passing the OptimizedCompilationInfo's shared zone here as
    // JSNativeContextSpecialization allocates out-of-heap objects
    // that need to live until code generation.
    JSNativeContextSpecialization native_context_specialization(
        &graph_reducer, data->jsgraph(), data->broker(), flags,
        data->dependencies(), temp_zone, info->zone());
    JSInliningHeuristic inlining(
        &graph_reducer, temp_zone, data->info(), data->jsgraph(),
        data->broker(), data->source_positions(), JSInliningHeuristic::kJSOnly);

    JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
                                           data->broker());
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    if (!data->info()->IsTurboprop()) {
      AddReducer(data, &graph_reducer, &checkpoint_elimination);
      AddReducer(data, &graph_reducer, &common_reducer);
    }
    AddReducer(data, &graph_reducer, &native_context_specialization);
    AddReducer(data, &graph_reducer, &context_specialization);
    AddReducer(data, &graph_reducer, &intrinsic_lowering);
    AddReducer(data, &graph_reducer, &call_reducer);
    if (data->info()->inlining()) {
      AddReducer(data, &graph_reducer, &inlining);
    }
    graph_reducer.ReduceGraph();
    info->set_inlined_bytecode_size(inlining.total_inlined_bytecode_size());

    // Record whether any JS-to-Wasm calls were found, so that the
    // "wasm-inlining" phase can be skipped if there are none.
    if (call_reducer.has_wasm_calls()) {
      data->set_has_js_wasm_calls(true);
    }
  }
};

#if V8_ENABLE_WEBASSEMBLY
struct JSWasmInliningPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(JSWasmInlining)
  void Run(PipelineData* data, Zone* temp_zone) {
    DCHECK(data->has_js_wasm_calls());

    OptimizedCompilationInfo* info = data->info();
    GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
                               data->broker(), data->jsgraph()->Dead());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    JSInliningHeuristic inlining(&graph_reducer, temp_zone, data->info(),
                                 data->jsgraph(), data->broker(),
                                 data->source_positions(),
                                 JSInliningHeuristic::kWasmOnly);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &inlining);
    graph_reducer.ReduceGraph();
  }
};
#endif  // V8_ENABLE_WEBASSEMBLY

struct EarlyGraphTrimmingPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(EarlyGraphTrimming)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
    trimmer.TrimGraph(roots.begin(), roots.end());
  }
};

struct TyperPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(Typer)

  void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);

    // Make sure we always type True and False. Needed for escape analysis.
    roots.push_back(data->jsgraph()->TrueConstant());
    roots.push_back(data->jsgraph()->FalseConstant());

    LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
                                         data->common(), temp_zone);
    if (FLAG_turbo_loop_variable) induction_vars.Run();

    // The typer inspects heap objects, so we need to unpark the local heap.
    UnparkedScopeIfNeeded scope(data->broker());
    typer->Run(roots, &induction_vars);
  }
};

struct UntyperPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(Untyper)

  void Run(PipelineData* data, Zone* temp_zone) {
    class RemoveTypeReducer final : public Reducer {
     public:
      const char* reducer_name() const override { return "RemoveTypeReducer"; }
      Reduction Reduce(Node* node) final {
        if (NodeProperties::IsTyped(node)) {
          NodeProperties::RemoveType(node);
          return Changed(node);
        }
        return NoChange();
      }
    };

    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    for (Node* node : roots) {
      NodeProperties::RemoveType(node);
    }

    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    RemoveTypeReducer remove_type_reducer;
    AddReducer(data, &graph_reducer, &remove_type_reducer);
    graph_reducer.ReduceGraph();
  }
};

struct HeapBrokerInitializationPhase {
  DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(HeapBrokerInitialization)

  void Run(PipelineData* data, Zone* temp_zone) {
    data->broker()->InitializeAndStartSerializing();
  }
};

struct CopyMetadataForConcurrentCompilePhase {
  DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(SerializeMetadata)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    JSHeapCopyReducer heap_copy_reducer(data->broker());
    AddReducer(data, &graph_reducer, &heap_copy_reducer);
    graph_reducer.ReduceGraph();

    // Some nodes that are no longer in the graph might still be in the cache.
    NodeVector cached_nodes(temp_zone);
    data->jsgraph()->GetCachedNodes(&cached_nodes);
    for (Node* const node : cached_nodes) graph_reducer.ReduceNode(node);
  }
};

struct TypedLoweringPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
                                     data->jsgraph(), data->broker(),
                                     temp_zone);
    JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
                                   data->broker(), temp_zone);
    ConstantFoldingReducer constant_folding_reducer(
        &graph_reducer, data->jsgraph(), data->broker());
    TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
                                         data->jsgraph(), data->broker());
    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
                                             data->broker());
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    AddReducer(data, &graph_reducer, &dead_code_elimination);

    AddReducer(data, &graph_reducer, &create_lowering);
    if (!data->info()->IsTurboprop()) {
      AddReducer(data, &graph_reducer, &constant_folding_reducer);
    }
    AddReducer(data, &graph_reducer, &typed_lowering);
    AddReducer(data, &graph_reducer, &typed_optimization);
    AddReducer(data, &graph_reducer, &simple_reducer);
    AddReducer(data, &graph_reducer, &checkpoint_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);

    // ConstantFoldingReducer, JSCreateLowering, JSTypedLowering, and
    // TypedOptimization access the heap.
    UnparkedScopeIfNeeded scope(data->broker());

    graph_reducer.ReduceGraph();
  }
};

struct EscapeAnalysisPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(EscapeAnalysis)

  void Run(PipelineData* data, Zone* temp_zone) {
    EscapeAnalysis escape_analysis(data->jsgraph(),
                                   &data->info()->tick_counter(), temp_zone);
    escape_analysis.ReduceGraph();

    GraphReducer reducer(temp_zone, data->graph(),
                         &data->info()->tick_counter(), data->broker(),
                         data->jsgraph()->Dead(), data->observe_node_manager());
    EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
                                         escape_analysis.analysis_result(),
                                         temp_zone);

    AddReducer(data, &reducer, &escape_reducer);

    // EscapeAnalysisReducer accesses the heap.
    UnparkedScopeIfNeeded scope(data->broker());

    reducer.ReduceGraph();
    // TODO(turbofan): Turn this into a debug mode check once we have
    // confidence.
    escape_reducer.VerifyReplacement();
  }
};

struct TypeAssertionsPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(TypeAssertions)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
                                             temp_zone);
    AddReducer(data, &graph_reducer, &type_assertions);
    graph_reducer.ReduceGraph();
  }
};

struct SimplifiedLoweringPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)

  void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
    SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
                                data->source_positions(), data->node_origins(),
                                &data->info()->tick_counter(), linkage,
                                data->observe_node_manager());

    // RepresentationChanger accesses the heap.
    UnparkedScopeIfNeeded scope(data->broker());

    lowering.LowerAllNodes();
  }
};

struct LoopPeelingPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(LoopPeeling)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    {
      UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
      trimmer.TrimGraph(roots.begin(), roots.end());
    }

    LoopTree* loop_tree = LoopFinder::BuildLoopTree(
        data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
    // We call the typer inside of PeelInnerLoopsOfTree which inspects heap
    // objects, so we need to unpark the local heap.
    UnparkedScopeIfNeeded scope(data->broker());
    LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
               data->source_positions(), data->node_origins())
        .PeelInnerLoopsOfTree();
  }
};

#if V8_ENABLE_WEBASSEMBLY
struct WasmLoopUnrollingPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(WasmLoopUnrolling)

  void Run(PipelineData* data, Zone* temp_zone,
           std::vector<compiler::WasmLoopInfo>* loop_infos) {
    for (WasmLoopInfo& loop_info : *loop_infos) {
      if (loop_info.is_innermost) {
        ZoneUnorderedSet<Node*>* loop =
            LoopFinder::FindSmallUnnestedLoopFromHeader(
                loop_info.header, temp_zone,
                // Only discover the loop until its size is the maximum unrolled
                // size for its depth.
                maximum_unrollable_size(loop_info.nesting_depth));
        UnrollLoop(loop_info.header, loop, loop_info.nesting_depth,
                   data->graph(), data->common(), temp_zone,
                   data->source_positions(), data->node_origins());
      }
    }

    for (WasmLoopInfo& loop_info : *loop_infos) {
      std::unordered_set<Node*> loop_exits;
      // We collect exits into a set first because we are not allowed to mutate
      // them while iterating uses().
      for (Node* use : loop_info.header->uses()) {
        if (use->opcode() == IrOpcode::kLoopExit) {
          loop_exits.insert(use);
        }
      }
      for (Node* use : loop_exits) {
        LoopPeeler::EliminateLoopExit(use);
      }
    }
  }
};

struct WasmInliningPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)

  void Run(PipelineData* data, Zone* temp_zone, wasm::CompilationEnv* env,
           const wasm::WireBytesStorage* wire_bytes) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    DeadCodeElimination dead(&graph_reducer, data->graph(),
                             data->mcgraph()->common(), temp_zone);
    // For now, hard-code inlining the function at index 0.
    InlineByIndex heuristics({0});
    WasmInliner inliner(&graph_reducer, env, data->source_positions(),
                        data->node_origins(), data->mcgraph(), wire_bytes,
                        &heuristics);
    AddReducer(data, &graph_reducer, &dead);
    AddReducer(data, &graph_reducer, &inliner);

    graph_reducer.ReduceGraph();
  }
};
#endif  // V8_ENABLE_WEBASSEMBLY

struct LoopExitEliminationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(LoopExitElimination)

  void Run(PipelineData* data, Zone* temp_zone) {
    LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
  }
};

struct GenericLoweringPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(GenericLowering)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer,
                                       data->broker());
    AddReducer(data, &graph_reducer, &generic_lowering);

    // JSGenericLowering accesses the heap due to ObjectRef's type checks.
    UnparkedScopeIfNeeded scope(data->broker());

    graph_reducer.ReduceGraph();
  }
};

struct EarlyOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(EarlyOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
                                             data->broker());
    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &simple_reducer);
    AddReducer(data, &graph_reducer, &redundancy_elimination);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
};

struct ControlFlowOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ControlFlowOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    ControlFlowOptimizer optimizer(data->graph(), data->common(),
                                   data->machine(),
                                   &data->info()->tick_counter(), temp_zone);
    optimizer.Optimize();
  }
};

struct EffectControlLinearizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(EffectLinearization)

  void Run(PipelineData* data, Zone* temp_zone) {
    {
      // Branch cloning in the effect control linearizer requires the graphs to
      // be trimmed, so trim now before scheduling.
      GraphTrimmer trimmer(temp_zone, data->graph());
      NodeVector roots(temp_zone);
      data->jsgraph()->GetCachedNodes(&roots);
      {
        UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
        trimmer.TrimGraph(roots.begin(), roots.end());
      }

      // Schedule the graph without node splitting so that we can
      // fix the effect and control flow for nodes with low-level side
      // effects (such as changing representation to tagged or
      // 'floating' allocation regions).
      Schedule* schedule = Scheduler::ComputeSchedule(
          temp_zone, data->graph(), Scheduler::kTempSchedule,
          &data->info()->tick_counter(), data->profile_data());
      TraceScheduleAndVerify(data->info(), data, schedule,
                             "effect linearization schedule");

      // Post-pass for wiring the control/effects
      // - connect allocating representation changes into the control&effect
      //   chains and lower them,
      // - get rid of the region markers,
      // - introduce effect phis and rewire effects to get SSA again.
      LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
                             data->source_positions(), data->node_origins(),
                             data->broker());
    }
    {
      // The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
      // run {DeadCodeElimination} to prune these parts of the graph.
      // Also, the following store-store elimination phase greatly benefits from
      // doing a common operator reducer and dead code elimination just before
      // it, to eliminate conditional deopts with a constant condition.
      GraphReducer graph_reducer(temp_zone, data->graph(),
                                 &data->info()->tick_counter(), data->broker(),
                                 data->jsgraph()->Dead(),
                                 data->observe_node_manager());
      DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                                data->common(), temp_zone);
      CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                           data->broker(), data->common(),
                                           data->machine(), temp_zone);
      AddReducer(data, &graph_reducer, &dead_code_elimination);
      AddReducer(data, &graph_reducer, &common_reducer);
      graph_reducer.ReduceGraph();
    }
  }
};

struct StoreStoreEliminationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(StoreStoreElimination)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    {
      UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
      trimmer.TrimGraph(roots.begin(), roots.end());
    }

    StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
                               temp_zone);
  }
};

struct LoadEliminationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(LoadElimination)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    BranchElimination branch_condition_elimination(
        &graph_reducer, data->jsgraph(), temp_zone, data->source_positions(),
        BranchElimination::kEARLY);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
    LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
                                     temp_zone);
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
                                         data->jsgraph(), data->broker());
    ConstantFoldingReducer constant_folding_reducer(
        &graph_reducer, data->jsgraph(), data->broker());
    TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
                                                data->broker());

    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &redundancy_elimination);
    AddReducer(data, &graph_reducer, &load_elimination);
    AddReducer(data, &graph_reducer, &type_narrowing_reducer);
    AddReducer(data, &graph_reducer, &constant_folding_reducer);
    AddReducer(data, &graph_reducer, &typed_optimization);
    AddReducer(data, &graph_reducer, &checkpoint_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);

    // ConstantFoldingReducer and TypedOptimization access the heap.
    UnparkedScopeIfNeeded scope(data->broker());

    graph_reducer.ReduceGraph();
  }
};

struct MemoryOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MemoryOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    // The memory optimizer requires the graphs to be trimmed, so trim now.
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    {
      UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
      trimmer.TrimGraph(roots.begin(), roots.end());
    }

    // Optimize allocations and load/store operations.
    MemoryOptimizer optimizer(
        data->jsgraph(), temp_zone,
        data->info()->allocation_folding()
            ? MemoryLowering::AllocationFolding::kDoAllocationFolding
            : MemoryLowering::AllocationFolding::kDontAllocationFolding,
        data->debug_name(), &data->info()->tick_counter());
    optimizer.Optimize();
  }
};

struct LateOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(LateOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    BranchElimination branch_condition_elimination(
        &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    JSGraphAssembler graph_assembler(data->jsgraph(), temp_zone);
    SelectLowering select_lowering(&graph_assembler, data->graph());
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &select_lowering);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
};

struct MachineOperatorOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MachineOperatorOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());

    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
};

struct WasmBaseOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(WasmBaseOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->mcgraph()->Dead(), data->observe_node_manager());
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
};

struct DecompressionOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(DecompressionOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    if (COMPRESS_POINTERS_BOOL) {
      DecompressionOptimizer decompression_optimizer(
          temp_zone, data->graph(), data->common(), data->machine());
      decompression_optimizer.Reduce();
    }
  }
};

struct ScheduledEffectControlLinearizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ScheduledEffectControlLinearization)

  void Run(PipelineData* data, Zone* temp_zone) {
    // Post-pass for wiring the control/effects
    // - connect allocating representation changes into the control&effect
    //   chains and lower them,
    // - get rid of the region markers,
    // - introduce effect phis and rewire effects to get SSA again,
    // - lower simplified memory and select nodes to machine level nodes.
    LowerToMachineSchedule(data->jsgraph(), data->schedule(), temp_zone,
                           data->source_positions(), data->node_origins(),
                           data->broker());

    // TODO(rmcilroy): Avoid having to rebuild rpo_order on schedule each time.
    Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
    Scheduler::GenerateDominatorTree(data->schedule());
    TraceScheduleAndVerify(data->info(), data, data->schedule(),
                           "effect linearization schedule");
  }
};

#if V8_ENABLE_WEBASSEMBLY
struct WasmOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(WasmOptimization)

  void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan) {
    // Run optimizations in two rounds: First one around load elimination and
    // then one around branch elimination. This is because those two
    // optimizations sometimes display quadratic complexity when run together.
    // We only need load elimination for managed objects.
    if (FLAG_experimental_wasm_gc) {
      GraphReducer graph_reducer(temp_zone, data->graph(),
                                 &data->info()->tick_counter(), data->broker(),
                                 data->jsgraph()->Dead(),
                                 data->observe_node_manager());
      MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
                                             allow_signalling_nan);
      DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                                data->common(), temp_zone);
      CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                           data->broker(), data->common(),
                                           data->machine(), temp_zone);
      ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
      CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
                                          temp_zone);
      AddReducer(data, &graph_reducer, &machine_reducer);
      AddReducer(data, &graph_reducer, &dead_code_elimination);
      AddReducer(data, &graph_reducer, &common_reducer);
      AddReducer(data, &graph_reducer, &value_numbering);
      AddReducer(data, &graph_reducer, &load_elimination);
      graph_reducer.ReduceGraph();
    }
    {
      GraphReducer graph_reducer(temp_zone, data->graph(),
                                 &data->info()->tick_counter(), data->broker(),
                                 data->jsgraph()->Dead(),
                                 data->observe_node_manager());
      MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
                                             allow_signalling_nan);
      DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                                data->common(), temp_zone);
      CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                           data->broker(), data->common(),
                                           data->machine(), temp_zone);
      ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
      BranchElimination branch_condition_elimination(
          &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
      AddReducer(data, &graph_reducer, &machine_reducer);
      AddReducer(data, &graph_reducer, &dead_code_elimination);
      AddReducer(data, &graph_reducer, &common_reducer);
      AddReducer(data, &graph_reducer, &value_numbering);
      AddReducer(data, &graph_reducer, &branch_condition_elimination);
      graph_reducer.ReduceGraph();
    }
  }
};
#endif  // V8_ENABLE_WEBASSEMBLY

struct CsaEarlyOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(CSAEarlyOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    // Run optimizations in two rounds: First one around load elimination and
    // then one around branch elimination. This is because those two
    // optimizations sometimes display quadratic complexity when run together.
    {
      GraphReducer graph_reducer(temp_zone, data->graph(),
                                 &data->info()->tick_counter(), data->broker(),
                                 data->jsgraph()->Dead(),
                                 data->observe_node_manager());
      MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
                                             true);
      DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                                data->common(), temp_zone);
      CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                           data->broker(), data->common(),
                                           data->machine(), temp_zone);
      ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
      CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
                                          temp_zone);
      AddReducer(data, &graph_reducer, &machine_reducer);
      AddReducer(data, &graph_reducer, &dead_code_elimination);
      AddReducer(data, &graph_reducer, &common_reducer);
      AddReducer(data, &graph_reducer, &value_numbering);
      AddReducer(data, &graph_reducer, &load_elimination);
      graph_reducer.ReduceGraph();
    }
    {
      GraphReducer graph_reducer(temp_zone, data->graph(),
                                 &data->info()->tick_counter(), data->broker(),
                                 data->jsgraph()->Dead(),
                                 data->observe_node_manager());
      MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
                                             true);
      DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                                data->common(), temp_zone);
      CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                           data->broker(), data->common(),
                                           data->machine(), temp_zone);
      ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
      BranchElimination branch_condition_elimination(
          &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
      AddReducer(data, &graph_reducer, &machine_reducer);
      AddReducer(data, &graph_reducer, &dead_code_elimination);
      AddReducer(data, &graph_reducer, &common_reducer);
      AddReducer(data, &graph_reducer, &value_numbering);
      AddReducer(data, &graph_reducer, &branch_condition_elimination);
      graph_reducer.ReduceGraph();
    }
  }
};

struct CsaOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(CSAOptimization)

  void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    BranchElimination branch_condition_elimination(
        &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
                                           allow_signalling_nan);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
};

struct ComputeSchedulePhase {
  DECL_PIPELINE_PHASE_CONSTANTS(Scheduling)

  void Run(PipelineData* data, Zone* temp_zone) {
    Schedule* schedule = Scheduler::ComputeSchedule(
        temp_zone, data->graph(),
        data->info()->splitting() ? Scheduler::kSplitNodes
                                  : Scheduler::kNoFlags,
        &data->info()->tick_counter(), data->profile_data());
    data->set_schedule(schedule);
  }
};

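// Helper for tracing: serializes, for each node id and for each instruction
// block, the range of instruction indices it occupies. The result is appended
// to the TurboFan JSON trace by InstructionSelectionPhase below. (Instruction
// offsets appear to be recorded in reverse order during selection, hence the
// mirroring against the last instruction index.)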
struct InstructionRangesAsJSON {
  const InstructionSequence* sequence;
  const ZoneVector<std::pair<int, int>>* instr_origins;
};

std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
  const int max = static_cast<int>(s.sequence->LastInstructionIndex());

  out << ", \"nodeIdToInstructionRange\": {";
  bool need_comma = false;
  for (size_t i = 0; i < s.instr_origins->size(); ++i) {
    std::pair<int, int> offset = (*s.instr_origins)[i];
    if (offset.first == -1) continue;
    const int first = max - offset.first + 1;
    const int second = max - offset.second + 1;
    if (need_comma) out << ", ";
    out << "\"" << i << "\": [" << first << ", " << second << "]";
    need_comma = true;
  }
  out << "}";
  out << ", \"blockIdtoInstructionRange\": {";
  need_comma = false;
  for (auto block : s.sequence->instruction_blocks()) {
    if (need_comma) out << ", ";
    out << "\"" << block->rpo_number() << "\": [" << block->code_start() << ", "
        << block->code_end() << "]";
    need_comma = true;
  }
  out << "}";
  return out;
}

struct InstructionSelectionPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(SelectInstructions)

  void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
    InstructionSelector selector(
        temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
        data->schedule(), data->source_positions(), data->frame(),
        data->info()->switch_jump_table()
            ? InstructionSelector::kEnableSwitchJumpTable
            : InstructionSelector::kDisableSwitchJumpTable,
        &data->info()->tick_counter(), data->broker(),
        data->address_of_max_unoptimized_frame_height(),
        data->address_of_max_pushed_argument_count(),
        data->info()->source_positions()
            ? InstructionSelector::kAllSourcePositions
            : InstructionSelector::kCallSourcePositions,
        InstructionSelector::SupportedFeatures(),
        FLAG_turbo_instruction_scheduling
            ? InstructionSelector::kEnableScheduling
            : InstructionSelector::kDisableScheduling,
        data->assembler_options().enable_root_relative_access
            ? InstructionSelector::kEnableRootsRelativeAddressing
            : InstructionSelector::kDisableRootsRelativeAddressing,
        data->info()->trace_turbo_json()
            ? InstructionSelector::kEnableTraceTurboJson
            : InstructionSelector::kDisableTraceTurboJson);
    if (!selector.SelectInstructions()) {
      data->set_compilation_failed();
    }
    if (data->info()->trace_turbo_json()) {
      TurboJsonFile json_of(data->info(), std::ios_base::app);
      json_of << "{\"name\":\"" << phase_name()
              << "\",\"type\":\"instructions\""
              << InstructionRangesAsJSON{data->sequence(),
                                         &selector.instr_origins()}
              << "},\n";
    }
  }
};

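// The phases below (through ResolveControlFlowPhase) make up the top-tier
// register allocation pipeline; they all operate on the
// top_tier_register_allocation_data.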
struct MeetRegisterConstraintsPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MeetRegisterConstraints)
  void Run(PipelineData* data, Zone* temp_zone) {
    ConstraintBuilder builder(data->top_tier_register_allocation_data());
    builder.MeetRegisterConstraints();
  }
};

struct ResolvePhisPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ResolvePhis)

  void Run(PipelineData* data, Zone* temp_zone) {
    ConstraintBuilder builder(data->top_tier_register_allocation_data());
    builder.ResolvePhis();
  }
};

struct BuildLiveRangesPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRanges)

  void Run(PipelineData* data, Zone* temp_zone) {
    LiveRangeBuilder builder(data->top_tier_register_allocation_data(),
                             temp_zone);
    builder.BuildLiveRanges();
  }
};

struct BuildBundlesPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRangeBundles)

  void Run(PipelineData* data, Zone* temp_zone) {
    BundleBuilder builder(data->top_tier_register_allocation_data());
    builder.BuildBundles();
  }
};

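// The register allocator itself is a template parameter, so the same phase
// wrapper can drive different allocator implementations (e.g. the linear-scan
// allocator) for general-purpose and floating-point registers.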
template <typename RegAllocator>
struct AllocateGeneralRegistersPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(AllocateGeneralRegisters)

  void Run(PipelineData* data, Zone* temp_zone) {
    RegAllocator allocator(data->top_tier_register_allocation_data(),
                           RegisterKind::kGeneral, temp_zone);
    allocator.AllocateRegisters();
  }
};

template <typename RegAllocator>
struct AllocateFPRegistersPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(AllocateFPRegisters)

  void Run(PipelineData* data, Zone* temp_zone) {
    RegAllocator allocator(data->top_tier_register_allocation_data(),
                           RegisterKind::kDouble, temp_zone);
    allocator.AllocateRegisters();
  }
};

struct DecideSpillingModePhase {
  DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)

  void Run(PipelineData* data, Zone* temp_zone) {
    OperandAssigner assigner(data->top_tier_register_allocation_data());
    assigner.DecideSpillingMode();
  }
};

struct AssignSpillSlotsPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(AssignSpillSlots)

  void Run(PipelineData* data, Zone* temp_zone) {
    OperandAssigner assigner(data->top_tier_register_allocation_data());
    assigner.AssignSpillSlots();
  }
};

struct CommitAssignmentPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(CommitAssignment)

  void Run(PipelineData* data, Zone* temp_zone) {
    OperandAssigner assigner(data->top_tier_register_allocation_data());
    assigner.CommitAssignment();
  }
};

struct PopulateReferenceMapsPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(PopulatePointerMaps)

  void Run(PipelineData* data, Zone* temp_zone) {
    ReferenceMapPopulator populator(data->top_tier_register_allocation_data());
    populator.PopulateReferenceMaps();
  }
};

struct ConnectRangesPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ConnectRanges)

  void Run(PipelineData* data, Zone* temp_zone) {
    LiveRangeConnector connector(data->top_tier_register_allocation_data());
    connector.ConnectRanges(temp_zone);
  }
};

struct ResolveControlFlowPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ResolveControlFlow)

  void Run(PipelineData* data, Zone* temp_zone) {
    LiveRangeConnector connector(data->top_tier_register_allocation_data());
    connector.ResolveControlFlow(temp_zone);
  }
};

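// The MidTier* phases below drive the fast register allocator used by the
// mid-tier pipeline (see OptimizeGraphForMidTier above); they operate on the
// mid_tier_register_allocator_data instead.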
2350 struct MidTierRegisterOutputDefinitionPhase {
DECL_PIPELINE_PHASE_CONSTANTSv8::internal::compiler::MidTierRegisterOutputDefinitionPhase2351   DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterOutputDefinition)
2352 
2353   void Run(PipelineData* data, Zone* temp_zone) {
2354     DefineOutputs(data->mid_tier_register_allocator_data());
2355   }
2356 };
2357 
2358 struct MidTierRegisterAllocatorPhase {
DECL_PIPELINE_PHASE_CONSTANTSv8::internal::compiler::MidTierRegisterAllocatorPhase2359   DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)
2360 
2361   void Run(PipelineData* data, Zone* temp_zone) {
2362     AllocateRegisters(data->mid_tier_register_allocator_data());
2363   }
2364 };
2365 
2366 struct MidTierSpillSlotAllocatorPhase {
2367   DECL_PIPELINE_PHASE_CONSTANTS(MidTierSpillSlotAllocator)
2368 
2369   void Run(PipelineData* data, Zone* temp_zone) {
2370     AllocateSpillSlots(data->mid_tier_register_allocator_data());
2371   }
2372 };
2373 
2374 struct MidTierPopulateReferenceMapsPhase {
2375   DECL_PIPELINE_PHASE_CONSTANTS(MidTierPopulateReferenceMaps)
2376 
2377   void Run(PipelineData* data, Zone* temp_zone) {
2378     PopulateReferenceMaps(data->mid_tier_register_allocator_data());
2379   }
2380 };
2381 
2382 struct OptimizeMovesPhase {
2383   DECL_PIPELINE_PHASE_CONSTANTS(OptimizeMoves)
2384 
2385   void Run(PipelineData* data, Zone* temp_zone) {
2386     MoveOptimizer move_optimizer(temp_zone, data->sequence());
2387     move_optimizer.Run();
2388   }
2389 };
2390 
2391 struct FrameElisionPhase {
2392   DECL_PIPELINE_PHASE_CONSTANTS(FrameElision)
2393 
2394   void Run(PipelineData* data, Zone* temp_zone) {
2395     FrameElider(data->sequence()).Run();
2396   }
2397 };
2398 
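     // Jump threading: first compute, for each block, the final target reached
     // by following chains of unconditional jumps (ComputeForwarding); if any
     // jump can be forwarded, rewrite the instruction stream in place
     // (ApplyForwarding).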
2399 struct JumpThreadingPhase {
2400   DECL_PIPELINE_PHASE_CONSTANTS(JumpThreading)
2401 
2402   void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
2403     ZoneVector<RpoNumber> result(temp_zone);
2404     if (JumpThreading::ComputeForwarding(temp_zone, &result, data->sequence(),
2405                                          frame_at_start)) {
2406       JumpThreading::ApplyForwarding(temp_zone, result, data->sequence());
2407     }
2408   }
2409 };
2410 
2411 struct AssembleCodePhase {
2412   DECL_PIPELINE_PHASE_CONSTANTS(AssembleCode)
2413 
2414   void Run(PipelineData* data, Zone* temp_zone) {
2415     data->code_generator()->AssembleCode();
2416   }
2417 };
2418 
2419 struct FinalizeCodePhase {
2420   DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(FinalizeCode)
2421 
2422   void Run(PipelineData* data, Zone* temp_zone) {
2423     data->set_code(data->code_generator()->FinalizeCode());
2424   }
2425 };
2426 
2427 
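     // Emits the graph after a phase in up to three forms, depending on which
     // tracing options are set on the OptimizedCompilationInfo: Turbolizer
     // JSON (trace_turbo_json()), a scheduled-graph dump (computing a schedule
     // on the fly if none exists yet; trace_turbo_scheduled()), or a simple
     // textual RPO listing (trace_turbo_graph()).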
2428 struct PrintGraphPhase {
2429   DECL_PIPELINE_PHASE_CONSTANTS(PrintGraph)
2430 
2431   void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
2432     OptimizedCompilationInfo* info = data->info();
2433     Graph* graph = data->graph();
2434 
2435     if (info->trace_turbo_json()) {  // Print JSON.
2436       UnparkedScopeIfNeeded scope(data->broker());
2437       AllowHandleDereference allow_deref;
2438 
2439       TurboJsonFile json_of(info, std::ios_base::app);
2440       json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
2441               << AsJSON(*graph, data->source_positions(), data->node_origins())
2442               << "},\n";
2443     }
2444 
2445     if (info->trace_turbo_scheduled()) {
2446       AccountingAllocator allocator;
2447       Schedule* schedule = data->schedule();
2448       if (schedule == nullptr) {
2449         schedule = Scheduler::ComputeSchedule(
2450             temp_zone, data->graph(), Scheduler::kNoFlags,
2451             &info->tick_counter(), data->profile_data());
2452       }
2453 
2454       UnparkedScopeIfNeeded scope(data->broker());
2455       AllowHandleDereference allow_deref;
2456       CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2457       tracing_scope.stream()
2458           << "-- Graph after " << phase << " -- " << std::endl
2459           << AsScheduledGraph(schedule);
2460     } else if (info->trace_turbo_graph()) {  // Simple textual RPO.
2461       UnparkedScopeIfNeeded scope(data->broker());
2462       AllowHandleDereference allow_deref;
2463       CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2464       tracing_scope.stream()
2465           << "-- Graph after " << phase << " -- " << std::endl
2466           << AsRPO(*graph);
2467     }
2468   }
2469 };
2470 
2471 
2472 struct VerifyGraphPhase {
2473   DECL_PIPELINE_PHASE_CONSTANTS(VerifyGraph)
2474 
2475   void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
2476            bool values_only = false) {
2477     Verifier::CodeType code_type;
2478     switch (data->info()->code_kind()) {
2479       case CodeKind::WASM_FUNCTION:
2480       case CodeKind::WASM_TO_CAPI_FUNCTION:
2481       case CodeKind::WASM_TO_JS_FUNCTION:
2482       case CodeKind::JS_TO_WASM_FUNCTION:
2483       case CodeKind::C_WASM_ENTRY:
2484         code_type = Verifier::kWasm;
2485         break;
2486       default:
2487         code_type = Verifier::kDefault;
2488     }
2489     Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
2490                   values_only ? Verifier::kValuesOnly : Verifier::kAll,
2491                   code_type);
2492   }
2493 };
2494 
2495 #undef DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS
2496 #undef DECL_PIPELINE_PHASE_CONSTANTS
2497 #undef DECL_PIPELINE_PHASE_CONSTANTS_HELPER
2498 
2499 #if V8_ENABLE_WEBASSEMBLY
2500 class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
2501  public:
2502   WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
2503                              std::unique_ptr<Zone> zone, Graph* graph,
2504                              CodeKind kind, std::unique_ptr<char[]> debug_name,
2505                              const AssemblerOptions& options,
2506                              SourcePositionTable* source_positions)
2507       // Note that the OptimizedCompilationInfo is not initialized at the time
2508       // we pass it to the CompilationJob constructor, but it is not
2509       // dereferenced there.
2510       : OptimizedCompilationJob(&info_, "TurboFan",
2511                                 CompilationJob::State::kReadyToExecute),
2512         debug_name_(std::move(debug_name)),
2513         info_(base::CStrVector(debug_name_.get()), graph->zone(), kind),
2514         call_descriptor_(call_descriptor),
2515         zone_stats_(zone->allocator()),
2516         zone_(std::move(zone)),
2517         graph_(graph),
2518         data_(&zone_stats_, &info_, isolate, wasm::GetWasmEngine()->allocator(),
2519               graph_, nullptr, nullptr, source_positions,
2520               zone_->New<NodeOriginTable>(graph_), nullptr, options, nullptr),
2521         pipeline_(&data_) {}
2522 
2523   WasmHeapStubCompilationJob(const WasmHeapStubCompilationJob&) = delete;
2524   WasmHeapStubCompilationJob& operator=(const WasmHeapStubCompilationJob&) =
2525       delete;
2526 
2527  protected:
2528   Status PrepareJobImpl(Isolate* isolate) final;
2529   Status ExecuteJobImpl(RuntimeCallStats* stats,
2530                         LocalIsolate* local_isolate) final;
2531   Status FinalizeJobImpl(Isolate* isolate) final;
2532 
2533  private:
2534   std::unique_ptr<char[]> debug_name_;
2535   OptimizedCompilationInfo info_;
2536   CallDescriptor* call_descriptor_;
2537   ZoneStats zone_stats_;
2538   std::unique_ptr<Zone> zone_;
2539   Graph* graph_;
2540   PipelineData data_;
2541   PipelineImpl pipeline_;
2542 };
2543 
2544 // static
2545 std::unique_ptr<OptimizedCompilationJob>
2546 Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate,
2547                                         CallDescriptor* call_descriptor,
2548                                         std::unique_ptr<Zone> zone,
2549                                         Graph* graph, CodeKind kind,
2550                                         std::unique_ptr<char[]> debug_name,
2551                                         const AssemblerOptions& options,
2552                                         SourcePositionTable* source_positions) {
2553   return std::make_unique<WasmHeapStubCompilationJob>(
2554       isolate, call_descriptor, std::move(zone), graph, kind,
2555       std::move(debug_name), options, source_positions);
2556 }
2557 
2558 CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
2559     Isolate* isolate) {
2560   UNREACHABLE();
2561 }
2562 
2563 CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
2564     RuntimeCallStats* stats, LocalIsolate* local_isolate) {
2565   std::unique_ptr<PipelineStatistics> pipeline_statistics;
2566   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
2567     pipeline_statistics.reset(new PipelineStatistics(
2568         &info_, wasm::GetWasmEngine()->GetOrCreateTurboStatistics(),
2569         &zone_stats_));
2570     pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
2571   }
2572   if (info_.trace_turbo_json() || info_.trace_turbo_graph()) {
2573     CodeTracer::StreamScope tracing_scope(data_.GetCodeTracer());
2574     tracing_scope.stream()
2575         << "---------------------------------------------------\n"
2576         << "Begin compiling method " << info_.GetDebugName().get()
2577         << " using TurboFan" << std::endl;
2578   }
2579   if (info_.trace_turbo_graph()) {  // Simple textual RPO.
2580     StdoutStream{} << "-- wasm stub " << CodeKindToString(info_.code_kind())
2581                    << " graph -- " << std::endl
2582                    << AsRPO(*data_.graph());
2583   }
2584 
2585   if (info_.trace_turbo_json()) {
2586     TurboJsonFile json_of(&info_, std::ios_base::trunc);
2587     json_of << "{\"function\":\"" << info_.GetDebugName().get()
2588             << "\", \"source\":\"\",\n\"phases\":[";
2589   }
2590   pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
2591   pipeline_.Run<MemoryOptimizationPhase>();
2592   pipeline_.ComputeScheduledGraph();
2593   if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
2594     return CompilationJob::SUCCEEDED;
2595   }
2596   return CompilationJob::FAILED;
2597 }
2598 
2599 CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
2600     Isolate* isolate) {
2601   Handle<Code> code;
2602   if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
2603     V8::FatalProcessOutOfMemory(isolate,
2604                                 "WasmHeapStubCompilationJob::FinalizeJobImpl");
2605   }
2606   if (pipeline_.CommitDependencies(code)) {
2607     info_.SetCode(code);
2608 #ifdef ENABLE_DISASSEMBLER
2609     if (FLAG_print_opt_code) {
2610       CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
2611       code->Disassemble(compilation_info()->GetDebugName().get(),
2612                         tracing_scope.stream(), isolate);
2613     }
2614 #endif
2615     PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
2616                                      Handle<AbstractCode>::cast(code),
2617                                      compilation_info()->GetDebugName().get()));
2618     return SUCCEEDED;
2619   }
2620   return FAILED;
2621 }
2622 #endif  // V8_ENABLE_WEBASSEMBLY
2623 
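     // Convenience wrapper: prints the graph after |phase| when graph tracing
     // is enabled, and runs the graph verifier when --turbo-verify is set;
     // |untyped| requests untyped verification.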
2624 void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
2625   if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
2626     Run<PrintGraphPhase>(phase);
2627   }
2628   if (FLAG_turbo_verify) {
2629     Run<VerifyGraphPhase>(untyped);
2630   }
2631 }
2632 
2633 void PipelineImpl::InitializeHeapBroker() {
2634   PipelineData* data = data_;
2635 
2636   data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
2637 
2638   if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
2639     CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2640     tracing_scope.stream()
2641         << "---------------------------------------------------\n"
2642         << "Begin compiling method " << info()->GetDebugName().get()
2643         << " using TurboFan" << std::endl;
2644   }
2645   if (info()->trace_turbo_json()) {
2646     TurboCfgFile tcf(isolate());
2647     tcf << AsC1VCompilation(info());
2648   }
2649 
2650   data->source_positions()->AddDecorator();
2651   if (data->info()->trace_turbo_json()) {
2652     data->node_origins()->AddDecorator();
2653   }
2654 
2655   data->broker()->SetTargetNativeContextRef(data->native_context());
2656   if (data->broker()->is_concurrent_inlining()) {
2657     Run<HeapBrokerInitializationPhase>();
2658     data->broker()->StopSerializing();
2659   }
2660   data->EndPhaseKind();
2661 }
2662 
2663 bool PipelineImpl::CreateGraph() {
2664   PipelineData* data = this->data_;
2665   UnparkedScopeIfNeeded unparked_scope(data->broker());
2666 
2667   data->BeginPhaseKind("V8.TFGraphCreation");
2668 
2669   Run<GraphBuilderPhase>();
2670   RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);
2671 
2672   // Perform function context specialization and inlining (if enabled).
2673   Run<InliningPhase>();
2674   RunPrintAndVerify(InliningPhase::phase_name(), true);
2675 
2676   // Determine the Typer operation flags.
2677   {
2678     SharedFunctionInfoRef shared_info =
2679         MakeRef(data->broker(), info()->shared_info());
2680     if (is_sloppy(shared_info.language_mode()) &&
2681         shared_info.IsUserJavaScript()) {
2682       // Sloppy mode functions always have an Object for this.
2683       data->AddTyperFlag(Typer::kThisIsReceiver);
2684     }
2685     if (IsClassConstructor(shared_info.kind())) {
2686       // Class constructors cannot be [[Call]]ed.
2687       data->AddTyperFlag(Typer::kNewTargetIsReceiver);
2688     }
2689   }
2690 
2691   // Run the type-sensitive lowerings and optimizations on the graph.
2692   {
2693     if (!data->broker()->is_concurrent_inlining()) {
2694       Run<HeapBrokerInitializationPhase>();
2695       Run<CopyMetadataForConcurrentCompilePhase>();
2696       data->broker()->StopSerializing();
2697     }
2698   }
2699 
2700   data->EndPhaseKind();
2701 
2702   return true;
2703 }
2704 
2705 bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
2706   PipelineData* data = this->data_;
2707 
2708   data->BeginPhaseKind("V8.TFLowering");
2709 
2710   // Trim the graph before typing to ensure all nodes are typed.
2711   Run<EarlyGraphTrimmingPhase>();
2712   RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);
2713 
2714   // Type the graph and keep the Typer running such that new nodes get
2715   // automatically typed when they are created.
2716   Run<TyperPhase>(data->CreateTyper());
2717   RunPrintAndVerify(TyperPhase::phase_name());
2718 
2719   Run<TypedLoweringPhase>();
2720   RunPrintAndVerify(TypedLoweringPhase::phase_name());
2721 
2722   if (data->info()->loop_peeling()) {
2723     Run<LoopPeelingPhase>();
2724     RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
2725   } else {
2726     Run<LoopExitEliminationPhase>();
2727     RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
2728   }
2729 
2730   if (FLAG_turbo_load_elimination) {
2731     Run<LoadEliminationPhase>();
2732     RunPrintAndVerify(LoadEliminationPhase::phase_name());
2733   }
2734   data->DeleteTyper();
2735 
2736   if (FLAG_turbo_escape) {
2737     Run<EscapeAnalysisPhase>();
2738     if (data->compilation_failed()) {
2739       info()->AbortOptimization(
2740           BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
2741       data->EndPhaseKind();
2742       return false;
2743     }
2744     RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
2745   }
2746 
2747   if (FLAG_assert_types) {
2748     Run<TypeAssertionsPhase>();
2749     RunPrintAndVerify(TypeAssertionsPhase::phase_name());
2750   }
2751 
2752   // Perform simplified lowering. This has to run without the Typer decorator,
2753   // because we cannot compute meaningful types anyway, and the computed types
2754   // might even conflict with the representation/truncation logic.
2755   Run<SimplifiedLoweringPhase>(linkage);
2756   RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
2757 
2758 #if V8_ENABLE_WEBASSEMBLY
2759   if (data->has_js_wasm_calls()) {
2760     DCHECK(data->info()->inline_js_wasm_calls());
2761     Run<JSWasmInliningPhase>();
2762     RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
2763   }
2764 #endif  // V8_ENABLE_WEBASSEMBLY
2765 
2766   // From now on it is invalid to look at types on the nodes, because the types
2767   // on the nodes might not make sense after representation selection due to the
2768   // way we handle truncations; if we wanted to look at types afterwards, we'd
2769   // essentially need to re-type (large portions of) the graph.
2770 
2771   // In order to catch bugs related to type access after this point, we now
2772   // remove the types from the nodes (currently only in Debug builds).
2773 #ifdef DEBUG
2774   Run<UntyperPhase>();
2775   RunPrintAndVerify(UntyperPhase::phase_name(), true);
2776 #endif
2777 
2778   // Run generic lowering pass.
2779   Run<GenericLoweringPhase>();
2780   RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);
2781 
2782   data->BeginPhaseKind("V8.TFBlockBuilding");
2783 
2784   data->InitializeFrameData(linkage->GetIncomingDescriptor());
2785 
2786   // Run early optimization pass.
2787   Run<EarlyOptimizationPhase>();
2788   RunPrintAndVerify(EarlyOptimizationPhase::phase_name(), true);
2789 
2790   Run<EffectControlLinearizationPhase>();
2791   RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);
2792 
2793   if (FLAG_turbo_store_elimination) {
2794     Run<StoreStoreEliminationPhase>();
2795     RunPrintAndVerify(StoreStoreEliminationPhase::phase_name(), true);
2796   }
2797 
2798   // Optimize control flow.
2799   if (FLAG_turbo_cf_optimization) {
2800     Run<ControlFlowOptimizationPhase>();
2801     RunPrintAndVerify(ControlFlowOptimizationPhase::phase_name(), true);
2802   }
2803 
2804   Run<LateOptimizationPhase>();
2805   RunPrintAndVerify(LateOptimizationPhase::phase_name(), true);
2806 
2807   // Optimize memory access and allocation operations.
2808   Run<MemoryOptimizationPhase>();
2809   RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
2810 
2811   // Run value numbering and machine operator reducer to optimize load/store
2812   // address computation (in particular, reuse the address computation whenever
2813   // possible).
2814   Run<MachineOperatorOptimizationPhase>();
2815   RunPrintAndVerify(MachineOperatorOptimizationPhase::phase_name(), true);
2816 
2817   Run<DecompressionOptimizationPhase>();
2818   RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);
2819 
2820   data->source_positions()->RemoveDecorator();
2821   if (data->info()->trace_turbo_json()) {
2822     data->node_origins()->RemoveDecorator();
2823   }
2824 
2825   ComputeScheduledGraph();
2826 
2827   return SelectInstructions(linkage);
2828 }
2829 
2830 bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
2831   PipelineData* data = this->data_;
2832 
2833   data->BeginPhaseKind("V8.TFLowering");
2834 
2835   // Type the graph and keep the Typer running such that new nodes get
2836   // automatically typed when they are created.
2837   Run<TyperPhase>(data->CreateTyper());
2838   RunPrintAndVerify(TyperPhase::phase_name());
2839 
2840   Run<TypedLoweringPhase>();
2841   RunPrintAndVerify(TypedLoweringPhase::phase_name());
2842 
2843   // TODO(9684): Consider rolling this into the preceding phase or not creating
2844   // LoopExit nodes at all.
2845   Run<LoopExitEliminationPhase>();
2846   RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
2847 
2848   data->DeleteTyper();
2849 
2850   if (FLAG_assert_types) {
2851     Run<TypeAssertionsPhase>();
2852     RunPrintAndVerify(TypeAssertionsPhase::phase_name());
2853   }
2854 
2855   // Perform simplified lowering. This has to run without the Typer decorator,
2856   // because we cannot compute meaningful types anyway, and the computed types
2857   // might even conflict with the representation/truncation logic.
2858   Run<SimplifiedLoweringPhase>(linkage);
2859   RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
2860 
2861 #if V8_ENABLE_WEBASSEMBLY
2862   if (data->has_js_wasm_calls()) {
2863     DCHECK(data->info()->inline_js_wasm_calls());
2864     Run<JSWasmInliningPhase>();
2865     RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
2866   }
2867 #endif  // V8_ENABLE_WEBASSEMBLY
2868 
2869   // From now on it is invalid to look at types on the nodes, because the types
2870   // on the nodes might not make sense after representation selection due to the
2871   // way we handle truncations; if we wanted to look at types afterwards, we'd
2872   // essentially need to re-type (large portions of) the graph.
2873 
2874   // In order to catch bugs related to type access after this point, we now
2875   // remove the types from the nodes (currently only in Debug builds).
2876 #ifdef DEBUG
2877   Run<UntyperPhase>();
2878   RunPrintAndVerify(UntyperPhase::phase_name(), true);
2879 #endif
2880 
2881   // Run generic lowering pass.
2882   Run<GenericLoweringPhase>();
2883   RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);
2884 
2885   data->BeginPhaseKind("V8.TFBlockBuilding");
2886 
2887   data->InitializeFrameData(linkage->GetIncomingDescriptor());
2888 
2889   Run<EffectControlLinearizationPhase>();
2890   RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);
2891 
2892   Run<LateOptimizationPhase>();
2893   RunPrintAndVerify(LateOptimizationPhase::phase_name(), true);
2894 
2895   // Optimize memory access and allocation operations.
2896   Run<MemoryOptimizationPhase>();
2897   RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
2898 
2899   data->source_positions()->RemoveDecorator();
2900   if (data->info()->trace_turbo_json()) {
2901     data->node_origins()->RemoveDecorator();
2902   }
2903 
2904   ComputeScheduledGraph();
2905 
2906   return SelectInstructions(linkage);
2907 }
2908 
2909 namespace {
2910 
2911 // Compute a hash of the given graph, in a way that should provide the same
2912 // result in multiple runs of mksnapshot, meaning the hash cannot depend on any
2913 // external pointer values or uncompressed heap constants. This hash can be used
2914 // to reject profiling data if the builtin's current code doesn't match the
2915 // version that was profiled. Hash collisions are not catastrophic; in the worst
2916 // case, we just defer some blocks that ideally shouldn't be deferred. The
2917 // result value is in the valid Smi range.
2918 int HashGraphForPGO(Graph* graph) {
2919   AccountingAllocator allocator;
2920   Zone local_zone(&allocator, ZONE_NAME);
2921 
2922   constexpr NodeId kUnassigned = static_cast<NodeId>(-1);
2923 
2924   constexpr byte kUnvisited = 0;
2925   constexpr byte kOnStack = 1;
2926   constexpr byte kVisited = 2;
2927 
2928   // Do a depth-first post-order traversal of the graph. For every node, hash:
2929   //
2930   //   - the node's traversal number
2931   //   - the opcode
2932   //   - the number of inputs
2933   //   - each input node's traversal number
2934   //
2935   // What's a traversal number? We can't use node IDs because they're not stable
2936   // build-to-build, so we assign a new number for each node as it is visited.
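       //
       // Purely illustrative example (not actual pipeline code): for a minimal
       // graph in which End directly consumes Start, the post-order visit is
       //   Start: hash = hash_combine(hash, 1, IrOpcode::kStart, 0);
       //   End:   hash = hash_combine(hash, 0, IrOpcode::kEnd, 1);
       //          hash = hash_combine(hash, 1);  // End's single input
       // where 0 and 1 are traversal numbers (End is pushed first and gets 0,
       // its input Start gets 1 but is hashed first). Only structure and
       // opcodes feed the hash, never raw node IDs.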
2937 
2938   ZoneVector<byte> state(graph->NodeCount(), kUnvisited, &local_zone);
2939   ZoneVector<NodeId> traversal_numbers(graph->NodeCount(), kUnassigned,
2940                                        &local_zone);
2941   ZoneStack<Node*> stack(&local_zone);
2942 
2943   NodeId visited_count = 0;
2944   size_t hash = 0;
2945 
2946   stack.push(graph->end());
2947   state[graph->end()->id()] = kOnStack;
2948   traversal_numbers[graph->end()->id()] = visited_count++;
2949   while (!stack.empty()) {
2950     Node* n = stack.top();
2951     bool pop = true;
2952     for (Node* const i : n->inputs()) {
2953       if (state[i->id()] == kUnvisited) {
2954         state[i->id()] = kOnStack;
2955         traversal_numbers[i->id()] = visited_count++;
2956         stack.push(i);
2957         pop = false;
2958         break;
2959       }
2960     }
2961     if (pop) {
2962       state[n->id()] = kVisited;
2963       stack.pop();
2964       hash = base::hash_combine(hash, traversal_numbers[n->id()], n->opcode(),
2965                                 n->InputCount());
2966       for (Node* const i : n->inputs()) {
2967         DCHECK(traversal_numbers[i->id()] != kUnassigned);
2968         hash = base::hash_combine(hash, traversal_numbers[i->id()]);
2969       }
2970     }
2971   }
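       // Fold the size_t hash into the valid Smi range (see the comment above
       // this function) so it can be stored in, and compared against, the
       // builtin's profile data.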
2972   return Smi(IntToSmi(static_cast<int>(hash))).value();
2973 }
2974 
2975 }  // namespace
2976 
2977 MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
2978     Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
2979     JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
2980     const char* debug_name, Builtin builtin, const AssemblerOptions& options,
2981     const ProfileDataFromFile* profile_data) {
2982   OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
2983                                 kind);
2984   info.set_builtin(builtin);
2985 
2986   // Construct a pipeline for scheduling and code generation.
2987   ZoneStats zone_stats(isolate->allocator());
2988   NodeOriginTable node_origins(graph);
2989   JumpOptimizationInfo jump_opt;
2990   bool should_optimize_jumps = isolate->serializer_enabled() &&
2991                                FLAG_turbo_rewrite_far_jumps &&
2992                                !FLAG_turbo_profiling;
2993   PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
2994                     jsgraph, nullptr, source_positions, &node_origins,
2995                     should_optimize_jumps ? &jump_opt : nullptr, options,
2996                     profile_data);
2997   PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
2998   RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
2999   data.set_verify_graph(FLAG_verify_csa);
3000   std::unique_ptr<PipelineStatistics> pipeline_statistics;
3001   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
3002     pipeline_statistics.reset(new PipelineStatistics(
3003         &info, isolate->GetTurboStatistics(), &zone_stats));
3004     pipeline_statistics->BeginPhaseKind("V8.TFStubCodegen");
3005   }
3006 
3007   PipelineImpl pipeline(&data);
3008 
3009   if (info.trace_turbo_json() || info.trace_turbo_graph()) {
3010     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
3011     tracing_scope.stream()
3012         << "---------------------------------------------------\n"
3013         << "Begin compiling " << debug_name << " using TurboFan" << std::endl;
3014     if (info.trace_turbo_json()) {
3015       TurboJsonFile json_of(&info, std::ios_base::trunc);
3016       json_of << "{\"function\" : ";
3017       JsonPrintFunctionSource(json_of, -1, info.GetDebugName(),
3018                               Handle<Script>(), isolate,
3019                               Handle<SharedFunctionInfo>());
3020       json_of << ",\n\"phases\":[";
3021     }
3022     pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
3023   }
3024 
3025   pipeline.Run<CsaEarlyOptimizationPhase>();
3026   pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
3027 
3028   // Optimize memory access and allocation operations.
3029   pipeline.Run<MemoryOptimizationPhase>();
3030   pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
3031 
3032   pipeline.Run<CsaOptimizationPhase>(true);
3033   pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);
3034 
3035   pipeline.Run<DecompressionOptimizationPhase>();
3036   pipeline.RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(),
3037                              true);
3038 
3039   pipeline.Run<VerifyGraphPhase>(true);
3040 
3041   int graph_hash_before_scheduling = 0;
3042   if (FLAG_turbo_profiling || profile_data != nullptr) {
3043     graph_hash_before_scheduling = HashGraphForPGO(data.graph());
3044   }
3045 
3046   if (profile_data != nullptr &&
3047       profile_data->hash() != graph_hash_before_scheduling) {
3048     PrintF("Rejected profile data for %s due to function change\n", debug_name);
3049     profile_data = nullptr;
3050     data.set_profile_data(profile_data);
3051   }
3052 
3053   pipeline.ComputeScheduledGraph();
3054   DCHECK_NOT_NULL(data.schedule());
3055 
3056   // First run code generation on a copy of the pipeline, in order to be able to
3057   // repeat it for jump optimization. The first run has to happen on a temporary
3058   // pipeline to avoid deletion of zones on the main pipeline.
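       // (Sketch of the two-pass scheme: this first pass runs with |jump_opt|
       // in "collecting" mode and records actual code offsets; if shrinking
       // far jumps then looks profitable, is_optimizable() becomes true below
       // and the main pipeline regenerates code using the collected info.)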
3059   PipelineData second_data(&zone_stats, &info, isolate, isolate->allocator(),
3060                            data.graph(), data.jsgraph(), data.schedule(),
3061                            data.source_positions(), data.node_origins(),
3062                            data.jump_optimization_info(), options,
3063                            profile_data);
3064   PipelineJobScope second_scope(&second_data,
3065                                 isolate->counters()->runtime_call_stats());
3066   second_data.set_verify_graph(FLAG_verify_csa);
3067   PipelineImpl second_pipeline(&second_data);
3068   second_pipeline.SelectInstructionsAndAssemble(call_descriptor);
3069 
3070   if (FLAG_turbo_profiling) {
3071     info.profiler_data()->SetHash(graph_hash_before_scheduling);
3072   }
3073 
3074   if (jump_opt.is_optimizable()) {
3075     jump_opt.set_optimizing();
3076     return pipeline.GenerateCode(call_descriptor);
3077   } else {
3078     return second_pipeline.FinalizeCode();
3079   }
3080 }
3081 
3082 struct BlockStartsAsJSON {
3083   const ZoneVector<int>* block_starts;
3084 };
3085 
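     // Writes a JSON fragment mapping block IDs to code offsets, e.g. (with
     // made-up offsets): , "blockIdToOffset": {"0":0, "1":24, "2":56},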
3086 std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
3087   out << ", \"blockIdToOffset\": {";
3088   bool need_comma = false;
3089   for (size_t i = 0; i < s.block_starts->size(); ++i) {
3090     if (need_comma) out << ", ";
3091     int offset = (*s.block_starts)[i];
3092     out << "\"" << i << "\":" << offset;
3093     need_comma = true;
3094   }
3095   out << "},";
3096   return out;
3097 }
3098 
3099 #if V8_ENABLE_WEBASSEMBLY
3100 // static
3101 wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
3102     CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind,
3103     const char* debug_name, const AssemblerOptions& options,
3104     SourcePositionTable* source_positions) {
3105   Graph* graph = mcgraph->graph();
3106   OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
3107                                 kind);
3108   // Construct a pipeline for scheduling and code generation.
3109   wasm::WasmEngine* wasm_engine = wasm::GetWasmEngine();
3110   ZoneStats zone_stats(wasm_engine->allocator());
3111   NodeOriginTable* node_positions = graph->zone()->New<NodeOriginTable>(graph);
3112   PipelineData data(&zone_stats, wasm_engine, &info, mcgraph, nullptr,
3113                     source_positions, node_positions, options);
3114   std::unique_ptr<PipelineStatistics> pipeline_statistics;
3115   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
3116     pipeline_statistics.reset(new PipelineStatistics(
3117         &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats));
3118     pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
3119   }
3120 
3121   PipelineImpl pipeline(&data);
3122 
3123   if (info.trace_turbo_json() || info.trace_turbo_graph()) {
3124     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
3125     tracing_scope.stream()
3126         << "---------------------------------------------------\n"
3127         << "Begin compiling method " << info.GetDebugName().get()
3128         << " using TurboFan" << std::endl;
3129   }
3130 
3131   if (info.trace_turbo_graph()) {  // Simple textual RPO.
3132     StdoutStream{} << "-- wasm stub " << CodeKindToString(kind) << " graph -- "
3133                    << std::endl
3134                    << AsRPO(*graph);
3135   }
3136 
3137   if (info.trace_turbo_json()) {
3138     TurboJsonFile json_of(&info, std::ios_base::trunc);
3139     json_of << "{\"function\":\"" << info.GetDebugName().get()
3140             << "\", \"source\":\"\",\n\"phases\":[";
3141   }
3142 
3143   pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true);
3144 
3145   pipeline.Run<MemoryOptimizationPhase>();
3146   pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
3147 
3148   pipeline.ComputeScheduledGraph();
3149 
3150   Linkage linkage(call_descriptor);
3151   CHECK(pipeline.SelectInstructions(&linkage));
3152   pipeline.AssembleCode(&linkage);
3153 
3154   CodeGenerator* code_generator = pipeline.code_generator();
3155   wasm::WasmCompilationResult result;
3156   code_generator->tasm()->GetCode(
3157       nullptr, &result.code_desc, code_generator->safepoint_table_builder(),
3158       static_cast<int>(code_generator->GetHandlerTableOffset()));
3159   result.instr_buffer = code_generator->tasm()->ReleaseBuffer();
3160   result.source_positions = code_generator->GetSourcePositionTable();
3161   result.protected_instructions_data =
3162       code_generator->GetProtectedInstructionsData();
3163   result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
3164   result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
3165   result.result_tier = wasm::ExecutionTier::kTurbofan;
3166   if (kind == CodeKind::WASM_TO_JS_FUNCTION) {
3167     result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
3168   }
3169 
3170   DCHECK(result.succeeded());
3171 
3172   if (info.trace_turbo_json()) {
3173     TurboJsonFile json_of(&info, std::ios_base::app);
3174     json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
3175             << BlockStartsAsJSON{&code_generator->block_starts()}
3176             << "\"data\":\"";
3177 #ifdef ENABLE_DISASSEMBLER
3178     std::stringstream disassembler_stream;
3179     Disassembler::Decode(
3180         nullptr, disassembler_stream, result.code_desc.buffer,
3181         result.code_desc.buffer + result.code_desc.safepoint_table_offset,
3182         CodeReference(&result.code_desc));
3183     for (auto const c : disassembler_stream.str()) {
3184       json_of << AsEscapedUC16ForJSON(c);
3185     }
3186 #endif  // ENABLE_DISASSEMBLER
3187     json_of << "\"}\n]";
3188     json_of << "\n}";
3189   }
3190 
3191   if (info.trace_turbo_json() || info.trace_turbo_graph()) {
3192     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
3193     tracing_scope.stream()
3194         << "---------------------------------------------------\n"
3195         << "Finished compiling method " << info.GetDebugName().get()
3196         << " using TurboFan" << std::endl;
3197   }
3198 
3199   return result;
3200 }
3201 
3202 // static
3203 void Pipeline::GenerateCodeForWasmFunction(
3204     OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
3205     const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
3206     CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
3207     NodeOriginTable* node_origins, wasm::FunctionBody function_body,
3208     const wasm::WasmModule* module, int function_index,
3209     std::vector<compiler::WasmLoopInfo>* loop_info) {
3210   auto* wasm_engine = wasm::GetWasmEngine();
3211   ZoneStats zone_stats(wasm_engine->allocator());
3212   std::unique_ptr<PipelineStatistics> pipeline_statistics(
3213       CreatePipelineStatistics(function_body, module, info, &zone_stats));
3214   PipelineData data(&zone_stats, wasm_engine, info, mcgraph,
3215                     pipeline_statistics.get(), source_positions, node_origins,
3216                     WasmAssemblerOptions());
3217 
3218   PipelineImpl pipeline(&data);
3219 
3220   if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
3221     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
3222     tracing_scope.stream()
3223         << "---------------------------------------------------\n"
3224         << "Begin compiling method " << data.info()->GetDebugName().get()
3225         << " using TurboFan" << std::endl;
3226   }
3227 
3228   pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
3229 
3230   data.BeginPhaseKind("V8.WasmOptimization");
3231   if (FLAG_wasm_loop_unrolling) {
3232     pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
3233     pipeline.RunPrintAndVerify(WasmLoopUnrollingPhase::phase_name(), true);
3234   }
3235   if (FLAG_wasm_inlining) {
3236     pipeline.Run<WasmInliningPhase>(env, wire_bytes_storage);
3237     pipeline.RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
3238   }
3239   const bool is_asm_js = is_asmjs_module(module);
3240 
3241   if (FLAG_wasm_opt || is_asm_js) {
3242     pipeline.Run<WasmOptimizationPhase>(is_asm_js);
3243     pipeline.RunPrintAndVerify(WasmOptimizationPhase::phase_name(), true);
3244   } else {
3245     pipeline.Run<WasmBaseOptimizationPhase>();
3246     pipeline.RunPrintAndVerify(WasmBaseOptimizationPhase::phase_name(), true);
3247   }
3248 
3249   pipeline.Run<MemoryOptimizationPhase>();
3250   pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
3251 
3252   if (FLAG_turbo_splitting && !is_asm_js) {
3253     data.info()->set_splitting();
3254   }
3255 
3256   if (data.node_origins()) {
3257     data.node_origins()->RemoveDecorator();
3258   }
3259 
3260   data.BeginPhaseKind("V8.InstructionSelection");
3261   pipeline.ComputeScheduledGraph();
3262 
3263   Linkage linkage(call_descriptor);
3264   if (!pipeline.SelectInstructions(&linkage)) return;
3265   pipeline.AssembleCode(&linkage);
3266 
3267   auto result = std::make_unique<wasm::WasmCompilationResult>();
3268   CodeGenerator* code_generator = pipeline.code_generator();
3269   code_generator->tasm()->GetCode(
3270       nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
3271       static_cast<int>(code_generator->GetHandlerTableOffset()));
3272 
3273   result->instr_buffer = code_generator->tasm()->ReleaseBuffer();
3274   result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
3275   result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
3276   result->source_positions = code_generator->GetSourcePositionTable();
3277   result->protected_instructions_data =
3278       code_generator->GetProtectedInstructionsData();
3279   result->result_tier = wasm::ExecutionTier::kTurbofan;
3280 
3281   if (data.info()->trace_turbo_json()) {
3282     TurboJsonFile json_of(data.info(), std::ios_base::app);
3283     json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
3284             << BlockStartsAsJSON{&code_generator->block_starts()}
3285             << "\"data\":\"";
3286 #ifdef ENABLE_DISASSEMBLER
3287     std::stringstream disassembler_stream;
3288     Disassembler::Decode(
3289         nullptr, disassembler_stream, result->code_desc.buffer,
3290         result->code_desc.buffer + result->code_desc.safepoint_table_offset,
3291         CodeReference(&result->code_desc));
3292     for (auto const c : disassembler_stream.str()) {
3293       json_of << AsEscapedUC16ForJSON(c);
3294     }
3295 #endif  // ENABLE_DISASSEMBLER
3296     json_of << "\"}\n]";
3297     json_of << "\n}";
3298   }
3299 
3300   if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
3301     CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
3302     tracing_scope.stream()
3303         << "---------------------------------------------------\n"
3304         << "Finished compiling method " << data.info()->GetDebugName().get()
3305         << " using TurboFan" << std::endl;
3306   }
3307 
3308   DCHECK(result->succeeded());
3309   info->SetWasmCompilationResult(std::move(result));
3310 }
3311 #endif  // V8_ENABLE_WEBASSEMBLY
3312 
3313 // static
3314 MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
3315     OptimizedCompilationInfo* info, Isolate* isolate,
3316     std::unique_ptr<JSHeapBroker>* out_broker) {
3317   ZoneStats zone_stats(isolate->allocator());
3318   std::unique_ptr<PipelineStatistics> pipeline_statistics(
3319       CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
3320                                &zone_stats));
3321 
3322   PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
3323   PipelineImpl pipeline(&data);
3324 
3325   Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
3326 
3327   {
3328     CompilationHandleScope compilation_scope(isolate, info);
3329     CanonicalHandleScope canonical(isolate, info);
3330     info->ReopenHandlesInNewHandleScope(isolate);
3331     pipeline.InitializeHeapBroker();
3332     // Emulating the proper pipeline, we call CreateGraph in different places
3333     // (i.e. before or after creating a LocalIsolateScope) depending on
3334     // is_concurrent_inlining.
3335     if (!data.broker()->is_concurrent_inlining()) {
3336       if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
3337     }
3338   }
3339 
3340   {
3341     LocalIsolateScope local_isolate_scope(data.broker(), info,
3342                                           isolate->main_thread_local_isolate());
3343     if (data.broker()->is_concurrent_inlining()) {
3344       if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
3345     }
3346     // We selectively Unpark inside OptimizeGraph.
3347     if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
3348 
3349     pipeline.AssembleCode(&linkage);
3350   }
3351 
3352   const bool will_retire_broker = out_broker == nullptr;
3353   if (!will_retire_broker) {
3354     // If the broker is going to be kept alive, pass the persistent and the
3355     // canonical handles containers back to the JSHeapBroker since it will
3356     // outlive the OptimizedCompilationInfo.
3357     data.broker()->SetPersistentAndCopyCanonicalHandlesForTesting(
3358         info->DetachPersistentHandles(), info->DetachCanonicalHandles());
3359   }
3360 
3361   Handle<Code> code;
3362   if (pipeline.FinalizeCode(will_retire_broker).ToHandle(&code) &&
3363       pipeline.CommitDependencies(code)) {
3364     if (!will_retire_broker) *out_broker = data.ReleaseBroker();
3365     return code;
3366   }
3367   return MaybeHandle<Code>();
3368 }
3369 
3370 // static
3371 MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
3372     OptimizedCompilationInfo* info, Isolate* isolate,
3373     CallDescriptor* call_descriptor, Graph* graph,
3374     const AssemblerOptions& options, Schedule* schedule) {
3375   // Construct a pipeline for scheduling and code generation.
3376   ZoneStats zone_stats(isolate->allocator());
3377   NodeOriginTable* node_positions = info->zone()->New<NodeOriginTable>(graph);
3378   PipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph,
3379                     nullptr, schedule, nullptr, node_positions, nullptr,
3380                     options, nullptr);
3381   std::unique_ptr<PipelineStatistics> pipeline_statistics;
3382   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
3383     pipeline_statistics.reset(new PipelineStatistics(
3384         info, isolate->GetTurboStatistics(), &zone_stats));
3385     pipeline_statistics->BeginPhaseKind("V8.TFTestCodegen");
3386   }
3387 
3388   PipelineImpl pipeline(&data);
3389 
3390   if (info->trace_turbo_json()) {
3391     TurboJsonFile json_of(info, std::ios_base::trunc);
3392     json_of << "{\"function\":\"" << info->GetDebugName().get()
3393             << "\", \"source\":\"\",\n\"phases\":[";
3394   }
3395   // TODO(rossberg): Should this really be untyped?
3396   pipeline.RunPrintAndVerify("V8.TFMachineCode", true);
3397 
3398   // Ensure we have a schedule.
3399   if (data.schedule() == nullptr) {
3400     pipeline.ComputeScheduledGraph();
3401   }
3402 
3403   Handle<Code> code;
3404   if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
3405       pipeline.CommitDependencies(code)) {
3406     return code;
3407   }
3408   return MaybeHandle<Code>();
3409 }
3410 
3411 // static
3412 std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
3413     Isolate* isolate, Handle<JSFunction> function, CodeKind code_kind,
3414     bool has_script, BytecodeOffset osr_offset, JavaScriptFrame* osr_frame) {
3415   Handle<SharedFunctionInfo> shared =
3416       handle(function->shared(), function->GetIsolate());
3417   return std::make_unique<PipelineCompilationJob>(
3418       isolate, shared, function, osr_offset, osr_frame, code_kind);
3419 }
3420 
3421 bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
3422                                            InstructionSequence* sequence,
3423                                            bool use_mid_tier_register_allocator,
3424                                            bool run_verifier) {
3425   OptimizedCompilationInfo info(base::ArrayVector("testing"), sequence->zone(),
3426                                 CodeKind::FOR_TESTING);
3427   ZoneStats zone_stats(sequence->isolate()->allocator());
3428   PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
3429   data.InitializeFrameData(nullptr);
3430 
3431   if (info.trace_turbo_json()) {
3432     TurboJsonFile json_of(&info, std::ios_base::trunc);
3433     json_of << "{\"function\":\"" << info.GetDebugName().get()
3434             << "\", \"source\":\"\",\n\"phases\":[";
3435   }
3436 
3437   PipelineImpl pipeline(&data);
3438   if (use_mid_tier_register_allocator) {
3439     pipeline.AllocateRegistersForMidTier(config, nullptr, run_verifier);
3440   } else {
3441     pipeline.AllocateRegistersForTopTier(config, nullptr, run_verifier);
3442   }
3443 
3444   return !data.compilation_failed();
3445 }
3446 
3447 void PipelineImpl::ComputeScheduledGraph() {
3448   PipelineData* data = this->data_;
3449 
3450   // We should only schedule the graph if it is not scheduled yet.
3451   DCHECK_NULL(data->schedule());
3452 
3453   Run<ComputeSchedulePhase>();
3454   TraceScheduleAndVerify(data->info(), data, data->schedule(), "schedule");
3455 }
3456 
3457 bool PipelineImpl::SelectInstructions(Linkage* linkage) {
3458   auto call_descriptor = linkage->GetIncomingDescriptor();
3459   PipelineData* data = this->data_;
3460 
3461   // We should have a scheduled graph.
3462   DCHECK_NOT_NULL(data->graph());
3463   DCHECK_NOT_NULL(data->schedule());
3464 
3465   if (FLAG_turbo_profiling) {
3466     UnparkedScopeIfNeeded unparked_scope(data->broker());
3467     data->info()->set_profiler_data(BasicBlockInstrumentor::Instrument(
3468         info(), data->graph(), data->schedule(), data->isolate()));
3469   }
3470 
3471   bool verify_stub_graph =
3472       data->verify_graph() ||
3473       (FLAG_turbo_verify_machine_graph != nullptr &&
3474        (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
3475         !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())));
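       // --turbo-verify-machine-graph accepts either "*" (verify every graph)
       // or the debug name of a single function/stub to verify.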
3476   // Jump optimization runs instruction selection twice, but the instruction
3477   // selector mutates nodes like swapping the inputs of a load, which can
3478   // violate the machine graph verification rules. So we skip the second
3479   // verification for a graph that has already been verified.
3480   auto jump_opt = data->jump_optimization_info();
3481   if (jump_opt && jump_opt->is_optimizing()) {
3482     verify_stub_graph = false;
3483   }
3484   if (verify_stub_graph) {
3485     if (FLAG_trace_verify_csa) {
3486       UnparkedScopeIfNeeded scope(data->broker());
3487       AllowHandleDereference allow_deref;
3488       CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
3489       tracing_scope.stream()
3490           << "--------------------------------------------------\n"
3491           << "--- Verifying " << data->debug_name()
3492           << " generated by TurboFan\n"
3493           << "--------------------------------------------------\n"
3494           << *data->schedule()
3495           << "--------------------------------------------------\n"
3496           << "--- End of " << data->debug_name() << " generated by TurboFan\n"
3497           << "--------------------------------------------------\n";
3498     }
3499     // TODO(jgruber): The parameter is called is_stub but actually contains
3500     // something different. Update either the name or its contents.
3501     bool is_stub = !data->info()->IsOptimizing();
3502 #if V8_ENABLE_WEBASSEMBLY
3503     if (data->info()->IsWasm()) is_stub = false;
3504 #endif  // V8_ENABLE_WEBASSEMBLY
3505     Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName);
3506     MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage, is_stub,
3507                               data->debug_name(), &temp_zone);
3508   }
3509 
3510   data->InitializeInstructionSequence(call_descriptor);
3511 
3512   // Depending on which code path led us to this function, the frame may or
3513   // may not have been initialized. If it hasn't yet, initialize it now.
3514   if (!data->frame()) {
3515     data->InitializeFrameData(call_descriptor);
3516   }
3517   // Select and schedule instructions covering the scheduled graph.
3518   Run<InstructionSelectionPhase>(linkage);
3519   if (data->compilation_failed()) {
3520     info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
3521     data->EndPhaseKind();
3522     return false;
3523   }
3524 
3525   if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
3526     UnparkedScopeIfNeeded scope(data->broker());
3527     AllowHandleDereference allow_deref;
3528     TurboCfgFile tcf(isolate());
3529     tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
3530                  data->sequence());
3531   }
3532 
3533   if (info()->trace_turbo_json()) {
3534     std::ostringstream source_position_output;
3535     // Output source position information before the graph is deleted.
3536     if (data_->source_positions() != nullptr) {
3537       data_->source_positions()->PrintJson(source_position_output);
3538     } else {
3539       source_position_output << "{}";
3540     }
3541     source_position_output << ",\n\"NodeOrigins\" : ";
3542     data_->node_origins()->PrintJson(source_position_output);
3543     data_->set_source_position_output(source_position_output.str());
3544   }
3545 
3546   data->DeleteGraphZone();
3547 
3548   data->BeginPhaseKind("V8.TFRegisterAllocation");
3549 
3550   bool run_verifier = FLAG_turbo_verify_allocation;
3551 
3552   // Allocate registers.
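       // (Turboprop may opt into the faster mid-tier allocator below via
       // --turboprop-mid-tier-reg-alloc; all other code uses the top-tier
       // allocator.)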
3553   if (call_descriptor->HasRestrictedAllocatableRegisters()) {
3554     RegList registers = call_descriptor->AllocatableRegisters();
3555     DCHECK_LT(0, NumRegs(registers));
3556     std::unique_ptr<const RegisterConfiguration> config;
3557     config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
3558     AllocateRegistersForTopTier(config.get(), call_descriptor, run_verifier);
3559   } else {
3560     const RegisterConfiguration* config = RegisterConfiguration::Default();
3561     if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) {
3562       AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
3563     } else {
3564       AllocateRegistersForTopTier(config, call_descriptor, run_verifier);
3565     }
3566   }
3567 
3568   // Verify the instruction sequence has the same hash in two stages.
3569   VerifyGeneratedCodeIsIdempotent();
3570 
3571   Run<FrameElisionPhase>();
3572   if (data->compilation_failed()) {
3573     info()->AbortOptimization(
3574         BailoutReason::kNotEnoughVirtualRegistersRegalloc);
3575     data->EndPhaseKind();
3576     return false;
3577   }
3578 
3579   // TODO(mtrofin): move this off to the register allocator.
3580   bool generate_frame_at_start =
3581       data_->sequence()->instruction_blocks().front()->must_construct_frame();
3582   // Optimize jumps.
3583   if (FLAG_turbo_jt) {
3584     Run<JumpThreadingPhase>(generate_frame_at_start);
3585   }
3586 
3587   data->EndPhaseKind();
3588 
3589   return true;
3590 }
3591 
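     // With jump optimization enabled, code generation runs twice. This hashes
     // the instruction sequence (block and virtual-register counts, every
     // instruction's opcode and operand counts, and all register
     // representations) and CHECKs that the second run reproduces the hash
     // recorded by the first.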
3592 void PipelineImpl::VerifyGeneratedCodeIsIdempotent() {
3593   PipelineData* data = this->data_;
3594   JumpOptimizationInfo* jump_opt = data->jump_optimization_info();
3595   if (jump_opt == nullptr) return;
3596 
3597   InstructionSequence* code = data->sequence();
3598   int instruction_blocks = code->InstructionBlockCount();
3599   int virtual_registers = code->VirtualRegisterCount();
3600   size_t hash_code = base::hash_combine(instruction_blocks, virtual_registers);
3601   for (auto instr : *code) {
3602     hash_code = base::hash_combine(hash_code, instr->opcode(),
3603                                    instr->InputCount(), instr->OutputCount());
3604   }
3605   for (int i = 0; i < virtual_registers; i++) {
3606     hash_code = base::hash_combine(hash_code, code->GetRepresentation(i));
3607   }
3608   if (jump_opt->is_collecting()) {
3609     jump_opt->set_hash_code(hash_code);
3610   } else {
3611     CHECK_EQ(hash_code, jump_opt->hash_code());
3612   }
3613 }
3614 
3615 struct InstructionStartsAsJSON {
3616   const ZoneVector<TurbolizerInstructionStartInfo>* instr_starts;
3617 };
3618 
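     // Writes a JSON fragment mapping instruction indices to PC offsets, e.g.
     // (illustrative values): , "instructionOffsetToPCOffset": {"0": {"gap": 0,
     // "arch": 4, "condition": 8}}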
3619 std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
3620   out << ", \"instructionOffsetToPCOffset\": {";
3621   bool need_comma = false;
3622   for (size_t i = 0; i < s.instr_starts->size(); ++i) {
3623     if (need_comma) out << ", ";
3624     const TurbolizerInstructionStartInfo& info = (*s.instr_starts)[i];
3625     out << "\"" << i << "\": {";
3626     out << "\"gap\": " << info.gap_pc_offset;
3627     out << ", \"arch\": " << info.arch_instr_pc_offset;
3628     out << ", \"condition\": " << info.condition_pc_offset;
3629     out << "}";
3630     need_comma = true;
3631   }
3632   out << "}";
3633   return out;
3634 }
3635 
3636 struct TurbolizerCodeOffsetsInfoAsJSON {
3637   const TurbolizerCodeOffsetsInfo* offsets_info;
3638 };
3639 
3640 std::ostream& operator<<(std::ostream& out,
3641                          const TurbolizerCodeOffsetsInfoAsJSON& s) {
3642   out << ", \"codeOffsetsInfo\": {";
3643   out << "\"codeStartRegisterCheck\": "
3644       << s.offsets_info->code_start_register_check << ", ";
3645   out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", ";
3646   out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", ";
3647   out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", ";
3648   out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits
3649       << ", ";
3650   out << "\"pools\": " << s.offsets_info->pools << ", ";
3651   out << "\"jumpTables\": " << s.offsets_info->jump_tables;
3652   out << "}";
3653   return out;
3654 }
3655 
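// Runs the code generator over the register-allocated instruction sequence
// to produce machine code, then releases the instruction zone; only
// FinalizeCode still needs to run after this.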
void PipelineImpl::AssembleCode(Linkage* linkage) {
  PipelineData* data = this->data_;
  data->BeginPhaseKind("V8.TFCodeGeneration");
  data->InitializeCodeGenerator(linkage);

  UnparkedScopeIfNeeded unparked_scope(data->broker(), FLAG_code_comments);

  Run<AssembleCodePhase>();
  if (data->info()->trace_turbo_json()) {
    TurboJsonFile json_of(data->info(), std::ios_base::app);
    json_of << "{\"name\":\"code generation\""
            << ", \"type\":\"instructions\""
            << InstructionStartsAsJSON{&data->code_generator()->instr_starts()}
            << TurbolizerCodeOffsetsInfoAsJSON{
                   &data->code_generator()->offsets_info()};
    json_of << "},\n";
  }
  data->DeleteInstructionZone();
  data->EndPhaseKind();
}

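// Materializes the final Code object. Retires the broker first (unless the
// caller still needs it), installs the code on the compilation info, and,
// when tracing, appends the disassembly and source positions to close out
// the Turbolizer JSON file.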
MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
  PipelineData* data = this->data_;
  data->BeginPhaseKind("V8.TFFinalizeCode");
  if (data->broker() && retire_broker) {
    data->broker()->Retire();
  }
  Run<FinalizeCodePhase>();

  MaybeHandle<Code> maybe_code = data->code();
  Handle<Code> code;
  if (!maybe_code.ToHandle(&code)) {
    return maybe_code;
  }

  info()->SetCode(code);
  PrintCode(isolate(), code, info());

  if (info()->trace_turbo_json()) {
    TurboJsonFile json_of(info(), std::ios_base::app);

    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
            << BlockStartsAsJSON{&data->code_generator()->block_starts()}
            << "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembly_stream;
    code->Disassemble(nullptr, disassembly_stream, isolate());
    std::string disassembly_string(disassembly_stream.str());
    for (const auto& c : disassembly_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n],\n";
    json_of << "\"nodePositions\":";
    json_of << data->source_position_output() << ",\n";
    JsonPrintAllSourceWithPositions(json_of, data->info(), isolate());
    json_of << "\n}";
  }
  if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Finished compiling method " << info()->GetDebugName().get()
        << " using TurboFan" << std::endl;
  }
  data->EndPhaseKind();
  return code;
}

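// Convenience wrapper combining the two back-end halves: instruction
// selection plus register allocation, then code assembly. Returns false if
// instruction selection or register allocation bails out.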
bool PipelineImpl::SelectInstructionsAndAssemble(
    CallDescriptor* call_descriptor) {
  Linkage linkage(call_descriptor);

  // Perform instruction selection and register allocation.
  if (!SelectInstructions(&linkage)) return false;

  // Generate the final machine code.
  AssembleCode(&linkage);
  return true;
}

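// Full back-end entry point. A minimal usage sketch (caller-side names are
// hypothetical):
//   PipelineImpl pipeline(&data);
//   Handle<Code> code;
//   if (!pipeline.GenerateCode(call_descriptor).ToHandle(&code)) {
//     // Compilation bailed out; no code was produced.
//   }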
MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
  if (!SelectInstructionsAndAssemble(call_descriptor)) {
    return MaybeHandle<Code>();
  }
  return FinalizeCode();
}

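// Commits the compilation dependencies recorded during optimization on the
// finished code object. Returns false if a dependency has since been
// invalidated, signalling the caller to discard the code.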
bool PipelineImpl::CommitDependencies(Handle<Code> code) {
  return data_->dependencies() == nullptr ||
         data_->dependencies()->Commit(code);
}

namespace {

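// Dumps the instruction sequence under the given phase name: as JSON
// (including register allocation data) when JSON tracing is enabled, and as
// plain text via the code tracer when graph tracing is enabled.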
void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
                   const char* phase_name) {
  if (info->trace_turbo_json()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    TurboJsonFile json_of(info, std::ios_base::app);
    json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\""
            << ",\"blocks\":" << InstructionSequenceAsJSON{data->sequence()}
            << ",\"register_allocation\":{"
            << RegisterAllocationDataAsJSON{*(data->register_allocation_data()),
                                            *(data->sequence())}
            << "}},\n";
  }
  if (info->trace_turbo_graph()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream() << "----- Instruction sequence " << phase_name
                           << " -----\n"
                           << *data->sequence();
  }
}

}  // namespace

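// Top-tier (linear-scan) register allocation: resolve constraints and phis,
// build live ranges and bundles, allocate general-purpose and then FP
// registers, assign spill slots, and finally connect ranges, resolve control
// flow, and populate reference maps. When run_verifier is set, a
// RegisterAllocatorVerifier cross-checks the assignment and the gap moves.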
void PipelineImpl::AllocateRegistersForTopTier(
    const RegisterConfiguration* config, CallDescriptor* call_descriptor,
    bool run_verifier) {
  PipelineData* data = this->data_;
  // Don't track usage for this zone in compiler stats.
  std::unique_ptr<Zone> verifier_zone;
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    verifier_zone.reset(
        new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
    verifier = verifier_zone->New<RegisterAllocatorVerifier>(
        verifier_zone.get(), config, data->sequence(), data->frame());
  }

#ifdef DEBUG
  data_->sequence()->ValidateEdgeSplitForm();
  data_->sequence()->ValidateDeferredBlockEntryPaths();
  data_->sequence()->ValidateDeferredBlockExitPaths();
#endif

  RegisterAllocationFlags flags;
  if (data->info()->trace_turbo_allocation()) {
    flags |= RegisterAllocationFlag::kTraceAllocation;
  }
  data->InitializeTopTierRegisterAllocationData(config, call_descriptor, flags);

  Run<MeetRegisterConstraintsPhase>();
  Run<ResolvePhisPhase>();
  Run<BuildLiveRangesPhase>();
  Run<BuildBundlesPhase>();

  TraceSequence(info(), data, "before register allocation");
  if (verifier != nullptr) {
    CHECK(!data->top_tier_register_allocation_data()
               ->ExistsUseWithoutDefinition());
    CHECK(data->top_tier_register_allocation_data()
              ->RangesDefinedInDeferredStayInDeferred());
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData(
        "PreAllocation", data->top_tier_register_allocation_data());
  }

  Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();

  if (data->sequence()->HasFPVirtualRegisters()) {
    Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
  }

  Run<DecideSpillingModePhase>();
  Run<AssignSpillSlotsPhase>();
  Run<CommitAssignmentPhase>();

  // TODO(chromium:725559): remove this check once
  // we understand the cause of the bug. We keep just the
  // check at the end of the allocation.
  if (verifier != nullptr) {
    verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
  }

  Run<ConnectRangesPhase>();

  Run<ResolveControlFlowPhase>();

  Run<PopulateReferenceMapsPhase>();

  if (FLAG_turbo_move_optimization) {
    Run<OptimizeMovesPhase>();
  }

  TraceSequence(info(), data, "after register allocation");

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData(
        "CodeGen", data->top_tier_register_allocation_data());
  }

  data->DeleteRegisterAllocationZone();
}

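// Mid-tier register allocation (used by Turboprop when enabled): a simpler,
// faster sequence of phases that defines outputs, allocates registers, and
// assigns spill slots, trading some allocation quality for compile speed.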
void PipelineImpl::AllocateRegistersForMidTier(
    const RegisterConfiguration* config, CallDescriptor* call_descriptor,
    bool run_verifier) {
  PipelineData* data = data_;
  // Don't track usage for this zone in compiler stats.
  std::unique_ptr<Zone> verifier_zone;
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    verifier_zone.reset(
        new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
    verifier = verifier_zone->New<RegisterAllocatorVerifier>(
        verifier_zone.get(), config, data->sequence(), data->frame());
  }

#ifdef DEBUG
  data->sequence()->ValidateEdgeSplitForm();
  data->sequence()->ValidateDeferredBlockEntryPaths();
  data->sequence()->ValidateDeferredBlockExitPaths();
#endif
  data->InitializeMidTierRegisterAllocationData(config, call_descriptor);

  TraceSequence(info(), data, "before register allocation");

  Run<MidTierRegisterOutputDefinitionPhase>();

  Run<MidTierRegisterAllocatorPhase>();

  Run<MidTierSpillSlotAllocatorPhase>();

  Run<MidTierPopulateReferenceMapsPhase>();

  TraceSequence(info(), data, "after register allocation");

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  data->DeleteRegisterAllocationZone();
}

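// Trivial accessors forwarding to the underlying PipelineData.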
OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }

Isolate* PipelineImpl::isolate() const { return data_->isolate(); }

CodeGenerator* PipelineImpl::code_generator() const {
  return data_->code_generator();
}

ObserveNodeManager* PipelineImpl::observe_node_manager() const {
  return data_->observe_node_manager();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8