1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/bytecode-analysis.h"
6
7 #include "src/interpreter/bytecode-array-iterator.h"
8 #include "src/interpreter/bytecode-array-random-iterator.h"
9 #include "src/objects/objects-inl.h"
10 #include "src/utils/ostreams.h"
11
12 namespace v8 {
13 namespace internal {
14 namespace compiler {
15
16 using interpreter::Bytecode;
17 using interpreter::Bytecodes;
18 using interpreter::OperandType;
19
// Tracks which parameters/registers are assigned inside a loop. Parameters
// and locals share a single bit vector: parameters occupy the first
// |parameter_count| bits, locals the following |register_count| bits.
BytecodeLoopAssignments::BytecodeLoopAssignments(int parameter_count,
                                                 int register_count, Zone* zone)
    : parameter_count_(parameter_count),
      bit_vector_(
          zone->New<BitVector>(parameter_count + register_count, zone)) {}
25
Add(interpreter::Register r)26 void BytecodeLoopAssignments::Add(interpreter::Register r) {
27 if (r.is_parameter()) {
28 bit_vector_->Add(r.ToParameterIndex(parameter_count_));
29 } else {
30 bit_vector_->Add(parameter_count_ + r.index());
31 }
32 }
33
AddList(interpreter::Register r,uint32_t count)34 void BytecodeLoopAssignments::AddList(interpreter::Register r, uint32_t count) {
35 if (r.is_parameter()) {
36 for (uint32_t i = 0; i < count; i++) {
37 DCHECK(interpreter::Register(r.index() + i).is_parameter());
38 bit_vector_->Add(r.ToParameterIndex(parameter_count_) + i);
39 }
40 } else {
41 for (uint32_t i = 0; i < count; i++) {
42 DCHECK(!interpreter::Register(r.index() + i).is_parameter());
43 bit_vector_->Add(parameter_count_ + r.index() + i);
44 }
45 }
46 }
47
48
// Merges another loop's assignment set into this one (used to propagate an
// inner loop's assignments into its enclosing loop).
void BytecodeLoopAssignments::Union(const BytecodeLoopAssignments& other) {
  bit_vector_->Union(*other.bit_vector_);
}
52
// Returns true if parameter |index| is assigned somewhere in the loop.
// Parameters occupy the first parameter_count() bits of the bit vector.
bool BytecodeLoopAssignments::ContainsParameter(int index) const {
  DCHECK_GE(index, 0);
  DCHECK_LT(index, parameter_count());
  return bit_vector_->Contains(index);
}
58
// Returns true if local register |index| is assigned somewhere in the loop.
// Locals are stored after the parameters, hence the parameter_count_ offset.
bool BytecodeLoopAssignments::ContainsLocal(int index) const {
  DCHECK_GE(index, 0);
  DCHECK_LT(index, local_count());
  return bit_vector_->Contains(parameter_count_ + index);
}
64
// A resume point for generator suspend |suspend_id|. |target_offset| is the
// offset jumped to at this level (a loop header for non-leaf targets), while
// |final_target_offset| is the target of the next step in the resume chain
// (see Leaf and AtLoopHeader below).
ResumeJumpTarget::ResumeJumpTarget(int suspend_id, int target_offset,
                                   int final_target_offset)
    : suspend_id_(suspend_id),
      target_offset_(target_offset),
      final_target_offset_(final_target_offset) {}
70
// A leaf target jumps directly to the actual resume point, so the immediate
// target and the final target coincide.
ResumeJumpTarget ResumeJumpTarget::Leaf(int suspend_id, int target_offset) {
  return ResumeJumpTarget(suspend_id, target_offset, target_offset);
}
74
// Wraps |next| behind a loop header: a resume for this suspend id first
// jumps to the loop header, and from there continues towards |next|'s
// target. This keeps loops reducible (see the comment in Analyze()).
ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
                                                const ResumeJumpTarget& next) {
  return ResumeJumpTarget(next.suspend_id(), loop_header_offset,
                          next.target_offset());
}
80
// Runs the full analysis eagerly on construction. |osr_bailout_id| (if not
// None) identifies the JumpLoop whose header becomes the OSR entry point;
// |analyze_liveness| controls whether per-bytecode liveness is computed.
BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
                                   Zone* zone, BytecodeOffset osr_bailout_id,
                                   bool analyze_liveness)
    : bytecode_array_(bytecode_array),
      zone_(zone),
      osr_bailout_id_(osr_bailout_id),
      analyze_liveness_(analyze_liveness),
      loop_stack_(zone),
      loop_end_index_queue_(zone),
      resume_jump_targets_(zone),
      end_to_header_(zone),
      header_to_info_(zone),
      osr_entry_point_(-1) {
  // The liveness map is only materialized when liveness analysis was
  // requested; all other analysis state is always built.
  if (analyze_liveness) liveness_map_.emplace(bytecode_array->length(), zone);
  Analyze();
}
97
98 namespace {
99
// Computes the in-liveness of |bytecode| in place: |in_liveness| is expected
// to start as a copy of the bytecode's out-liveness (see UpdateLiveness).
// Everything the bytecode writes is killed first, then everything it reads
// is marked live.
void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
                      const interpreter::BytecodeArrayIterator& iterator) {
  int num_operands = Bytecodes::NumberOfOperands(bytecode);
  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);

  // Special case Suspend and Resume to just pass through liveness.
  if (bytecode == Bytecode::kSuspendGenerator) {
    // The generator object has to be live.
    in_liveness->MarkRegisterLive(iterator.GetRegisterOperand(0).index());
    // Suspend additionally reads and returns the accumulator
    DCHECK(Bytecodes::ReadsAccumulator(bytecode));
    in_liveness->MarkAccumulatorLive();
    return;
  }
  if (bytecode == Bytecode::kResumeGenerator) {
    // The generator object has to be live.
    in_liveness->MarkRegisterLive(iterator.GetRegisterOperand(0).index());
    return;
  }

  // Kill phase: the accumulator and all output registers written by this
  // bytecode are dead before it executes. Parameters are ignored here.
  if (Bytecodes::WritesAccumulator(bytecode)) {
    in_liveness->MarkAccumulatorDead();
  }
  for (int i = 0; i < num_operands; ++i) {
    switch (operand_types[i]) {
      case OperandType::kRegOut: {
        interpreter::Register r = iterator.GetRegisterOperand(i);
        if (!r.is_parameter()) {
          in_liveness->MarkRegisterDead(r.index());
        }
        break;
      }
      case OperandType::kRegOutList: {
        // A register list is encoded as two operands: start register and
        // count; consume both (note the i++).
        interpreter::Register r = iterator.GetRegisterOperand(i++);
        uint32_t reg_count = iterator.GetRegisterCountOperand(i);
        if (!r.is_parameter()) {
          for (uint32_t j = 0; j < reg_count; ++j) {
            DCHECK(!interpreter::Register(r.index() + j).is_parameter());
            in_liveness->MarkRegisterDead(r.index() + j);
          }
        }
        break;
      }
      case OperandType::kRegOutPair: {
        interpreter::Register r = iterator.GetRegisterOperand(i);
        if (!r.is_parameter()) {
          DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
          in_liveness->MarkRegisterDead(r.index());
          in_liveness->MarkRegisterDead(r.index() + 1);
        }
        break;
      }
      case OperandType::kRegOutTriple: {
        interpreter::Register r = iterator.GetRegisterOperand(i);
        if (!r.is_parameter()) {
          DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
          DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
          in_liveness->MarkRegisterDead(r.index());
          in_liveness->MarkRegisterDead(r.index() + 1);
          in_liveness->MarkRegisterDead(r.index() + 2);
        }
        break;
      }
      default:
        DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
        break;
    }
  }

  // Short-star bytecodes write an implicit register not present in the
  // operand list; kill it too.
  if (Bytecodes::WritesImplicitRegister(bytecode)) {
    in_liveness->MarkRegisterDead(
        interpreter::Register::FromShortStar(bytecode).index());
  }

  // Gen phase: everything this bytecode reads is live before it executes.
  if (Bytecodes::ReadsAccumulator(bytecode)) {
    in_liveness->MarkAccumulatorLive();
  }
  for (int i = 0; i < num_operands; ++i) {
    switch (operand_types[i]) {
      case OperandType::kReg: {
        interpreter::Register r = iterator.GetRegisterOperand(i);
        if (!r.is_parameter()) {
          in_liveness->MarkRegisterLive(r.index());
        }
        break;
      }
      case OperandType::kRegPair: {
        interpreter::Register r = iterator.GetRegisterOperand(i);
        if (!r.is_parameter()) {
          DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
          in_liveness->MarkRegisterLive(r.index());
          in_liveness->MarkRegisterLive(r.index() + 1);
        }
        break;
      }
      case OperandType::kRegList: {
        // Start register followed by a count operand (note the i++).
        interpreter::Register r = iterator.GetRegisterOperand(i++);
        uint32_t reg_count = iterator.GetRegisterCountOperand(i);
        if (!r.is_parameter()) {
          for (uint32_t j = 0; j < reg_count; ++j) {
            DCHECK(!interpreter::Register(r.index() + j).is_parameter());
            in_liveness->MarkRegisterLive(r.index() + j);
          }
        }
        break;
      }
      default:
        DCHECK(!Bytecodes::IsRegisterInputOperandType(operand_types[i]));
        break;
    }
  }
}
212
// Computes the out-liveness of |bytecode| as the union of the in-liveness of
// its successors: forward jump / switch targets, the fall-through bytecode
// (|next_bytecode_in_liveness|), and the matching exception handler for
// bytecodes that may throw. Back edges (JumpLoop) are deliberately NOT
// handled here; the caller updates them in the loop iterations.
void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState* out_liveness,
                       BytecodeLivenessState* next_bytecode_in_liveness,
                       const interpreter::BytecodeArrayIterator& iterator,
                       Handle<BytecodeArray> bytecode_array,
                       const BytecodeLivenessMap& liveness_map) {
  int current_offset = iterator.current_offset();

  // Special case Suspend and Resume to just pass through liveness.
  if (bytecode == Bytecode::kSuspendGenerator ||
      bytecode == Bytecode::kResumeGenerator) {
    out_liveness->Union(*next_bytecode_in_liveness);
    return;
  }

  // Update from jump target (if any). Skip loops, we update these manually in
  // the liveness iterations.
  if (Bytecodes::IsForwardJump(bytecode)) {
    int target_offset = iterator.GetJumpTargetOffset();
    out_liveness->Union(*liveness_map.GetInLiveness(target_offset));
  } else if (Bytecodes::IsSwitch(bytecode)) {
    for (interpreter::JumpTableTargetOffset entry :
         iterator.GetJumpTableTargetOffsets()) {
      out_liveness->Union(*liveness_map.GetInLiveness(entry.target_offset));
    }
  }

  // Update from next bytecode (unless there isn't one or this is an
  // unconditional jump).
  if (next_bytecode_in_liveness != nullptr &&
      !Bytecodes::IsUnconditionalJump(bytecode)) {
    out_liveness->Union(*next_bytecode_in_liveness);
  }

  // Update from exception handler (if any).
  if (!interpreter::Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
    int handler_context;
    // TODO(leszeks): We should look up this range only once per entry.
    HandlerTable table(*bytecode_array);
    int handler_offset =
        table.LookupRange(current_offset, &handler_context, nullptr);

    if (handler_offset != -1) {
      bool was_accumulator_live = out_liveness->AccumulatorIsLive();
      out_liveness->Union(*liveness_map.GetInLiveness(handler_offset));
      // The handler's context register must also be live across the throwing
      // bytecode so the handler can restore it.
      out_liveness->MarkRegisterLive(handler_context);
      if (!was_accumulator_live) {
        // The accumulator is reset to the exception on entry into a handler,
        // and so shouldn't be considered live coming out of this bytecode just
        // because it's live coming into the handler. So, kill the accumulator
        // if the handler is the only thing that made it live.
        out_liveness->MarkAccumulatorDead();

        // TODO(leszeks): Ideally the accumulator wouldn't be considered live at
        // the start of the handler, but looking up if the current bytecode is
        // the start of a handler is not free, so we should only do it if we
        // decide it's necessary.
      }
    }
  }
}
273
// Performs one backwards liveness step for |bytecode|: recompute its
// out-liveness from its successors, copy that into the in-liveness, and
// apply the bytecode's own kills/gens. On return,
// |*next_bytecode_in_liveness| points at this bytecode's in-liveness, ready
// to serve as the fall-through successor of the textually preceding
// bytecode.
void UpdateLiveness(Bytecode bytecode, BytecodeLiveness const& liveness,
                    BytecodeLivenessState** next_bytecode_in_liveness,
                    const interpreter::BytecodeArrayIterator& iterator,
                    Handle<BytecodeArray> bytecode_array,
                    const BytecodeLivenessMap& liveness_map) {
  UpdateOutLiveness(bytecode, liveness.out, *next_bytecode_in_liveness,
                    iterator, bytecode_array, liveness_map);
  liveness.in->CopyFrom(*liveness.out);
  UpdateInLiveness(bytecode, liveness.in, iterator);

  *next_bytecode_in_liveness = liveness.in;
}
286
// Records every register written by |bytecode| — all explicit output
// operands plus the implicit short-star register — into |assignments|.
void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments* assignments,
                       const interpreter::BytecodeArrayIterator& iterator) {
  int num_operands = Bytecodes::NumberOfOperands(bytecode);
  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);

  for (int i = 0; i < num_operands; ++i) {
    switch (operand_types[i]) {
      case OperandType::kRegOut: {
        assignments->Add(iterator.GetRegisterOperand(i));
        break;
      }
      case OperandType::kRegOutList: {
        // A register list is encoded as start register + count; consume both
        // operands (note the i++).
        interpreter::Register r = iterator.GetRegisterOperand(i++);
        uint32_t reg_count = iterator.GetRegisterCountOperand(i);
        assignments->AddList(r, reg_count);
        break;
      }
      case OperandType::kRegOutPair: {
        assignments->AddList(iterator.GetRegisterOperand(i), 2);
        break;
      }
      case OperandType::kRegOutTriple: {
        assignments->AddList(iterator.GetRegisterOperand(i), 3);
        break;
      }
      default:
        DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
        break;
    }
  }

  if (Bytecodes::WritesImplicitRegister(bytecode)) {
    assignments->Add(interpreter::Register::FromShortStar(bytecode));
  }
}
322
323 } // namespace
324
Analyze()325 void BytecodeAnalysis::Analyze() {
326 loop_stack_.push({-1, nullptr});
327
328 BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
329 int generator_switch_index = -1;
330 int osr_loop_end_offset = osr_bailout_id_.ToInt();
331 DCHECK_EQ(osr_loop_end_offset < 0, osr_bailout_id_.IsNone());
332
333 interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
334 for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
335 Bytecode bytecode = iterator.current_bytecode();
336 int current_offset = iterator.current_offset();
337
338 if (bytecode == Bytecode::kSwitchOnGeneratorState) {
339 DCHECK_EQ(generator_switch_index, -1);
340 generator_switch_index = iterator.current_index();
341 } else if (bytecode == Bytecode::kJumpLoop) {
342 // Every byte up to and including the last byte within the backwards jump
343 // instruction is considered part of the loop, set loop end accordingly.
344 int loop_end = current_offset + iterator.current_bytecode_size();
345 int loop_header = iterator.GetJumpTargetOffset();
346 PushLoop(loop_header, loop_end);
347
348 if (current_offset == osr_loop_end_offset) {
349 osr_entry_point_ = loop_header;
350 } else if (current_offset < osr_loop_end_offset) {
351 // Assert that we've found the osr_entry_point if we've gone past the
352 // osr_loop_end_offset. Note, we are iterating the bytecode in reverse,
353 // so the less-than in the above condition is correct.
354 DCHECK_LE(0, osr_entry_point_);
355 }
356
357 // Save the index so that we can do another pass later.
358 if (analyze_liveness_) {
359 loop_end_index_queue_.push_back(iterator.current_index());
360 }
361 }
362
363 // We have to pop from loop_stack_ if:
364 // 1) We entered the body of the loop
365 // 2) If we have a JumpLoop that jumps to itself (i.e an empty loop)
366 bool pop_current_loop = loop_stack_.size() > 1 &&
367 (bytecode != Bytecode::kJumpLoop ||
368 iterator.GetJumpTargetOffset() == current_offset);
369
370 if (pop_current_loop) {
371 LoopStackEntry& current_loop = loop_stack_.top();
372 LoopInfo* current_loop_info = current_loop.loop_info;
373
374 // TODO(leszeks): Ideally, we'd only set values that were assigned in
375 // the loop *and* are live when the loop exits. However, this requires
376 // tracking the out-liveness of *all* loop exits, which is not
377 // information we currently have.
378 UpdateAssignments(bytecode, ¤t_loop_info->assignments(), iterator);
379
380 // Update suspend counts for this loop.
381 if (bytecode == Bytecode::kSuspendGenerator) {
382 int suspend_id = iterator.GetUnsignedImmediateOperand(3);
383 int resume_offset = current_offset + iterator.current_bytecode_size();
384 current_loop_info->AddResumeTarget(
385 ResumeJumpTarget::Leaf(suspend_id, resume_offset));
386 }
387
388 // If we've reached the header of the loop, pop it off the stack.
389 if (current_offset == current_loop.header_offset) {
390 loop_stack_.pop();
391 if (loop_stack_.size() > 1) {
392 // If there is still an outer loop, propagate inner loop assignments.
393 LoopInfo* parent_loop_info = loop_stack_.top().loop_info;
394
395 parent_loop_info->assignments().Union(
396 current_loop_info->assignments());
397
398 // Also, propagate resume targets. Instead of jumping to the target
399 // itself, the outer loop will jump to this loop header for any
400 // targets that are inside the current loop, so that this loop stays
401 // reducible. Hence, a nested loop of the form:
402 //
403 // switch (#1 -> suspend1, #2 -> suspend2)
404 // loop {
405 // suspend1: suspend #1
406 // loop {
407 // suspend2: suspend #2
408 // }
409 // }
410 //
411 // becomes:
412 //
413 // switch (#1 -> loop1, #2 -> loop1)
414 // loop1: loop {
415 // switch (#1 -> suspend1, #2 -> loop2)
416 // suspend1: suspend #1
417 // loop2: loop {
418 // switch (#2 -> suspend2)
419 // suspend2: suspend #2
420 // }
421 // }
422 for (const auto& target : current_loop_info->resume_jump_targets()) {
423 parent_loop_info->AddResumeTarget(
424 ResumeJumpTarget::AtLoopHeader(current_offset, target));
425 }
426
427 } else {
428 // Otherwise, just propagate inner loop suspends to top-level.
429 for (const auto& target : current_loop_info->resume_jump_targets()) {
430 resume_jump_targets_.push_back(
431 ResumeJumpTarget::AtLoopHeader(current_offset, target));
432 }
433 }
434 }
435 } else if (bytecode == Bytecode::kSuspendGenerator) {
436 // If we're not in a loop, we still need to look for suspends.
437 // TODO(leszeks): It would be nice to de-duplicate this with the in-loop
438 // case
439 int suspend_id = iterator.GetUnsignedImmediateOperand(3);
440 int resume_offset = current_offset + iterator.current_bytecode_size();
441 resume_jump_targets_.push_back(
442 ResumeJumpTarget::Leaf(suspend_id, resume_offset));
443 }
444
445 if (analyze_liveness_) {
446 BytecodeLiveness const& liveness = liveness_map().InitializeLiveness(
447 current_offset, bytecode_array()->register_count(), zone());
448 UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
449 bytecode_array(), liveness_map());
450 }
451 }
452
453 DCHECK_EQ(loop_stack_.size(), 1u);
454 DCHECK_EQ(loop_stack_.top().header_offset, -1);
455
456 DCHECK(ResumeJumpTargetsAreValid());
457
458 if (!analyze_liveness_) return;
459
460 // At this point, every bytecode has a valid in and out liveness, except for
461 // propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness
462 // analysis iterations can only add additional liveness bits that are pulled
463 // across these back edges.
464 //
465 // Furthermore, a loop header's in-liveness can only change based on any
466 // bytecodes *after* the loop end -- it cannot change as a result of the
467 // JumpLoop liveness being updated, as the only liveness bits than can be
468 // added to the loop body are those of the loop header.
469 //
470 // So, if we know that the liveness of bytecodes after a loop header won't
471 // change (e.g. because there are no loops in them, or we have already ensured
472 // those loops are valid), we can safely update the loop end and pass over the
473 // loop body, and then never have to pass over that loop end again, because we
474 // have shown that its target, the loop header, can't change from the entries
475 // after the loop, and can't change from any loop body pass.
476 //
477 // This means that in a pass, we can iterate backwards over the bytecode
478 // array, process any loops that we encounter, and on subsequent passes we can
479 // skip processing those loops (though we still have to process inner loops).
480 //
481 // Equivalently, we can queue up loop ends from back to front, and pass over
482 // the loops in that order, as this preserves both the bottom-to-top and
483 // outer-to-inner requirements.
484
485 for (int loop_end_index : loop_end_index_queue_) {
486 iterator.GoToIndex(loop_end_index);
487
488 DCHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
489
490 int header_offset = iterator.GetJumpTargetOffset();
491 int end_offset = iterator.current_offset();
492
493 BytecodeLiveness& header_liveness =
494 liveness_map().GetLiveness(header_offset);
495 BytecodeLiveness& end_liveness = liveness_map().GetLiveness(end_offset);
496
497 if (!end_liveness.out->UnionIsChanged(*header_liveness.in)) {
498 // Only update the loop body if the loop end liveness changed.
499 continue;
500 }
501 end_liveness.in->CopyFrom(*end_liveness.out);
502 next_bytecode_in_liveness = end_liveness.in;
503
504 // Advance into the loop body.
505 --iterator;
506 for (; iterator.current_offset() > header_offset; --iterator) {
507 Bytecode bytecode = iterator.current_bytecode();
508 int current_offset = iterator.current_offset();
509 BytecodeLiveness const& liveness =
510 liveness_map().GetLiveness(current_offset);
511 UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
512 bytecode_array(), liveness_map());
513 }
514 // Now we are at the loop header. Since the in-liveness of the header
515 // can't change, we need only to update the out-liveness.
516 UpdateOutLiveness(iterator.current_bytecode(), header_liveness.out,
517 next_bytecode_in_liveness, iterator, bytecode_array(),
518 liveness_map());
519 }
520
521 // Process the generator switch statement separately, once the loops are done.
522 // This has to be a separate pass because the generator switch can jump into
523 // the middle of loops (and is the only kind of jump that can jump across a
524 // loop header).
525 if (generator_switch_index != -1) {
526 iterator.GoToIndex(generator_switch_index);
527 DCHECK_EQ(iterator.current_bytecode(), Bytecode::kSwitchOnGeneratorState);
528
529 int current_offset = iterator.current_offset();
530 BytecodeLiveness& switch_liveness =
531 liveness_map().GetLiveness(current_offset);
532
533 bool any_changed = false;
534 for (interpreter::JumpTableTargetOffset entry :
535 iterator.GetJumpTableTargetOffsets()) {
536 if (switch_liveness.out->UnionIsChanged(
537 *liveness_map().GetInLiveness(entry.target_offset))) {
538 any_changed = true;
539 }
540 }
541
542 // If the switch liveness changed, we have to propagate it up the remaining
543 // bytecodes before it.
544 if (any_changed) {
545 switch_liveness.in->CopyFrom(*switch_liveness.out);
546 UpdateInLiveness(Bytecode::kSwitchOnGeneratorState, switch_liveness.in,
547 iterator);
548 next_bytecode_in_liveness = switch_liveness.in;
549 for (--iterator; iterator.IsValid(); --iterator) {
550 Bytecode bytecode = iterator.current_bytecode();
551 int current_offset = iterator.current_offset();
552 BytecodeLiveness const& liveness =
553 liveness_map().GetLiveness(current_offset);
554
555 // There shouldn't be any more loops.
556 DCHECK_NE(bytecode, Bytecode::kJumpLoop);
557
558 UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
559 bytecode_array(), liveness_map());
560 }
561 }
562 }
563
564 DCHECK(analyze_liveness_);
565 if (FLAG_trace_environment_liveness) {
566 StdoutStream of;
567 PrintLivenessTo(of);
568 }
569
570 DCHECK(LivenessIsValid());
571 }
572
// Registers a new loop [loop_header, loop_end) and pushes it onto the loop
// stack. Called while iterating the bytecode in reverse, so every new loop
// must nest inside the current top-of-stack loop, and each loop is seen
// exactly once (hence the uniqueness DCHECKs below).
void BytecodeAnalysis::PushLoop(int loop_header, int loop_end) {
  DCHECK_LT(loop_header, loop_end);
  DCHECK_LT(loop_stack_.top().header_offset, loop_header);
  DCHECK_EQ(end_to_header_.find(loop_end), end_to_header_.end());
  DCHECK_EQ(header_to_info_.find(loop_header), header_to_info_.end());

  // The enclosing loop's header (or -1 for the top-level sentinel) becomes
  // this loop's parent.
  int parent_offset = loop_stack_.top().header_offset;

  end_to_header_.insert({loop_end, loop_header});
  auto it = header_to_info_.insert(
      {loop_header, LoopInfo(parent_offset, bytecode_array_->parameter_count(),
                             bytecode_array_->register_count(), zone_)});
  // Get the loop info pointer from the output of insert.
  LoopInfo* loop_info = &it.first->second;

  loop_stack_.push({loop_header, loop_info});
}
590
IsLoopHeader(int offset) const591 bool BytecodeAnalysis::IsLoopHeader(int offset) const {
592 return header_to_info_.find(offset) != header_to_info_.end();
593 }
594
// Returns the header offset of the innermost loop containing |offset|, or -1
// if the offset is not inside any loop. Works by locating the first loop end
// strictly after |offset| and inspecting its header.
int BytecodeAnalysis::GetLoopOffsetFor(int offset) const {
  auto loop_end_to_header = end_to_header_.upper_bound(offset);
  // If there is no next end => offset is not in a loop.
  if (loop_end_to_header == end_to_header_.end()) {
    return -1;
  }
  // If the header precedes the offset, this is the loop
  //
  //   .> header  <--loop_end_to_header
  //   |
  //   |  <--offset
  //   |
  //   `- end
  if (loop_end_to_header->second <= offset) {
    return loop_end_to_header->second;
  }
  // Otherwise there is a (potentially nested) loop after this offset.
  //
  //    <--offset
  //
  //   .> header
  //   |
  //   | .> header  <--loop_end_to_header
  //   | |
  //   | `- end
  //   |
  //   `- end
  // We just return the parent of the next loop (might be -1).
  DCHECK(header_to_info_.upper_bound(offset) != header_to_info_.end());

  return header_to_info_.upper_bound(offset)->second.parent_offset();
}
627
GetLoopInfoFor(int header_offset) const628 const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
629 DCHECK(IsLoopHeader(header_offset));
630
631 return header_to_info_.find(header_offset)->second;
632 }
633
GetInLivenessFor(int offset) const634 const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
635 int offset) const {
636 if (!analyze_liveness_) return nullptr;
637
638 return liveness_map().GetInLiveness(offset);
639 }
640
GetOutLivenessFor(int offset) const641 const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor(
642 int offset) const {
643 if (!analyze_liveness_) return nullptr;
644
645 return liveness_map().GetOutLiveness(offset);
646 }
647
PrintLivenessTo(std::ostream & os) const648 std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const {
649 interpreter::BytecodeArrayIterator iterator(bytecode_array());
650
651 for (; !iterator.done(); iterator.Advance()) {
652 int current_offset = iterator.current_offset();
653
654 const BitVector& in_liveness =
655 GetInLivenessFor(current_offset)->bit_vector();
656 const BitVector& out_liveness =
657 GetOutLivenessFor(current_offset)->bit_vector();
658
659 for (int i = 0; i < in_liveness.length(); ++i) {
660 os << (in_liveness.Contains(i) ? "L" : ".");
661 }
662 os << " -> ";
663
664 for (int i = 0; i < out_liveness.length(); ++i) {
665 os << (out_liveness.Contains(i) ? "L" : ".");
666 }
667
668 os << " | " << current_offset << ": ";
669 iterator.PrintTo(os) << std::endl;
670 }
671
672 return os;
673 }
674
675 #if DEBUG
// Debug-only validation: checks that the recorded resume jump targets form a
// consistent chain from the generator state switch down to the actual
// ResumeGenerator bytecodes, reporting every violation to stderr rather than
// stopping at the first one.
bool BytecodeAnalysis::ResumeJumpTargetsAreValid() {
  bool valid = true;

  // Find the generator switch.
  interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
  for (iterator.GoToStart(); iterator.IsValid(); ++iterator) {
    if (iterator.current_bytecode() == Bytecode::kSwitchOnGeneratorState) {
      break;
    }
  }

  // If the iterator is invalid, we've reached the end without finding the
  // generator switch. So, ensure there are no jump targets and exit.
  if (!iterator.IsValid()) {
    // Check top-level.
    if (!resume_jump_targets().empty()) {
      PrintF(stderr,
             "Found %zu top-level resume targets but no resume switch\n",
             resume_jump_targets().size());
      valid = false;
    }
    // Check loops.
    for (const std::pair<const int, LoopInfo>& loop_info : header_to_info_) {
      if (!loop_info.second.resume_jump_targets().empty()) {
        PrintF(stderr,
               "Found %zu resume targets at loop at offset %d, but no resume "
               "switch\n",
               loop_info.second.resume_jump_targets().size(), loop_info.first);
        valid = false;
      }
    }

    return valid;
  }

  // Otherwise, we've found the resume switch. Check that the top level jumps
  // only to leaves and loop headers, then check that each loop header handles
  // all the unresolved jumps, also jumping only to leaves and inner loop
  // headers.

  // First collect all required suspend ids.
  std::map<int, int> unresolved_suspend_ids;
  for (interpreter::JumpTableTargetOffset offset :
       iterator.GetJumpTableTargetOffsets()) {
    int suspend_id = offset.case_value;
    int resume_offset = offset.target_offset;

    unresolved_suspend_ids[suspend_id] = resume_offset;
  }

  // Check top-level.
  if (!ResumeJumpTargetLeavesResolveSuspendIds(-1, resume_jump_targets(),
                                               &unresolved_suspend_ids)) {
    valid = false;
  }
  // Check loops.
  for (const std::pair<const int, LoopInfo>& loop_info : header_to_info_) {
    if (!ResumeJumpTargetLeavesResolveSuspendIds(
            loop_info.first, loop_info.second.resume_jump_targets(),
            &unresolved_suspend_ids)) {
      valid = false;
    }
  }

  // Check that everything is resolved.
  if (!unresolved_suspend_ids.empty()) {
    PrintF(stderr,
           "Found suspend ids that are not resolved by a final leaf resume "
           "jump:\n");

    for (const std::pair<const int, int>& target : unresolved_suspend_ids) {
      PrintF(stderr, "  %d -> %d\n", target.first, target.second);
    }
    valid = false;
  }

  return valid;
}
754
// Debug-only helper for ResumeJumpTargetsAreValid: checks each target in
// |resume_jump_targets| against |unresolved_suspend_ids|. Leaf targets must
// point at the expected ResumeGenerator bytecode and are erased from the map
// once resolved; non-leaf targets must point at a loop header whose parent
// is |parent_offset| (its own targets are checked when that loop is
// visited). Returns false (after logging to stderr) on any violation.
bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds(
    int parent_offset, const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
    std::map<int, int>* unresolved_suspend_ids) {
  bool valid = true;
  for (const ResumeJumpTarget& target : resume_jump_targets) {
    std::map<int, int>::iterator it =
        unresolved_suspend_ids->find(target.suspend_id());
    if (it == unresolved_suspend_ids->end()) {
      PrintF(
          stderr,
          "No unresolved suspend found for resume target with suspend id %d\n",
          target.suspend_id());
      valid = false;
      continue;
    }
    int expected_target = it->second;

    if (target.is_leaf()) {
      // Leaves should have the expected target as their target.
      if (target.target_offset() != expected_target) {
        PrintF(
            stderr,
            "Expected leaf resume target for id %d to have target offset %d, "
            "but had %d\n",
            target.suspend_id(), expected_target, target.target_offset());
        valid = false;
      } else {
        // Make sure we're resuming to a Resume bytecode
        interpreter::BytecodeArrayIterator iterator(bytecode_array(),
                                                    target.target_offset());
        if (iterator.current_bytecode() != Bytecode::kResumeGenerator) {
          PrintF(stderr,
                 "Expected resume target for id %d, offset %d, to be "
                 "ResumeGenerator, but found %s\n",
                 target.suspend_id(), target.target_offset(),
                 Bytecodes::ToString(iterator.current_bytecode()));

          valid = false;
        }
      }
      // We've resolved this suspend id, so erase it to make sure we don't
      // resolve it twice.
      unresolved_suspend_ids->erase(it);
    } else {
      // Non-leaves should have a direct inner loop header as their target.
      if (!IsLoopHeader(target.target_offset())) {
        PrintF(stderr,
               "Expected non-leaf resume target for id %d to have a loop "
               "header at target offset %d\n",
               target.suspend_id(), target.target_offset());
        valid = false;
      } else {
        LoopInfo loop_info = GetLoopInfoFor(target.target_offset());
        if (loop_info.parent_offset() != parent_offset) {
          PrintF(stderr,
                 "Expected non-leaf resume target for id %d to have a direct "
                 "inner loop at target offset %d\n",
                 target.suspend_id(), target.target_offset());
          valid = false;
        }
        // If the target loop is a valid inner loop, we'll check its validity
        // when we analyze its resume targets.
      }
    }
  }
  return valid;
}
822
// Consistency check for the liveness analysis (compiled only under the
// enclosing #if — presumably DEBUG; the guard is above this view). Verifies
// that the computed per-bytecode liveness is a true fixpoint (one extra
// backwards pass changes nothing) and that the accumulator is dead at the
// target of any jump that leaves a loop or follows a loop back-edge. On
// failure, dumps the annotated bytecode to stderr and returns false.
bool BytecodeAnalysis::LivenessIsValid() {
  interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());

  // Scratch state used to snapshot a liveness set before re-running the
  // transfer function over it, so we can compare and restore on mismatch.
  BytecodeLivenessState previous_liveness(bytecode_array()->register_count(),
                                          zone());

  // Offset of the first inconsistent bytecode (-1 if everything checks out).
  // which_invalid encodes which set was wrong: 0 = in-liveness,
  // 1 = out-liveness.
  int invalid_offset = -1;
  int which_invalid = -1;

  // In-liveness of the bytecode following the current one (we walk
  // backwards, so this is the one processed in the previous iteration).
  BytecodeLivenessState* next_bytecode_in_liveness = nullptr;

  // Ensure that there are no liveness changes if we iterate one more time.
  for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
    Bytecode bytecode = iterator.current_bytecode();

    int current_offset = iterator.current_offset();

    BytecodeLiveness& liveness = liveness_map().GetLiveness(current_offset);

    // Snapshot, recompute, and compare the out-liveness.
    previous_liveness.CopyFrom(*liveness.out);

    UpdateOutLiveness(bytecode, liveness.out, next_bytecode_in_liveness,
                      iterator, bytecode_array(), liveness_map());
    // UpdateOutLiveness skips kJumpLoop, so we update it manually.
    if (bytecode == Bytecode::kJumpLoop) {
      int target_offset = iterator.GetJumpTargetOffset();
      liveness.out->Union(*liveness_map().GetInLiveness(target_offset));
    }

    if (!liveness.out->Equals(previous_liveness)) {
      // Reset the invalid liveness.
      liveness.out->CopyFrom(previous_liveness);
      invalid_offset = current_offset;
      which_invalid = 1;
      break;
    }

    // Snapshot, recompute, and compare the in-liveness (derived from the
    // just-validated out-liveness).
    previous_liveness.CopyFrom(*liveness.in);

    liveness.in->CopyFrom(*liveness.out);
    UpdateInLiveness(bytecode, liveness.in, iterator);

    if (!liveness.in->Equals(previous_liveness)) {
      // Reset the invalid liveness.
      liveness.in->CopyFrom(previous_liveness);
      invalid_offset = current_offset;
      which_invalid = 0;
      break;
    }

    next_bytecode_in_liveness = liveness.in;
  }

  // Ensure that the accumulator is not live when jumping out of a loop, or on
  // the back-edge of a loop.
  for (iterator.GoToStart(); iterator.IsValid() && invalid_offset == -1;
       ++iterator) {
    Bytecode bytecode = iterator.current_bytecode();
    int current_offset = iterator.current_offset();
    int loop_header = GetLoopOffsetFor(current_offset);

    // We only care if we're inside a loop.
    if (loop_header == -1) continue;

    // We only care about jumps.
    if (!Bytecodes::IsJump(bytecode)) continue;

    int jump_target = iterator.GetJumpTargetOffset();

    // If this is a forward jump to somewhere else in the same loop, ignore it.
    if (Bytecodes::IsForwardJump(bytecode) &&
        GetLoopOffsetFor(jump_target) == loop_header) {
      continue;
    }

    // The accumulator must be dead at the start of the target of the jump.
    if (liveness_map().GetLiveness(jump_target).in->AccumulatorIsLive()) {
      invalid_offset = jump_target;
      which_invalid = 0;
      break;
    }
  }

  if (invalid_offset != -1) {
    OFStream of(stderr);
    of << "Invalid liveness:" << std::endl;

    // Dump the bytecode, annotated with the liveness and marking loops.

    int loop_indent = 0;

    interpreter::BytecodeArrayIterator forward_iterator(bytecode_array());
    for (; !forward_iterator.done(); forward_iterator.Advance()) {
      int current_offset = forward_iterator.current_offset();
      const BitVector& in_liveness =
          GetInLivenessFor(current_offset)->bit_vector();
      const BitVector& out_liveness =
          GetOutLivenessFor(current_offset)->bit_vector();

      // One column per register: 'L' if live-in, '.' if dead.
      for (int i = 0; i < in_liveness.length(); ++i) {
        of << (in_liveness.Contains(i) ? 'L' : '.');
      }

      of << " | ";

      // Same for the live-out set.
      for (int i = 0; i < out_liveness.length(); ++i) {
        of << (out_liveness.Contains(i) ? 'L' : '.');
      }

      of << " : " << current_offset << " : ";

      // Draw loop back edges by indenting everything between loop headers and
      // jump loop instructions.
      if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
        loop_indent--;
      }
      for (int i = 0; i < loop_indent; ++i) {
        of << "| ";
      }
      if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
        of << "`-";
      } else if (IsLoopHeader(current_offset)) {
        of << ".>";
        loop_indent++;
      }
      forward_iterator.PrintTo(of);
      if (Bytecodes::IsJump(forward_iterator.current_bytecode())) {
        of << " (@" << forward_iterator.GetJumpTargetOffset() << ")";
      }
      of << std::endl;

      if (current_offset == invalid_offset) {
        // Underline the invalid liveness.
        if (which_invalid == 0) {
          // In-liveness was wrong: carets under the in column, spaces under
          // the out column (the +3 covers the " | " separator).
          for (int i = 0; i < in_liveness.length(); ++i) {
            of << '^';
          }
          for (int i = 0; i < out_liveness.length() + 3; ++i) {
            of << ' ';
          }
        } else {
          // Out-liveness was wrong: spaces under the in column (+ separator),
          // carets under the out column.
          for (int i = 0; i < in_liveness.length() + 3; ++i) {
            of << ' ';
          }
          for (int i = 0; i < out_liveness.length(); ++i) {
            of << '^';
          }
        }

        // Make sure to draw the loop indentation marks on this additional line.
        of << " : " << current_offset << " : ";
        for (int i = 0; i < loop_indent; ++i) {
          of << "| ";
        }

        of << std::endl;
      }
    }
  }

  return invalid_offset == -1;
}
985 #endif
986
987 } // namespace compiler
988 } // namespace internal
989 } // namespace v8
990