/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "jit/RegisterAllocator.h"
8 
9 using namespace js;
10 using namespace js::jit;
11 
12 #ifdef DEBUG
// Snapshot the graph's pre-allocation state: every phi's and instruction's
// definitions, temps and uses, plus a vreg -> defining-LDefinition map
// (virtualRegisters). check() later compares the allocator's output against
// this snapshot. Returns false only on OOM.
bool AllocationIntegrityState::record() {
  // Ignore repeated record() calls.
  if (!instructions.empty()) {
    return true;
  }

  if (!instructions.appendN(InstructionInfo(), graph.numInstructions())) {
    return false;
  }

  if (!virtualRegisters.appendN((LDefinition*)nullptr,
                                graph.numVirtualRegisters())) {
    return false;
  }

  if (!blocks.reserve(graph.numBlocks())) {
    return false;
  }
  for (size_t i = 0; i < graph.numBlocks(); i++) {
    blocks.infallibleAppend(BlockInfo());
    LBlock* block = graph.getBlock(i);
    MOZ_ASSERT(block->mir()->id() == i);

    BlockInfo& blockInfo = blocks[i];
    if (!blockInfo.phis.reserve(block->numPhis())) {
      return false;
    }

    // Record each phi's single definition and all of its operands.
    for (size_t j = 0; j < block->numPhis(); j++) {
      blockInfo.phis.infallibleAppend(InstructionInfo());
      InstructionInfo& info = blockInfo.phis[j];
      LPhi* phi = block->getPhi(j);
      MOZ_ASSERT(phi->numDefs() == 1);
      uint32_t vreg = phi->getDef(0)->virtualRegister();
      virtualRegisters[vreg] = phi->getDef(0);
      if (!info.outputs.append(*phi->getDef(0))) {
        return false;
      }
      for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
        if (!info.inputs.append(*phi->getOperand(k))) {
          return false;
        }
      }
    }

    // Record temps, defs and inputs for every instruction. Bogus temps and
    // defs are appended as well, to keep indexes aligned with the
    // instruction's own temp/def lists, but they do not define a vreg.
    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;
      InstructionInfo& info = instructions[ins->id()];

      for (size_t k = 0; k < ins->numTemps(); k++) {
        if (!ins->getTemp(k)->isBogusTemp()) {
          uint32_t vreg = ins->getTemp(k)->virtualRegister();
          virtualRegisters[vreg] = ins->getTemp(k);
        }
        if (!info.temps.append(*ins->getTemp(k))) {
          return false;
        }
      }
      for (size_t k = 0; k < ins->numDefs(); k++) {
        if (!ins->getDef(k)->isBogusTemp()) {
          uint32_t vreg = ins->getDef(k)->virtualRegister();
          virtualRegisters[vreg] = ins->getDef(k);
        }
        if (!info.outputs.append(*ins->getDef(k))) {
          return false;
        }
      }
      for (LInstruction::InputIterator alloc(*ins); alloc.more();
           alloc.next()) {
        if (!info.inputs.append(**alloc)) {
          return false;
        }
      }
    }
  }

  return true;
}
92 
// Verify the allocator's output against the snapshot taken by record():
// no unresolved LUse allocations remain, MUST_REUSE_INPUT constraints are
// honored, safepoints track the values live across them, and every physical
// location read by an instruction traces back (through moves and phis) to
// its virtual register's definition. Returns false only on OOM; actual
// integrity violations fire MOZ_ASSERTs.
bool AllocationIntegrityState::check() {
  MOZ_ASSERT(!instructions.empty());

#  ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_RegAlloc)) {
    dump();
  }
#  endif
  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);

    // Check that all instruction inputs and outputs have been assigned an
    // allocation.
    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;

      for (LInstruction::InputIterator alloc(*ins); alloc.more();
           alloc.next()) {
        MOZ_ASSERT(!alloc->isUse());
      }

      for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition* def = ins->getDef(i);
        MOZ_ASSERT(!def->output()->isUse());

        // A def that reuses an input must share that operand's allocation.
        LDefinition oldDef = instructions[ins->id()].outputs[i];
        MOZ_ASSERT_IF(
            oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
            *def->output() == *ins->getOperand(oldDef.getReusedInput()));
      }

      for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* temp = ins->getTemp(i);
        MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());

        LDefinition oldTemp = instructions[ins->id()].temps[i];
        MOZ_ASSERT_IF(
            oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
            *temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
      }
    }
  }

  // Check that the register assignment and move groups preserve the original
  // semantics of the virtual registers. Each virtual register has a single
  // write (owing to the SSA representation), but the allocation may move the
  // written value around between registers and memory locations along
  // different paths through the script.
  //
  // For each use of an allocation, follow the physical value which is read
  // backward through the script, along all paths to the value's virtual
  // register's definition.
  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);
    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;
      const InstructionInfo& info = instructions[ins->id()];

      LSafepoint* safepoint = ins->safepoint();
      if (safepoint) {
        // Non-bogus temps must be tracked by the safepoint, and calls must
        // have no live registers across them.
        for (size_t i = 0; i < ins->numTemps(); i++) {
          if (ins->getTemp(i)->isBogusTemp()) {
            continue;
          }
          uint32_t vreg = info.temps[i].virtualRegister();
          LAllocation* alloc = ins->getTemp(i)->output();
          checkSafepointAllocation(ins, vreg, *alloc);
        }
        MOZ_ASSERT_IF(ins->isCall(), safepoint->liveRegs().emptyFloat() &&
                                         safepoint->liveRegs().emptyGeneral());
      }

      size_t inputIndex = 0;
      for (LInstruction::InputIterator alloc(*ins); alloc.more();
           alloc.next()) {
        LAllocation oldInput = info.inputs[inputIndex++];
        if (!oldInput.isUse()) {
          continue;
        }

        uint32_t vreg = oldInput.toUse()->virtualRegister();

        if (safepoint && !oldInput.toUse()->usedAtStart()) {
          checkSafepointAllocation(ins, vreg, **alloc);
        }

        // Start checking at the previous instruction, in case this
        // instruction reuses its input register for an output.
        LInstructionReverseIterator riter = block->rbegin(ins);
        riter++;
        if (!checkIntegrity(block, *riter, vreg, **alloc)) {
          return false;
        }

        // checkIntegrity() queues predecessor blocks via addPredecessor();
        // drain them here, scanning each from its last instruction.
        while (!worklist.empty()) {
          IntegrityItem item = worklist.popCopy();
          if (!checkIntegrity(item.block, *item.block->rbegin(), item.vreg,
                              item.alloc)) {
            return false;
          }
        }
      }
    }
  }

  return true;
}
202 
// Walk backward from |ins| to the top of |block|, verifying that physical
// location |alloc| still carries the value of |vreg|: assignments in move
// groups are followed (rewriting |alloc|), any other instruction writing to
// |alloc| must be the vreg's own definition (asserted), and the walk either
// terminates at that definition or continues into phis/predecessors via the
// shared worklist. Returns false only on OOM while queueing predecessors.
bool AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins,
                                              uint32_t vreg,
                                              LAllocation alloc) {
  for (LInstructionReverseIterator iter(block->rbegin(ins));
       iter != block->rend(); iter++) {
    ins = *iter;

    // Follow values through assignments in move groups. All assignments in
    // a move group are considered to happen simultaneously, so stop after
    // the first matching move is found.
    if (ins->isMoveGroup()) {
      LMoveGroup* group = ins->toMoveGroup();
      for (int i = group->numMoves() - 1; i >= 0; i--) {
        if (group->getMove(i).to() == alloc) {
          alloc = group->getMove(i).from();
          break;
        }
      }
    }

    const InstructionInfo& info = instructions[ins->id()];

    // Make sure the physical location being tracked is not clobbered by
    // another instruction, and that if the originating vreg definition is
    // found that it is writing to the tracked location.

    for (size_t i = 0; i < ins->numDefs(); i++) {
      LDefinition* def = ins->getDef(i);
      if (def->isBogusTemp()) {
        continue;
      }
      if (info.outputs[i].virtualRegister() == vreg) {
        MOZ_ASSERT(*def->output() == alloc);

        // Found the original definition, done scanning.
        return true;
      } else {
        MOZ_ASSERT(*def->output() != alloc);
      }
    }

    // Temps must not clobber the tracked location either.
    for (size_t i = 0; i < ins->numTemps(); i++) {
      LDefinition* temp = ins->getTemp(i);
      if (!temp->isBogusTemp()) {
        MOZ_ASSERT(*temp->output() != alloc);
      }
    }

    // The tracked value is live across this instruction, so its safepoint
    // (if any) must account for it.
    if (ins->safepoint()) {
      checkSafepointAllocation(ins, vreg, alloc);
    }
  }

  // Phis are effectless, but change the vreg we are tracking. Check if there
  // is one which produced this vreg. We need to follow back through the phi
  // inputs as it is not guaranteed the register allocator filled in physical
  // allocations for the inputs and outputs of the phis.
  for (size_t i = 0; i < block->numPhis(); i++) {
    const InstructionInfo& info = blocks[block->mir()->id()].phis[i];
    LPhi* phi = block->getPhi(i);
    if (info.outputs[0].virtualRegister() == vreg) {
      for (size_t j = 0, jend = phi->numOperands(); j < jend; j++) {
        uint32_t newvreg = info.inputs[j].toUse()->virtualRegister();
        LBlock* predecessor = block->mir()->getPredecessor(j)->lir();
        if (!addPredecessor(predecessor, newvreg, alloc)) {
          return false;
        }
      }
      return true;
    }
  }

  // No phi which defined the vreg we are tracking, follow back through all
  // predecessors with the existing vreg.
  for (size_t i = 0, iend = block->mir()->numPredecessors(); i < iend; i++) {
    LBlock* predecessor = block->mir()->getPredecessor(i)->lir();
    if (!addPredecessor(predecessor, vreg, alloc)) {
      return false;
    }
  }

  return true;
}
286 
// Assert that |alloc|, which holds the value of |vreg| live across |ins|'s
// safepoint, is recorded in the safepoint's tracking sets as required by the
// vreg's definition type (GC pointer, slots pointer, boxed value, ...).
void AllocationIntegrityState::checkSafepointAllocation(LInstruction* ins,
                                                        uint32_t vreg,
                                                        LAllocation alloc) {
  LSafepoint* safepoint = ins->safepoint();
  MOZ_ASSERT(safepoint);

  // Register allocations at a call site are not checked against the
  // safepoint's sets.
  if (ins->isCall() && alloc.isRegister()) {
    return;
  }

  if (alloc.isRegister()) {
    MOZ_ASSERT(safepoint->liveRegs().has(alloc.toRegister()));
  }

  // The |this| argument slot is implicitly included in all safepoints.
  if (alloc.isArgument() &&
      alloc.toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value)) {
    return;
  }

  // A null entry in virtualRegisters means no definition was recorded for
  // this vreg; treat it as GENERAL (no tracking requirement).
  LDefinition::Type type = virtualRegisters[vreg]
                               ? virtualRegisters[vreg]->type()
                               : LDefinition::GENERAL;

  switch (type) {
    case LDefinition::OBJECT:
      MOZ_ASSERT(safepoint->hasGcPointer(alloc));
      break;
    case LDefinition::STACKRESULTS:
      MOZ_ASSERT(safepoint->hasAllGcPointersFromStackArea(alloc));
      break;
    case LDefinition::SLOTS:
      MOZ_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc));
      break;
#  ifdef JS_NUNBOX32
    // Do not assert that safepoint information for nunbox types is complete,
    // as if a vreg for a value's components are copied in multiple places
    // then the safepoint information may not reflect all copies. All copies
    // of payloads must be reflected, however, for generational GC.
    case LDefinition::TYPE:
      break;
    case LDefinition::PAYLOAD:
      MOZ_ASSERT(safepoint->hasNunboxPayload(alloc));
      break;
#  else
    case LDefinition::BOX:
      MOZ_ASSERT(safepoint->hasBoxedValue(alloc));
      break;
#  endif
    default:
      break;
  }
}
340 
addPredecessor(LBlock * block,uint32_t vreg,LAllocation alloc)341 bool AllocationIntegrityState::addPredecessor(LBlock* block, uint32_t vreg,
342                                               LAllocation alloc) {
343   // There is no need to reanalyze if we have already seen this predecessor.
344   // We share the seen allocations across analysis of each use, as there will
345   // likely be common ground between different uses of the same vreg.
346   IntegrityItem item;
347   item.block = block;
348   item.vreg = vreg;
349   item.alloc = alloc;
350   item.index = seen.count();
351 
352   IntegrityItemSet::AddPtr p = seen.lookupForAdd(item);
353   if (p) {
354     return true;
355   }
356   if (!seen.add(p, item)) {
357     return false;
358   }
359 
360   return worklist.append(item);
361 }
362 
// Spew the recorded pre-allocation state interleaved with the current
// (post-allocation) allocations, followed by the intermediate allocations
// discovered by check(). Compiled to a no-op without JS_JITSPEW.
void AllocationIntegrityState::dump() {
#  ifdef JS_JITSPEW
  fprintf(stderr, "Register Allocation Integrity State:\n");

  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);
    MBasicBlock* mir = block->mir();

    fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
    for (size_t i = 0; i < mir->numSuccessors(); i++) {
      fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
    }
    fprintf(stderr, "\n");

    for (size_t i = 0; i < block->numPhis(); i++) {
      const InstructionInfo& info = blocks[blockIndex].phis[i];
      LPhi* phi = block->getPhi(i);
      // All phis in a block share one input/output position range, spanning
      // the first through the last phi's ids.
      CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
      CodePosition output(block->getPhi(block->numPhis() - 1)->id(),
                          CodePosition::OUTPUT);

      fprintf(stderr, "[%u,%u Phi] [def %s] ", input.bits(), output.bits(),
              phi->getDef(0)->toString().get());
      for (size_t j = 0; j < phi->numOperands(); j++) {
        fprintf(stderr, " [use %s]", info.inputs[j].toString().get());
      }
      fprintf(stderr, "\n");
    }

    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;
      const InstructionInfo& info = instructions[ins->id()];

      CodePosition input(ins->id(), CodePosition::INPUT);
      CodePosition output(ins->id(), CodePosition::OUTPUT);

      fprintf(stderr, "[");
      if (input != CodePosition::MIN) {
        fprintf(stderr, "%u,%u ", input.bits(), output.bits());
      }
      fprintf(stderr, "%s]", ins->opName());

      // Move groups have no recorded info; print their moves and move on.
      if (ins->isMoveGroup()) {
        LMoveGroup* group = ins->toMoveGroup();
        for (int i = group->numMoves() - 1; i >= 0; i--) {
          fprintf(stderr, " [%s -> %s]",
                  group->getMove(i).from().toString().get(),
                  group->getMove(i).to().toString().get());
        }
        fprintf(stderr, "\n");
        continue;
      }

      for (size_t i = 0; i < ins->numDefs(); i++) {
        fprintf(stderr, " [def %s]", ins->getDef(i)->toString().get());
      }

      for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* temp = ins->getTemp(i);
        if (!temp->isBogusTemp()) {
          fprintf(stderr, " [temp v%u %s]", info.temps[i].virtualRegister(),
                  temp->toString().get());
        }
      }

      // Print the recorded (pre-allocation) use next to the assigned
      // allocation, unless the allocation is a constant.
      size_t index = 0;
      for (LInstruction::InputIterator alloc(*ins); alloc.more();
           alloc.next()) {
        fprintf(stderr, " [use %s", info.inputs[index++].toString().get());
        if (!alloc->isConstant()) {
          fprintf(stderr, " %s", alloc->toString().get());
        }
        fprintf(stderr, "]");
      }

      fprintf(stderr, "\n");
    }
  }

  // Print discovered allocations at the ends of blocks, in the order they
  // were discovered.

  Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
  if (!seenOrdered.appendN(IntegrityItem(), seen.count())) {
    fprintf(stderr, "OOM while dumping allocations\n");
    return;
  }

  // The hash set enumerates in arbitrary order; reorder by discovery index.
  for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
    IntegrityItem item = iter.front();
    seenOrdered[item.index] = item;
  }

  if (!seenOrdered.empty()) {
    fprintf(stderr, "Intermediate Allocations:\n");

    for (size_t i = 0; i < seenOrdered.length(); i++) {
      IntegrityItem item = seenOrdered[i];
      fprintf(stderr, "  block %u reg v%u alloc %s\n", item.block->mir()->id(),
              item.vreg, item.alloc.toString().get());
    }
  }

  fprintf(stderr, "\n");
#  endif
}
470 #endif  // DEBUG
471 
// Extremal code positions: MIN is built from bit pattern 0 and MAX from
// UINT_MAX, bracketing every real instruction position.
const CodePosition CodePosition::MAX(UINT_MAX);
const CodePosition CodePosition::MIN(0);
474 
// Build the instruction-id -> LNode map (insData) and record each block's
// entry and exit code positions. Returns false only on OOM.
bool RegisterAllocator::init() {
  if (!insData.init(mir, graph.numInstructions())) {
    return false;
  }

  if (!entryPositions.reserve(graph.numBlocks()) ||
      !exitPositions.reserve(graph.numBlocks())) {
    return false;
  }

  for (size_t i = 0; i < graph.numBlocks(); i++) {
    LBlock* block = graph.getBlock(i);
    // Index every instruction and phi by its id.
    for (LInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      insData[ins->id()] = *ins;
    }
    for (size_t j = 0; j < block->numPhis(); j++) {
      LPhi* phi = block->getPhi(j);
      insData[phi->id()] = phi;
    }

    // A block's entry is the input position of its first phi if it has any,
    // otherwise of its first numbered instruction; its exit is the output
    // position of its last numbered instruction.
    CodePosition entry =
        block->numPhis() != 0
            ? CodePosition(block->getPhi(0)->id(), CodePosition::INPUT)
            : inputOf(block->firstInstructionWithId());
    CodePosition exit = outputOf(block->lastInstructionWithId());

    MOZ_ASSERT(block->mir()->id() == i);
    entryPositions.infallibleAppend(entry);
    exitPositions.infallibleAppend(exit);
  }

  return true;
}
509 
getInputMoveGroup(LInstruction * ins)510 LMoveGroup* RegisterAllocator::getInputMoveGroup(LInstruction* ins) {
511   MOZ_ASSERT(!ins->fixReuseMoves());
512   if (ins->inputMoves()) {
513     return ins->inputMoves();
514   }
515 
516   LMoveGroup* moves = LMoveGroup::New(alloc());
517   ins->setInputMoves(moves);
518   ins->block()->insertBefore(ins, moves);
519   return moves;
520 }
521 
getFixReuseMoveGroup(LInstruction * ins)522 LMoveGroup* RegisterAllocator::getFixReuseMoveGroup(LInstruction* ins) {
523   if (ins->fixReuseMoves()) {
524     return ins->fixReuseMoves();
525   }
526 
527   LMoveGroup* moves = LMoveGroup::New(alloc());
528   ins->setFixReuseMoves(moves);
529   ins->block()->insertBefore(ins, moves);
530   return moves;
531 }
532 
getMoveGroupAfter(LInstruction * ins)533 LMoveGroup* RegisterAllocator::getMoveGroupAfter(LInstruction* ins) {
534   if (ins->movesAfter()) {
535     return ins->movesAfter();
536   }
537 
538   LMoveGroup* moves = LMoveGroup::New(alloc());
539   ins->setMovesAfter(moves);
540 
541   ins->block()->insertAfter(ins, moves);
542   return moves;
543 }
544 
// Spew the LIR graph: every block with its successors, phis, move groups
// and instructions, with defs, temps and uses. Compiled to a no-op without
// JS_JITSPEW.
void RegisterAllocator::dumpInstructions() {
#ifdef JS_JITSPEW
  fprintf(stderr, "Instructions:\n");

  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);
    MBasicBlock* mir = block->mir();

    fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
    for (size_t i = 0; i < mir->numSuccessors(); i++) {
      fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
    }
    fprintf(stderr, "\n");

    for (size_t i = 0; i < block->numPhis(); i++) {
      LPhi* phi = block->getPhi(i);

      fprintf(stderr, "[%u,%u Phi] [def %s]", inputOf(phi).bits(),
              outputOf(phi).bits(), phi->getDef(0)->toString().get());
      for (size_t j = 0; j < phi->numOperands(); j++) {
        fprintf(stderr, " [use %s]", phi->getOperand(j)->toString().get());
      }
      fprintf(stderr, "\n");
    }

    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;

      // Instructions with id 0 have no code position range to print.
      fprintf(stderr, "[");
      if (ins->id() != 0) {
        fprintf(stderr, "%u,%u ", inputOf(ins).bits(), outputOf(ins).bits());
      }
      fprintf(stderr, "%s]", ins->opName());

      if (ins->isMoveGroup()) {
        LMoveGroup* group = ins->toMoveGroup();
        for (int i = group->numMoves() - 1; i >= 0; i--) {
          // Use two printfs, as LAllocation::toString is not reentrant.
          fprintf(stderr, " [%s", group->getMove(i).from().toString().get());
          fprintf(stderr, " -> %s]", group->getMove(i).to().toString().get());
        }
        fprintf(stderr, "\n");
        continue;
      }

      for (size_t i = 0; i < ins->numDefs(); i++) {
        fprintf(stderr, " [def %s]", ins->getDef(i)->toString().get());
      }

      for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* temp = ins->getTemp(i);
        if (!temp->isBogusTemp()) {
          fprintf(stderr, " [temp %s]", temp->toString().get());
        }
      }

      for (LInstruction::InputIterator alloc(*ins); alloc.more();
           alloc.next()) {
        if (!alloc->isBogus()) {
          fprintf(stderr, " [use %s]", alloc->toString().get());
        }
      }

      fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
#endif  // JS_JITSPEW
}
615