1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/RegisterAllocator.h"
8
9 using namespace js;
10 using namespace js::jit;
11
12 #ifdef DEBUG
// Snapshot the allocation-relevant state of the LIR graph (the defs, temps
// and uses of every phi and instruction) before the register allocator
// rewrites allocations in place. check() later validates the allocator's
// output against this snapshot. Returns false only on OOM.
bool AllocationIntegrityState::record() {
  // Ignore repeated record() calls.
  if (!instructions.empty()) {
    return true;
  }

  if (!instructions.appendN(InstructionInfo(), graph.numInstructions())) {
    return false;
  }

  // virtualRegisters maps each vreg to its defining LDefinition; entries
  // stay null for vregs without a recorded definition.
  if (!virtualRegisters.appendN((LDefinition*)nullptr,
                                graph.numVirtualRegisters())) {
    return false;
  }

  if (!blocks.reserve(graph.numBlocks())) {
    return false;
  }
  for (size_t i = 0; i < graph.numBlocks(); i++) {
    blocks.infallibleAppend(BlockInfo());
    LBlock* block = graph.getBlock(i);
    MOZ_ASSERT(block->mir()->id() == i);

    BlockInfo& blockInfo = blocks[i];
    if (!blockInfo.phis.reserve(block->numPhis())) {
      return false;
    }

    // Record the single def and all operands of each phi.
    for (size_t j = 0; j < block->numPhis(); j++) {
      blockInfo.phis.infallibleAppend(InstructionInfo());
      InstructionInfo& info = blockInfo.phis[j];
      LPhi* phi = block->getPhi(j);
      MOZ_ASSERT(phi->numDefs() == 1);
      uint32_t vreg = phi->getDef(0)->virtualRegister();
      virtualRegisters[vreg] = phi->getDef(0);
      if (!info.outputs.append(*phi->getDef(0))) {
        return false;
      }
      for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
        if (!info.inputs.append(*phi->getOperand(k))) {
          return false;
        }
      }
    }

    // Record the temps, defs and inputs of each instruction. Bogus temps
    // and defs are appended too, so indexes stay aligned with the
    // instruction, but they do not define a virtual register.
    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;
      InstructionInfo& info = instructions[ins->id()];

      for (size_t k = 0; k < ins->numTemps(); k++) {
        if (!ins->getTemp(k)->isBogusTemp()) {
          uint32_t vreg = ins->getTemp(k)->virtualRegister();
          virtualRegisters[vreg] = ins->getTemp(k);
        }
        if (!info.temps.append(*ins->getTemp(k))) {
          return false;
        }
      }
      for (size_t k = 0; k < ins->numDefs(); k++) {
        if (!ins->getDef(k)->isBogusTemp()) {
          uint32_t vreg = ins->getDef(k)->virtualRegister();
          virtualRegisters[vreg] = ins->getDef(k);
        }
        if (!info.outputs.append(*ins->getDef(k))) {
          return false;
        }
      }
      for (LInstruction::InputIterator alloc(*ins); alloc.more();
           alloc.next()) {
        if (!info.inputs.append(**alloc)) {
          return false;
        }
      }
    }
  }

  return true;
}
92
// Validate the register allocator's output against the state captured by
// record(). Integrity violations trip assertions; the function only returns
// false on OOM while growing the worklist.
bool AllocationIntegrityState::check() {
  MOZ_ASSERT(!instructions.empty());

# ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_RegAlloc)) {
    dump();
  }
# endif
  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);

    // Check that all instruction inputs and outputs have been assigned an
    // allocation.
    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;

      // After allocation, no operand may still be an unresolved use.
      for (LInstruction::InputIterator alloc(*ins); alloc.more();
           alloc.next()) {
        MOZ_ASSERT(!alloc->isUse());
      }

      for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition* def = ins->getDef(i);
        MOZ_ASSERT(!def->output()->isUse());

        // A MUST_REUSE_INPUT def must have received the same allocation as
        // the operand it reuses (compared against the recorded policy).
        LDefinition oldDef = instructions[ins->id()].outputs[i];
        MOZ_ASSERT_IF(
            oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
            *def->output() == *ins->getOperand(oldDef.getReusedInput()));
      }

      for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* temp = ins->getTemp(i);
        // Non-bogus temps must live in registers.
        MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());

        LDefinition oldTemp = instructions[ins->id()].temps[i];
        MOZ_ASSERT_IF(
            oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
            *temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
      }
    }
  }

  // Check that the register assignment and move groups preserve the original
  // semantics of the virtual registers. Each virtual register has a single
  // write (owing to the SSA representation), but the allocation may move the
  // written value around between registers and memory locations along
  // different paths through the script.
  //
  // For each use of an allocation, follow the physical value which is read
  // backward through the script, along all paths to the value's virtual
  // register's definition.
  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);
    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;
      const InstructionInfo& info = instructions[ins->id()];

      LSafepoint* safepoint = ins->safepoint();
      if (safepoint) {
        // Temps live across the safepoint must be reflected in it.
        for (size_t i = 0; i < ins->numTemps(); i++) {
          if (ins->getTemp(i)->isBogusTemp()) {
            continue;
          }
          uint32_t vreg = info.temps[i].virtualRegister();
          LAllocation* alloc = ins->getTemp(i)->output();
          checkSafepointAllocation(ins, vreg, *alloc);
        }
        // Calls must not claim any live registers in their safepoint.
        MOZ_ASSERT_IF(ins->isCall(), safepoint->liveRegs().emptyFloat() &&
                                         safepoint->liveRegs().emptyGeneral());
      }

      size_t inputIndex = 0;
      for (LInstruction::InputIterator alloc(*ins); alloc.more();
           inputIndex++, alloc.next()) {
        LAllocation oldInput = info.inputs[inputIndex];
        if (!oldInput.isUse()) {
          continue;
        }

        uint32_t vreg = oldInput.toUse()->virtualRegister();

        // Inputs not used at start survive into the safepoint and must be
        // tracked there.
        if (safepoint && !oldInput.toUse()->usedAtStart()) {
          checkSafepointAllocation(ins, vreg, **alloc);
        }

        // Temps must never alias inputs (even at-start uses) unless explicitly
        // requested.
        for (size_t i = 0; i < ins->numTemps(); i++) {
          if (ins->getTemp(i)->isBogusTemp()) {
            continue;
          }
          LAllocation* tempAlloc = ins->getTemp(i)->output();

          // Fixed uses and fixed temps are allowed to alias.
          if (oldInput.toUse()->isFixedRegister() && info.temps[i].isFixed()) {
            continue;
          }

          // MUST_REUSE_INPUT temps will alias their input.
          if (info.temps[i].policy() == LDefinition::MUST_REUSE_INPUT &&
              info.temps[i].getReusedInput() == inputIndex) {
            continue;
          }

          MOZ_ASSERT(!tempAlloc->aliases(**alloc));
        }

        // Start checking at the previous instruction, in case this
        // instruction reuses its input register for an output.
        LInstructionReverseIterator riter = block->rbegin(ins);
        riter++;
        if (!checkIntegrity(block, *riter, vreg, **alloc)) {
          return false;
        }

        // Drain the predecessor worklist that checkIntegrity may have
        // populated, checking each queued (block, vreg, alloc) triple from
        // the end of its block.
        while (!worklist.empty()) {
          IntegrityItem item = worklist.popCopy();
          if (!checkIntegrity(item.block, *item.block->rbegin(), item.vreg,
                              item.alloc)) {
            return false;
          }
        }
      }
    }
  }

  return true;
}
224
// Walk backwards from |ins| through |block| verifying that |alloc| holds the
// value of |vreg| at every point, following the value through move groups,
// until the vreg's definition (or a phi producing it) is found. Blocks that
// must also be scanned are queued on the worklist via addPredecessor().
// Integrity violations assert; false is returned only on OOM.
bool AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins,
                                              uint32_t vreg,
                                              LAllocation alloc) {
  for (LInstructionReverseIterator iter(block->rbegin(ins));
       iter != block->rend(); iter++) {
    ins = *iter;

    // Follow values through assignments in move groups. All assignments in
    // a move group are considered to happen simultaneously, so stop after
    // the first matching move is found.
    if (ins->isMoveGroup()) {
      LMoveGroup* group = ins->toMoveGroup();
      for (int i = group->numMoves() - 1; i >= 0; i--) {
        if (group->getMove(i).to() == alloc) {
          alloc = group->getMove(i).from();
          break;
        }
      }
    }

    const InstructionInfo& info = instructions[ins->id()];

    // Make sure the physical location being tracked is not clobbered by
    // another instruction, and that if the originating vreg definition is
    // found that it is writing to the tracked location.

    for (size_t i = 0; i < ins->numDefs(); i++) {
      LDefinition* def = ins->getDef(i);
      if (def->isBogusTemp()) {
        continue;
      }
      if (info.outputs[i].virtualRegister() == vreg) {
# ifdef JS_JITSPEW
        // If the following assertion is about to fail, print some useful info.
        if (!(*def->output() == alloc) && JitSpewEnabled(JitSpew_RegAlloc)) {
          CodePosition input(ins->id(), CodePosition::INPUT);
          CodePosition output(ins->id(), CodePosition::OUTPUT);
          JitSpew(JitSpew_RegAlloc,
                  "Instruction at %u-%u, output number %u:", input.bits(),
                  output.bits(), unsigned(i));
          JitSpew(JitSpew_RegAlloc,
                  "  Error: conflicting allocations: %s vs %s",
                  (*def->output()).toString().get(), alloc.toString().get());
        }
# endif
        MOZ_ASSERT(*def->output() == alloc);

        // Found the original definition, done scanning.
        return true;
      } else {
        // A different vreg's def must not clobber the tracked location.
        MOZ_ASSERT(*def->output() != alloc);
      }
    }

    // Temps must not clobber the tracked location either.
    for (size_t i = 0; i < ins->numTemps(); i++) {
      LDefinition* temp = ins->getTemp(i);
      if (!temp->isBogusTemp()) {
        MOZ_ASSERT(*temp->output() != alloc);
      }
    }

    // The value is live across this instruction, so its safepoint (if any)
    // must account for it.
    if (ins->safepoint()) {
      checkSafepointAllocation(ins, vreg, alloc);
    }
  }

  // Phis are effectless, but change the vreg we are tracking. Check if there
  // is one which produced this vreg. We need to follow back through the phi
  // inputs as it is not guaranteed the register allocator filled in physical
  // allocations for the inputs and outputs of the phis.
  for (size_t i = 0; i < block->numPhis(); i++) {
    const InstructionInfo& info = blocks[block->mir()->id()].phis[i];
    LPhi* phi = block->getPhi(i);
    if (info.outputs[0].virtualRegister() == vreg) {
      for (size_t j = 0, jend = phi->numOperands(); j < jend; j++) {
        uint32_t newvreg = info.inputs[j].toUse()->virtualRegister();
        LBlock* predecessor = block->mir()->getPredecessor(j)->lir();
        if (!addPredecessor(predecessor, newvreg, alloc)) {
          return false;
        }
      }
      return true;
    }
  }

  // No phi which defined the vreg we are tracking, follow back through all
  // predecessors with the existing vreg.
  for (size_t i = 0, iend = block->mir()->numPredecessors(); i < iend; i++) {
    LBlock* predecessor = block->mir()->getPredecessor(i)->lir();
    if (!addPredecessor(predecessor, vreg, alloc)) {
      return false;
    }
  }

  return true;
}
321
// Assert that |alloc|, which holds the value of |vreg| across |ins|, is
// correctly reflected in the instruction's safepoint according to the
// vreg's recorded definition type.
void AllocationIntegrityState::checkSafepointAllocation(LInstruction* ins,
                                                        uint32_t vreg,
                                                        LAllocation alloc) {
  LSafepoint* safepoint = ins->safepoint();
  MOZ_ASSERT(safepoint);

  // Register contents are not tracked across calls; nothing to check.
  if (ins->isCall() && alloc.isRegister()) {
    return;
  }

  if (alloc.isRegister()) {
    MOZ_ASSERT(safepoint->liveRegs().has(alloc.toRegister()));
  }

  // The |this| argument slot is implicitly included in all safepoints.
  if (alloc.isArgument() &&
      alloc.toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value)) {
    return;
  }

  // Vregs without a recorded definition are treated as GENERAL, which needs
  // no safepoint entry (see default case below).
  LDefinition::Type type = virtualRegisters[vreg]
                               ? virtualRegisters[vreg]->type()
                               : LDefinition::GENERAL;

  switch (type) {
    case LDefinition::OBJECT:
      MOZ_ASSERT(safepoint->hasGcPointer(alloc));
      break;
    case LDefinition::STACKRESULTS:
      MOZ_ASSERT(safepoint->hasAllGcPointersFromStackArea(alloc));
      break;
    case LDefinition::SLOTS:
      MOZ_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc));
      break;
# ifdef JS_NUNBOX32
    // Do not assert that safepoint information for nunbox types is complete,
    // as if a vreg for a value's components are copied in multiple places
    // then the safepoint information may not reflect all copies. All copies
    // of payloads must be reflected, however, for generational GC.
    case LDefinition::TYPE:
      break;
    case LDefinition::PAYLOAD:
      MOZ_ASSERT(safepoint->hasNunboxPayload(alloc));
      break;
# else
    case LDefinition::BOX:
      MOZ_ASSERT(safepoint->hasBoxedValue(alloc));
      break;
# endif
    default:
      break;
  }
}
375
addPredecessor(LBlock * block,uint32_t vreg,LAllocation alloc)376 bool AllocationIntegrityState::addPredecessor(LBlock* block, uint32_t vreg,
377 LAllocation alloc) {
378 // There is no need to reanalyze if we have already seen this predecessor.
379 // We share the seen allocations across analysis of each use, as there will
380 // likely be common ground between different uses of the same vreg.
381 IntegrityItem item;
382 item.block = block;
383 item.vreg = vreg;
384 item.alloc = alloc;
385 item.index = seen.count();
386
387 IntegrityItemSet::AddPtr p = seen.lookupForAdd(item);
388 if (p) {
389 return true;
390 }
391 if (!seen.add(p, item)) {
392 return false;
393 }
394
395 return worklist.append(item);
396 }
397
dump()398 void AllocationIntegrityState::dump() {
399 # ifdef JS_JITSPEW
400 JitSpewCont(JitSpew_RegAlloc, "\n");
401 JitSpew(JitSpew_RegAlloc, "Register Allocation Integrity State:");
402
403 for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
404 LBlock* block = graph.getBlock(blockIndex);
405 MBasicBlock* mir = block->mir();
406
407 JitSpewHeader(JitSpew_RegAlloc);
408 JitSpewCont(JitSpew_RegAlloc, " Block %lu",
409 static_cast<unsigned long>(blockIndex));
410 for (size_t i = 0; i < mir->numSuccessors(); i++) {
411 JitSpewCont(JitSpew_RegAlloc, " [successor %u]",
412 mir->getSuccessor(i)->id());
413 }
414 JitSpewCont(JitSpew_RegAlloc, "\n");
415
416 for (size_t i = 0; i < block->numPhis(); i++) {
417 const InstructionInfo& info = blocks[blockIndex].phis[i];
418 LPhi* phi = block->getPhi(i);
419 CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
420 CodePosition output(block->getPhi(block->numPhis() - 1)->id(),
421 CodePosition::OUTPUT);
422
423 JitSpewHeader(JitSpew_RegAlloc);
424 JitSpewCont(JitSpew_RegAlloc, " %u-%u Phi [def %s] ", input.bits(),
425 output.bits(), phi->getDef(0)->toString().get());
426 for (size_t j = 0; j < phi->numOperands(); j++) {
427 JitSpewCont(JitSpew_RegAlloc, " [use %s]",
428 info.inputs[j].toString().get());
429 }
430 JitSpewCont(JitSpew_RegAlloc, "\n");
431 }
432
433 for (LInstructionIterator iter = block->begin(); iter != block->end();
434 iter++) {
435 LInstruction* ins = *iter;
436 const InstructionInfo& info = instructions[ins->id()];
437
438 CodePosition input(ins->id(), CodePosition::INPUT);
439 CodePosition output(ins->id(), CodePosition::OUTPUT);
440
441 JitSpewHeader(JitSpew_RegAlloc);
442 JitSpewCont(JitSpew_RegAlloc, " ");
443 if (input != CodePosition::MIN) {
444 JitSpewCont(JitSpew_RegAlloc, "%u-%u ", input.bits(), output.bits());
445 }
446 JitSpewCont(JitSpew_RegAlloc, "%s", ins->opName());
447
448 if (ins->isMoveGroup()) {
449 LMoveGroup* group = ins->toMoveGroup();
450 for (int i = group->numMoves() - 1; i >= 0; i--) {
451 JitSpewCont(JitSpew_RegAlloc, " [%s <- %s]",
452 group->getMove(i).to().toString().get(),
453 group->getMove(i).from().toString().get());
454 }
455 JitSpewCont(JitSpew_RegAlloc, "\n");
456 continue;
457 }
458
459 for (size_t i = 0; i < ins->numDefs(); i++) {
460 JitSpewCont(JitSpew_RegAlloc, " [def %s]",
461 ins->getDef(i)->toString().get());
462 }
463
464 for (size_t i = 0; i < ins->numTemps(); i++) {
465 LDefinition* temp = ins->getTemp(i);
466 if (!temp->isBogusTemp()) {
467 JitSpewCont(JitSpew_RegAlloc, " [temp v%u %s]",
468 info.temps[i].virtualRegister(), temp->toString().get());
469 }
470 }
471
472 size_t index = 0;
473 for (LInstruction::InputIterator alloc(*ins); alloc.more();
474 alloc.next()) {
475 JitSpewCont(JitSpew_RegAlloc, " [use %s",
476 info.inputs[index++].toString().get());
477 if (!alloc->isConstant()) {
478 JitSpewCont(JitSpew_RegAlloc, " %s", alloc->toString().get());
479 }
480 JitSpewCont(JitSpew_RegAlloc, "]");
481 }
482
483 JitSpewCont(JitSpew_RegAlloc, "\n");
484 }
485 }
486
487 // Print discovered allocations at the ends of blocks, in the order they
488 // were discovered.
489
490 Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
491 if (!seenOrdered.appendN(IntegrityItem(), seen.count())) {
492 fprintf(stderr, "OOM while dumping allocations\n");
493 return;
494 }
495
496 for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
497 IntegrityItem item = iter.front();
498 seenOrdered[item.index] = item;
499 }
500
501 if (!seenOrdered.empty()) {
502 fprintf(stderr, "Intermediate Allocations:\n");
503
504 for (size_t i = 0; i < seenOrdered.length(); i++) {
505 IntegrityItem item = seenOrdered[i];
506 fprintf(stderr, " block %u reg v%u alloc %s\n", item.block->mir()->id(),
507 item.vreg, item.alloc.toString().get());
508 }
509 }
510
511 fprintf(stderr, "\n");
512 # endif
513 }
514 #endif // DEBUG
515
// Sentinel code positions: MAX compares greater than any real position in
// the graph, MIN compares less than (or equal to position 0 of) all of them.
const CodePosition CodePosition::MAX(UINT_MAX);
const CodePosition CodePosition::MIN(0);
518
init()519 bool RegisterAllocator::init() {
520 if (!insData.init(mir, graph.numInstructions())) {
521 return false;
522 }
523
524 if (!entryPositions.reserve(graph.numBlocks()) ||
525 !exitPositions.reserve(graph.numBlocks())) {
526 return false;
527 }
528
529 for (size_t i = 0; i < graph.numBlocks(); i++) {
530 LBlock* block = graph.getBlock(i);
531 for (LInstructionIterator ins = block->begin(); ins != block->end();
532 ins++) {
533 insData[ins->id()] = *ins;
534 }
535 for (size_t j = 0; j < block->numPhis(); j++) {
536 LPhi* phi = block->getPhi(j);
537 insData[phi->id()] = phi;
538 }
539
540 CodePosition entry =
541 block->numPhis() != 0
542 ? CodePosition(block->getPhi(0)->id(), CodePosition::INPUT)
543 : inputOf(block->firstInstructionWithId());
544 CodePosition exit = outputOf(block->lastInstructionWithId());
545
546 MOZ_ASSERT(block->mir()->id() == i);
547 entryPositions.infallibleAppend(entry);
548 exitPositions.infallibleAppend(exit);
549 }
550
551 return true;
552 }
553
getInputMoveGroup(LInstruction * ins)554 LMoveGroup* RegisterAllocator::getInputMoveGroup(LInstruction* ins) {
555 MOZ_ASSERT(!ins->fixReuseMoves());
556 if (ins->inputMoves()) {
557 return ins->inputMoves();
558 }
559
560 LMoveGroup* moves = LMoveGroup::New(alloc());
561 ins->setInputMoves(moves);
562 ins->block()->insertBefore(ins, moves);
563 return moves;
564 }
565
getFixReuseMoveGroup(LInstruction * ins)566 LMoveGroup* RegisterAllocator::getFixReuseMoveGroup(LInstruction* ins) {
567 if (ins->fixReuseMoves()) {
568 return ins->fixReuseMoves();
569 }
570
571 LMoveGroup* moves = LMoveGroup::New(alloc());
572 ins->setFixReuseMoves(moves);
573 ins->block()->insertBefore(ins, moves);
574 return moves;
575 }
576
getMoveGroupAfter(LInstruction * ins)577 LMoveGroup* RegisterAllocator::getMoveGroupAfter(LInstruction* ins) {
578 if (ins->movesAfter()) {
579 return ins->movesAfter();
580 }
581
582 LMoveGroup* moves = LMoveGroup::New(alloc());
583 ins->setMovesAfter(moves);
584
585 ins->block()->insertAfter(ins, moves);
586 return moves;
587 }
588
// Spew the current LIR graph: every block with its successors, phis and
// instructions (defs, temps, uses, and the contents of move groups). |who|
// labels the dump, e.g. which allocation phase it was taken at. No-op
// without JS_JITSPEW.
void RegisterAllocator::dumpInstructions(const char* who) {
#ifdef JS_JITSPEW
  JitSpew(JitSpew_RegAlloc, "LIR instructions %s", who);

  for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
    LBlock* block = graph.getBlock(blockIndex);
    MBasicBlock* mir = block->mir();

    JitSpewHeader(JitSpew_RegAlloc);
    JitSpewCont(JitSpew_RegAlloc, "  Block %lu",
                static_cast<unsigned long>(blockIndex));
    for (size_t i = 0; i < mir->numSuccessors(); i++) {
      JitSpewCont(JitSpew_RegAlloc, " [successor %u]",
                  mir->getSuccessor(i)->id());
    }
    JitSpewCont(JitSpew_RegAlloc, "\n");

    for (size_t i = 0; i < block->numPhis(); i++) {
      LPhi* phi = block->getPhi(i);

      JitSpewHeader(JitSpew_RegAlloc);
      JitSpewCont(JitSpew_RegAlloc, "    %u-%u Phi [def %s]",
                  inputOf(phi).bits(), outputOf(phi).bits(),
                  phi->getDef(0)->toString().get());
      for (size_t j = 0; j < phi->numOperands(); j++) {
        JitSpewCont(JitSpew_RegAlloc, " [use %s]",
                    phi->getOperand(j)->toString().get());
      }
      JitSpewCont(JitSpew_RegAlloc, "\n");
    }

    for (LInstructionIterator iter = block->begin(); iter != block->end();
         iter++) {
      LInstruction* ins = *iter;

      JitSpewHeader(JitSpew_RegAlloc);
      JitSpewCont(JitSpew_RegAlloc, "    ");
      // Instructions with id 0 have no code positions to print.
      if (ins->id() != 0) {
        JitSpewCont(JitSpew_RegAlloc, "%u-%u ", inputOf(ins).bits(),
                    outputOf(ins).bits());
      }
      JitSpewCont(JitSpew_RegAlloc, "%s", ins->opName());

      // Move groups carry no defs/temps/uses; print the moves and move on.
      if (ins->isMoveGroup()) {
        LMoveGroup* group = ins->toMoveGroup();
        for (int i = group->numMoves() - 1; i >= 0; i--) {
          // Use two printfs, as LAllocation::toString is not reentrant.
          JitSpewCont(JitSpew_RegAlloc, " [%s",
                      group->getMove(i).to().toString().get());
          JitSpewCont(JitSpew_RegAlloc, " <- %s]",
                      group->getMove(i).from().toString().get());
        }
        JitSpewCont(JitSpew_RegAlloc, "\n");
        continue;
      }

      for (size_t i = 0; i < ins->numDefs(); i++) {
        JitSpewCont(JitSpew_RegAlloc, " [def %s]",
                    ins->getDef(i)->toString().get());
      }

      for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* temp = ins->getTemp(i);
        if (!temp->isBogusTemp()) {
          JitSpewCont(JitSpew_RegAlloc, " [temp %s]", temp->toString().get());
        }
      }

      for (LInstruction::InputIterator alloc(*ins); alloc.more();
           alloc.next()) {
        if (!alloc->isBogus()) {
          JitSpewCont(JitSpew_RegAlloc, " [use %s]", alloc->toString().get());
        }
      }

      JitSpewCont(JitSpew_RegAlloc, "\n");
    }
  }
  JitSpewCont(JitSpew_RegAlloc, "\n");
#endif  // JS_JITSPEW
}
670