// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h"
#include "src/numbers/double.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ tasm()->

#define kScratchReg r11
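// `__` and kScratchReg are local shorthands used throughout this file: the
// former makes emitted sequences read like PPC assembly, and r11 is reserved
// as the backend's scratch register.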

// Adds PPC-specific methods to convert InstructionOperands.
class PPCOperandConverter final : public InstructionOperandConverter {
 public:
  PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  size_t OutputCount() { return instr_->OutputCount(); }

  RCBit OutputRCBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_branch_and_poison:
      case kFlags_deoptimize:
      case kFlags_deoptimize_and_poison:
      case kFlags_set:
      case kFlags_trap:
        return SetRC;
      case kFlags_none:
        return LeaveRC;
    }
    UNREACHABLE();
  }

  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand::EmbeddedNumber(constant.ToFloat32());
      case Constant::kFloat64:
        return Operand::EmbeddedNumber(constant.ToFloat64().value());
      case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kDelayedStringConstant:
        return Operand::EmbeddedStringConstant(
            constant.ToDelayedStringConstant());
      case Constant::kCompressedHeapObject:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    AddressingMode addr_mode = AddressingModeField::decode(instr_->opcode());
    if (mode) *mode = addr_mode;
    switch (addr_mode) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(AddressingMode* mode = NULL,
                           size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};

static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}

namespace {

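// Out-of-line portion of the write barrier. The fast path at the store site
// only checks the page flags and falls through; this slow path computes the
// slot address and calls the record-write stub (or the ephemeron key
// barrier), saving lr manually when the frame was elided.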
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode,
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        stub_mode_(stub_mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        unwinding_info_writer_(unwinding_info_writer),
        zone_(gen->zone()) {}

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode,
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        stub_mode_(stub_mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        unwinding_info_writer_(unwinding_info_writer),
        zone_(gen->zone()) {}

  void Generate() final {
    ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    if (COMPRESS_POINTERS_BOOL) {
      __ DecompressTaggedPointer(value_, value_);
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    if (offset_ == no_reg) {
      __ addi(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ add(scratch1_, object_, offset_);
    }
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ mflr(scratch0_);
      __ Push(scratch0_);
      unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
    }
    if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
      __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode, wasm::WasmCode::kRecordWrite);
    } else {
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode);
    }
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ Pop(scratch0_);
      __ mtlr(scratch0_);
      unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_ == no_reg.
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  StubCallMode stub_mode_;
  bool must_save_lr_;
  UnwindingInfoWriter* const unwinding_info_writer_;
  Zone* zone_;
};

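// Maps a FlagsCondition to the PPC condition-register bit to branch on. Note
// that kOverflow/kNotOverflow are only supported for the add/sub opcodes
// listed below: their sequences (see ASSEMBLE_ADD_WITH_OVERFLOW et al.) leave
// the overflow indication as the sign of kScratchReg with CR0 set, which is
// why overflow maps to lt and its negation to ge.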
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
    case kUnsignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
    case kUnsignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
    case kUnsignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
    case kUnsignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow checked for add/sub only.
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add32:
        case kPPC_Add64:
        case kPPC_Sub:
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
          return lt;
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add32:
        case kPPC_Add64:
        case kPPC_Sub:
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
          return ge;
        default:
          break;
      }
      break;
    default:
      break;
  }
  UNREACHABLE();
}

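// Spectre mitigation: loads flagged kMemoryAccessPoisoned have their result
// masked with kSpeculationPoisonRegister, which is all ones on the correctly
// speculated path and all zeros otherwise (see
// GenerateSpeculationPoisonFromCodeStartRegister below).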
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
                                   PPCOperandConverter const& i) {
  const MemoryAccessMode access_mode =
      static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
  if (access_mode == kMemoryAccessPoisoned) {
    Register value = i.OutputRegister();
    codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
  }
}

}  // namespace

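// Helper macros for AssembleArchInstruction. The _RC variants thread the
// instruction's record-condition bit (SetRC/LeaveRC) through to record-form
// PPC instructions so CR0 is updated only when a later instruction consumes
// it, and the paired reg/imm parameters select between the register and
// immediate encodings based on the second input.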
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round)                     \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.OutputRCBit());                                   \
    if (round) {                                                     \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
    }                                                                \
  } while (0)

#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr, round)                    \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1), i.OutputRCBit());         \
    if (round) {                                                     \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
    }                                                                \
  } while (0)

#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1));                    \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1));                   \
    }                                                          \
  } while (0)

#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm)        \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1), i.OutputRCBit());  \
    }                                                          \
  } while (0)

#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm)    \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1), i.OutputRCBit());      \
    }                                                          \
  } while (0)

#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0);      \
    }                                                                   \
  } while (0)

#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0);     \
    }                                                                   \
  } while (0)

#if V8_TARGET_ARCH_PPC64
#define ASSEMBLE_ADD_WITH_OVERFLOW32()         \
  do {                                         \
    ASSEMBLE_ADD_WITH_OVERFLOW();              \
    __ extsw(kScratchReg, kScratchReg, SetRC); \
  } while (0)

#define ASSEMBLE_SUB_WITH_OVERFLOW32()         \
  do {                                         \
    ASSEMBLE_SUB_WITH_OVERFLOW();              \
    __ extsw(kScratchReg, kScratchReg, SetRC); \
  } while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif

#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                        \
  do {                                                                 \
    const CRegister cr = cr0;                                          \
    if (HasRegisterInput(instr, 1)) {                                  \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr);     \
      } else {                                                         \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr);      \
      }                                                                \
    } else {                                                           \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
      } else {                                                         \
        __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr);  \
      }                                                                \
    }                                                                  \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                 \
  } while (0)

#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                                 \
  do {                                                                    \
    const CRegister cr = cr0;                                             \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                    \
  } while (0)

#define ASSEMBLE_MODULO(div_instr, mul_instr)                        \
  do {                                                               \
    const Register scratch = kScratchReg;                            \
    __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1));   \
    __ mul_instr(scratch, scratch, i.InputRegister(1));              \
    __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
           i.OutputRCBit());                                         \
  } while (0)

#define ASSEMBLE_FLOAT_MODULO()                                             \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 2, kScratchReg);                             \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
                            i.InputDoubleRegister(1));                      \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                    \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1);    \
    /* Move the result to the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2);    \
    /* Move the result to the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)

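// Float max/min implement the JS Math.max/Math.min corner cases that fcmpu
// alone cannot distinguish: NaN operands take the unordered branch and are
// propagated through fadd, and +/-0 ties (which compare equal) are resolved
// arithmetically, via fadd for max and the negated sum for min.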
#define ASSEMBLE_FLOAT_MAX()                                            \
  do {                                                                  \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                 \
    DoubleRegister right_reg = i.InputDoubleRegister(1);                \
    DoubleRegister result_reg = i.OutputDoubleRegister();               \
    Label check_zero, return_left, return_right, return_nan, done;      \
    __ fcmpu(left_reg, right_reg);                                      \
    __ bunordered(&return_nan);                                         \
    __ beq(&check_zero);                                                \
    __ bge(&return_left);                                               \
    __ b(&return_right);                                                \
                                                                        \
    __ bind(&check_zero);                                               \
    __ fcmpu(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                           \
    __ bne(&return_left);                                               \
    /* At this point, both left and right are either 0 or -0. */        \
    __ fadd(result_reg, left_reg, right_reg);                           \
    __ b(&done);                                                        \
                                                                        \
    __ bind(&return_nan);                                               \
    /* If left or right are NaN, fadd propagates the appropriate one.*/ \
    __ fadd(result_reg, left_reg, right_reg);                           \
    __ b(&done);                                                        \
                                                                        \
    __ bind(&return_right);                                             \
    if (right_reg != result_reg) {                                      \
      __ fmr(result_reg, right_reg);                                    \
    }                                                                   \
    __ b(&done);                                                        \
                                                                        \
    __ bind(&return_left);                                              \
    if (left_reg != result_reg) {                                       \
      __ fmr(result_reg, left_reg);                                     \
    }                                                                   \
    __ bind(&done);                                                     \
  } while (0)

#define ASSEMBLE_FLOAT_MIN()                                              \
  do {                                                                    \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                   \
    DoubleRegister right_reg = i.InputDoubleRegister(1);                  \
    DoubleRegister result_reg = i.OutputDoubleRegister();                 \
    Label check_zero, return_left, return_right, return_nan, done;        \
    __ fcmpu(left_reg, right_reg);                                        \
    __ bunordered(&return_nan);                                           \
    __ beq(&check_zero);                                                  \
    __ ble(&return_left);                                                 \
    __ b(&return_right);                                                  \
                                                                          \
    __ bind(&check_zero);                                                 \
    __ fcmpu(left_reg, kDoubleRegZero);                                   \
    /* left == right != 0. */                                             \
    __ bne(&return_left);                                                 \
    /* At this point, both left and right are either 0 or -0. */          \
    /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R */ \
    /* being different registers is most efficiently expressed */         \
    /* as -((-L) - R). */                                                 \
    __ fneg(kScratchDoubleReg, left_reg);                                 \
    if (kScratchDoubleReg == right_reg) {                                 \
      __ fadd(result_reg, kScratchDoubleReg, right_reg);                  \
    } else {                                                              \
      __ fsub(result_reg, kScratchDoubleReg, right_reg);                  \
    }                                                                     \
    __ fneg(result_reg, result_reg);                                      \
    __ b(&done);                                                          \
                                                                          \
    __ bind(&return_nan);                                                 \
    /* If left or right are NaN, fadd propagates the appropriate one.*/   \
    __ fadd(result_reg, left_reg, right_reg);                             \
    __ b(&done);                                                          \
                                                                          \
    __ bind(&return_right);                                               \
    if (right_reg != result_reg) {                                        \
      __ fmr(result_reg, right_reg);                                      \
    }                                                                     \
    __ b(&done);                                                          \
                                                                          \
    __ bind(&return_left);                                                \
    if (left_reg != result_reg) {                                         \
      __ fmr(result_reg, left_reg);                                       \
    }                                                                     \
    __ bind(&done);                                                       \
  } while (0)

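// Memory access macros. The MRI/MRR split picks the D-form (immediate
// offset) or X-form (indexed) encoding of the access. For atomic accesses
// the usual Power mapping is used: an lwsync after atomic loads (acquire)
// and an lwsync before plus a sync after atomic stores.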
#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx)    \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    bool is_atomic = i.InputInt32(2);                 \
    if (mode == kMode_MRI) {                          \
      __ asm_instr(result, operand);                  \
    } else {                                          \
      __ asm_instrx(result, operand);                 \
    }                                                 \
    if (is_atomic) __ lwsync();                       \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());              \
  } while (0)

#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                               \
    Register result = i.OutputRegister();            \
    AddressingMode mode = kMode_None;                \
    MemOperand operand = i.MemoryOperand(&mode);     \
    bool is_atomic = i.InputInt32(2);                \
    if (mode == kMode_MRI) {                         \
      __ asm_instr(result, operand);                 \
    } else {                                         \
      __ asm_instrx(result, operand);                \
    }                                                \
    if (is_atomic) __ lwsync();                      \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());             \
  } while (0)

#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx)      \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    bool is_atomic = i.InputInt32(3);                    \
    if (is_atomic) __ lwsync();                          \
    /* removed frsp as instruction-selector checked */   \
    /* value to be kFloat32 */                           \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    if (is_atomic) __ sync();                            \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)

#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx)    \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    bool is_atomic = i.InputInt32(3);                    \
    if (is_atomic) __ lwsync();                          \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    if (is_atomic) __ sync();                            \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)

#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define CleanUInt32(x) __ ClearLeftImm(x, x, Operand(32))
#else
#define CleanUInt32(x)
#endif

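// Atomic read-modify-write macros. These assume load_instr/store_instr are a
// load-reserve/store-conditional pair (larx/stcx. family): the bne on cr0
// retries the loop whenever the conditional store loses its reservation, and
// the lwsync/sync pair fences the whole operation.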
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr)       \
  do {                                                                  \
    Label exchange;                                                     \
    __ lwsync();                                                        \
    __ bind(&exchange);                                                 \
    __ load_instr(i.OutputRegister(0),                                  \
                  MemOperand(i.InputRegister(0), i.InputRegister(1)));  \
    __ store_instr(i.InputRegister(2),                                  \
                   MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ bne(&exchange, cr0);                                             \
    __ sync();                                                          \
  } while (0)

#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst)               \
  do {                                                                       \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label binop;                                                             \
    __ lwsync();                                                             \
    __ bind(&binop);                                                         \
    __ load_inst(i.OutputRegister(), operand);                               \
    __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2));        \
    __ store_inst(kScratchReg, operand);                                     \
    __ bne(&binop, cr0);                                                     \
    __ sync();                                                               \
  } while (false)

#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, store_inst,      \
                                       ext_instr)                            \
  do {                                                                       \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label binop;                                                             \
    __ lwsync();                                                             \
    __ bind(&binop);                                                         \
    __ load_inst(i.OutputRegister(), operand);                               \
    __ ext_instr(i.OutputRegister(), i.OutputRegister());                    \
    __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2));        \
    __ store_inst(kScratchReg, operand);                                     \
    __ bne(&binop, cr0);                                                     \
    __ sync();                                                               \
  } while (false)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp_inst, load_inst, store_inst,    \
                                         input_ext)                          \
  do {                                                                       \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label loop;                                                              \
    Label exit;                                                              \
    __ input_ext(r0, i.InputRegister(2));                                    \
    __ lwsync();                                                             \
    __ bind(&loop);                                                          \
    __ load_inst(i.OutputRegister(), operand);                               \
    __ cmp_inst(i.OutputRegister(), r0, cr0);                                \
    __ bne(&exit, cr0);                                                      \
    __ store_inst(i.InputRegister(3), operand);                              \
    __ bne(&loop, cr0);                                                      \
    __ bind(&exit);                                                          \
    __ sync();                                                               \
  } while (false)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp_inst, load_inst,       \
                                                  store_inst, ext_instr)     \
  do {                                                                       \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label loop;                                                              \
    Label exit;                                                              \
    __ ext_instr(r0, i.InputRegister(2));                                    \
    __ lwsync();                                                             \
    __ bind(&loop);                                                          \
    __ load_inst(i.OutputRegister(), operand);                               \
    __ ext_instr(i.OutputRegister(), i.OutputRegister());                    \
    __ cmp_inst(i.OutputRegister(), r0, cr0);                                \
    __ bne(&exit, cr0);                                                      \
    __ store_inst(i.InputRegister(3), operand);                              \
    __ bne(&loop, cr0);                                                      \
    __ bind(&exit);                                                          \
    __ sync();                                                               \
  } while (false)

void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
  unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ RestoreFrameStateForTailCall();
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ cmpi(scratch1,
          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
  __ bne(&done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ LoadP(caller_args_count_reg,
           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
  __ bind(&done);
}

namespace {

void FlushPendingPushRegisters(TurboAssembler* tasm,
                               FrameAccessState* frame_access_state,
                               ZoneVector<Register>* pending_pushes) {
  switch (pending_pushes->size()) {
    case 0:
      break;
    case 1:
      tasm->Push((*pending_pushes)[0]);
      break;
    case 2:
      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
      break;
    case 3:
      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                 (*pending_pushes)[2]);
      break;
    default:
      UNREACHABLE();
  }
  frame_access_state->IncreaseSPDelta(pending_pushes->size());
  pending_pushes->clear();
}

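// Adjusts sp to match new_slot_above_sp, flushing any batched pushes first so
// that their slot positions stay consistent with the moved stack pointer.
// Shrinking the stack is gated on allow_shrinkage.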
void AdjustStackPointerForTailCall(
    TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
    ZoneVector<Register>* pending_pushes = nullptr,
    bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(tasm, state, pending_pushes);
    }
    tasm->Add(sp, sp, -stack_slot_delta * kSystemPointerSize, r0);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(tasm, state, pending_pushes);
    }
    tasm->Add(sp, sp, -stack_slot_delta * kSystemPointerSize, r0);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  ZoneVector<MoveOperands*> pushes(zone());
  GetPushCompatibleMoves(instr, kRegisterPush, &pushes);

  if (!pushes.empty() &&
      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
       first_unused_stack_slot)) {
    PPCOperandConverter g(this, instr);
    ZoneVector<Register> pending_pushes(zone());
    for (auto move : pushes) {
      LocationOperand destination_location(
          LocationOperand::cast(move->destination()));
      InstructionOperand source(move->source());
      AdjustStackPointerForTailCall(
          tasm(), frame_access_state(),
          destination_location.index() - pending_pushes.size(),
          &pending_pushes);
      // Pushes of non-register data types are not supported.
      DCHECK(source.IsRegister());
      LocationOperand source_location(LocationOperand::cast(source));
      pending_pushes.push_back(source_location.GetRegister());
      // TODO(arm): We can push more than 3 registers at once. Add support in
      // the macro-assembler for pushing a list of registers.
      if (pending_pushes.size() == 3) {
        FlushPendingPushRegisters(tasm(), frame_access_state(),
                                  &pending_pushes);
      }
      move->Eliminate();
    }
    FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
  }
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot, nullptr, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot);
}

// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  Register scratch = kScratchReg;
  __ ComputeCodeStartAddress(scratch);
  __ cmp(scratch, kJavaScriptCallCodeStartRegister);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}

// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we
// need to:
//    1. read from memory the word that contains that bit, which can be found
//       in the flags in the referenced {CodeDataContainer} object;
//    2. test kMarkedForDeoptimizationBit in those flags; and
//    3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
  if (FLAG_debug_code) {
    // Check that {kJavaScriptCallCodeStartRegister} is correct.
    __ ComputeCodeStartAddress(ip);
    __ cmp(ip, kJavaScriptCallCodeStartRegister);
    __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
  }

  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
  __ LoadTaggedPointerField(
      r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
  __ LoadWordArith(
      r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
  __ TestBit(r11, Code::kMarkedForDeoptimizationBit);
  __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
          RelocInfo::CODE_TARGET, ne, cr0);
}

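// Builds the speculation poison mask: compare the code-start register against
// the actual code start, materialize all ones via notx(0), then use isel to
// keep the all-ones mask on equality and substitute the zero scratch register
// on a mismatch (i.e. on a mispredicted path).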
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
  Register scratch = kScratchReg;

  __ ComputeCodeStartAddress(scratch);

  // Calculate a mask which has all bits set in the normal case, but has all
  // bits cleared if we are speculatively executing the wrong PC.
  __ cmp(kJavaScriptCallCodeStartRegister, scratch);
  __ li(scratch, Operand::Zero());
  __ notx(kSpeculationPoisonRegister, scratch);
  __ isel(eq, kSpeculationPoisonRegister, kSpeculationPoisonRegister, scratch);
}

void CodeGenerator::AssembleRegisterArgumentPoisoning() {
  __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
  __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
  __ and_(sp, sp, kSpeculationPoisonRegister);
}

867 // Assembles an instruction after register allocation, producing machine code.
AssembleArchInstruction(Instruction * instr)868 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
869     Instruction* instr) {
870   PPCOperandConverter i(this, instr);
871   ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
872 
873   switch (opcode) {
874     case kArchCallCodeObject: {
875       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
876           tasm());
877       if (HasRegisterInput(instr, 0)) {
878         Register reg = i.InputRegister(0);
879         DCHECK_IMPLIES(
880             instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
881             reg == kJavaScriptCallCodeStartRegister);
882         __ CallCodeObject(reg);
883       } else {
884         __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
885       }
886       RecordCallPosition(instr);
887       DCHECK_EQ(LeaveRC, i.OutputRCBit());
888       frame_access_state()->ClearSPDelta();
889       break;
890     }
891     case kArchCallBuiltinPointer: {
892       DCHECK(!instr->InputAt(0)->IsImmediate());
893       Register builtin_index = i.InputRegister(0);
894       __ CallBuiltinByIndex(builtin_index);
895       RecordCallPosition(instr);
896       frame_access_state()->ClearSPDelta();
897       break;
898     }
899     case kArchCallWasmFunction: {
900       // We must not share code targets for calls to builtins for wasm code, as
901       // they might need to be patched individually.
902       if (instr->InputAt(0)->IsImmediate()) {
903         Constant constant = i.ToConstant(instr->InputAt(0));
904 #ifdef V8_TARGET_ARCH_PPC64
905         Address wasm_code = static_cast<Address>(constant.ToInt64());
906 #else
907         Address wasm_code = static_cast<Address>(constant.ToInt32());
908 #endif
909         __ Call(wasm_code, constant.rmode());
910       } else {
911         __ Call(i.InputRegister(0));
912       }
913       RecordCallPosition(instr);
914       DCHECK_EQ(LeaveRC, i.OutputRCBit());
915       frame_access_state()->ClearSPDelta();
916       break;
917     }
918     case kArchTailCallCodeObjectFromJSFunction:
919     case kArchTailCallCodeObject: {
920       if (opcode == kArchTailCallCodeObjectFromJSFunction) {
921         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
922                                          i.TempRegister(0), i.TempRegister(1),
923                                          i.TempRegister(2));
924       }
925       if (HasRegisterInput(instr, 0)) {
926         Register reg = i.InputRegister(0);
927         DCHECK_IMPLIES(
928             instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
929             reg == kJavaScriptCallCodeStartRegister);
930         __ JumpCodeObject(reg);
931       } else {
932         // We cannot use the constant pool to load the target since
933         // we've already restored the caller's frame.
934         ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
935         __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
936       }
937       DCHECK_EQ(LeaveRC, i.OutputRCBit());
938       frame_access_state()->ClearSPDelta();
939       frame_access_state()->SetFrameAccessToDefault();
940       break;
941     }
942     case kArchTailCallWasm: {
943       // We must not share code targets for calls to builtins for wasm code, as
944       // they might need to be patched individually.
945       if (instr->InputAt(0)->IsImmediate()) {
946         Constant constant = i.ToConstant(instr->InputAt(0));
947 #ifdef V8_TARGET_ARCH_PPC64
948         Address wasm_code = static_cast<Address>(constant.ToInt64());
949 #else
950         Address wasm_code = static_cast<Address>(constant.ToInt32());
951 #endif
952         __ Jump(wasm_code, constant.rmode());
953       } else {
954         __ Jump(i.InputRegister(0));
955       }
956       DCHECK_EQ(LeaveRC, i.OutputRCBit());
957       frame_access_state()->ClearSPDelta();
958       frame_access_state()->SetFrameAccessToDefault();
959       break;
960     }
961     case kArchTailCallAddress: {
962       CHECK(!instr->InputAt(0)->IsImmediate());
963       Register reg = i.InputRegister(0);
964       DCHECK_IMPLIES(
965           instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
966           reg == kJavaScriptCallCodeStartRegister);
967       __ Jump(reg);
968       frame_access_state()->ClearSPDelta();
969       frame_access_state()->SetFrameAccessToDefault();
970       break;
971     }
972     case kArchCallJSFunction: {
973       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
974           tasm());
975       Register func = i.InputRegister(0);
976       if (FLAG_debug_code) {
977         // Check the function's context matches the context argument.
978         __ LoadTaggedPointerField(
979             kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
980         __ cmp(cp, kScratchReg);
981         __ Assert(eq, AbortReason::kWrongFunctionContext);
982       }
983       static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
984       __ LoadTaggedPointerField(r5,
985                                 FieldMemOperand(func, JSFunction::kCodeOffset));
986       __ CallCodeObject(r5);
987       RecordCallPosition(instr);
988       DCHECK_EQ(LeaveRC, i.OutputRCBit());
989       frame_access_state()->ClearSPDelta();
990       break;
991     }
992     case kArchPrepareCallCFunction: {
993       int const num_parameters = MiscField::decode(instr->opcode());
994       __ PrepareCallCFunction(num_parameters, kScratchReg);
995       // Frame alignment requires using FP-relative frame addressing.
996       frame_access_state()->SetFrameAccessToFP();
997       break;
998     }
999     case kArchSaveCallerRegisters: {
1000       fp_mode_ =
1001           static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
1002       DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
1003       // kReturnRegister0 should have been saved before entering the stub.
1004       int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
1005       DCHECK(IsAligned(bytes, kSystemPointerSize));
1006       DCHECK_EQ(0, frame_access_state()->sp_delta());
1007       frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
1008       DCHECK(!caller_registers_saved_);
1009       caller_registers_saved_ = true;
1010       break;
1011     }
1012     case kArchRestoreCallerRegisters: {
1013       DCHECK(fp_mode_ ==
1014              static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
1015       DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
1016       // Don't overwrite the returned value.
1017       int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
1018       frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
1019       DCHECK_EQ(0, frame_access_state()->sp_delta());
1020       DCHECK(caller_registers_saved_);
1021       caller_registers_saved_ = false;
1022       break;
1023     }
1024     case kArchPrepareTailCall:
1025       AssemblePrepareTailCall();
1026       break;
1027     case kArchComment:
1028 #ifdef V8_TARGET_ARCH_PPC64
1029       __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
1030 #else
1031       __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
1032 #endif
1033       break;
1034     case kArchCallCFunction: {
1035       int misc_field = MiscField::decode(instr->opcode());
1036       int num_parameters = misc_field;
1037       bool has_function_descriptor = false;
1038       Label start_call;
1039       bool isWasmCapiFunction =
1040           linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
1041       int offset = (FLAG_enable_embedded_constant_pool ? 20 : 23) * kInstrSize;
1042 
1043 #if ABI_USES_FUNCTION_DESCRIPTORS
1044       // AIX/PPC64BE Linux uses a function descriptor
1045       int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
1046       num_parameters = kNumParametersMask & misc_field;
1047       has_function_descriptor =
1048           (misc_field & kHasFunctionDescriptorBitMask) != 0;
1049       // AIX may emit 2 extra Load instructions under CallCFunctionHelper
1050       // due to having function descriptor.
1051       if (has_function_descriptor) {
1052         offset += 2 * kInstrSize;
1053       }
1054 #endif
1055       if (isWasmCapiFunction) {
1056         __ mflr(r0);
1057         __ bind(&start_call);
1058         __ LoadPC(kScratchReg);
1059         __ addi(kScratchReg, kScratchReg, Operand(offset));
1060         __ StoreP(kScratchReg,
1061                   MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
1062         __ mtlr(r0);
1063       }
1064       if (instr->InputAt(0)->IsImmediate()) {
1065         ExternalReference ref = i.InputExternalReference(0);
1066         __ CallCFunction(ref, num_parameters, has_function_descriptor);
1067       } else {
1068         Register func = i.InputRegister(0);
1069         __ CallCFunction(func, num_parameters, has_function_descriptor);
1070       }
1071       // TODO(miladfar): In the above block, kScratchReg must be populated with
1072       // the strictly-correct PC, which is the return address at this spot. The
1073       // offset is set to 36 (9 * kInstrSize) on pLinux and 44 on AIX, which is
1074       // counted from where we are binding to the label and ends at this spot.
1075       // If failed, replace it with the correct offset suggested. More info on
1076       // f5ab7d3.
1077       if (isWasmCapiFunction) {
1078         CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
1079         RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
1080       }
1081       frame_access_state()->SetFrameAccessToDefault();
1082       // Ideally, we should decrement SP delta to match the change of stack
1083       // pointer in CallCFunction. However, for certain architectures (e.g.
1084       // ARM), there may be more strict alignment requirement, causing old SP
1085       // to be saved on the stack. In those cases, we can not calculate the SP
1086       // delta statically.
1087       frame_access_state()->ClearSPDelta();
1088       if (caller_registers_saved_) {
1089         // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
1090         // Here, we assume the sequence to be:
1091         //   kArchSaveCallerRegisters;
1092         //   kArchCallCFunction;
1093         //   kArchRestoreCallerRegisters;
1094         int bytes =
1095             __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
1096         frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
1097       }
1098       break;
1099     }
1100     case kArchJmp:
1101       AssembleArchJump(i.InputRpo(0));
1102       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1103       break;
1104     case kArchBinarySearchSwitch:
1105       AssembleArchBinarySearchSwitch(instr);
1106       break;
1107     case kArchTableSwitch:
1108       AssembleArchTableSwitch(instr);
1109       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1110       break;
1111     case kArchAbortCSAAssert:
1112       DCHECK(i.InputRegister(0) == r4);
1113       {
1114         // We don't actually want to generate a pile of code for this, so just
1115         // claim there is a stack frame, without generating one.
1116         FrameScope scope(tasm(), StackFrame::NONE);
1117         __ Call(
1118             isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
1119             RelocInfo::CODE_TARGET);
1120       }
1121       __ stop();
1122       break;
1123     case kArchDebugBreak:
1124       __ DebugBreak();
1125       break;
1126     case kArchNop:
1127     case kArchThrowTerminator:
      // Don't emit code for nops.
1129       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1130       break;
1131     case kArchDeoptimize: {
1132       DeoptimizationExit* exit =
1133           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
1134       __ b(exit->label());
1135       break;
1136     }
1137     case kArchRet:
1138       AssembleReturn(instr->InputAt(0));
1139       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1140       break;
1141     case kArchFramePointer:
1142       __ mr(i.OutputRegister(), fp);
1143       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1144       break;
1145     case kArchParentFramePointer:
1146       if (frame_access_state()->has_frame()) {
1147         __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
1148       } else {
1149         __ mr(i.OutputRegister(), fp);
1150       }
1151       break;
1152     case kArchStackPointerGreaterThan: {
      // Potentially apply an offset to the current stack pointer before the
      // comparison, to account for the size difference between an optimized
      // frame and the unoptimized frames it contains.
1156 
1157       Register lhs_register = sp;
1158       uint32_t offset;
1159 
1160       if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
1161         lhs_register = i.TempRegister(0);
1162         if (is_int16(offset)) {
1163           __ subi(lhs_register, sp, Operand(offset));
1164         } else {
1165           __ mov(kScratchReg, Operand(offset));
1166           __ sub(lhs_register, sp, kScratchReg);
1167         }
1168       }
1169 
1170       constexpr size_t kValueIndex = 0;
1171       DCHECK(instr->InputAt(kValueIndex)->IsRegister());
1172       __ cmpl(lhs_register, i.InputRegister(kValueIndex), cr0);
1173       break;
1174     }
1175     case kArchStackCheckOffset:
1176       __ LoadSmiLiteral(i.OutputRegister(),
1177                         Smi::FromInt(GetStackCheckOffset()));
1178       break;
1179     case kArchTruncateDoubleToI:
1180       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
1181                            i.InputDoubleRegister(0), DetermineStubCallMode());
1182       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1183       break;
1184     case kArchStoreWithWriteBarrier: {
1185       RecordWriteMode mode =
1186           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
1187       Register object = i.InputRegister(0);
1188       Register value = i.InputRegister(2);
1189       Register scratch0 = i.TempRegister(0);
1190       Register scratch1 = i.TempRegister(1);
1191       OutOfLineRecordWrite* ool;
1192 
1193       AddressingMode addressing_mode =
1194           AddressingModeField::decode(instr->opcode());
1195       if (addressing_mode == kMode_MRI) {
1196         int32_t offset = i.InputInt32(1);
1197         ool = zone()->New<OutOfLineRecordWrite>(
1198             this, object, offset, value, scratch0, scratch1, mode,
1199             DetermineStubCallMode(), &unwinding_info_writer_);
1200         __ StoreTaggedField(value, MemOperand(object, offset), r0);
1201       } else {
1202         DCHECK_EQ(kMode_MRR, addressing_mode);
1203         Register offset(i.InputRegister(1));
1204         ool = zone()->New<OutOfLineRecordWrite>(
1205             this, object, offset, value, scratch0, scratch1, mode,
1206             DetermineStubCallMode(), &unwinding_info_writer_);
1207         __ StoreTaggedFieldX(value, MemOperand(object, offset), r0);
1208       }
1209       __ CheckPageFlag(object, scratch0,
1210                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
1211                        ool->entry());
1212       __ bind(ool->exit());
1213       break;
1214     }
1215     case kArchStackSlot: {
1216       FrameOffset offset =
1217           frame_access_state()->GetFrameOffset(i.InputInt32(0));
1218       __ addi(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
1219               Operand(offset.offset()));
1220       break;
1221     }
1222     case kArchWordPoisonOnSpeculation:
1223       __ and_(i.OutputRegister(), i.InputRegister(0),
1224               kSpeculationPoisonRegister);
1225       break;
1226     case kPPC_Peek: {
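      // Convert the reverse slot index into an FP-relative frame offset.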
1227       int reverse_slot = i.InputInt32(0);
1228       int offset =
1229           FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
1230       if (instr->OutputAt(0)->IsFPRegister()) {
1231         LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
1232         if (op->representation() == MachineRepresentation::kFloat64) {
1233           __ LoadDouble(i.OutputDoubleRegister(), MemOperand(fp, offset), r0);
1234         } else if (op->representation() == MachineRepresentation::kFloat32) {
1235           __ LoadFloat32(i.OutputFloatRegister(), MemOperand(fp, offset), r0);
1236         } else {
1237           DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
1238           __ mov(ip, Operand(offset));
1239           __ LoadSimd128(i.OutputSimd128Register(), MemOperand(fp, ip), r0,
1240                          kScratchDoubleReg);
1241         }
1242       } else {
1243         __ LoadP(i.OutputRegister(), MemOperand(fp, offset), r0);
1244       }
1245       break;
1246     }
1247     case kPPC_Sync: {
1248       __ sync();
1249       break;
1250     }
1251     case kPPC_And:
1252       if (HasRegisterInput(instr, 1)) {
1253         __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1254                 i.OutputRCBit());
1255       } else {
1256         __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1257       }
1258       break;
1259     case kPPC_AndComplement:
1260       __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1261               i.OutputRCBit());
1262       break;
1263     case kPPC_Or:
1264       if (HasRegisterInput(instr, 1)) {
1265         __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1266                i.OutputRCBit());
1267       } else {
1268         __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1269         DCHECK_EQ(LeaveRC, i.OutputRCBit());
1270       }
1271       break;
1272     case kPPC_OrComplement:
1273       __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1274              i.OutputRCBit());
1275       break;
1276     case kPPC_Xor:
1277       if (HasRegisterInput(instr, 1)) {
1278         __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1279                 i.OutputRCBit());
1280       } else {
1281         __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1282         DCHECK_EQ(LeaveRC, i.OutputRCBit());
1283       }
1284       break;
1285     case kPPC_ShiftLeft32:
1286       ASSEMBLE_BINOP_RC(slw, slwi);
1287       break;
1288 #if V8_TARGET_ARCH_PPC64
1289     case kPPC_ShiftLeft64:
1290       ASSEMBLE_BINOP_RC(sld, sldi);
1291       break;
1292 #endif
1293     case kPPC_ShiftRight32:
1294       ASSEMBLE_BINOP_RC(srw, srwi);
1295       break;
1296 #if V8_TARGET_ARCH_PPC64
1297     case kPPC_ShiftRight64:
1298       ASSEMBLE_BINOP_RC(srd, srdi);
1299       break;
1300 #endif
1301     case kPPC_ShiftRightAlg32:
1302       ASSEMBLE_BINOP_INT_RC(sraw, srawi);
1303       break;
1304 #if V8_TARGET_ARCH_PPC64
1305     case kPPC_ShiftRightAlg64:
1306       ASSEMBLE_BINOP_INT_RC(srad, sradi);
1307       break;
1308 #endif
1309 #if !V8_TARGET_ARCH_PPC64
1310     case kPPC_AddPair:
1311       // i.InputRegister(0) ... left low word.
1312       // i.InputRegister(1) ... left high word.
1313       // i.InputRegister(2) ... right low word.
1314       // i.InputRegister(3) ... right high word.
1315       __ addc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
1316       __ adde(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
1317       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1318       break;
1319     case kPPC_SubPair:
1320       // i.InputRegister(0) ... left low word.
1321       // i.InputRegister(1) ... left high word.
1322       // i.InputRegister(2) ... right low word.
1323       // i.InputRegister(3) ... right high word.
1324       __ subc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
1325       __ sube(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
1326       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1327       break;
1328     case kPPC_MulPair:
1329       // i.InputRegister(0) ... left low word.
1330       // i.InputRegister(1) ... left high word.
1331       // i.InputRegister(2) ... right low word.
1332       // i.InputRegister(3) ... right high word.
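      // Double-width product, kept modulo 2^64:
      //   (hl * 2^32 + ll) * (hr * 2^32 + lr)
      //       = ll * lr + 2^32 * (ll * hr + hl * lr)   (mod 2^64)
      // so the two cross products only contribute to the high word.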
1333       __ mullw(i.TempRegister(0), i.InputRegister(0), i.InputRegister(3));
1334       __ mullw(i.TempRegister(1), i.InputRegister(2), i.InputRegister(1));
1335       __ add(i.TempRegister(0), i.TempRegister(0), i.TempRegister(1));
1336       __ mullw(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
1337       __ mulhwu(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(2));
1338       __ add(i.OutputRegister(1), i.OutputRegister(1), i.TempRegister(0));
1339       break;
1340     case kPPC_ShiftLeftPair: {
1341       Register second_output =
1342           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1343       if (instr->InputAt(2)->IsImmediate()) {
1344         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1345                          i.InputRegister(1), i.InputInt32(2));
1346       } else {
1347         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1348                          i.InputRegister(1), kScratchReg, i.InputRegister(2));
1349       }
1350       break;
1351     }
1352     case kPPC_ShiftRightPair: {
1353       Register second_output =
1354           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1355       if (instr->InputAt(2)->IsImmediate()) {
1356         __ ShiftRightPair(i.OutputRegister(0), second_output,
1357                           i.InputRegister(0), i.InputRegister(1),
1358                           i.InputInt32(2));
1359       } else {
1360         __ ShiftRightPair(i.OutputRegister(0), second_output,
1361                           i.InputRegister(0), i.InputRegister(1), kScratchReg,
1362                           i.InputRegister(2));
1363       }
1364       break;
1365     }
1366     case kPPC_ShiftRightAlgPair: {
1367       Register second_output =
1368           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1369       if (instr->InputAt(2)->IsImmediate()) {
1370         __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
1371                              i.InputRegister(0), i.InputRegister(1),
1372                              i.InputInt32(2));
1373       } else {
1374         __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
1375                              i.InputRegister(0), i.InputRegister(1),
1376                              kScratchReg, i.InputRegister(2));
1377       }
1378       break;
1379     }
1380 #endif
1381     case kPPC_RotRight32:
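      // PPC has no rotate-right; rotate right by n == rotate left by 32 - n.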
1382       if (HasRegisterInput(instr, 1)) {
1383         __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
1384         __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
1385                  i.OutputRCBit());
1386       } else {
1387         int sh = i.InputInt32(1);
1388         __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
1389       }
1390       break;
1391 #if V8_TARGET_ARCH_PPC64
1392     case kPPC_RotRight64:
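      // As above: rotate right by n == rotate left by 64 - n.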
1393       if (HasRegisterInput(instr, 1)) {
1394         __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
1395         __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
1396                  i.OutputRCBit());
1397       } else {
1398         int sh = i.InputInt32(1);
1399         __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
1400       }
1401       break;
1402 #endif
1403     case kPPC_Not:
1404       __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
1405       break;
1406     case kPPC_RotLeftAndMask32:
1407       __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1408                 31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
1409       break;
1410 #if V8_TARGET_ARCH_PPC64
1411     case kPPC_RotLeftAndClear64:
1412       __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1413                63 - i.InputInt32(2), i.OutputRCBit());
1414       break;
1415     case kPPC_RotLeftAndClearLeft64:
1416       __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1417                 63 - i.InputInt32(2), i.OutputRCBit());
1418       break;
1419     case kPPC_RotLeftAndClearRight64:
1420       __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1421                 63 - i.InputInt32(2), i.OutputRCBit());
1422       break;
1423 #endif
1424     case kPPC_Add32:
1425 #if V8_TARGET_ARCH_PPC64
1426       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
1427         ASSEMBLE_ADD_WITH_OVERFLOW();
1428       } else {
1429 #endif
1430         if (HasRegisterInput(instr, 1)) {
1431           __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1432                  LeaveOE, i.OutputRCBit());
1433         } else {
1434           __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1435           DCHECK_EQ(LeaveRC, i.OutputRCBit());
1436         }
1437         __ extsw(i.OutputRegister(), i.OutputRegister());
1438 #if V8_TARGET_ARCH_PPC64
1439       }
1440 #endif
1441       break;
1442 #if V8_TARGET_ARCH_PPC64
1443     case kPPC_Add64:
1444       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
1445         ASSEMBLE_ADD_WITH_OVERFLOW();
1446       } else {
1447         if (HasRegisterInput(instr, 1)) {
1448           __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1449                  LeaveOE, i.OutputRCBit());
1450         } else {
1451           __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1452           DCHECK_EQ(LeaveRC, i.OutputRCBit());
1453         }
1454       }
1455       break;
1456 #endif
1457     case kPPC_AddWithOverflow32:
1458       ASSEMBLE_ADD_WITH_OVERFLOW32();
1459       break;
1460     case kPPC_AddDouble:
1461       ASSEMBLE_FLOAT_BINOP_RC(fadd, MiscField::decode(instr->opcode()));
1462       break;
1463     case kPPC_Sub:
1464 #if V8_TARGET_ARCH_PPC64
1465       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
1466         ASSEMBLE_SUB_WITH_OVERFLOW();
1467       } else {
1468 #endif
1469         if (HasRegisterInput(instr, 1)) {
1470           __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1471                  LeaveOE, i.OutputRCBit());
1472         } else {
1473           if (is_int16(i.InputImmediate(1).immediate())) {
1474             __ subi(i.OutputRegister(), i.InputRegister(0),
1475                     i.InputImmediate(1));
1476             DCHECK_EQ(LeaveRC, i.OutputRCBit());
1477           } else {
1478             __ mov(kScratchReg, i.InputImmediate(1));
1479             __ sub(i.OutputRegister(), i.InputRegister(0), kScratchReg, LeaveOE,
1480                    i.OutputRCBit());
1481           }
1482         }
1483 #if V8_TARGET_ARCH_PPC64
1484       }
1485 #endif
1486       break;
1487     case kPPC_SubWithOverflow32:
1488       ASSEMBLE_SUB_WITH_OVERFLOW32();
1489       break;
1490     case kPPC_SubDouble:
1491       ASSEMBLE_FLOAT_BINOP_RC(fsub, MiscField::decode(instr->opcode()));
1492       break;
1493     case kPPC_Mul32:
1494       __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1495                LeaveOE, i.OutputRCBit());
1496       break;
1497 #if V8_TARGET_ARCH_PPC64
1498     case kPPC_Mul64:
1499       __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1500                LeaveOE, i.OutputRCBit());
1501       break;
1502 #endif
1503 
1504     case kPPC_Mul32WithHigh32:
1505       if (i.OutputRegister(0) == i.InputRegister(0) ||
1506           i.OutputRegister(0) == i.InputRegister(1) ||
1507           i.OutputRegister(1) == i.InputRegister(0) ||
1508           i.OutputRegister(1) == i.InputRegister(1)) {
1509         __ mullw(kScratchReg, i.InputRegister(0), i.InputRegister(1));  // low
1510         __ mulhw(i.OutputRegister(1), i.InputRegister(0),
1511                  i.InputRegister(1));  // high
1512         __ mr(i.OutputRegister(0), kScratchReg);
1513       } else {
1514         __ mullw(i.OutputRegister(0), i.InputRegister(0),
1515                  i.InputRegister(1));  // low
1516         __ mulhw(i.OutputRegister(1), i.InputRegister(0),
1517                  i.InputRegister(1));  // high
1518       }
1519       break;
1520     case kPPC_MulHigh32:
1521       __ mulhw(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
1522       // High 32 bits are undefined and need to be cleared.
1523       __ clrldi(i.OutputRegister(), r0, Operand(32));
1524       break;
1525     case kPPC_MulHighU32:
1526       __ mulhwu(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
1527       // High 32 bits are undefined and need to be cleared.
1528       __ clrldi(i.OutputRegister(), r0, Operand(32));
1529       break;
1530     case kPPC_MulDouble:
1531       ASSEMBLE_FLOAT_BINOP_RC(fmul, MiscField::decode(instr->opcode()));
1532       break;
1533     case kPPC_Div32:
1534       __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1535       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1536       break;
1537 #if V8_TARGET_ARCH_PPC64
1538     case kPPC_Div64:
1539       __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1540       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1541       break;
1542 #endif
1543     case kPPC_DivU32:
1544       __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1545       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1546       break;
1547 #if V8_TARGET_ARCH_PPC64
1548     case kPPC_DivU64:
1549       __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1550       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1551       break;
1552 #endif
1553     case kPPC_DivDouble:
1554       ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
1555       break;
1556     case kPPC_Mod32:
1557       if (CpuFeatures::IsSupported(MODULO)) {
1558         __ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1559       } else {
1560         ASSEMBLE_MODULO(divw, mullw);
1561       }
1562       break;
1563 #if V8_TARGET_ARCH_PPC64
1564     case kPPC_Mod64:
1565       if (CpuFeatures::IsSupported(MODULO)) {
1566         __ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1567       } else {
1568         ASSEMBLE_MODULO(divd, mulld);
1569       }
1570       break;
1571 #endif
1572     case kPPC_ModU32:
1573       if (CpuFeatures::IsSupported(MODULO)) {
1574         __ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1575       } else {
1576         ASSEMBLE_MODULO(divwu, mullw);
1577       }
1578       break;
1579 #if V8_TARGET_ARCH_PPC64
1580     case kPPC_ModU64:
1581       if (CpuFeatures::IsSupported(MODULO)) {
1582         __ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1583       } else {
1584         ASSEMBLE_MODULO(divdu, mulld);
1585       }
1586       break;
1587 #endif
1588     case kPPC_ModDouble:
1589       // TODO(bmeurer): We should really get rid of this special instruction,
1590       // and generate a CallAddress instruction instead.
1591       ASSEMBLE_FLOAT_MODULO();
1592       break;
1593     case kIeee754Float64Acos:
1594       ASSEMBLE_IEEE754_UNOP(acos);
1595       break;
1596     case kIeee754Float64Acosh:
1597       ASSEMBLE_IEEE754_UNOP(acosh);
1598       break;
1599     case kIeee754Float64Asin:
1600       ASSEMBLE_IEEE754_UNOP(asin);
1601       break;
1602     case kIeee754Float64Asinh:
1603       ASSEMBLE_IEEE754_UNOP(asinh);
1604       break;
1605     case kIeee754Float64Atan:
1606       ASSEMBLE_IEEE754_UNOP(atan);
1607       break;
1608     case kIeee754Float64Atan2:
1609       ASSEMBLE_IEEE754_BINOP(atan2);
1610       break;
1611     case kIeee754Float64Atanh:
1612       ASSEMBLE_IEEE754_UNOP(atanh);
1613       break;
1614     case kIeee754Float64Tan:
1615       ASSEMBLE_IEEE754_UNOP(tan);
1616       break;
1617     case kIeee754Float64Tanh:
1618       ASSEMBLE_IEEE754_UNOP(tanh);
1619       break;
1620     case kIeee754Float64Cbrt:
1621       ASSEMBLE_IEEE754_UNOP(cbrt);
1622       break;
1623     case kIeee754Float64Sin:
1624       ASSEMBLE_IEEE754_UNOP(sin);
1625       break;
1626     case kIeee754Float64Sinh:
1627       ASSEMBLE_IEEE754_UNOP(sinh);
1628       break;
1629     case kIeee754Float64Cos:
1630       ASSEMBLE_IEEE754_UNOP(cos);
1631       break;
1632     case kIeee754Float64Cosh:
1633       ASSEMBLE_IEEE754_UNOP(cosh);
1634       break;
1635     case kIeee754Float64Exp:
1636       ASSEMBLE_IEEE754_UNOP(exp);
1637       break;
1638     case kIeee754Float64Expm1:
1639       ASSEMBLE_IEEE754_UNOP(expm1);
1640       break;
1641     case kIeee754Float64Log:
1642       ASSEMBLE_IEEE754_UNOP(log);
1643       break;
1644     case kIeee754Float64Log1p:
1645       ASSEMBLE_IEEE754_UNOP(log1p);
1646       break;
1647     case kIeee754Float64Log2:
1648       ASSEMBLE_IEEE754_UNOP(log2);
1649       break;
1650     case kIeee754Float64Log10:
1651       ASSEMBLE_IEEE754_UNOP(log10);
1652       break;
1653     case kIeee754Float64Pow:
1654       ASSEMBLE_IEEE754_BINOP(pow);
1655       break;
1656     case kPPC_Neg:
1657       __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
1658       break;
1659     case kPPC_MaxDouble:
1660       ASSEMBLE_FLOAT_MAX();
1661       break;
1662     case kPPC_MinDouble:
1663       ASSEMBLE_FLOAT_MIN();
1664       break;
1665     case kPPC_AbsDouble:
1666       ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
1667       break;
1668     case kPPC_SqrtDouble:
1669       ASSEMBLE_FLOAT_UNOP_RC(fsqrt, MiscField::decode(instr->opcode()));
1670       break;
1671     case kPPC_FloorDouble:
1672       ASSEMBLE_FLOAT_UNOP_RC(frim, MiscField::decode(instr->opcode()));
1673       break;
1674     case kPPC_CeilDouble:
1675       ASSEMBLE_FLOAT_UNOP_RC(frip, MiscField::decode(instr->opcode()));
1676       break;
1677     case kPPC_TruncateDouble:
1678       ASSEMBLE_FLOAT_UNOP_RC(friz, MiscField::decode(instr->opcode()));
1679       break;
1680     case kPPC_RoundDouble:
1681       ASSEMBLE_FLOAT_UNOP_RC(frin, MiscField::decode(instr->opcode()));
1682       break;
1683     case kPPC_NegDouble:
1684       ASSEMBLE_FLOAT_UNOP_RC(fneg, 0);
1685       break;
1686     case kPPC_Cntlz32:
1687       __ cntlzw(i.OutputRegister(), i.InputRegister(0));
1688       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1689       break;
1690 #if V8_TARGET_ARCH_PPC64
1691     case kPPC_Cntlz64:
1692       __ cntlzd(i.OutputRegister(), i.InputRegister(0));
1693       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1694       break;
1695 #endif
1696     case kPPC_Popcnt32:
1697       __ popcntw(i.OutputRegister(), i.InputRegister(0));
1698       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1699       break;
1700 #if V8_TARGET_ARCH_PPC64
1701     case kPPC_Popcnt64:
1702       __ popcntd(i.OutputRegister(), i.InputRegister(0));
1703       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1704       break;
1705 #endif
1706     case kPPC_Cmp32:
1707       ASSEMBLE_COMPARE(cmpw, cmplw);
1708       break;
1709 #if V8_TARGET_ARCH_PPC64
1710     case kPPC_Cmp64:
1711       ASSEMBLE_COMPARE(cmp, cmpl);
1712       break;
1713 #endif
1714     case kPPC_CmpDouble:
1715       ASSEMBLE_FLOAT_COMPARE(fcmpu);
1716       break;
1717     case kPPC_Tst32:
1718       if (HasRegisterInput(instr, 1)) {
1719         __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
1720       } else {
1721         __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
1722       }
1723 #if V8_TARGET_ARCH_PPC64
1724       __ extsw(r0, r0, i.OutputRCBit());
1725 #endif
1726       DCHECK_EQ(SetRC, i.OutputRCBit());
1727       break;
1728 #if V8_TARGET_ARCH_PPC64
1729     case kPPC_Tst64:
1730       if (HasRegisterInput(instr, 1)) {
1731         __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
1732       } else {
1733         __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
1734       }
1735       DCHECK_EQ(SetRC, i.OutputRCBit());
1736       break;
1737 #endif
1738     case kPPC_Float64SilenceNaN: {
1739       DoubleRegister value = i.InputDoubleRegister(0);
1740       DoubleRegister result = i.OutputDoubleRegister();
1741       __ CanonicalizeNaN(result, value);
1742       break;
1743     }
1744     case kPPC_Push:
1745       if (instr->InputAt(0)->IsFPRegister()) {
1746         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1747         switch (op->representation()) {
1748           case MachineRepresentation::kFloat32:
1749             __ StoreSingleU(i.InputDoubleRegister(0),
1750                             MemOperand(sp, -kSystemPointerSize), r0);
1751             frame_access_state()->IncreaseSPDelta(1);
1752             break;
1753           case MachineRepresentation::kFloat64:
1754             __ StoreDoubleU(i.InputDoubleRegister(0),
1755                             MemOperand(sp, -kDoubleSize), r0);
1756             frame_access_state()->IncreaseSPDelta(kDoubleSize /
1757                                                   kSystemPointerSize);
1758             break;
1759           case MachineRepresentation::kSimd128: {
1760             __ addi(sp, sp, Operand(-kSimd128Size));
1761             __ StoreSimd128(i.InputDoubleRegister(0), MemOperand(r0, sp), r0,
1762                             kScratchDoubleReg);
1763             frame_access_state()->IncreaseSPDelta(kSimd128Size /
1764                                                   kSystemPointerSize);
1765             break;
1766           }
1767           default:
1768             UNREACHABLE();
1769             break;
1770         }
1771       } else {
1772         __ StorePU(i.InputRegister(0), MemOperand(sp, -kSystemPointerSize), r0);
1773         frame_access_state()->IncreaseSPDelta(1);
1774       }
1775       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1776       break;
1777     case kPPC_PushFrame: {
1778       int num_slots = i.InputInt32(1);
1779       if (instr->InputAt(0)->IsFPRegister()) {
1780         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1781         if (op->representation() == MachineRepresentation::kFloat64) {
1782           __ StoreDoubleU(i.InputDoubleRegister(0),
1783                           MemOperand(sp, -num_slots * kSystemPointerSize), r0);
1784         } else {
1785           DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
1786           __ StoreSingleU(i.InputDoubleRegister(0),
1787                           MemOperand(sp, -num_slots * kSystemPointerSize), r0);
1788         }
1789       } else {
1790         __ StorePU(i.InputRegister(0),
1791                    MemOperand(sp, -num_slots * kSystemPointerSize), r0);
1792       }
1793       break;
1794     }
1795     case kPPC_StoreToStackSlot: {
1796       int slot = i.InputInt32(1);
1797       if (instr->InputAt(0)->IsFPRegister()) {
1798         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1799         if (op->representation() == MachineRepresentation::kFloat64) {
1800           __ StoreDouble(i.InputDoubleRegister(0),
1801                          MemOperand(sp, slot * kSystemPointerSize), r0);
1802         } else if (op->representation() == MachineRepresentation::kFloat32) {
1803           __ StoreSingle(i.InputDoubleRegister(0),
1804                          MemOperand(sp, slot * kSystemPointerSize), r0);
1805         } else {
1806           DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
1807           __ mov(ip, Operand(slot * kSystemPointerSize));
1808           __ StoreSimd128(i.InputDoubleRegister(0), MemOperand(ip, sp), r0,
1809                           kScratchDoubleReg);
1810         }
1811       } else {
1812         __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kSystemPointerSize),
1813                   r0);
1814       }
1815       break;
1816     }
1817     case kPPC_ExtendSignWord8:
1818       __ extsb(i.OutputRegister(), i.InputRegister(0));
1819       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1820       break;
1821     case kPPC_ExtendSignWord16:
1822       __ extsh(i.OutputRegister(), i.InputRegister(0));
1823       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1824       break;
1825 #if V8_TARGET_ARCH_PPC64
1826     case kPPC_ExtendSignWord32:
1827       __ extsw(i.OutputRegister(), i.InputRegister(0));
1828       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1829       break;
1830     case kPPC_Uint32ToUint64:
      // Zero-extend the low 32 bits.
1832       __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
1833       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1834       break;
1835     case kPPC_Int64ToInt32:
1836       __ extsw(i.OutputRegister(), i.InputRegister(0));
1837       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1838       break;
1839     case kPPC_Int64ToFloat32:
1840       __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
1841       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1842       break;
1843     case kPPC_Int64ToDouble:
1844       __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
1845       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1846       break;
1847     case kPPC_Uint64ToFloat32:
1848       __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
1849                                      i.OutputDoubleRegister());
1850       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1851       break;
1852     case kPPC_Uint64ToDouble:
1853       __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
1854                                       i.OutputDoubleRegister());
1855       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1856       break;
1857 #endif
1858     case kPPC_Int32ToFloat32:
1859       __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
1860       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1861       break;
1862     case kPPC_Int32ToDouble:
1863       __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
1864       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1865       break;
1866     case kPPC_Uint32ToFloat32:
1867       __ ConvertUnsignedIntToFloat(i.InputRegister(0),
1868                                    i.OutputDoubleRegister());
1869       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1870       break;
1871     case kPPC_Uint32ToDouble:
1872       __ ConvertUnsignedIntToDouble(i.InputRegister(0),
1873                                     i.OutputDoubleRegister());
1874       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1875       break;
1876     case kPPC_Float32ToInt32: {
1877       bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
1878       if (set_overflow_to_min_i32) {
1879         __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
1880       }
1881       __ fctiwz(kScratchDoubleReg, i.InputDoubleRegister(0));
1882       __ MovDoubleLowToInt(i.OutputRegister(), kScratchDoubleReg);
1883       if (set_overflow_to_min_i32) {
1884         // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
1885         // because INT32_MIN allows easier out-of-bounds detection.
1886         CRegister cr = cr7;
1887         int crbit = v8::internal::Assembler::encode_crbit(
1888             cr, static_cast<CRBit>(VXCVI % CRWIDTH));
1889         __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
1890         __ li(kScratchReg, Operand(1));
1891         __ sldi(kScratchReg, kScratchReg, Operand(31));  // generate INT32_MIN.
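        // If VXCVI is set, the conversion was invalid; select INT32_MIN.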
1892         __ isel(i.OutputRegister(0), kScratchReg, i.OutputRegister(0), crbit);
1893       }
1894       break;
1895     }
1896     case kPPC_Float32ToUint32: {
1897       bool set_overflow_to_min_u32 = MiscField::decode(instr->opcode());
1898       if (set_overflow_to_min_u32) {
1899         __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
1900       }
1901       __ fctiwuz(kScratchDoubleReg, i.InputDoubleRegister(0));
1902       __ MovDoubleLowToInt(i.OutputRegister(), kScratchDoubleReg);
1903       if (set_overflow_to_min_u32) {
1904         // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
1905         // because 0 allows easier out-of-bounds detection.
1906         CRegister cr = cr7;
1907         int crbit = v8::internal::Assembler::encode_crbit(
1908             cr, static_cast<CRBit>(VXCVI % CRWIDTH));
1909         __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
1910         __ li(kScratchReg, Operand::Zero());
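        // If VXCVI is set, the conversion was invalid; select zero.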
1911         __ isel(i.OutputRegister(0), kScratchReg, i.OutputRegister(0), crbit);
1912       }
1913       break;
1914     }
1915     case kPPC_DoubleToInt32:
1916     case kPPC_DoubleToUint32:
1917     case kPPC_DoubleToInt64: {
1918 #if V8_TARGET_ARCH_PPC64
1919       bool check_conversion =
1920           (opcode == kPPC_DoubleToInt64 && i.OutputCount() > 1);
1921       if (check_conversion) {
1922         __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
1923       }
1924 #endif
1925       __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
1926 #if !V8_TARGET_ARCH_PPC64
1927                               kScratchReg,
1928 #endif
1929                               i.OutputRegister(0), kScratchDoubleReg);
1930 #if V8_TARGET_ARCH_PPC64
      CRegister cr = cr7;
      int crbit = v8::internal::Assembler::encode_crbit(
          cr, static_cast<CRBit>(VXCVI % CRWIDTH));
      __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
      // Handle conversion failures (such as overflow).
      if (CpuFeatures::IsSupported(ISELECT)) {
        if (check_conversion) {
          __ li(i.OutputRegister(1), Operand(1));
          __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
        } else {
          __ isel(i.OutputRegister(0), r0, i.OutputRegister(0), crbit);
        }
      } else {
        if (check_conversion) {
          __ li(i.OutputRegister(1), Operand::Zero());
          __ bc(v8::internal::kInstrSize * 2, BT, crbit);
          __ li(i.OutputRegister(1), Operand(1));
        } else {
          __ mr(ip, i.OutputRegister(0));
          __ li(i.OutputRegister(0), Operand::Zero());
          __ bc(v8::internal::kInstrSize * 2, BT, crbit);
          __ mr(i.OutputRegister(0), ip);
        }
      }
1955 #endif
1956       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1957       break;
1958     }
1959 #if V8_TARGET_ARCH_PPC64
1960     case kPPC_DoubleToUint64: {
1961       bool check_conversion = (i.OutputCount() > 1);
1962       if (check_conversion) {
1963         __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
1964       }
1965       __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
1966                                       i.OutputRegister(0), kScratchDoubleReg);
1967       if (check_conversion) {
1968         // Set 2nd output to zero if conversion fails.
1969         CRegister cr = cr7;
1970         int crbit = v8::internal::Assembler::encode_crbit(
1971             cr, static_cast<CRBit>(VXCVI % CRWIDTH));
1972         __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
1973         if (CpuFeatures::IsSupported(ISELECT)) {
1974           __ li(i.OutputRegister(1), Operand(1));
1975           __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
1976         } else {
1977           __ li(i.OutputRegister(1), Operand::Zero());
1978           __ bc(v8::internal::kInstrSize * 2, BT, crbit);
1979           __ li(i.OutputRegister(1), Operand(1));
1980         }
1981       }
1982       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1983       break;
1984     }
1985 #endif
1986     case kPPC_DoubleToFloat32:
1987       ASSEMBLE_FLOAT_UNOP_RC(frsp, 0);
1988       break;
1989     case kPPC_Float32ToDouble:
      // Nothing to convert: single-precision values are already held in
      // double format in FP registers, so a register move suffices.
1991       __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1992       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1993       break;
1994     case kPPC_DoubleExtractLowWord32:
1995       __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
1996       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1997       break;
1998     case kPPC_DoubleExtractHighWord32:
1999       __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
2000       DCHECK_EQ(LeaveRC, i.OutputRCBit());
2001       break;
2002     case kPPC_DoubleInsertLowWord32:
2003       __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
2004       DCHECK_EQ(LeaveRC, i.OutputRCBit());
2005       break;
2006     case kPPC_DoubleInsertHighWord32:
2007       __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
2008       DCHECK_EQ(LeaveRC, i.OutputRCBit());
2009       break;
2010     case kPPC_DoubleConstruct:
2011 #if V8_TARGET_ARCH_PPC64
2012       __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
2013                                     i.InputRegister(0), i.InputRegister(1), r0);
2014 #else
2015       __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
2016                           i.InputRegister(1));
2017 #endif
2018       DCHECK_EQ(LeaveRC, i.OutputRCBit());
2019       break;
2020     case kPPC_BitcastFloat32ToInt32:
2021       __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
2022       break;
2023     case kPPC_BitcastInt32ToFloat32:
2024       __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2025       break;
2026 #if V8_TARGET_ARCH_PPC64
2027     case kPPC_BitcastDoubleToInt64:
2028       __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
2029       break;
2030     case kPPC_BitcastInt64ToDouble:
2031       __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2032       break;
2033 #endif
2034     case kPPC_LoadWordU8:
2035       ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
2036       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2037       break;
2038     case kPPC_LoadWordS8:
2039       ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
2040       __ extsb(i.OutputRegister(), i.OutputRegister());
2041       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2042       break;
2043     case kPPC_LoadWordU16:
2044       ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
2045       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2046       break;
2047     case kPPC_LoadWordS16:
2048       ASSEMBLE_LOAD_INTEGER(lha, lhax);
2049       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2050       break;
2051     case kPPC_LoadWordU32:
2052       ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
2053       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2054       break;
2055     case kPPC_LoadWordS32:
2056       ASSEMBLE_LOAD_INTEGER(lwa, lwax);
2057       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2058       break;
2059 #if V8_TARGET_ARCH_PPC64
2060     case kPPC_LoadWord64:
2061       ASSEMBLE_LOAD_INTEGER(ld, ldx);
2062       EmitWordLoadPoisoningIfNeeded(this, instr, i);
2063       break;
2064 #endif
2065     case kPPC_LoadFloat32:
2066       ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
2067       break;
2068     case kPPC_LoadDouble:
2069       ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
2070       break;
2071     case kPPC_LoadSimd128: {
2072       Simd128Register result = i.OutputSimd128Register();
2073       AddressingMode mode = kMode_None;
2074       MemOperand operand = i.MemoryOperand(&mode);
2075       bool is_atomic = i.InputInt32(2);
2076       // lvx only supports MRR.
2077       DCHECK_EQ(mode, kMode_MRR);
2078       __ LoadSimd128(result, operand, r0, kScratchDoubleReg);
2079       if (is_atomic) __ lwsync();
2080       DCHECK_EQ(LeaveRC, i.OutputRCBit());
2081       break;
2082     }
2083     case kPPC_StoreWord8:
2084       ASSEMBLE_STORE_INTEGER(stb, stbx);
2085       break;
2086     case kPPC_StoreWord16:
2087       ASSEMBLE_STORE_INTEGER(sth, sthx);
2088       break;
2089     case kPPC_StoreWord32:
2090       ASSEMBLE_STORE_INTEGER(stw, stwx);
2091       break;
2092 #if V8_TARGET_ARCH_PPC64
2093     case kPPC_StoreWord64:
2094       ASSEMBLE_STORE_INTEGER(std, stdx);
2095       break;
2096 #endif
2097     case kPPC_StoreFloat32:
2098       ASSEMBLE_STORE_FLOAT(stfs, stfsx);
2099       break;
2100     case kPPC_StoreDouble:
2101       ASSEMBLE_STORE_FLOAT(stfd, stfdx);
2102       break;
2103     case kPPC_StoreSimd128: {
2104       size_t index = 0;
2105       AddressingMode mode = kMode_None;
2106       MemOperand operand = i.MemoryOperand(&mode, &index);
2107       Simd128Register value = i.InputSimd128Register(index);
2108       bool is_atomic = i.InputInt32(3);
2109       if (is_atomic) __ lwsync();
2110       // stvx only supports MRR.
2111       DCHECK_EQ(mode, kMode_MRR);
2112       __ StoreSimd128(value, operand, r0, kScratchDoubleReg);
2113       if (is_atomic) __ sync();
2114       DCHECK_EQ(LeaveRC, i.OutputRCBit());
2115       break;
2116     }
2117     case kWord32AtomicLoadInt8:
2118     case kPPC_AtomicLoadUint8:
2119     case kWord32AtomicLoadInt16:
2120     case kPPC_AtomicLoadUint16:
2121     case kPPC_AtomicLoadWord32:
2122     case kPPC_AtomicLoadWord64:
2123     case kPPC_AtomicStoreUint8:
2124     case kPPC_AtomicStoreUint16:
2125     case kPPC_AtomicStoreWord32:
2126     case kPPC_AtomicStoreWord64:
2127       UNREACHABLE();
2128     case kWord32AtomicExchangeInt8:
2129       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
2130       __ extsb(i.OutputRegister(0), i.OutputRegister(0));
2131       break;
2132     case kPPC_AtomicExchangeUint8:
2133       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
2134       break;
2135     case kWord32AtomicExchangeInt16:
2136       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
2137       __ extsh(i.OutputRegister(0), i.OutputRegister(0));
2138       break;
2139     case kPPC_AtomicExchangeUint16:
2140       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
2141       break;
2142     case kPPC_AtomicExchangeWord32:
2143       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
2144       break;
2145     case kPPC_AtomicExchangeWord64:
2146       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
2147       break;
2148     case kWord32AtomicCompareExchangeInt8:
2149       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lbarx, stbcx, extsb);
2150       break;
2151     case kPPC_AtomicCompareExchangeUint8:
2152       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lbarx, stbcx, ZeroExtByte);
2153       break;
2154     case kWord32AtomicCompareExchangeInt16:
2155       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lharx, sthcx, extsh);
2156       break;
2157     case kPPC_AtomicCompareExchangeUint16:
2158       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lharx, sthcx, ZeroExtHalfWord);
2159       break;
2160     case kPPC_AtomicCompareExchangeWord32:
2161       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx, ZeroExtWord32);
2162       break;
2163     case kPPC_AtomicCompareExchangeWord64:
2164       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, ldarx, stdcx, mr);
2165       break;
2166 
2167 #define ATOMIC_BINOP_CASE(op, inst)                            \
2168   case kPPC_Atomic##op##Int8:                                  \
2169     ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
2170     break;                                                     \
2171   case kPPC_Atomic##op##Uint8:                                 \
2172     ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx);                 \
2173     break;                                                     \
2174   case kPPC_Atomic##op##Int16:                                 \
2175     ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
2176     break;                                                     \
2177   case kPPC_Atomic##op##Uint16:                                \
2178     ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx);                 \
2179     break;                                                     \
2180   case kPPC_Atomic##op##Int32:                                 \
2181     ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lwarx, stwcx, extsw); \
2182     break;                                                     \
2183   case kPPC_Atomic##op##Uint32:                                \
2184     ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx);                 \
2185     break;                                                     \
2186   case kPPC_Atomic##op##Int64:                                 \
2187   case kPPC_Atomic##op##Uint64:                                \
2188     ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx);                 \
2189     break;
2190       ATOMIC_BINOP_CASE(Add, add)
2191       ATOMIC_BINOP_CASE(Sub, sub)
2192       ATOMIC_BINOP_CASE(And, and_)
2193       ATOMIC_BINOP_CASE(Or, orx)
2194       ATOMIC_BINOP_CASE(Xor, xor_)
2195 #undef ATOMIC_BINOP_CASE
2196 
2197     case kPPC_ByteRev32: {
2198       Register input = i.InputRegister(0);
2199       Register output = i.OutputRegister();
2200       Register temp1 = r0;
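      // With input bytes [b0 b1 b2 b3], rotlwi by 8 gives [b1 b2 b3 b0]; the
      // two rlwimi insertions with shift 24 then patch bytes 0 and 2 with b3
      // and b1, yielding the reversed [b3 b2 b1 b0] before sign extension.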
2201       __ rotlwi(temp1, input, 8);
2202       __ rlwimi(temp1, input, 24, 0, 7);
2203       __ rlwimi(temp1, input, 24, 16, 23);
2204       __ extsw(output, temp1);
2205       break;
2206     }
2207 #ifdef V8_TARGET_ARCH_PPC64
2208     case kPPC_ByteRev64: {
2209       Register input = i.InputRegister(0);
2210       Register output = i.OutputRegister();
2211       Register temp1 = r0;
2212       Register temp2 = kScratchReg;
2213       Register temp3 = i.TempRegister(0);
2214       __ rldicl(temp1, input, 32, 32);
2215       __ rotlwi(temp2, input, 8);
2216       __ rlwimi(temp2, input, 24, 0, 7);
2217       __ rotlwi(temp3, temp1, 8);
2218       __ rlwimi(temp2, input, 24, 16, 23);
2219       __ rlwimi(temp3, temp1, 24, 0, 7);
2220       __ rlwimi(temp3, temp1, 24, 16, 23);
2221       __ rldicr(temp2, temp2, 32, 31);
2222       __ orx(output, temp2, temp3);
2223       break;
2224     }
2225 #endif  // V8_TARGET_ARCH_PPC64
2226     case kPPC_F64x2Splat: {
2227       Simd128Register dst = i.OutputSimd128Register();
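      // Splat by storing the scalar into both halves of an aligned stack
      // slot and reloading it as one vector with lvx.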
2228       __ MovDoubleToInt64(ip, i.InputDoubleRegister(0));
      // Need to maintain 16-byte alignment for lvx.
2230       __ mr(kScratchReg, sp);
2231       __ ClearRightImm(
2232           sp, sp,
2233           Operand(base::bits::WhichPowerOfTwo(16)));  // equivalent to &= -16
2234       __ addi(sp, sp, Operand(-16));
2235       __ StoreP(ip, MemOperand(sp, 0));
2236       __ StoreP(ip, MemOperand(sp, 8));
2237       __ lvx(dst, MemOperand(r0, sp));
2238       __ mr(sp, kScratchReg);
2239       break;
2240     }
2241     case kPPC_F32x4Splat: {
2242       Simd128Register dst = i.OutputSimd128Register();
2243       __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0));
2244       __ mtvsrd(dst, kScratchReg);
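      // mtvsrd leaves the 32-bit payload in word lane 1, so splat that lane.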
2245       __ vspltw(dst, dst, Operand(1));
2246       break;
2247     }
2248     case kPPC_I64x2Splat: {
2249       Register src = i.InputRegister(0);
2250       Simd128Register dst = i.OutputSimd128Register();
      // Need to maintain 16-byte alignment for lvx.
2252       __ mr(kScratchReg, sp);
2253       __ ClearRightImm(
2254           sp, sp,
2255           Operand(base::bits::WhichPowerOfTwo(16)));  // equivalent to &= -16
2256       __ addi(sp, sp, Operand(-16));
2257       __ StoreP(src, MemOperand(sp, 0));
2258       __ StoreP(src, MemOperand(sp, 8));
2259       __ lvx(dst, MemOperand(r0, sp));
2260       __ mr(sp, kScratchReg);
2261       break;
2262     }
2263     case kPPC_I32x4Splat: {
2264       Simd128Register dst = i.OutputSimd128Register();
2265       __ mtvsrd(dst, i.InputRegister(0));
2266       __ vspltw(dst, dst, Operand(1));
2267       break;
2268     }
2269     case kPPC_I16x8Splat: {
2270       Simd128Register dst = i.OutputSimd128Register();
2271       __ mtvsrd(dst, i.InputRegister(0));
2272       __ vsplth(dst, dst, Operand(3));
2273       break;
2274     }
2275     case kPPC_I8x16Splat: {
2276       Simd128Register dst = i.OutputSimd128Register();
2277       __ mtvsrd(dst, i.InputRegister(0));
2278       __ vspltb(dst, dst, Operand(7));
2279       break;
2280     }
2281     case kPPC_F64x2ExtractLane: {
2282       constexpr int lane_width_in_bytes = 8;
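      // The (1 - lane) flip maps the wasm lane index to the byte offset used
      // by vextract*, which counts lanes from the opposite end of the
      // register; the other extract/replace cases below do the same.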
2283       __ vextractd(kScratchDoubleReg, i.InputSimd128Register(0),
2284                    Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
2285       __ mfvsrd(kScratchReg, kScratchDoubleReg);
2286       __ MovInt64ToDouble(i.OutputDoubleRegister(), kScratchReg);
2287       break;
2288     }
2289     case kPPC_F32x4ExtractLane: {
2290       constexpr int lane_width_in_bytes = 4;
2291       __ vextractuw(kScratchDoubleReg, i.InputSimd128Register(0),
2292                     Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
2293       __ mfvsrd(kScratchReg, kScratchDoubleReg);
2294       __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
2295       break;
2296     }
2297     case kPPC_I64x2ExtractLane: {
2298       constexpr int lane_width_in_bytes = 8;
2299       __ vextractd(kScratchDoubleReg, i.InputSimd128Register(0),
2300                    Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
2301       __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
2302       break;
2303     }
2304     case kPPC_I32x4ExtractLane: {
2305       constexpr int lane_width_in_bytes = 4;
2306       __ vextractuw(kScratchDoubleReg, i.InputSimd128Register(0),
2307                     Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
2308       __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
2309       break;
2310     }
2311     case kPPC_I16x8ExtractLaneU: {
2312       constexpr int lane_width_in_bytes = 2;
2313       __ vextractuh(kScratchDoubleReg, i.InputSimd128Register(0),
2314                     Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
2315       __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
2316       break;
2317     }
2318     case kPPC_I16x8ExtractLaneS: {
2319       constexpr int lane_width_in_bytes = 2;
2320       __ vextractuh(kScratchDoubleReg, i.InputSimd128Register(0),
2321                     Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
2322       __ mfvsrd(kScratchReg, kScratchDoubleReg);
2323       __ extsh(i.OutputRegister(), kScratchReg);
2324       break;
2325     }
2326     case kPPC_I8x16ExtractLaneU: {
2327       __ vextractub(kScratchDoubleReg, i.InputSimd128Register(0),
2328                     Operand(15 - i.InputInt8(1)));
2329       __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
2330       break;
2331     }
2332     case kPPC_I8x16ExtractLaneS: {
2333       __ vextractub(kScratchDoubleReg, i.InputSimd128Register(0),
2334                     Operand(15 - i.InputInt8(1)));
2335       __ mfvsrd(kScratchReg, kScratchDoubleReg);
2336       __ extsb(i.OutputRegister(), kScratchReg);
2337       break;
2338     }
2339     case kPPC_F64x2ReplaceLane: {
2340       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
2341       constexpr int lane_width_in_bytes = 8;
2342       Simd128Register dst = i.OutputSimd128Register();
2343       __ MovDoubleToInt64(r0, i.InputDoubleRegister(2));
2344       __ mtvsrd(kScratchDoubleReg, r0);
2345       __ vinsertd(dst, kScratchDoubleReg,
2346                   Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
2347       break;
2348     }
2349     case kPPC_F32x4ReplaceLane: {
2350       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
2351       constexpr int lane_width_in_bytes = 4;
2352       Simd128Register dst = i.OutputSimd128Register();
2353       __ MovFloatToInt(r0, i.InputDoubleRegister(2));
2354       __ mtvsrd(kScratchDoubleReg, r0);
2355       __ vinsertw(dst, kScratchDoubleReg,
2356                   Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
2357       break;
2358     }
2359     case kPPC_I64x2ReplaceLane: {
2360       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
2361       constexpr int lane_width_in_bytes = 8;
2362       Simd128Register dst = i.OutputSimd128Register();
2363       __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
2364       __ vinsertd(dst, kScratchDoubleReg,
2365                   Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
2366       break;
2367     }
2368     case kPPC_I32x4ReplaceLane: {
2369       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
2370       constexpr int lane_width_in_bytes = 4;
2371       Simd128Register dst = i.OutputSimd128Register();
2372       __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
2373       __ vinsertw(dst, kScratchDoubleReg,
2374                   Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
2375       break;
2376     }
2377     case kPPC_I16x8ReplaceLane: {
2378       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
2379       constexpr int lane_width_in_bytes = 2;
2380       Simd128Register dst = i.OutputSimd128Register();
2381       __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
2382       __ vinserth(dst, kScratchDoubleReg,
2383                   Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
2384       break;
2385     }
2386     case kPPC_I8x16ReplaceLane: {
2387       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
2388       Simd128Register dst = i.OutputSimd128Register();
2389       __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
2390       __ vinsertb(dst, kScratchDoubleReg, Operand(15 - i.InputInt8(1)));
2391       break;
2392     }
2393     case kPPC_F64x2Add: {
2394       __ xvadddp(i.OutputSimd128Register(), i.InputSimd128Register(0),
2395                  i.InputSimd128Register(1));
2396       break;
2397     }
2398     case kPPC_F64x2Sub: {
2399       __ xvsubdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
2400                  i.InputSimd128Register(1));
2401       break;
2402     }
2403     case kPPC_F64x2Mul: {
2404       __ xvmuldp(i.OutputSimd128Register(), i.InputSimd128Register(0),
2405                  i.InputSimd128Register(1));
2406       break;
2407     }
2408     case kPPC_F32x4Add: {
2409       __ vaddfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
2410                 i.InputSimd128Register(1));
2411       break;
2412     }
2413     case kPPC_F32x4AddHoriz: {
2414       Simd128Register src0 = i.InputSimd128Register(0);
2415       Simd128Register src1 = i.InputSimd128Register(1);
2416       Simd128Register dst = i.OutputSimd128Register();
2417       Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
2418       Simd128Register tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
2419       constexpr int shift_bits = 32;
      // First operand: pack the low word of each doubleword lane (vpkudum).
2421       __ vpkudum(dst, src1, src0);
      // Second operand: shift each source right by 32 bits, then pack the
      // high words the same way.
2423       __ li(ip, Operand(shift_bits));
2424       __ mtvsrd(tempFPReg2, ip);
2425       __ vspltb(tempFPReg2, tempFPReg2, Operand(7));
2426       __ vsro(tempFPReg1, src0, tempFPReg2);
2427       __ vsro(tempFPReg2, src1, tempFPReg2);
2428       __ vpkudum(kScratchDoubleReg, tempFPReg2, tempFPReg1);
      // Add the two packed vectors, summing adjacent float lanes.
2430       __ vaddfp(dst, kScratchDoubleReg, dst);
2431       break;
2432     }
2433     case kPPC_F32x4Sub: {
2434       __ vsubfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
2435                 i.InputSimd128Register(1));
2436       break;
2437     }
2438     case kPPC_F32x4Mul: {
2439       __ xvmulsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
2440                  i.InputSimd128Register(1));
2441       break;
2442     }
2443     case kPPC_I64x2Add: {
2444       __ vaddudm(i.OutputSimd128Register(), i.InputSimd128Register(0),
2445                  i.InputSimd128Register(1));
2446       break;
2447     }
2448     case kPPC_I64x2Sub: {
2449       __ vsubudm(i.OutputSimd128Register(), i.InputSimd128Register(0),
2450                  i.InputSimd128Register(1));
2451       break;
2452     }
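    // i64x2 multiply: assuming no vector doubleword multiply is available at
    // the targeted ISA level, both operands are spilled to a 16-byte-aligned
    // stack slot, the two 64-bit products are computed with scalar mulld, and
    // the result vector is reloaded.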
    case kPPC_I64x2Mul: {
      // Need to maintain 16-byte alignment for stvx and lvx.
      __ mr(kScratchReg, sp);
      __ ClearRightImm(
          sp, sp,
          Operand(base::bits::WhichPowerOfTwo(16)));  // equivalent to &= -16
      __ addi(sp, sp, Operand(-32));
      __ stvx(i.InputSimd128Register(0), MemOperand(r0, sp));
      __ li(ip, Operand(16));
      __ stvx(i.InputSimd128Register(1), MemOperand(ip, sp));
      // Offsets are in bytes: each doubleword is kDoubleSize (8) bytes.
      for (int lane = 0; lane < 2; lane++) {
        __ LoadP(r0, MemOperand(sp, kDoubleSize * lane));
        __ LoadP(ip, MemOperand(sp, (kDoubleSize * lane) + kSimd128Size));
        __ mulld(r0, r0, ip);
        __ StoreP(r0, MemOperand(sp, lane * kDoubleSize));
      }
      __ lvx(i.OutputSimd128Register(), MemOperand(r0, sp));
      __ mr(sp, kScratchReg);
      break;
    }
    case kPPC_I32x4Add: {
      __ vadduwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
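    // Pairwise i32 add: vsum2sws folds each adjacent pair of words into one
    // word per doubleword (the zeroed scratch register is the accumulator;
    // note the hardware sums saturate as signed), and vpkudum packs the two
    // partial results into the final vector.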
    case kPPC_I32x4AddHoriz: {
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register dst = i.OutputSimd128Register();
      __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vsum2sws(dst, src0, kScratchDoubleReg);
      __ vsum2sws(kScratchDoubleReg, src1, kScratchDoubleReg);
      __ vpkudum(dst, kScratchDoubleReg, dst);
      break;
    }
    case kPPC_I32x4Sub: {
      __ vsubuwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I32x4Mul: {
      __ vmuluwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8Add: {
      __ vadduhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
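    // Pairwise i16 add: vsum4shs sums the halfwords within each word element
    // (again with a zeroed accumulator and signed saturation), and vpkuwus
    // packs the word-sized partial sums back down to halfwords.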
    case kPPC_I16x8AddHoriz: {
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register dst = i.OutputSimd128Register();
      __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vsum4shs(dst, src0, kScratchDoubleReg);
      __ vsum4shs(kScratchDoubleReg, src1, kScratchDoubleReg);
      __ vpkuwus(dst, kScratchDoubleReg, dst);
      break;
    }
    case kPPC_I16x8Sub: {
      __ vsubuhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8Mul: {
      __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vmladduhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1), kScratchDoubleReg);
      break;
    }
    case kPPC_I8x16Add: {
      __ vaddubm(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16Sub: {
      __ vsububm(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
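    // i8x16 multiply: there is no byte-wise modulo multiply, so
    // vmuleub/vmuloub form 16-bit products of alternating byte lanes and
    // vpkuhum keeps only the low-order byte of each product, i.e.
    // multiplication modulo 256.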
    case kPPC_I8x16Mul: {
      __ vmuleub(kScratchDoubleReg, i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      __ vmuloub(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      __ vpkuhum(i.OutputSimd128Register(), kScratchDoubleReg,
                 i.OutputSimd128Register());
      break;
    }
    case kPPC_I64x2MinS: {
      __ vminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I32x4MinS: {
      __ vminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I64x2MinU: {
      __ vminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I32x4MinU: {
      __ vminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8MinS: {
      __ vminsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8MinU: {
      __ vminuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16MinS: {
      __ vminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16MinU: {
      __ vminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I64x2MaxS: {
      __ vmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I32x4MaxS: {
      __ vmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I64x2MaxU: {
      __ vmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I32x4MaxU: {
      __ vmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8MaxS: {
      __ vmaxsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8MaxU: {
      __ vmaxuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16MaxS: {
      __ vmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16MaxU: {
      __ vmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_F64x2Eq: {
      __ xvcmpeqdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      break;
    }
    case kPPC_F64x2Ne: {
      __ xvcmpeqdp(kScratchDoubleReg, i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_F64x2Le: {
      __ xvcmpgedp(i.OutputSimd128Register(), i.InputSimd128Register(1),
                   i.InputSimd128Register(0));
      break;
    }
    case kPPC_F64x2Lt: {
      __ xvcmpgtdp(i.OutputSimd128Register(), i.InputSimd128Register(1),
                   i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4Eq: {
      __ xvcmpeqsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      break;
    }
    case kPPC_I64x2Eq: {
      __ vcmpequd(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I32x4Eq: {
      __ vcmpequw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8Eq: {
      __ vcmpequh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16Eq: {
      __ vcmpequb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_F32x4Ne: {
      __ xvcmpeqsp(kScratchDoubleReg, i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_I64x2Ne: {
      __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_I32x4Ne: {
      __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_I16x8Ne: {
      __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_I8x16Ne: {
      __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_F32x4Lt: {
      __ xvcmpgtsp(i.OutputSimd128Register(), i.InputSimd128Register(1),
                   i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4Le: {
      __ xvcmpgesp(i.OutputSimd128Register(), i.InputSimd128Register(1),
                   i.InputSimd128Register(0));
      break;
    }
    case kPPC_I64x2GtS: {
      __ vcmpgtsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I32x4GtS: {
      __ vcmpgtsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I64x2GeS: {
      __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vcmpgtsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
             kScratchDoubleReg);
      break;
    }
    case kPPC_I32x4GeS: {
      __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vcmpgtsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
             kScratchDoubleReg);
      break;
    }
    case kPPC_I64x2GtU: {
      __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I32x4GtU: {
      __ vcmpgtuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I64x2GeU: {
      __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
             kScratchDoubleReg);
      break;
    }
    case kPPC_I32x4GeU: {
      __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vcmpgtuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
             kScratchDoubleReg);
      break;
    }
    case kPPC_I16x8GtS: {
      __ vcmpgtsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8GeS: {
      __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vcmpgtsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
             kScratchDoubleReg);
      break;
    }
    case kPPC_I16x8GtU: {
      __ vcmpgtuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8GeU: {
      __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vcmpgtuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
             kScratchDoubleReg);
      break;
    }
    case kPPC_I8x16GtS: {
      __ vcmpgtsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16GeS: {
      __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vcmpgtsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
             kScratchDoubleReg);
      break;
    }
    case kPPC_I8x16GtU: {
      __ vcmpgtub(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16GeU: {
      __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vcmpgtub(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
             kScratchDoubleReg);
      break;
    }
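    // For the vector shifts below: the wasm shift count arrives in a GPR, so
    // mtvsrd/vspltb broadcast its low byte to every lane (the VMX shift
    // instructions take a per-lane shift amount). The hardware uses only the
    // low log2(lane width) bits of each count, which matches wasm's
    // shift-count-mod-lane-width semantics.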
#define VECTOR_SHIFT(op)                                         \
  {                                                              \
    __ mtvsrd(kScratchDoubleReg, i.InputRegister(1));            \
    __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
    __ op(i.OutputSimd128Register(), i.InputSimd128Register(0),  \
          kScratchDoubleReg);                                    \
  }
    case kPPC_I64x2Shl: {
      VECTOR_SHIFT(vsld)
      break;
    }
    case kPPC_I64x2ShrS: {
      VECTOR_SHIFT(vsrad)
      break;
    }
    case kPPC_I64x2ShrU: {
      VECTOR_SHIFT(vsrd)
      break;
    }
    case kPPC_I32x4Shl: {
      VECTOR_SHIFT(vslw)
      break;
    }
    case kPPC_I32x4ShrS: {
      VECTOR_SHIFT(vsraw)
      break;
    }
    case kPPC_I32x4ShrU: {
      VECTOR_SHIFT(vsrw)
      break;
    }
    case kPPC_I16x8Shl: {
      VECTOR_SHIFT(vslh)
      break;
    }
    case kPPC_I16x8ShrS: {
      VECTOR_SHIFT(vsrah)
      break;
    }
    case kPPC_I16x8ShrU: {
      VECTOR_SHIFT(vsrh)
      break;
    }
    case kPPC_I8x16Shl: {
      VECTOR_SHIFT(vslb)
      break;
    }
    case kPPC_I8x16ShrS: {
      VECTOR_SHIFT(vsrab)
      break;
    }
    case kPPC_I8x16ShrU: {
      VECTOR_SHIFT(vsrb)
      break;
    }
#undef VECTOR_SHIFT
    case kPPC_S128And: {
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src = i.InputSimd128Register(1);
      __ vand(dst, i.InputSimd128Register(0), src);
      break;
    }
    case kPPC_S128Or: {
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src = i.InputSimd128Register(1);
      __ vor(dst, i.InputSimd128Register(0), src);
      break;
    }
    case kPPC_S128Xor: {
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src = i.InputSimd128Register(1);
      __ vxor(dst, i.InputSimd128Register(0), src);
      break;
    }
    case kPPC_S128Zero: {
      Simd128Register dst = i.OutputSimd128Register();
      __ vxor(dst, dst, dst);
      break;
    }
    case kPPC_S128Not: {
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src = i.InputSimd128Register(0);
      __ vnor(dst, src, src);
      break;
    }
    case kPPC_S128Select: {
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register mask = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register src2 = i.InputSimd128Register(2);
      __ vsel(dst, src2, src1, mask);
      break;
    }
    case kPPC_F64x2Abs: {
      __ xvabsdp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F64x2Neg: {
      __ xvnegdp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F64x2Sqrt: {
      __ xvsqrtdp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4Abs: {
      __ xvabssp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4Neg: {
      __ xvnegsp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4RecipApprox: {
      __ xvresp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4RecipSqrtApprox: {
      __ xvrsqrtesp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4Sqrt: {
      __ xvsqrtsp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
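    // i64x2 negation: rather than splatting a doubleword constant directly,
    // a vector of 64-bit 1s is staged through a 16-byte-aligned stack slot;
    // -x is then computed as the usual two's complement ~x + 1
    // (vnor + vaddudm).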
    case kPPC_I64x2Neg: {
      Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
      __ li(ip, Operand(1));
      // Need to maintain 16 byte alignment for lvx.
      __ mr(kScratchReg, sp);
      __ ClearRightImm(
          sp, sp,
          Operand(base::bits::WhichPowerOfTwo(16)));  // equivalent to &= -16
      __ addi(sp, sp, Operand(-16));
      __ StoreP(ip, MemOperand(sp, 0));
      __ StoreP(ip, MemOperand(sp, 8));
      __ lvx(kScratchDoubleReg, MemOperand(r0, sp));
      __ mr(sp, kScratchReg);
      // Perform negation.
      __ vnor(tempFPReg1, i.InputSimd128Register(0), i.InputSimd128Register(0));
      __ vaddudm(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg);
      break;
    }
    case kPPC_I32x4Neg: {
      Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
      __ li(ip, Operand(1));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vspltw(kScratchDoubleReg, kScratchDoubleReg, Operand(1));
      __ vnor(tempFPReg1, i.InputSimd128Register(0), i.InputSimd128Register(0));
      __ vadduwm(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1);
      break;
    }
    case kPPC_I32x4Abs: {
      Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
      Simd128Register src = i.InputSimd128Register(0);
      constexpr int shift_bits = 31;
      __ li(ip, Operand(shift_bits));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
      __ vsraw(kScratchDoubleReg, src, kScratchDoubleReg);
      __ vxor(tempFPReg1, src, kScratchDoubleReg);
      __ vsubuwm(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg);
      break;
    }
    case kPPC_I16x8Neg: {
      Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
      __ li(ip, Operand(1));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vsplth(kScratchDoubleReg, kScratchDoubleReg, Operand(3));
      __ vnor(tempFPReg1, i.InputSimd128Register(0), i.InputSimd128Register(0));
      __ vadduhm(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1);
      break;
    }
    case kPPC_I16x8Abs: {
      Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
      Simd128Register src = i.InputSimd128Register(0);
      constexpr int shift_bits = 15;
      __ li(ip, Operand(shift_bits));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
      __ vsrah(kScratchDoubleReg, src, kScratchDoubleReg);
      __ vxor(tempFPReg1, src, kScratchDoubleReg);
      __ vsubuhm(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg);
      break;
    }
    case kPPC_I8x16Neg: {
      Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
      __ li(ip, Operand(1));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
      __ vnor(tempFPReg1, i.InputSimd128Register(0), i.InputSimd128Register(0));
      __ vaddubm(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1);
      break;
    }
    case kPPC_I8x16Abs: {
      Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
      Simd128Register src = i.InputSimd128Register(0);
      constexpr int shift_bits = 7;
      __ li(ip, Operand(shift_bits));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
      __ vsrab(kScratchDoubleReg, src, kScratchDoubleReg);
      __ vxor(tempFPReg1, src, kScratchDoubleReg);
      __ vsububm(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg);
      break;
    }
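    // AnyTrue: vcmpequd with SetRC compares the whole register against zero
    // and records the result in CR6; CR bit 24 (the "all lanes equal" bit) is
    // set only when all 128 bits are zero, so isel materializes 0 in that
    // case and -1 (any nonzero i32 is a valid true value here) otherwise.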
    case kPPC_V64x2AnyTrue:
    case kPPC_V32x4AnyTrue:
    case kPPC_V16x8AnyTrue:
    case kPPC_V8x16AnyTrue: {
      Simd128Register src = i.InputSimd128Register(0);
      Register dst = i.OutputRegister();
      constexpr int bit_number = 24;
      __ li(r0, Operand(0));
      __ li(ip, Operand(-1));
      // Check if all 128 bits are zero; if so, return false.
      __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vcmpequd(kScratchDoubleReg, src, kScratchDoubleReg, SetRC);
      __ isel(dst, r0, ip, bit_number);
      break;
    }
#define SIMD_ALL_TRUE(opcode)                                       \
  Simd128Register src = i.InputSimd128Register(0);                  \
  Register dst = i.OutputRegister();                                \
  constexpr int bit_number = 24;                                    \
  __ li(r0, Operand(0));                                            \
  __ li(ip, Operand(-1));                                           \
  /* Check if all lanes > 0; if not, return false. */               \
  __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); \
  __ opcode(kScratchDoubleReg, src, kScratchDoubleReg, SetRC);      \
  __ isel(dst, ip, r0, bit_number);
    case kPPC_V64x2AllTrue: {
      SIMD_ALL_TRUE(vcmpgtud)
      break;
    }
    case kPPC_V32x4AllTrue: {
      SIMD_ALL_TRUE(vcmpgtuw)
      break;
    }
    case kPPC_V16x8AllTrue: {
      SIMD_ALL_TRUE(vcmpgtuh)
      break;
    }
    case kPPC_V8x16AllTrue: {
      SIMD_ALL_TRUE(vcmpgtub)
      break;
    }
#undef SIMD_ALL_TRUE
    case kPPC_I32x4SConvertF32x4: {
      Simd128Register src = i.InputSimd128Register(0);
      // NaN to 0
      __ vor(kScratchDoubleReg, src, src);
      __ xvcmpeqsp(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vand(kScratchDoubleReg, src, kScratchDoubleReg);
      __ xvcvspsxws(i.OutputSimd128Register(), kScratchDoubleReg);
      break;
    }
    case kPPC_I32x4UConvertF32x4: {
      __ xvcvspuxws(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4SConvertI32x4: {
      __ xvcvsxwsp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4UConvertI32x4: {
      __ xvcvuxwsp(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_I32x4SConvertI16x8Low: {
      __ vupklsh(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_I32x4SConvertI16x8High: {
      __ vupkhsh(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_I32x4UConvertI16x8Low: {
      __ vupklsh(i.OutputSimd128Register(), i.InputSimd128Register(0));
      // Zero extend.
      __ mov(ip, Operand(0xFFFF));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vspltw(kScratchDoubleReg, kScratchDoubleReg, Operand(1));
      __ vand(i.OutputSimd128Register(), kScratchDoubleReg,
              i.OutputSimd128Register());
      break;
    }
    case kPPC_I32x4UConvertI16x8High: {
      __ vupkhsh(i.OutputSimd128Register(), i.InputSimd128Register(0));
      // Zero extend.
      __ mov(ip, Operand(0xFFFF));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vspltw(kScratchDoubleReg, kScratchDoubleReg, Operand(1));
      __ vand(i.OutputSimd128Register(), kScratchDoubleReg,
              i.OutputSimd128Register());
      break;
    }
    case kPPC_I16x8SConvertI8x16Low: {
      __ vupklsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_I16x8SConvertI8x16High: {
      __ vupkhsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_I16x8UConvertI8x16Low: {
      __ vupklsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
      // Zero extend.
      __ li(ip, Operand(0xFF));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vsplth(kScratchDoubleReg, kScratchDoubleReg, Operand(3));
      __ vand(i.OutputSimd128Register(), kScratchDoubleReg,
              i.OutputSimd128Register());
      break;
    }
    case kPPC_I16x8UConvertI8x16High: {
      __ vupkhsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
      // Zero extend.
      __ li(ip, Operand(0xFF));
      __ mtvsrd(kScratchDoubleReg, ip);
      __ vsplth(kScratchDoubleReg, kScratchDoubleReg, Operand(3));
      __ vand(i.OutputSimd128Register(), kScratchDoubleReg,
              i.OutputSimd128Register());
      break;
    }
    case kPPC_I16x8SConvertI32x4: {
      __ vpkswss(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8UConvertI32x4: {
      __ vpkswus(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16SConvertI16x8: {
      __ vpkshss(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16UConvertI16x8: {
      __ vpkshus(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
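    // i8x16 shuffle: the 16-byte pattern arrives as four 32-bit immediates
    // (instruction inputs 2 through 5). They are combined into two 64-bit
    // halves, staged on a 16-byte-aligned stack slot so lvx can load them,
    // and applied with vperm.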
    case kPPC_I8x16Shuffle: {
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      __ mov(r0, Operand(make_uint64(i.InputUint32(3), i.InputUint32(2))));
      __ mov(ip, Operand(make_uint64(i.InputUint32(5), i.InputUint32(4))));
      // Need to maintain 16 byte alignment for lvx.
      __ mr(kScratchReg, sp);
      __ ClearRightImm(
          sp, sp,
          Operand(base::bits::WhichPowerOfTwo(16)));  // equivalent to &= -16
      __ addi(sp, sp, Operand(-16));
      __ StoreP(r0, MemOperand(sp, 0));
      __ StoreP(ip, MemOperand(sp, 8));
      __ lvx(kScratchDoubleReg, MemOperand(r0, sp));
      __ mr(sp, kScratchReg);
      __ vperm(dst, src0, src1, kScratchDoubleReg);
      break;
    }
    case kPPC_I16x8AddSatS: {
      __ vaddshs(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8SubSatS: {
      __ vsubshs(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8AddSatU: {
      __ vadduhs(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I16x8SubSatU: {
      __ vsubuhs(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16AddSatS: {
      __ vaddsbs(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16SubSatS: {
      __ vsubsbs(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16AddSatU: {
      __ vaddubs(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16SubSatU: {
      __ vsububs(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
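    // i8x16 swizzle: indices above 31 are clamped to 31 by vminub, and vperm
    // selects from the pair {src0, zero vector}, so any index in 16..31 reads
    // the zeroed second operand and yields 0 as wasm requires. The
    // ldbrx/stdx round-trip through the stack appears to byte-reverse src0 so
    // that vperm's byte numbering lines up with wasm's lane numbering; note
    // that r0 as the base of an indexed MemOperand encodes as a literal zero,
    // so those accesses address sp directly.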
    case kPPC_I8x16Swizzle: {
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1),
                      tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)),
                      tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
      // Saturate the indices to 5 bits. Input indices more than 31 should
      // return 0.
      __ xxspltib(tempFPReg2, Operand(31));
      __ vminub(tempFPReg2, src1, tempFPReg2);
      __ addi(sp, sp, Operand(-16));
      __ stxvd(src0, MemOperand(r0, sp));
      __ ldbrx(r0, MemOperand(r0, sp));
      __ li(ip, Operand(8));
      __ ldbrx(ip, MemOperand(ip, sp));
      __ stdx(ip, MemOperand(r0, sp));
      __ li(ip, Operand(8));
      __ stdx(r0, MemOperand(ip, sp));
      __ lxvd(kScratchDoubleReg, MemOperand(r0, sp));
      __ addi(sp, sp, Operand(16));
      __ vxor(tempFPReg1, tempFPReg1, tempFPReg1);
      __ vperm(dst, kScratchDoubleReg, tempFPReg1, tempFPReg2);
      break;
    }
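    // Qfma/Qfms: the m-form VSX fused multiply-add instructions overwrite
    // their multiplicand register, so one source is staged in the scratch
    // register and the fused result is copied out to dst afterwards.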
    case kPPC_F64x2Qfma: {
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register src2 = i.InputSimd128Register(2);
      Simd128Register dst = i.OutputSimd128Register();
      __ vor(kScratchDoubleReg, src1, src1);
      __ xvmaddmdp(kScratchDoubleReg, src2, src0);
      __ vor(dst, kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_F64x2Qfms: {
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register src2 = i.InputSimd128Register(2);
      Simd128Register dst = i.OutputSimd128Register();
      __ vor(kScratchDoubleReg, src1, src1);
      __ xvnmsubmdp(kScratchDoubleReg, src2, src0);
      __ vor(dst, kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_F32x4Qfma: {
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register src2 = i.InputSimd128Register(2);
      Simd128Register dst = i.OutputSimd128Register();
      __ vor(kScratchDoubleReg, src1, src1);
      __ xvmaddmsp(kScratchDoubleReg, src2, src0);
      __ vor(dst, kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_F32x4Qfms: {
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register src2 = i.InputSimd128Register(2);
      Simd128Register dst = i.OutputSimd128Register();
      __ vor(kScratchDoubleReg, src1, src1);
      __ xvnmsubmsp(kScratchDoubleReg, src2, src0);
      __ vor(dst, kScratchDoubleReg, kScratchDoubleReg);
      break;
    }
    case kPPC_I16x8RoundingAverageU: {
      __ vavguh(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_I8x16RoundingAverageU: {
      __ vavgub(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_S128AndNot: {
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src = i.InputSimd128Register(0);
      __ vandc(dst, src, i.InputSimd128Register(1));
      break;
    }
    case kPPC_F64x2Div: {
      __ xvdivdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
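    // xvmindp/xvmaxdp alone do not implement wasm's NaN propagation, so the
    // macro below tests each input for NaN via self-comparison (xvcmpeqdp is
    // false only for NaN lanes) and uses vsel to force any NaN input through
    // to the result.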
#define F64X2_MIN_MAX_NAN(result)                                       \
  Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));   \
  __ xvcmpeqdp(tempFPReg1, i.InputSimd128Register(0),                   \
               i.InputSimd128Register(0));                              \
  __ vsel(result, i.InputSimd128Register(0), result, tempFPReg1);       \
  __ xvcmpeqdp(tempFPReg1, i.InputSimd128Register(1),                   \
               i.InputSimd128Register(1));                              \
  __ vsel(i.OutputSimd128Register(), i.InputSimd128Register(1), result, \
          tempFPReg1);
    case kPPC_F64x2Min: {
      __ xvmindp(kScratchDoubleReg, i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      // We need to check if an input is NaN and preserve it.
      F64X2_MIN_MAX_NAN(kScratchDoubleReg)
      break;
    }
    case kPPC_F64x2Max: {
      __ xvmaxdp(kScratchDoubleReg, i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      // We need to check if an input is NaN and preserve it.
      F64X2_MIN_MAX_NAN(kScratchDoubleReg)
      break;
    }
#undef F64X2_MIN_MAX_NAN
    case kPPC_F32x4Div: {
      __ xvdivsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kPPC_F32x4Min: {
      __ vminfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_F32x4Max: {
      __ vmaxfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kPPC_F64x2Ceil: {
      __ xvrdpip(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F64x2Floor: {
      __ xvrdpim(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F64x2Trunc: {
      __ xvrdpiz(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F64x2NearestInt: {
      __ xvrdpi(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4Ceil: {
      __ xvrspip(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4Floor: {
      __ xvrspim(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4Trunc: {
      __ xvrspiz(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kPPC_F32x4NearestInt: {
      __ xvrspi(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
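    // The BitMask cases below use vbpermq, which gathers source bits selected
    // by each byte of the control vector (byte values >= 0x80 contribute a
    // zero bit). The constants address the per-lane sign bits, and
    // vextractub/vextractuh then pull the accumulated mask out of the vector
    // for mfvsrd.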
    case kPPC_I32x4BitMask: {
      __ mov(kScratchReg,
             Operand(0x8080808000204060));  // Select 0 for the high bits.
      __ mtvsrd(kScratchDoubleReg, kScratchReg);
      __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
                 kScratchDoubleReg);
      __ vextractub(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
      __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
      break;
    }
    case kPPC_I16x8BitMask: {
      __ mov(kScratchReg, Operand(0x10203040506070));
      __ mtvsrd(kScratchDoubleReg, kScratchReg);
      __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
                 kScratchDoubleReg);
      __ vextractub(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
      __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
      break;
    }
    case kPPC_I8x16BitMask: {
      Register temp = i.ToRegister(instr->TempAt(0));
      __ mov(temp, Operand(0x8101820283038));
      __ mov(ip, Operand(0x4048505860687078));
      __ mtvsrdd(kScratchDoubleReg, temp, ip);
      __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
                 kScratchDoubleReg);
      __ vextractuh(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
      __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
      break;
    }
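    // i32x4.dot_i16x8_s maps directly onto vmsumshm: multiply corresponding
    // signed halfwords and accumulate adjacent pairs of products into words,
    // with the zeroed scratch register as the accumulator.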
    case kPPC_I32x4DotI16x8S: {
      __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vmsumshm(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1), kScratchDoubleReg);
      break;
    }
    case kPPC_StoreCompressTagged: {
      ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
      break;
    }
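    // With pointer compression, tagged fields are stored as 32-bit values: a
    // compressed pointer is rebuilt by a 32-bit load plus adding the isolate
    // root (kRootRegister), while a Smi needs only the load.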
    case kPPC_LoadDecompressTaggedSigned: {
      CHECK(instr->HasOutput());
      ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
      break;
    }
    case kPPC_LoadDecompressTaggedPointer: {
      CHECK(instr->HasOutput());
      ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
      __ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
      break;
    }
    case kPPC_LoadDecompressAnyTagged: {
      CHECK(instr->HasOutput());
      ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
      __ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
      break;
    }
    default:
      UNREACHABLE();
  }
  return kSuccess;
}  // NOLINT(readability/fn_size)

// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  PPCOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  ArchOpcode op = instr->arch_opcode();
  FlagsCondition condition = branch->condition;
  CRegister cr = cr0;

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // check for unordered if necessary
    if (cond == le) {
      __ bunordered(flabel, cr);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel, cr);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}

void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
                                            Instruction* instr) {
  // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
      condition == kOverflow || condition == kNotOverflow) {
    return;
  }

  ArchOpcode op = instr->arch_opcode();
  condition = NegateFlagsCondition(condition);
  __ li(kScratchReg, Operand::Zero());
  __ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister,
          kScratchReg, kSpeculationPoisonRegister, cr0);
}

void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
                                            BranchInfo* branch) {
  AssembleArchBranch(instr, branch);
}

void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}

void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}

    void Generate() final {
      PPCOperandConverter i(gen_, instr_);
      TrapId trap_id =
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    }

   private:
    void GenerateCallToTrap(TrapId trap_id) {
      if (trap_id == TrapId::kInvalid) {
        // We cannot test calls to the runtime in cctest/test-run-wasm.
        // Therefore we emit a call to C here instead of a call to the runtime.
        // We use the context register as the scratch register, because we do
        // not have a context here.
        __ PrepareCallCFunction(0, 0, cp);
        __ CallCFunction(
            ExternalReference::wasm_call_trap_callback_for_testing(), 0);
        __ LeaveFrame(StackFrame::WASM);
        auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
        int pop_count =
            static_cast<int>(call_descriptor->StackParameterCount());
        __ Drop(pop_count);
        __ Ret();
      } else {
        gen_->AssembleSourcePosition(instr_);
        // A direct call to a wasm runtime stub defined in this module.
        // Just encode the stub index. This will be patched when the code
        // is added to the native module and copied into wasm code space.
        __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
        ReferenceMap* reference_map =
            gen_->zone()->New<ReferenceMap>(gen_->zone());
        gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
        if (FLAG_debug_code) {
          __ stop();
        }
      }
    }

    Instruction* instr_;
    CodeGenerator* gen_;
  };
  auto ool = zone()->New<OutOfLineTrap>(this, instr);
  Label* tlabel = ool->entry();
  Label end;

  ArchOpcode op = instr->arch_opcode();
  CRegister cr = cr0;
  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // check for unordered if necessary
    if (cond == le) {
      __ bunordered(&end, cr);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel, cr);
  __ bind(&end);
}

// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  PPCOperandConverter i(this, instr);
  Label done;
  ArchOpcode op = instr->arch_opcode();
  CRegister cr = cr0;
  int reg_value = -1;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // check for unordered if necessary
    if (cond == le) {
      reg_value = 0;
      __ li(reg, Operand::Zero());
      __ bunordered(&done, cr);
    } else if (cond == gt) {
      reg_value = 1;
      __ li(reg, Operand(1));
      __ bunordered(&done, cr);
    }
    // Unnecessary for eq/lt & ne/ge since only FU bit will be set.
  }

  if (CpuFeatures::IsSupported(ISELECT)) {
    switch (cond) {
      case eq:
      case lt:
      case gt:
        if (reg_value != 1) __ li(reg, Operand(1));
        __ li(kScratchReg, Operand::Zero());
        __ isel(cond, reg, reg, kScratchReg, cr);
        break;
      case ne:
      case ge:
      case le:
        if (reg_value != 1) __ li(reg, Operand(1));
        // r0 implies logical zero in this form
        __ isel(NegateCondition(cond), reg, r0, reg, cr);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    if (reg_value != 0) __ li(reg, Operand::Zero());
    __ b(NegateCondition(cond), &done, cr);
    __ li(reg, Operand(1));
  }
  __ bind(&done);
}

void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
  }
  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
                                      cases.data() + cases.size());
}

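// Assembles a table switch: bounds-check the input against the case count
// (branching to the default target when out of range), then load the target
// label address from the embedded jump table and jump to it.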
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ Cmpli(input, Operand(case_count), r0);
  __ bge(GetLabel(i.InputRpo(1)));
  __ mov_label_addr(kScratchReg, table);
  __ ShiftLeftImm(r0, input, Operand(kSystemPointerSizeLog2));
  __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
  __ Jump(kScratchReg);
}

void CodeGenerator::FinishFrame(Frame* frame) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    frame->AlignSavedCalleeRegisterSlots();
    DCHECK_EQ(kNumCalleeSavedDoubles,
              base::bits::CountPopulation(double_saves));
    frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
                                            (kDoubleSize / kSystemPointerSize));
  }
  // Save callee-saved registers.
  const RegList saves = FLAG_enable_embedded_constant_pool
                            ? call_descriptor->CalleeSavedRegisters() &
                                  ~kConstantPoolRegister.bit()
                            : call_descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // register save area does not include the fp or constant pool pointer.
    const int num_saves =
        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
    DCHECK(num_saves == base::bits::CountPopulation(saves));
    frame->AllocateSavedCalleeRegisterSlots(num_saves);
  }
}

void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (call_descriptor->IsCFunctionCall()) {
      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
        __ StubPrologue(StackFrame::C_WASM_ENTRY);
        // Reserve stack space for saving the c_entry_fp later.
        __ addi(sp, sp, Operand(-kSystemPointerSize));
      } else {
        __ mflr(r0);
        if (FLAG_enable_embedded_constant_pool) {
          __ Push(r0, fp, kConstantPoolRegister);
          // Adjust FP to point to saved FP.
          __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
        } else {
          __ Push(r0, fp);
          __ mr(fp, sp);
        }
      }
    } else if (call_descriptor->IsJSFunctionCall()) {
      __ Prologue();
    } else {
      StackFrame::Type type = info()->GetOutputStackFrameType();
      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
      // efficient initialization of the constant pool pointer register).
      __ StubPrologue(type);
      if (call_descriptor->IsWasmFunctionCall()) {
        __ Push(kWasmInstanceRegister);
      } else if (call_descriptor->IsWasmImportWrapper() ||
                 call_descriptor->IsWasmCapiFunction()) {
        // Wasm import wrappers are passed a tuple in the place of the instance.
        // Unpack the tuple into the instance and the target callable.
        // This must be done here in the codegen because it cannot be expressed
        // properly in the graph.
        __ LoadTaggedPointerField(
            kJSFunctionRegister,
            FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
        __ LoadTaggedPointerField(
            kWasmInstanceRegister,
            FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
        __ Push(kWasmInstanceRegister);
        if (call_descriptor->IsWasmCapiFunction()) {
          // Reserve space for saving the PC later.
          __ addi(sp, sp, Operand(-kSystemPointerSize));
        }
      }
    }
    unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
  }

  int required_slots =
      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    required_slots -= osr_helper()->UnoptimizedFrameSlots();
    ResetSpeculationPoison();
  }

  const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
  const RegList saves = FLAG_enable_embedded_constant_pool
                            ? call_descriptor->CalleeSavedRegisters() &
                                  ~kConstantPoolRegister.bit()
                            : call_descriptor->CalleeSavedRegisters();

  if (required_slots > 0) {
    if (info()->IsWasm() && required_slots > 128) {
      // For WebAssembly functions with big frames we have to do the stack
      // overflow check before we construct the frame. Otherwise we may not
      // have enough space on the stack to call the runtime for the stack
      // overflow.
      Label done;

      // If the frame is bigger than the stack, we throw the stack overflow
      // exception unconditionally. Thereby we can avoid the integer overflow
      // check in the condition code.
      if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
        Register scratch = ip;
        __ LoadP(
            scratch,
            FieldMemOperand(kWasmInstanceRegister,
                            WasmInstanceObject::kRealStackLimitAddressOffset));
        __ LoadP(scratch, MemOperand(scratch), r0);
        __ Add(scratch, scratch, required_slots * kSystemPointerSize, r0);
        __ cmpl(sp, scratch);
        __ bge(&done);
      }

      __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
      // We come from WebAssembly, there are no references for the GC.
      ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
      RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
      if (FLAG_debug_code) {
        __ stop();
      }

      __ bind(&done);
    }

    // Skip callee-saved and return slots, which are pushed below.
    required_slots -= base::bits::CountPopulation(saves);
    required_slots -= frame()->GetReturnSlotCount();
    required_slots -= (kDoubleSize / kSystemPointerSize) *
                      base::bits::CountPopulation(saves_fp);
    __ Add(sp, sp, -required_slots * kSystemPointerSize, r0);
  }

  // Save callee-saved Double registers.
  if (saves_fp != 0) {
    __ MultiPushDoubles(saves_fp);
    DCHECK_EQ(kNumCalleeSavedDoubles, base::bits::CountPopulation(saves_fp));
  }

  // Save callee-saved registers.
  if (saves != 0) {
    __ MultiPush(saves);
    // register save area does not include the fp or constant pool pointer.
  }

  const int returns = frame()->GetReturnSlotCount();
  if (returns != 0) {
    // Create space for returns.
    __ Add(sp, sp, -returns * kSystemPointerSize, r0);
  }
}
3807 
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(call_descriptor->StackParameterCount());

  const int returns = frame()->GetReturnSlotCount();
  if (returns != 0) {
    // Free the space created for returns.
    __ Add(sp, sp, returns * kSystemPointerSize, r0);
  }

  // Restore registers.
  const RegList saves = FLAG_enable_embedded_constant_pool
                            ? call_descriptor->CalleeSavedRegisters() &
                                  ~kConstantPoolRegister.bit()
                            : call_descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore double registers.
  const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    __ MultiPopDoubles(double_saves);
  }
  PPCOperandConverter g(this, nullptr);
  unwinding_info_writer_.MarkBlockWillExit();

  if (call_descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now, unless they have a
    // variable number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ b(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  // The constant pool is unavailable since the frame has been torn down.
  ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
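  // Drop the stack parameters: a constant pop count is folded into pop_count,
  // while a dynamic count held in a register is dropped separately.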
  if (pop->IsImmediate()) {
    DCHECK(Constant::kInt32 == g.ToConstant(pop).type() ||
           Constant::kInt64 == g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    __ Drop(g.ToRegister(pop));
  }
  __ Drop(pop_count);
  __ Ret();
}

void CodeGenerator::FinishCode() {}

void CodeGenerator::PrepareForDeoptimizationExits(
    ZoneDeque<DeoptimizationExit*>* exits) {
  // __ EmitConstantPool();
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src, r0);
    } else {
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
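          // On PPC64 a 32-bit constant presumably never carries Wasm
          // relocation info (Wasm references are 64-bit there), so the
          // relocated path is dead-coded with a constant-false condition.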
#if V8_TARGET_ARCH_PPC64
          if (false) {
#else
          if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ mov(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
          if (RelocInfo::IsWasmReference(src.rmode())) {
            __ mov(dst, Operand(src.ToInt64(), src.rmode()));
          } else {
#endif
            __ mov(dst, Operand(src.ToInt64()));
#if V8_TARGET_ARCH_PPC64
          }
#endif
          break;
        case Constant::kFloat32:
          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
          break;
        case Constant::kFloat64:
          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kDelayedStringConstant:
          __ mov(dst, Operand::EmbeddedStringConstant(
                          src.ToDelayedStringConstant()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          RootIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kCompressedHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          RootIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            // TODO(v8:7703, jyan@ca.ibm.com): Turn into a
            // COMPRESSED_EMBEDDED_OBJECT when the constant pool entry size is
            // tagged size.
            __ Move(dst, src_object, RelocInfo::FULL_EMBEDDED_OBJECT);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on PPC.
          break;
      }
      if (destination->IsStackSlot()) {
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      Double value;
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
      // On ia32/x64 hosts, converting an sNaN between single and double
      // precision quiets it, so build the double bit pattern manually for
      // NaN inputs.
      if (src.type() == Constant::kFloat32) {
        uint32_t val = src.ToFloat32AsInt();
        if ((val & 0x7F800000) == 0x7F800000) {
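          // All exponent bits set: the value is an Inf or a NaN. Rebuild the
          // double-precision bit pattern by hand: copy the sign and quiet
          // bits, replicate the top exponent bit across the wider exponent
          // field, and left-align the mantissa, so a signaling NaN stays
          // signaling.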
          uint64_t dval = static_cast<uint64_t>(val);
          dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
                 ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29);
          value = Double(dval);
        } else {
          value = Double(static_cast<double>(src.ToFloat32()));
        }
      } else {
        value = Double(src.ToFloat64());
      }
#else
      value = src.type() == Constant::kFloat32
                  ? Double(static_cast<double>(src.ToFloat32()))
                  : Double(src.ToFloat64());
#endif
      __ LoadDoubleLiteral(dst, value, kScratchReg);
      if (destination->IsDoubleStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination), r0);
      } else if (destination->IsFloatStackSlot()) {
        __ StoreSingle(dst, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsFPRegister()) {
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kSimd128) {
      if (destination->IsSimd128Register()) {
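        // vor with two identical sources is the idiomatic vector
        // register-to-register move.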
        __ vor(g.ToSimd128Register(destination), g.ToSimd128Register(source),
               g.ToSimd128Register(source));
      } else {
        DCHECK(destination->IsSimd128StackSlot());
        MemOperand dst = g.ToMemOperand(destination);
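        // Simd128 accesses use indexed (register + register) addressing, so
        // materialize the slot offset in ip first.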
        __ mov(ip, Operand(dst.offset()));
        __ StoreSimd128(g.ToSimd128Register(source), MemOperand(dst.ra(), ip),
                        r0, kScratchDoubleReg);
      }
    } else {
      DoubleRegister src = g.ToDoubleRegister(source);
      if (destination->IsFPRegister()) {
        DoubleRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsFPStackSlot());
        LocationOperand* op = LocationOperand::cast(source);
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreDouble(src, g.ToMemOperand(destination), r0);
        } else {
          __ StoreSingle(src, g.ToMemOperand(destination), r0);
        }
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
      } else if (op->representation() == MachineRepresentation::kFloat32) {
        __ LoadSingle(g.ToDoubleRegister(destination), src, r0);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
        __ mov(ip, Operand(src.offset()));
        __ LoadSimd128(g.ToSimd128Register(destination),
                       MemOperand(src.ra(), ip), r0, kScratchDoubleReg);
      }
    } else {
      LocationOperand* op = LocationOperand::cast(source);
      DoubleRegister temp = kScratchDoubleReg;
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(temp, src, r0);
        __ StoreDouble(temp, g.ToMemOperand(destination), r0);
      } else if (op->representation() == MachineRepresentation::kFloat32) {
        __ LoadSingle(temp, src, r0);
        __ StoreSingle(temp, g.ToMemOperand(destination), r0);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
        // Push d0 so it can be used as an extra scratch register.
        __ addi(sp, sp, Operand(-kSimd128Size));
        __ StoreSimd128(d0, MemOperand(r0, sp), r0, kScratchDoubleReg);
        MemOperand dst = g.ToMemOperand(destination);
        __ mov(ip, Operand(src.offset()));
        __ LoadSimd128(d0, MemOperand(src.ra(), ip), r0, kScratchDoubleReg);
        __ mov(ip, Operand(dst.offset()));
        __ StoreSimd128(d0, MemOperand(dst.ra(), ip), r0, kScratchDoubleReg);
        // Restore d0.
        __ LoadSimd128(d0, MemOperand(r0, sp), ip, kScratchDoubleReg);
        __ addi(sp, sp, Operand(kSimd128Size));
      }
    }
  } else {
    UNREACHABLE();
  }
}

// Swapping the contents of source and destination.
// Source and destination may each be a
//   Register,
//   FloatRegister,
//   DoubleRegister,
//   Simd128Register,
//   StackSlot,
//   FloatStackSlot,
//   DoubleStackSlot,
//   or Simd128StackSlot.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
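  // Each case defers to a macro-assembler Swap helper; memory-to-memory
  // swaps additionally use r0 or d0 as a second scratch.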
  if (source->IsRegister()) {
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ SwapP(src, g.ToRegister(destination), kScratchReg);
    } else {
      DCHECK(destination->IsStackSlot());
      __ SwapP(src, g.ToMemOperand(destination), kScratchReg);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    __ SwapP(g.ToMemOperand(source), g.ToMemOperand(destination), kScratchReg,
             r0);
  } else if (source->IsFloatRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFloatRegister()) {
      __ SwapFloat32(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsFloatStackSlot());
      __ SwapFloat32(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ SwapDouble(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ SwapDouble(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsFloatStackSlot()) {
    DCHECK(destination->IsFloatStackSlot());
    __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
                   kScratchDoubleReg, d0);
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
                  kScratchDoubleReg, d0);
  } else if (source->IsSimd128Register()) {
    Simd128Register src = g.ToSimd128Register(source);
    if (destination->IsSimd128Register()) {
      __ SwapSimd128(src, g.ToSimd128Register(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsSimd128StackSlot());
      __ SwapSimd128(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsSimd128StackSlot()) {
    DCHECK(destination->IsSimd128StackSlot());
    __ SwapSimd128(g.ToMemOperand(source), g.ToMemOperand(destination),
                   kScratchDoubleReg);
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
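  // Each jump table entry is the absolute address of its target label,
  // emitted as data into the code stream.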
  for (size_t index = 0; index < target_count; ++index) {
    __ emit_label_addr(targets[index]);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8