1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32
33 // The original source code covered by the above license above has been
34 // modified significantly by Google Inc.
35 // Copyright 2014 the V8 project authors. All rights reserved.
36
37 // A light-weight PPC Assembler
38 // Generates user mode instructions for the PPC architecture.
39
40 #ifndef V8_PPC_ASSEMBLER_PPC_H_
41 #define V8_PPC_ASSEMBLER_PPC_H_
42
43 #include <stdio.h>
44 #include <vector>
45
46 #include "src/assembler.h"
47 #include "src/double.h"
48 #include "src/ppc/constants-ppc.h"
49
50 #if V8_HOST_ARCH_PPC && \
51 (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
52 #define ABI_USES_FUNCTION_DESCRIPTORS 1
53 #else
54 #define ABI_USES_FUNCTION_DESCRIPTORS 0
55 #endif
56
57 #if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
58 #define ABI_PASSES_HANDLES_IN_REGS 1
59 #else
60 #define ABI_PASSES_HANDLES_IN_REGS 0
61 #endif
62
63 #if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
64 #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
65 #else
66 #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
67 #endif
68
69 #if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
70 #define ABI_CALL_VIA_IP 1
71 #else
72 #define ABI_CALL_VIA_IP 0
73 #endif
74
75 #if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
76 #define ABI_TOC_REGISTER 2
77 #else
78 #define ABI_TOC_REGISTER 13
79 #endif
80
81 #define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
82
83 namespace v8 {
84 namespace internal {
85
86 // clang-format off
87 #define GENERAL_REGISTERS(V) \
88 V(r0) V(sp) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
89 V(r8) V(r9) V(r10) V(r11) V(ip) V(r13) V(r14) V(r15) \
90 V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
91 V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
92
93 #if V8_EMBEDDED_CONSTANT_POOL
94 #define ALLOCATABLE_GENERAL_REGISTERS(V) \
95 V(r3) V(r4) V(r5) V(r6) V(r7) \
96 V(r8) V(r9) V(r10) V(r14) V(r15) \
97 V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
98 V(r24) V(r25) V(r26) V(r27) V(r30)
99 #else
100 #define ALLOCATABLE_GENERAL_REGISTERS(V) \
101 V(r3) V(r4) V(r5) V(r6) V(r7) \
102 V(r8) V(r9) V(r10) V(r14) V(r15) \
103 V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
104 V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
105 #endif
106
107 #define LOW_DOUBLE_REGISTERS(V) \
108 V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
109 V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
110
111 #define NON_LOW_DOUBLE_REGISTERS(V) \
112 V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
113 V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
114
115 #define DOUBLE_REGISTERS(V) \
116 LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
117
118 #define FLOAT_REGISTERS DOUBLE_REGISTERS
119 #define SIMD128_REGISTERS DOUBLE_REGISTERS
120
121 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
122 V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
123 V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
124 V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
125 V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
126
// NOTE(review): cr13 and cr14 are absent while cr15 is listed, so the
// kCCode_* enum values are not contiguous past cr12 -- confirm this gap is
// intentional before relying on C register codes mapping 1:1 to CR fields.
// (Comment kept outside the macro: a '//' before a trailing '\' would
// swallow the line continuation.)
127 #define C_REGISTERS(V)                                  \
128   V(cr0)  V(cr1)  V(cr2)  V(cr3)  V(cr4)  V(cr5)  V(cr6)  V(cr7)  \
129   V(cr8)  V(cr9)  V(cr10) V(cr11) V(cr12) V(cr15)
130 // clang-format on
131
132 // Register list in load/store instructions
133 // Note that the bit values must match those used in actual instruction encoding
134 const int kNumRegs = 32;
135
136 // Caller-saved/arguments registers
137 const RegList kJSCallerSaved = 1 << 3 | // r3 a1
138 1 << 4 | // r4 a2
139 1 << 5 | // r5 a3
140 1 << 6 | // r6 a4
141 1 << 7 | // r7 a5
142 1 << 8 | // r8 a6
143 1 << 9 | // r9 a7
144 1 << 10 | // r10 a8
145 1 << 11; // r11
146
147 const int kNumJSCallerSaved = 9;
148
149 // Return the code of the n-th caller-saved register available to JavaScript
150 // e.g. JSCallerSavedCode(0) returns r3.code() == 3, since r3 is the first
// register in kJSCallerSaved above
151 int JSCallerSavedCode(int n);
152
153 // Callee-saved registers preserved when switching from C to JavaScript
154 const RegList kCalleeSaved = 1 << 14 | // r14
155 1 << 15 | // r15
156 1 << 16 | // r16
157 1 << 17 | // r17
158 1 << 18 | // r18
159 1 << 19 | // r19
160 1 << 20 | // r20
161 1 << 21 | // r21
162 1 << 22 | // r22
163 1 << 23 | // r23
164 1 << 24 | // r24
165 1 << 25 | // r25
166 1 << 26 | // r26
167 1 << 27 | // r27
168 1 << 28 | // r28
169 1 << 29 | // r29
170 1 << 30 | // r30
171 1 << 31; // r31
172
173 const int kNumCalleeSaved = 18;
174
175 const RegList kCallerSavedDoubles = 1 << 0 | // d0
176 1 << 1 | // d1
177 1 << 2 | // d2
178 1 << 3 | // d3
179 1 << 4 | // d4
180 1 << 5 | // d5
181 1 << 6 | // d6
182 1 << 7 | // d7
183 1 << 8 | // d8
184 1 << 9 | // d9
185 1 << 10 | // d10
186 1 << 11 | // d11
187 1 << 12 | // d12
188 1 << 13; // d13
189
190 const int kNumCallerSavedDoubles = 14;
191
192 const RegList kCalleeSavedDoubles = 1 << 14 | // d14
193 1 << 15 | // d15
194 1 << 16 | // d16
195 1 << 17 | // d17
196 1 << 18 | // d18
197 1 << 19 | // d19
198 1 << 20 | // d20
199 1 << 21 | // d21
200 1 << 22 | // d22
201 1 << 23 | // d23
202 1 << 24 | // d24
203 1 << 25 | // d25
204 1 << 26 | // d26
205 1 << 27 | // d27
206 1 << 28 | // d28
207 1 << 29 | // d29
208 1 << 30 | // d30
209 1 << 31; // d31
210
211 const int kNumCalleeSavedDoubles = 18;
212
213 // Number of registers for which space is reserved in safepoints. Must be a
214 // multiple of 8.
215 const int kNumSafepointRegisters = 32;
216
217 // The following constants describe the stack frame linkage area as
218 // defined by the ABI. Note that kNumRequiredStackFrameSlots must
219 // satisfy alignment requirements (rounding up if required).
220 #if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
221 // [0] back chain
222 // [1] condition register save area
223 // [2] link register save area
224 // [3] TOC save area
225 // [4] Parameter1 save area
226 // ...
227 // [11] Parameter8 save area
228 // [12] Parameter9 slot (if necessary)
229 // ...
230 const int kNumRequiredStackFrameSlots = 12;
231 const int kStackFrameLRSlot = 2;
232 const int kStackFrameExtraParamSlot = 12;
233 #elif V8_OS_AIX || V8_TARGET_ARCH_PPC64
234 // [0] back chain
235 // [1] condition register save area
236 // [2] link register save area
237 // [3] reserved for compiler
238 // [4] reserved by binder
239 // [5] TOC save area
240 // [6] Parameter1 save area
241 // ...
242 // [13] Parameter8 save area
243 // [14] Parameter9 slot (if necessary)
244 // ...
245 #if V8_TARGET_ARCH_PPC64
246 const int kNumRequiredStackFrameSlots = 14;
247 #else
248 const int kNumRequiredStackFrameSlots = 16;
249 #endif
250 const int kStackFrameLRSlot = 2;
251 const int kStackFrameExtraParamSlot = 14;
252 #else
253 // [0] back chain
254 // [1] link register save area
255 // [2] Parameter9 slot (if necessary)
256 // ...
257 const int kNumRequiredStackFrameSlots = 4;
258 const int kStackFrameLRSlot = 1;
259 const int kStackFrameExtraParamSlot = 2;
260 #endif
261
262 // Define the list of registers actually saved at safepoints.
263 // Note that the number of saved registers may be smaller than the reserved
264 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
265 const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
266 const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
267
268 enum RegisterCode {
269 #define REGISTER_CODE(R) kRegCode_##R,
270 GENERAL_REGISTERS(REGISTER_CODE)
271 #undef REGISTER_CODE
272 kRegAfterLast
273 };
274
275 class Register : public RegisterBase<Register, kRegAfterLast> {
276  public:
  // Byte offsets of the mantissa and exponent words within an in-memory
  // 64-bit double; which word comes first depends on target endianness.
277 #if V8_TARGET_LITTLE_ENDIAN
278   static constexpr int kMantissaOffset = 0;
279   static constexpr int kExponentOffset = 4;
280 #else
281   static constexpr int kMantissaOffset = 4;
282   static constexpr int kExponentOffset = 0;
283 #endif
284
285  private:
286   friend class RegisterBase;
Register(int code)287 explicit constexpr Register(int code) : RegisterBase(code) {}
288 };
289
290 ASSERT_TRIVIALLY_COPYABLE(Register);
291 static_assert(sizeof(Register) == sizeof(int),
292 "Register can efficiently be passed by value");
293
294 #define DEFINE_REGISTER(R) \
295 constexpr Register R = Register::from_code<kRegCode_##R>();
296 GENERAL_REGISTERS(DEFINE_REGISTER)
297 #undef DEFINE_REGISTER
298 constexpr Register no_reg = Register::no_reg();
299
300 // Aliases
301 constexpr Register kLithiumScratch = r11; // lithium scratch.
302 constexpr Register kConstantPoolRegister = r28; // Constant pool.
303 constexpr Register kRootRegister = r29; // Roots array pointer.
304 constexpr Register cp = r30; // JavaScript context pointer.
305
306 constexpr bool kPadArguments = false;
307 constexpr bool kSimpleFPAliasing = true;
308 constexpr bool kSimdMaskRegisters = false;
309
310 enum DoubleRegisterCode {
311 #define REGISTER_CODE(R) kDoubleCode_##R,
312 DOUBLE_REGISTERS(REGISTER_CODE)
313 #undef REGISTER_CODE
314 kDoubleAfterLast
315 };
316
317 // Double word FP register.
318 class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
319  public:
320   // A few double registers are reserved: one as a scratch register and one
321   // to hold 0.0 (see the kDoubleRegZero / kScratchDoubleReg aliases below).
322   // d14: 0.0
323   // d13: scratch register.
324   static constexpr int kSizeInBytes = 8;
325   inline static int NumRegisters();
326
327  private:
328   friend class RegisterBase;
DoubleRegister(int code)329 explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
330 };
331
332 ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
333 static_assert(sizeof(DoubleRegister) == sizeof(int),
334 "DoubleRegister can efficiently be passed by value");
335
336 typedef DoubleRegister FloatRegister;
337
338 // TODO(ppc) Define SIMD registers.
339 typedef DoubleRegister Simd128Register;
340
341 #define DEFINE_REGISTER(R) \
342 constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
343 DOUBLE_REGISTERS(DEFINE_REGISTER)
344 #undef DEFINE_REGISTER
345 constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
346
347 constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
348 constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
349 constexpr DoubleRegister kDoubleRegZero = d14;
350 constexpr DoubleRegister kScratchDoubleReg = d13;
351
352 Register ToRegister(int num);
353
354 enum CRegisterCode {
355 #define REGISTER_CODE(R) kCCode_##R,
356 C_REGISTERS(REGISTER_CODE)
357 #undef REGISTER_CODE
358 kCAfterLast
359 };
360
361 // Condition register (CR) field; used with encode_crbit() and the CR_*
// condition bits for compare/branch instructions.
362 class CRegister : public RegisterBase<CRegister, kCAfterLast> {
363   friend class RegisterBase;
CRegister(int code)364 explicit constexpr CRegister(int code) : RegisterBase(code) {}
365 };
366
367 constexpr CRegister no_creg = CRegister::no_reg();
368 #define DECLARE_C_REGISTER(R) \
369 constexpr CRegister R = CRegister::from_code<kCCode_##R>();
C_REGISTERS(DECLARE_C_REGISTER)370 C_REGISTERS(DECLARE_C_REGISTER)
371 #undef DECLARE_C_REGISTER
372
373 // -----------------------------------------------------------------------------
374 // Machine instruction Operands
375
376 // Class Operand represents an assembler operand: either an immediate value
// (possibly carrying relocation info or a deferred heap-object request) or
// a register.
377 class Operand BASE_EMBEDDED {
378  public:
379   // Immediate operand, optionally with relocation information.
380   INLINE(explicit Operand(intptr_t immediate,
381                           RelocInfo::Mode rmode = RelocInfo::NONE)
382          : rmode_(rmode)) {
383     value_.immediate = immediate;
384   }
  // Convenience factory for an immediate zero operand.
385   INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
  // Immediate holding the address of an external (C++) reference.
386   INLINE(explicit Operand(const ExternalReference& f)
387          : rmode_(RelocInfo::EXTERNAL_REFERENCE)) {
388     value_.immediate = static_cast<intptr_t>(f.address());
389   }
390   explicit Operand(Handle<HeapObject> handle);
  // Immediate encoding a Smi (stored as its tagged pointer bits).
391   INLINE(explicit Operand(Smi* value) : rmode_(RelocInfo::NONE)) {
392     value_.immediate = reinterpret_cast<intptr_t>(value);
393   }
394   // Register operand.
395   INLINE(explicit Operand(Register rm));
396
397   static Operand EmbeddedNumber(double number);  // Smi or HeapNumber.
398   static Operand EmbeddedCode(CodeStub* stub);
399
400   // Return true if this is a register operand.
401   INLINE(bool is_reg() const) { return rm_.is_valid(); }
402
403   bool must_output_reloc_info(const Assembler* assembler) const;
404
  // Immediate value; only valid when IsImmediate() and no heap-object
  // request is pending (enforced by the DCHECKs).
405   inline intptr_t immediate() const {
406     DCHECK(IsImmediate());
407     DCHECK(!IsHeapObjectRequest());
408     return value_.immediate;
409   }
410   bool IsImmediate() const { return !rm_.is_valid(); }
411
412   HeapObjectRequest heap_object_request() const {
413     DCHECK(IsHeapObjectRequest());
414     return value_.heap_object_request;
415   }
416
417   Register rm() const { return rm_; }
418
  // A heap-object request is an immediate whose value is patched in later;
  // it only makes sense for embedded-object or code-target relocation.
419   bool IsHeapObjectRequest() const {
420     DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
421     DCHECK_IMPLIES(is_heap_object_request_,
422                    rmode_ == RelocInfo::EMBEDDED_OBJECT ||
423                        rmode_ == RelocInfo::CODE_TARGET);
424     return is_heap_object_request_;
425   }
426
427  private:
428   Register rm_ = no_reg;
429   union Value {
430     Value() {}
431     HeapObjectRequest heap_object_request;  // if is_heap_object_request_
432     intptr_t immediate;                     // otherwise
433   } value_;                                 // valid if rm_ == no_reg
434   bool is_heap_object_request_ = false;
435
436   RelocInfo::Mode rmode_;
437
438   friend class Assembler;
439   friend class MacroAssembler;
440 };
441
442
443 // Class MemOperand represents a memory operand in load and store
444 // instructions. On PowerPC it is either a base register plus a 16-bit
445 // signed displacement, or a base register plus an index register (rb).
446 class MemOperand BASE_EMBEDDED {
447  public:
  // Base + displacement form.
448   explicit MemOperand(Register rn, int32_t offset = 0);
449
  // Base + index-register form (used by the x-form load/store instructions).
450   explicit MemOperand(Register ra, Register rb);
451
offset()452 int32_t offset() const {
453     return offset_;
454   }
455
456   // PowerPC - base register
ra()457 Register ra() const {
458     return ra_;
459   }
460
  // Index register; only meaningful for the (ra, rb) form.
rb()461 Register rb() const {
462     return rb_;
463   }
464
465  private:
466   Register ra_;     // base
467   int32_t offset_;  // offset
468   Register rb_;     // index
469
470   friend class Assembler;
471 };
472
473
// Records relocation information (pc position, mode, and associated data)
// whose emission into the relocation stream is deferred until later in
// assembly.
474 class DeferredRelocInfo {
475  public:
DeferredRelocInfo()476 DeferredRelocInfo() {}
DeferredRelocInfo(int position,RelocInfo::Mode rmode,intptr_t data)477 DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
478       : position_(position), rmode_(rmode), data_(data) {}
479
position()480 int position() const { return position_; }
rmode()481 RelocInfo::Mode rmode() const { return rmode_; }
data()482 intptr_t data() const { return data_; }
483
484  private:
485   int position_;
486   RelocInfo::Mode rmode_;
487   intptr_t data_;
488 };
489
490
491 class Assembler : public AssemblerBase {
492 public:
493 // Create an assembler. Instructions and relocation information are emitted
494 // into a buffer, with the instructions starting from the beginning and the
495 // relocation information starting from the end of the buffer. See CodeDesc
496 // for a detailed comment on the layout (globals.h).
497 //
498 // If the provided buffer is nullptr, the assembler allocates and grows its
499 // own buffer, and buffer_size determines the initial buffer size. The buffer
500 // is owned by the assembler and deallocated upon destruction of the
501 // assembler.
502 //
503 // If the provided buffer is not nullptr, the assembler uses the provided
504 // buffer for code generation and assumes its size to be buffer_size. If the
505 // buffer is too small, a fatal error occurs. No deallocation of the buffer is
506 // done upon destruction of the assembler.
Assembler(Isolate * isolate,void * buffer,int buffer_size)507 Assembler(Isolate* isolate, void* buffer, int buffer_size)
508 : Assembler(IsolateData(isolate), buffer, buffer_size) {}
509 Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
~Assembler()510 virtual ~Assembler() {}
511
512 // GetCode emits any pending (non-emitted) code and fills the descriptor
513 // desc. GetCode() is idempotent; it returns the same result if no other
514 // Assembler functions are invoked in between GetCode() calls.
515 void GetCode(Isolate* isolate, CodeDesc* desc);
516
517 // Label operations & relative jumps (PPUM Appendix D)
518 //
519 // Takes a branch opcode (cc) and a label (L) and generates
520 // either a backward branch or a forward branch and links it
521 // to the label fixup chain. Usage:
522 //
523 // Label L; // unbound label
524 // j(cc, &L); // forward branch to unbound label
525 // bind(&L); // bind label to the current pc
526 // j(cc, &L); // backward branch to bound label
527 // bind(&L); // illegal: a label may be bound only once
528 //
529 // Note: The same Label can be used for forward and backward branches
530 // but it may be bound only once.
531
532 void bind(Label* L); // binds an unbound label L to the current code position
533
534 // Links a label at the current pc_offset(). If already bound, returns the
535 // bound position. If already linked, returns the position of the prior link.
536 // Otherwise, returns the current pc_offset().
537 int link(Label* L);
538
539 // Determines if Label is bound and near enough so that a single
540 // branch instruction can be used to reach it.
541 bool is_near(Label* L, Condition cond);
542
543 // Returns the branch offset to the given label from the current code position
544 // Links the label to the current position if it is still unbound
branch_offset(Label * L)545 int branch_offset(Label* L) {
546 if (L->is_unused() && !trampoline_emitted_) {
547 TrackBranch();
548 }
549 return link(L) - pc_offset();
550 }
551
552 // Puts a labels target address at the given position.
553 // The high 8 bits are set to zero.
554 void label_at_put(Label* L, int at_offset);
555
556 INLINE(static bool IsConstantPoolLoadStart(
557 Address pc, ConstantPoolEntry::Access* access = nullptr));
558 INLINE(static bool IsConstantPoolLoadEnd(
559 Address pc, ConstantPoolEntry::Access* access = nullptr));
560 INLINE(static int GetConstantPoolOffset(Address pc,
561 ConstantPoolEntry::Access access,
562 ConstantPoolEntry::Type type));
563 INLINE(void PatchConstantPoolAccessInstruction(
564 int pc_offset, int offset, ConstantPoolEntry::Access access,
565 ConstantPoolEntry::Type type));
566
567 // Return the address in the constant pool of the code target address used by
568 // the branch/call instruction at pc, or the object in a mov.
569 INLINE(static Address target_constant_pool_address_at(
570 Address pc, Address constant_pool, ConstantPoolEntry::Access access,
571 ConstantPoolEntry::Type type));
572
573 // Read/Modify the code target address in the branch/call instruction at pc.
574 // The isolate argument is unused (and may be nullptr) when skipping flushing.
575 INLINE(static Address target_address_at(Address pc, Address constant_pool));
576 INLINE(static void set_target_address_at(
577 Address pc, Address constant_pool, Address target,
578 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
579
580 // Return the code target address at a call site from the return address
581 // of that call in the instruction stream.
582 inline static Address target_address_from_return_address(Address pc);
583
584 // Given the address of the beginning of a call, return the address
585 // in the instruction stream that the call will return to.
586 INLINE(static Address return_address_from_call_start(Address pc));
587
588 // This sets the branch destination.
589 // This is for calls and branches within generated code.
590 inline static void deserialization_set_special_target_at(
591 Address instruction_payload, Code* code, Address target);
592
593 // Get the size of the special target encoded at 'instruction_payload'.
594 inline static int deserialization_special_target_size(
595 Address instruction_payload);
596
597 // This sets the internal reference at the pc.
598 inline static void deserialization_set_target_internal_reference_at(
599 Address pc, Address target,
600 RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
601
602 // Size of an instruction.
603 static constexpr int kInstrSize = sizeof(Instr);
604
605 // Here we are patching the address in the LUI/ORI instruction pair.
606 // These values are used in the serialization process and must be zero for
607 // PPC platform, as Code, Embedded Object or External-reference pointers
608 // are split across two consecutive instructions and don't exist separately
609 // in the code, so the serializer should not step forwards in memory after
610 // a target is resolved and written.
611 static constexpr int kSpecialTargetSize = 0;
612
613 // Number of instructions to load an address via a mov sequence.
614 #if V8_TARGET_ARCH_PPC64
615 static constexpr int kMovInstructionsConstantPool = 1;
616 static constexpr int kMovInstructionsNoConstantPool = 5;
617 #if defined(V8_PPC_TAGGING_OPT)
618 static constexpr int kTaggedLoadInstructions = 1;
619 #else
620 static constexpr int kTaggedLoadInstructions = 2;
621 #endif
622 #else
623 static constexpr int kMovInstructionsConstantPool = 1;
624 static constexpr int kMovInstructionsNoConstantPool = 2;
625 static constexpr int kTaggedLoadInstructions = 1;
626 #endif
627 static constexpr int kMovInstructions = FLAG_enable_embedded_constant_pool
628 ? kMovInstructionsConstantPool
629 : kMovInstructionsNoConstantPool;
630
631 // Distance between the instruction referring to the address of the call
632 // target and the return address.
633
634 // Call sequence is a FIXED_SEQUENCE:
635 // mov r8, @ call address
636 // mtlr r8
637 // blrl
638 // @ return address
639 static constexpr int kCallTargetAddressOffset =
640 (kMovInstructions + 2) * kInstrSize;
641
encode_crbit(const CRegister & cr,enum CRBit crbit)642 static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
643 return ((cr.code() * CRWIDTH) + crbit);
644 }
645
646 #define DECLARE_PPC_X_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
647 inline void name(const Register rt, const Register ra, \
648 const Register rb, const RCBit rc = LeaveRC) { \
649 x_form(instr_name, rt, ra, rb, rc); \
650 }
651
652 #define DECLARE_PPC_X_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
653 inline void name(const Register ra, const Register rs, \
654 const Register rb, const RCBit rc = LeaveRC) { \
655 x_form(instr_name, rs, ra, rb, rc); \
656 }
657
658 #define DECLARE_PPC_X_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
659 inline void name(const Register dst, const Register src, \
660 const RCBit rc = LeaveRC) { \
661 x_form(instr_name, src, dst, r0, rc); \
662 }
663
664 #define DECLARE_PPC_X_INSTRUCTIONS_D_FORM(name, instr_name, instr_value) \
665 template <class R> \
666 inline void name(const R rt, const Register ra, const Register rb, \
667 const RCBit rc = LeaveRC) { \
668 DCHECK(ra != r0); \
669 x_form(instr_name, rt.code(), ra.code(), rb.code(), rc); \
670 } \
671 template <class R> \
672 inline void name(const R dst, const MemOperand& src) { \
673 name(dst, src.ra(), src.rb()); \
674 }
675
676 #define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
677 inline void name(const Register dst, const Register src, \
678 const int sh, const RCBit rc = LeaveRC) { \
679 x_form(instr_name, src.code(), dst.code(), sh, rc); \
680 }
681
682 #define DECLARE_PPC_X_INSTRUCTIONS_F_FORM(name, instr_name, instr_value) \
683 inline void name(const Register src1, const Register src2, \
684 const CRegister cr = cr7, const RCBit rc = LeaveRC) { \
685 x_form(instr_name, cr, src1, src2, rc); \
686 } \
687 inline void name##w(const Register src1, const Register src2, \
688 const CRegister cr = cr7, const RCBit rc = LeaveRC) { \
689 x_form(instr_name, cr.code() * B2, src1.code(), src2.code(), LeaveRC); \
690 }
691
692 #define DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM(name, instr_name, instr_value) \
693 inline void name(const Register dst, const MemOperand& src) { \
694 x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
695 }
696 #define DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM(name, instr_name, instr_value) \
697 inline void name(const Register dst, const MemOperand& src) { \
698 DCHECK(src.ra_ != r0); \
699 x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
700 }
701
x_form(Instr instr,int f1,int f2,int f3,int rc)702 inline void x_form(Instr instr, int f1, int f2, int f3, int rc) {
703 emit(instr | f1 * B21 | f2 * B16 | f3 * B11 | rc);
704 }
x_form(Instr instr,Register rs,Register ra,Register rb,RCBit rc)705 inline void x_form(Instr instr, Register rs, Register ra, Register rb,
706 RCBit rc) {
707 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | rc);
708 }
709 inline void x_form(Instr instr, Register ra, Register rs, Register rb,
710 EHBit eh = SetEH) {
711 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | eh);
712 }
x_form(Instr instr,CRegister cr,Register s1,Register s2,RCBit rc)713 inline void x_form(Instr instr, CRegister cr, Register s1, Register s2,
714 RCBit rc) {
715 #if V8_TARGET_ARCH_PPC64
716 int L = 1;
717 #else
718 int L = 0;
719 #endif
720 emit(instr | cr.code() * B23 | L * B21 | s1.code() * B16 |
721 s2.code() * B11 | rc);
722 }
723
724 PPC_X_OPCODE_A_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_A_FORM)
PPC_X_OPCODE_B_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_B_FORM)725 PPC_X_OPCODE_B_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_B_FORM)
726 PPC_X_OPCODE_C_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_C_FORM)
727 PPC_X_OPCODE_D_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_D_FORM)
728 PPC_X_OPCODE_E_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_E_FORM)
729 PPC_X_OPCODE_F_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_F_FORM)
730 PPC_X_OPCODE_EH_S_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM)
731 PPC_X_OPCODE_EH_L_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM)
732
733 inline void notx(Register dst, Register src, RCBit rc = LeaveRC) {
734 nor(dst, src, src, rc);
735 }
lwax(Register rt,const MemOperand & src)736 inline void lwax(Register rt, const MemOperand& src) {
737 #if V8_TARGET_ARCH_PPC64
738 Register ra = src.ra();
739 Register rb = src.rb();
740 DCHECK(ra != r0);
741 x_form(LWAX, rt, ra, rb, LeaveRC);
742 #else
743 lwzx(rt, src);
744 #endif
745 }
746 inline void extsw(Register rs, Register ra, RCBit rc = LeaveRC) {
747 #if V8_TARGET_ARCH_PPC64
748 emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
749 #else
750 // nop on 32-bit
751 DCHECK(rs == ra && rc == LeaveRC);
752 #endif
753 }
754
755 #undef DECLARE_PPC_X_INSTRUCTIONS_A_FORM
756 #undef DECLARE_PPC_X_INSTRUCTIONS_B_FORM
757 #undef DECLARE_PPC_X_INSTRUCTIONS_C_FORM
758 #undef DECLARE_PPC_X_INSTRUCTIONS_D_FORM
759 #undef DECLARE_PPC_X_INSTRUCTIONS_E_FORM
760 #undef DECLARE_PPC_X_INSTRUCTIONS_F_FORM
761 #undef DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM
762 #undef DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM
763
764 #define DECLARE_PPC_XX3_INSTRUCTIONS(name, instr_name, instr_value) \
765 inline void name(const DoubleRegister rt, const DoubleRegister ra, \
766 const DoubleRegister rb) { \
767 xx3_form(instr_name, rt, ra, rb); \
768 }
769
xx3_form(Instr instr,DoubleRegister t,DoubleRegister a,DoubleRegister b)770 inline void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
771 DoubleRegister b) {
772 int AX = ((a.code() & 0x20) >> 5) & 0x1;
773 int BX = ((b.code() & 0x20) >> 5) & 0x1;
774 int TX = ((t.code() & 0x20) >> 5) & 0x1;
775
776 emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
777 (b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
778 }
779
780 PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS)
781 #undef DECLARE_PPC_XX3_INSTRUCTIONS
782
783 // ---------------------------------------------------------------------------
784 // Code generation
785
786 // Insert the smallest number of nop instructions
787 // possible to align the pc offset to a multiple
788 // of m. m must be a power of 2 (>= 4).
789 void Align(int m);
790 // Insert the smallest number of zero bytes possible to align the pc offset
791 // to a mulitple of m. m must be a power of 2 (>= 2).
792 void DataAlign(int m);
793 // Aligns code to something that's optimal for a jump target for the platform.
794 void CodeTargetAlign();
795
796 // Branch instructions
797 void bclr(BOfield bo, int condition_bit, LKBit lk);
798 void blr();
799 void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK);
800 void b(int branch_offset, LKBit lk);
801
802 void bcctr(BOfield bo, int condition_bit, LKBit lk);
803 void bctr();
804 void bctrl();
805
806 // Convenience branch instructions using labels
807 void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L), lk); }
808
cmpi_optimization(CRegister cr)809 inline CRegister cmpi_optimization(CRegister cr) {
810 // Check whether the branch is preceded by an optimizable cmpi against 0.
811 // The cmpi can be deleted if it is also preceded by an instruction that
812 // sets the register used by the compare and supports a dot form.
813 unsigned int sradi_mask = kOpcodeMask | kExt2OpcodeVariant2Mask;
814 unsigned int srawi_mask = kOpcodeMask | kExt2OpcodeMask;
815 int pos = pc_offset();
816 int cmpi_pos = pc_offset() - kInstrSize;
817
818 if (cmpi_pos > 0 && optimizable_cmpi_pos_ == cmpi_pos &&
819 cmpi_cr_.code() == cr.code() && last_bound_pos_ != pos) {
820 int xpos = cmpi_pos - kInstrSize;
821 int xinstr = instr_at(xpos);
822 int cmpi_ra = (instr_at(cmpi_pos) & 0x1f0000) >> 16;
823 // ra is at the same bit position for the three cases below.
824 int ra = (xinstr & 0x1f0000) >> 16;
825 if (cmpi_ra == ra) {
826 if ((xinstr & sradi_mask) == (EXT2 | SRADIX)) {
827 cr = cr0;
828 instr_at_put(xpos, xinstr | SetRC);
829 pc_ -= kInstrSize;
830 } else if ((xinstr & srawi_mask) == (EXT2 | SRAWIX)) {
831 cr = cr0;
832 instr_at_put(xpos, xinstr | SetRC);
833 pc_ -= kInstrSize;
834 } else if ((xinstr & kOpcodeMask) == ANDIx) {
835 cr = cr0;
836 pc_ -= kInstrSize;
837 // nothing to do here since andi. records.
838 }
839 // didn't match one of the above, must keep cmpwi.
840 }
841 }
842 return cr;
843 }
844
845 void bc_short(Condition cond, Label* L, CRegister cr = cr7,
846 LKBit lk = LeaveLK) {
847 DCHECK(cond != al);
848 DCHECK(cr.code() >= 0 && cr.code() <= 7);
849
850 cr = cmpi_optimization(cr);
851
852 int b_offset = branch_offset(L);
853
854 switch (cond) {
855 case eq:
856 bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk);
857 break;
858 case ne:
859 bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk);
860 break;
861 case gt:
862 bc(b_offset, BT, encode_crbit(cr, CR_GT), lk);
863 break;
864 case le:
865 bc(b_offset, BF, encode_crbit(cr, CR_GT), lk);
866 break;
867 case lt:
868 bc(b_offset, BT, encode_crbit(cr, CR_LT), lk);
869 break;
870 case ge:
871 bc(b_offset, BF, encode_crbit(cr, CR_LT), lk);
872 break;
873 case unordered:
874 bc(b_offset, BT, encode_crbit(cr, CR_FU), lk);
875 break;
876 case ordered:
877 bc(b_offset, BF, encode_crbit(cr, CR_FU), lk);
878 break;
879 case overflow:
880 bc(b_offset, BT, encode_crbit(cr, CR_SO), lk);
881 break;
882 case nooverflow:
883 bc(b_offset, BF, encode_crbit(cr, CR_SO), lk);
884 break;
885 default:
886 UNIMPLEMENTED();
887 }
888 }
889
890 void bclr(Condition cond, CRegister cr = cr7, LKBit lk = LeaveLK) {
891 DCHECK(cond != al);
892 DCHECK(cr.code() >= 0 && cr.code() <= 7);
893
894 cr = cmpi_optimization(cr);
895
896 switch (cond) {
897 case eq:
898 bclr(BT, encode_crbit(cr, CR_EQ), lk);
899 break;
900 case ne:
901 bclr(BF, encode_crbit(cr, CR_EQ), lk);
902 break;
903 case gt:
904 bclr(BT, encode_crbit(cr, CR_GT), lk);
905 break;
906 case le:
907 bclr(BF, encode_crbit(cr, CR_GT), lk);
908 break;
909 case lt:
910 bclr(BT, encode_crbit(cr, CR_LT), lk);
911 break;
912 case ge:
913 bclr(BF, encode_crbit(cr, CR_LT), lk);
914 break;
915 case unordered:
916 bclr(BT, encode_crbit(cr, CR_FU), lk);
917 break;
918 case ordered:
919 bclr(BF, encode_crbit(cr, CR_FU), lk);
920 break;
921 case overflow:
922 bclr(BT, encode_crbit(cr, CR_SO), lk);
923 break;
924 case nooverflow:
925 bclr(BF, encode_crbit(cr, CR_SO), lk);
926 break;
927 default:
928 UNIMPLEMENTED();
929 }
930 }
931
932 void isel(Register rt, Register ra, Register rb, int cb);
933 void isel(Condition cond, Register rt, Register ra, Register rb,
934 CRegister cr = cr7) {
935 DCHECK(cond != al);
936 DCHECK(cr.code() >= 0 && cr.code() <= 7);
937
938 cr = cmpi_optimization(cr);
939
940 switch (cond) {
941 case eq:
942 isel(rt, ra, rb, encode_crbit(cr, CR_EQ));
943 break;
944 case ne:
945 isel(rt, rb, ra, encode_crbit(cr, CR_EQ));
946 break;
947 case gt:
948 isel(rt, ra, rb, encode_crbit(cr, CR_GT));
949 break;
950 case le:
951 isel(rt, rb, ra, encode_crbit(cr, CR_GT));
952 break;
953 case lt:
954 isel(rt, ra, rb, encode_crbit(cr, CR_LT));
955 break;
956 case ge:
957 isel(rt, rb, ra, encode_crbit(cr, CR_LT));
958 break;
959 case unordered:
960 isel(rt, ra, rb, encode_crbit(cr, CR_FU));
961 break;
962 case ordered:
963 isel(rt, rb, ra, encode_crbit(cr, CR_FU));
964 break;
965 case overflow:
966 isel(rt, ra, rb, encode_crbit(cr, CR_SO));
967 break;
968 case nooverflow:
969 isel(rt, rb, ra, encode_crbit(cr, CR_SO));
970 break;
971 default:
972 UNIMPLEMENTED();
973 }
974 }
975
976 void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
977 if (cond == al) {
978 b(L, lk);
979 return;
980 }
981
982 if ((L->is_bound() && is_near(L, cond)) || !is_trampoline_emitted()) {
983 bc_short(cond, L, cr, lk);
984 return;
985 }
986
987 Label skip;
988 Condition neg_cond = NegateCondition(cond);
989 bc_short(neg_cond, &skip, cr);
990 b(L, lk);
991 bind(&skip);
992 }
993
// Condition-specific aliases for b(cond, ...). All default to CR field 7
// except the overflow variants, which default to cr0 (where OE-form
// arithmetic records summary overflow).
void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
  b(ne, L, cr, lk);
}
void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
  b(eq, L, cr, lk);
}
void blt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
  b(lt, L, cr, lk);
}
void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
  b(ge, L, cr, lk);
}
void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
  b(le, L, cr, lk);
}
void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
  b(gt, L, cr, lk);
}
void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
  b(unordered, L, cr, lk);
}
void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
  b(ordered, L, cr, lk);
}
void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
  b(overflow, L, cr, lk);
}
void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
  b(nooverflow, L, cr, lk);
}

// Decrement CTR; branch if CTR != 0
void bdnz(Label* L, LKBit lk = LeaveLK) {
  bc(branch_offset(L), DCBNZ, 0, lk);
}
1029
// Data-processing instructions
//
// Naming follows the PPC mnemonics. Common optional bits:
//   OEBit o/s — record overflow in XER/CR0 (SetOE) or not (LeaveOE).
//   RCBit r/rc — record-form dot suffix: update CR0/CR1 (SetRC) or not.

void sub(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
         RCBit r = LeaveRC);

void subc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
          RCBit r = LeaveRC);
void sube(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
          RCBit r = LeaveRC);

void subfic(Register dst, Register src, const Operand& imm);

void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
         RCBit r = LeaveRC);

void addc(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
          RCBit r = LeaveRC);
void adde(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
          RCBit r = LeaveRC);
void addze(Register dst, Register src1, OEBit o = LeaveOE, RCBit r = LeaveRC);

void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
           RCBit r = LeaveRC);

// mulhw/mulhwu: high 32 bits of the signed/unsigned 32x32 product.
void mulhw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
void mulhwu(Register dst, Register src1, Register src2, RCBit r = LeaveRC);

void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
          RCBit r = LeaveRC);
void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
           RCBit r = LeaveRC);

// Immediate-form arithmetic/logical ops (D-form).
void addi(Register dst, Register src, const Operand& imm);
void addis(Register dst, Register src, const Operand& imm);
void addic(Register dst, Register src, const Operand& imm);

void andi(Register ra, Register rs, const Operand& imm);
void andis(Register ra, Register rs, const Operand& imm);
void ori(Register dst, Register src, const Operand& imm);
void oris(Register dst, Register src, const Operand& imm);
void xori(Register dst, Register src, const Operand& imm);
void xoris(Register ra, Register rs, const Operand& imm);
// Compare immediate: cmpi/cmpli are the pointer-width forms, cmpwi/cmplwi
// the 32-bit word forms; cmpli/cmplwi compare unsigned (logical).
void cmpi(Register src1, const Operand& src2, CRegister cr = cr7);
void cmpli(Register src1, const Operand& src2, CRegister cr = cr7);
void cmpwi(Register src1, const Operand& src2, CRegister cr = cr7);
void cmplwi(Register src1, const Operand& src2, CRegister cr = cr7);
void li(Register dst, const Operand& src);
void lis(Register dst, const Operand& imm);
void mr(Register dst, Register src);

// Byte/halfword/word loads and stores (zero- or algebraic-extending; the
// 'u' forms update the base register).
void lbz(Register dst, const MemOperand& src);
void lhz(Register dst, const MemOperand& src);
void lha(Register dst, const MemOperand& src);
void lwz(Register dst, const MemOperand& src);
void lwzu(Register dst, const MemOperand& src);
void lwa(Register dst, const MemOperand& src);
void stb(Register dst, const MemOperand& src);
void sth(Register dst, const MemOperand& src);
void stw(Register dst, const MemOperand& src);
void stwu(Register dst, const MemOperand& src);
void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);

#if V8_TARGET_ARCH_PPC64
// 64-bit only: doubleword loads/stores, rotate-and-mask, and 64-bit
// multiply/divide.
void ld(Register rd, const MemOperand& src);
void ldu(Register rd, const MemOperand& src);
void std(Register rs, const MemOperand& src);
void stdu(Register rs, const MemOperand& src);
void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
void rldcl(Register ra, Register rs, Register rb, int mb, RCBit r = LeaveRC);
void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC);
void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
// Convenience shift/clear pseudo-ops built on the rotate instructions.
void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
void clrrdi(Register dst, Register src, const Operand& val,
            RCBit rc = LeaveRC);
void clrldi(Register dst, Register src, const Operand& val,
            RCBit rc = LeaveRC);
void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotld(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
           RCBit r = LeaveRC);
void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
          RCBit r = LeaveRC);
void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
           RCBit r = LeaveRC);
#endif

// 32-bit rotate-and-mask plus the derived shift/clear/rotate pseudo-ops.
void rlwinm(Register ra, Register rs, int sh, int mb, int me,
            RCBit rc = LeaveRC);
void rlwimi(Register ra, Register rs, int sh, int mb, int me,
            RCBit rc = LeaveRC);
void rlwnm(Register ra, Register rs, Register rb, int mb, int me,
           RCBit rc = LeaveRC);
void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
void clrrwi(Register dst, Register src, const Operand& val,
            RCBit rc = LeaveRC);
void clrlwi(Register dst, Register src, const Operand& val,
            RCBit rc = LeaveRC);
void rotlw(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
void rotlwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);

void subi(Register dst, Register src1, const Operand& src2);

// Load an arbitrary immediate/operand; may expand to a multi-instruction
// sequence or a constant-pool load (see instructions_required_for_mov).
void mov(Register dst, const Operand& src);
void bitwise_mov(Register dst, intptr_t value);
void bitwise_mov32(Register dst, int32_t value);
void bitwise_add32(Register dst, Register src, int32_t value);

// Load the position of the label relative to the generated code object
// pointer in a register.
void mov_label_offset(Register dst, Label* label);

// dst = base + label position + delta
void add_label_offset(Register dst, Register base, Label* label,
                      int delta = 0);

// Load the address of the label in a register and associate with an
// internal reference relocation.
void mov_label_addr(Register dst, Label* label);

// Emit the address of the label (i.e. a jump table entry) and associate with
// an internal reference relocation.
void emit_label_addr(Label* label);

// Multiply instructions
void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
         RCBit r = LeaveRC);

// Miscellaneous arithmetic instructions

// Special register access
void crxor(int bt, int ba, int bb);
// Clear a CR bit: crxor with itself yields 0.
void crclr(int bt) { crxor(bt, bt, bt); }
void creqv(int bt, int ba, int bb);
// Set a CR bit: creqv with itself yields 1.
void crset(int bt) { creqv(bt, bt, bt); }
void mflr(Register dst);
void mtlr(Register src);
void mtctr(Register src);
void mtxer(Register src);
void mcrfs(CRegister cr, FPSCRBit bit);
void mfcr(Register dst);
#if V8_TARGET_ARCH_PPC64
// Direct GPR <-> FPR transfers (POWER8 'mtfpr/mffpr' style moves).
void mffprd(Register dst, DoubleRegister src);
void mffprwz(Register dst, DoubleRegister src);
void mtfprd(DoubleRegister dst, Register src);
void mtfprwz(DoubleRegister dst, Register src);
void mtfprwa(DoubleRegister dst, Register src);
#endif

void function_descriptor();

// Exception-generating instructions and debugging support
void stop(const char* msg, Condition cond = al,
          int32_t code = kDefaultStopCode, CRegister cr = cr7);

void bkpt(uint32_t imm16);  // v5 and above

// Cache management and memory barriers.
void dcbf(Register ra, Register rb);
void sync();
void lwsync();
void icbi(Register ra, Register rb);
void isync();

// Support for floating point
void lfd(const DoubleRegister frt, const MemOperand& src);
void lfdu(const DoubleRegister frt, const MemOperand& src);
void lfs(const DoubleRegister frt, const MemOperand& src);
void lfsu(const DoubleRegister frt, const MemOperand& src);
void stfd(const DoubleRegister frs, const MemOperand& src);
void stfdu(const DoubleRegister frs, const MemOperand& src);
void stfs(const DoubleRegister frs, const MemOperand& src);
void stfsu(const DoubleRegister frs, const MemOperand& src);

void fadd(const DoubleRegister frt, const DoubleRegister fra,
          const DoubleRegister frb, RCBit rc = LeaveRC);
void fsub(const DoubleRegister frt, const DoubleRegister fra,
          const DoubleRegister frb, RCBit rc = LeaveRC);
void fdiv(const DoubleRegister frt, const DoubleRegister fra,
          const DoubleRegister frb, RCBit rc = LeaveRC);
void fmul(const DoubleRegister frt, const DoubleRegister fra,
          const DoubleRegister frc, RCBit rc = LeaveRC);
void fcmpu(const DoubleRegister fra, const DoubleRegister frb,
           CRegister cr = cr7);
void fmr(const DoubleRegister frt, const DoubleRegister frb,
         RCBit rc = LeaveRC);
// Float <-> integer conversions and rounding.
void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
void fctiw(const DoubleRegister frt, const DoubleRegister frb);
void frin(const DoubleRegister frt, const DoubleRegister frb,
          RCBit rc = LeaveRC);
void friz(const DoubleRegister frt, const DoubleRegister frb,
          RCBit rc = LeaveRC);
void frip(const DoubleRegister frt, const DoubleRegister frb,
          RCBit rc = LeaveRC);
void frim(const DoubleRegister frt, const DoubleRegister frb,
          RCBit rc = LeaveRC);
void frsp(const DoubleRegister frt, const DoubleRegister frb,
          RCBit rc = LeaveRC);
void fcfid(const DoubleRegister frt, const DoubleRegister frb,
           RCBit rc = LeaveRC);
void fcfidu(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
void fcfidus(const DoubleRegister frt, const DoubleRegister frb,
             RCBit rc = LeaveRC);
void fcfids(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
void fctid(const DoubleRegister frt, const DoubleRegister frb,
           RCBit rc = LeaveRC);
void fctidz(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
void fctidu(const DoubleRegister frt, const DoubleRegister frb,
            RCBit rc = LeaveRC);
void fctiduz(const DoubleRegister frt, const DoubleRegister frb,
             RCBit rc = LeaveRC);
void fsel(const DoubleRegister frt, const DoubleRegister fra,
          const DoubleRegister frc, const DoubleRegister frb,
          RCBit rc = LeaveRC);
void fneg(const DoubleRegister frt, const DoubleRegister frb,
          RCBit rc = LeaveRC);
// FPSCR access.
void mtfsb0(FPSCRBit bit, RCBit rc = LeaveRC);
void mtfsb1(FPSCRBit bit, RCBit rc = LeaveRC);
void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC);
void mffs(const DoubleRegister frt, RCBit rc = LeaveRC);
void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0,
           RCBit rc = LeaveRC);
void fsqrt(const DoubleRegister frt, const DoubleRegister frb,
           RCBit rc = LeaveRC);
void fabs(const DoubleRegister frt, const DoubleRegister frb,
          RCBit rc = LeaveRC);
// Fused multiply-add/subtract: frt = fra * frc +/- frb.
void fmadd(const DoubleRegister frt, const DoubleRegister fra,
           const DoubleRegister frc, const DoubleRegister frb,
           RCBit rc = LeaveRC);
void fmsub(const DoubleRegister frt, const DoubleRegister fra,
           const DoubleRegister frc, const DoubleRegister frb,
           RCBit rc = LeaveRC);

// Pseudo instructions

// Different nop operations are used by the code generator to detect certain
// states of the generated code.
enum NopMarkerTypes {
  NON_MARKING_NOP = 0,
  GROUP_ENDING_NOP,
  DEBUG_BREAK_NOP,
  // IC markers.
  PROPERTY_ACCESS_INLINED,
  PROPERTY_ACCESS_INLINED_CONTEXT,
  PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
  // Helper values.
  LAST_CODE_MARKER,
  FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
};

void nop(int type = 0);  // 0 is the default non-marking type.
1288
// Push src onto the stack: store with update, pre-decrementing sp by one
// pointer-sized slot.
void push(Register src) {
#if V8_TARGET_ARCH_PPC64
  stdu(src, MemOperand(sp, -kPointerSize));
#else
  stwu(src, MemOperand(sp, -kPointerSize));
#endif
}

// Pop the top of stack into dst, then release the slot.
void pop(Register dst) {
#if V8_TARGET_ARCH_PPC64
  ld(dst, MemOperand(sp));
#else
  lwz(dst, MemOperand(sp));
#endif
  addi(sp, sp, Operand(kPointerSize));
}

// Drop the top stack slot without reading it.
void pop() { addi(sp, sp, Operand(kPointerSize)); }

// Jump unconditionally to given label.
void jmp(Label* L) { b(L); }

// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
  return pc_offset() - label->pos();
}

// Check the number of instructions generated from label to here.
int InstructionsGeneratedSince(Label* label) {
  return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
1320
// Class for scoping postponing the trampoline pool generation.
// RAII guard: trampoline pool emission is blocked for the lifetime of the
// scope so a multi-instruction sequence is not split by pool code.
class BlockTrampolinePoolScope {
 public:
  explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
    assem_->StartBlockTrampolinePool();
  }
  ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }

 private:
  Assembler* assem_;  // Not owned; must outlive the scope.

  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
};
1334
// Class for scoping disabling constant pool entry merging.
// RAII guard: while alive, identical constant pool values get distinct
// entries instead of being shared.
class BlockConstantPoolEntrySharingScope {
 public:
  explicit BlockConstantPoolEntrySharingScope(Assembler* assem)
      : assem_(assem) {
    assem_->StartBlockConstantPoolEntrySharing();
  }
  ~BlockConstantPoolEntrySharingScope() {
    assem_->EndBlockConstantPoolEntrySharing();
  }

 private:
  Assembler* assem_;  // Not owned; must outlive the scope.

  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstantPoolEntrySharingScope);
};
1351
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);

// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
                       int id);

// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
void dq(uint64_t data);
void dp(uintptr_t data);

// Read/patch instructions
// |pos| variants address the instruction buffer by offset; the static
// Address variants patch already-placed code directly.
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
  *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
}
static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(Address pc, Instr instr) {
  *reinterpret_cast<Instr*>(pc) = instr;
}
static Condition GetCondition(Instr instr);

// Instruction classification predicates used by code patching.
static bool IsLis(Instr instr);
static bool IsLi(Instr instr);
static bool IsAddic(Instr instr);
static bool IsOri(Instr instr);

static bool IsBranch(Instr instr);
static Register GetRA(Instr instr);
static Register GetRB(Instr instr);
#if V8_TARGET_ARCH_PPC64
static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                               Instr instr4, Instr instr5);
#else
static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2);
#endif

static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
static bool IsRlwinm(Instr instr);
static bool IsAndi(Instr instr);
#if V8_TARGET_ARCH_PPC64
static bool IsRldicl(Instr instr);
#endif
static bool IsCrSet(Instr instr);
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);

// Postpone the generation of the trampoline pool for the specified number of
// instructions.
void BlockTrampolinePoolFor(int instructions);
void CheckTrampolinePool();

// For mov. Return the number of actual instructions required to
// load the operand into a register. This can be anywhere from
// one (constant pool small section) to five instructions (full
// 64-bit sequence).
//
// The value returned is only valid as long as no entries are added to the
// constant pool between this call and the actual instruction being emitted.
int instructions_required_for_mov(Register dst, const Operand& src) const;

// Decide between using the constant pool vs. a mov immediate sequence.
bool use_constant_pool_for_mov(Register dst, const Operand& src,
                               bool canOptimize) const;

// The code currently calls CheckBuffer() too often. This has the side
// effect of randomly growing the buffer in the middle of multi-instruction
// sequences.
//
// This function allows outside callers to check and grow the buffer
void EnsureSpaceFor(int space_needed);

// Emit the accumulated constant pool; returns the value produced by the
// builder's Emit().
int EmitConstantPool() { return constant_pool_builder_.Emit(this); }

// True when the next intptr-sized constant pool access would land in the
// overflow section.
bool ConstantPoolAccessIsInOverflow() const {
  return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
         ConstantPoolEntry::OVERFLOWED;
}

// Label marking where the constant pool was emitted.
Label* ConstantPoolPosition() {
  return constant_pool_builder_.EmittedPosition();
}

void EmitRelocations();
1443
protected:
// Bytes remaining between the emission cursor and the reloc-info writer.
int buffer_space() const { return reloc_info_writer.pos() - pc_; }

// Decode instruction(s) at pos and return backchain to previous
// label reference or kEndOfChain.
int target_at(int pos);

// Patch instruction(s) at pos to target target_pos (e.g. branch)
void target_at_put(int pos, int target_pos, bool* is_branch = nullptr);

// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
// Add an intptr constant pool entry; sharing of identical values is only
// allowed for shareable reloc modes, and never while serializing or inside
// a BlockConstantPoolEntrySharingScope.
ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
                                               intptr_t value) {
  bool sharing_ok = RelocInfo::IsNone(rmode) ||
                    !(serializer_enabled() ||
                      rmode < RelocInfo::FIRST_SHAREABLE_RELOC_MODE ||
                      is_constant_pool_entry_sharing_blocked());
  return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
}
// Add a double constant pool entry.
ConstantPoolEntry::Access ConstantPoolAddEntry(Double value) {
  return constant_pool_builder_.AddEntry(pc_offset(), value);
}

// Block the emission of the trampoline pool before pc_offset.
void BlockTrampolinePoolBefore(int pc_offset) {
  if (no_trampoline_pool_before_ < pc_offset)
    no_trampoline_pool_before_ = pc_offset;
}

// Nested blocking of trampoline pool emission; the pool is re-checked as
// soon as the outermost block ends.
void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
void EndBlockTrampolinePool() {
  int count = --trampoline_pool_blocked_nesting_;
  if (count == 0) CheckTrampolinePoolQuick();
}
bool is_trampoline_pool_blocked() const {
  return trampoline_pool_blocked_nesting_ > 0;
}

// Nested blocking of constant pool entry sharing.
void StartBlockConstantPoolEntrySharing() {
  constant_pool_entry_sharing_blocked_nesting_++;
}
void EndBlockConstantPoolEntrySharing() {
  constant_pool_entry_sharing_blocked_nesting_--;
}
bool is_constant_pool_entry_sharing_blocked() const {
  return constant_pool_entry_sharing_blocked_nesting_ > 0;
}

// True after an internal trampoline allocation failure.
bool has_exception() const { return internal_trampoline_exception_; }

bool is_trampoline_emitted() const { return trampoline_emitted_; }

// Code generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static constexpr int kGap = 32;

RelocInfoWriter reloc_info_writer;
1505
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;

// Repeated checking whether the trampoline pool should be emitted is rather
// expensive. By default we only check again once a number of instructions
// has been generated.
int next_trampoline_check_;  // pc offset of next buffer check.

// Emission of the trampoline pool may be blocked in some code sequences.
int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
int no_trampoline_pool_before_;  // Block emission before this pc offset.

// Do not share constant pool entries.
int constant_pool_entry_sharing_blocked_nesting_;

// Relocation info generation
// Each relocation is encoded as a variable size value
static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
std::vector<DeferredRelocInfo> relocations_;

// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Optimizable cmpi information.
// Offset of the last cmpi that a following branch may fold away, and the
// CR field that compare targeted (see cmpi_optimization()).
int optimizable_cmpi_pos_;
CRegister cmpi_cr_ = CRegister::no_reg();

ConstantPoolBuilder constant_pool_builder_;

// Grow the buffer once free space shrinks to the reserved kGap.
void CheckBuffer() {
  if (buffer_space() <= kGap) {
    GrowBuffer();
  }
}

void GrowBuffer(int needed = 0);
// Code emission
// Write one instruction at pc_ and advance; may grow the buffer before and
// trigger a trampoline pool check after.
void emit(Instr x) {
  CheckBuffer();
  *reinterpret_cast<Instr*>(pc_) = x;
  pc_ += kInstrSize;
  CheckTrampolinePoolQuick();
}
// Account for a new label-referencing branch: the first tracked branch
// establishes the deadline for emitting the trampoline pool, each further
// one moves it closer by a slot.
void TrackBranch() {
  DCHECK(!trampoline_emitted_);
  int count = tracked_branch_count_++;
  if (count == 0) {
    // We leave space (kMaxBlockTrampolineSectionSize)
    // for BlockTrampolinePoolScope buffer.
    next_trampoline_check_ =
        pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
  } else {
    next_trampoline_check_ -= kTrampolineSlotsSize;
  }
}

inline void UntrackBranch();
// Cheap inline check; the expensive CheckTrampolinePool() only runs once
// the deadline has been reached.
void CheckTrampolinePoolQuick() {
  if (pc_offset() >= next_trampoline_check_) {
    CheckTrampolinePool();
  }
}

// Instruction generation
// Encoders for the PPC instruction formats (A-, D-, XO-, MD-, MDS-form).
void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
            DoubleRegister frb, RCBit r);
void d_form(Instr instr, Register rt, Register ra, const intptr_t val,
            bool signed_disp);
void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o,
             RCBit r);
void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit,
             RCBit r);
void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit,
              RCBit r);

// Labels
void print(Label* L);
int max_reach_from(int pos);
void bind_to(Label* L, int pos);
void next(Label* L);
1586
1587 class Trampoline {
1588 public:
Trampoline()1589 Trampoline() {
1590 next_slot_ = 0;
1591 free_slot_count_ = 0;
1592 }
Trampoline(int start,int slot_count)1593 Trampoline(int start, int slot_count) {
1594 next_slot_ = start;
1595 free_slot_count_ = slot_count;
1596 }
take_slot()1597 int take_slot() {
1598 int trampoline_slot = kInvalidSlotPos;
1599 if (free_slot_count_ <= 0) {
1600 // We have run out of space on trampolines.
1601 // Make sure we fail in debug mode, so we become aware of each case
1602 // when this happens.
1603 DCHECK(0);
1604 // Internal exception will be caught.
1605 } else {
1606 trampoline_slot = next_slot_;
1607 free_slot_count_--;
1608 next_slot_ += kTrampolineSlotsSize;
1609 }
1610 return trampoline_slot;
1611 }
1612
1613 private:
1614 int next_slot_;
1615 int free_slot_count_;
1616 };
1617
int32_t get_trampoline_entry();
// Number of outstanding label-referencing branches (see TrackBranch()).
int tracked_branch_count_;
// If trampoline is emitted, generated code is becoming large. As
// this is already a slow case which can possibly break our code
// generation for the extreme case, we use this information to
// trigger different mode of branch instruction generation, where we
// no longer use a single branch instruction.
bool trampoline_emitted_;
static constexpr int kTrampolineSlotsSize = kInstrSize;
// Maximum reach of a 16-bit conditional branch displacement.
static constexpr int kMaxCondBranchReach = (1 << (16 - 1)) - 1;
static constexpr int kMaxBlockTrampolineSectionSize = 64 * kInstrSize;
static constexpr int kInvalidSlotPos = -1;

Trampoline trampoline_;
bool internal_trampoline_exception_;

friend class RegExpMacroAssemblerPPC;
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;

// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);

std::forward_list<HeapObjectRequest> heap_object_requests_;
1651 };
1652
1653
// Helper whose construction guarantees the assembler buffer has room for at
// least one more emission (grows the buffer if needed).
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};

// Assembler specialized for patching a fixed number of instructions at an
// existing code address (definitions live in the .cc file).
class PatchingAssembler : public Assembler {
 public:
  PatchingAssembler(IsolateData isolate_data, byte* address, int instructions);
  ~PatchingAssembler();
};
1664
1665 } // namespace internal
1666 } // namespace v8
1667
1668 #endif // V8_PPC_ASSEMBLER_PPC_H_
1669