// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_OBJECTS_CODE_H_
#define V8_OBJECTS_CODE_H_

#include "src/handler-table.h"
#include "src/objects.h"
#include "src/objects/fixed-array.h"

// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8 {
namespace internal {

class ByteArray;
class BytecodeArray;
class CodeDataContainer;

namespace interpreter {
class Register;
}

// Code describes objects with on-the-fly generated machine code.
class Code : public HeapObject {
 public:
  // Opaque data type for encapsulating code flags like kind, inline
  // cache state, and arguments count.
  typedef uint32_t Flags;

#define CODE_KIND_LIST(V)   \
  V(OPTIMIZED_FUNCTION)     \
  V(BYTECODE_HANDLER)       \
  V(STUB)                   \
  V(BUILTIN)                \
  V(REGEXP)                 \
  V(WASM_FUNCTION)          \
  V(WASM_TO_JS_FUNCTION)    \
  V(JS_TO_WASM_FUNCTION)    \
  V(WASM_INTERPRETER_ENTRY) \
  V(C_WASM_ENTRY)

  enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
    CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
        NUMBER_OF_KINDS
  };
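
  // Together, CODE_KIND_LIST and DEFINE_CODE_KIND_ENUM form an X-macro: each
  // V(name) entry above becomes one enumerator, so the list expands to
  //
  //   enum Kind { OPTIMIZED_FUNCTION, BYTECODE_HANDLER, ..., C_WASM_ENTRY,
  //               NUMBER_OF_KINDS };
  //
  // and adding a kind to the list automatically extends the enum.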

  static const char* Kind2String(Kind kind);

#ifdef ENABLE_DISASSEMBLER
  void Disassemble(const char* name, std::ostream& os,
                   Address current_pc = kNullAddress);
#endif

  // [instruction_size]: Size of the native instructions, including embedded
  // data such as the safepoints table.
  inline int raw_instruction_size() const;
  inline void set_raw_instruction_size(int value);

  // Returns the size of the native instructions, including embedded
  // data such as the safepoints table. For off-heap code objects
  // this may differ from instruction_size in that this will return the size
  // of the off-heap instruction stream rather than the on-heap trampoline
  // located at instruction_start.
  inline int InstructionSize() const;
#ifdef V8_EMBEDDED_BUILTINS
  int OffHeapInstructionSize() const;
#endif

  // [relocation_info]: Code relocation information.
  DECL_ACCESSORS(relocation_info, ByteArray)
  void InvalidateEmbeddedObjects();

  // [deoptimization_data]: Array containing data for deopt.
  DECL_ACCESSORS(deoptimization_data, FixedArray)

  // [source_position_table]: ByteArray for the source positions table or
  // SourcePositionTableWithFrameCache.
  DECL_ACCESSORS(source_position_table, Object)
  inline ByteArray* SourcePositionTable() const;

  // [code_data_container]: A container indirection for all mutable fields.
  DECL_ACCESSORS(code_data_container, CodeDataContainer)

  // [stub_key]: The major/minor key of a code stub.
  inline uint32_t stub_key() const;
  inline void set_stub_key(uint32_t key);

  // [next_code_link]: Link for lists of optimized or deoptimized code.
  // Note that this field is stored in the {CodeDataContainer} to be mutable.
  inline Object* next_code_link() const;
  inline void set_next_code_link(Object* value);

  // [constant_pool offset]: Offset of the constant pool.
  // Valid for FLAG_enable_embedded_constant_pool only.
  inline int constant_pool_offset() const;
  inline void set_constant_pool_offset(int offset);

  // Unchecked accessors to be used during GC.
  inline ByteArray* unchecked_relocation_info() const;

  inline int relocation_size() const;

  // [kind]: Access to specific code kind.
  inline Kind kind() const;

  inline bool is_stub() const;
  inline bool is_optimized_code() const;
  inline bool is_wasm_code() const;

  // Testers for interpreter builtins.
  inline bool is_interpreter_trampoline_builtin() const;

  // Tells whether the code checks the optimization marker in the function's
  // feedback vector.
  inline bool checks_optimization_marker() const;

  // Tells whether the outgoing parameters of this code are tagged pointers.
  inline bool has_tagged_params() const;

  // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the
  // code object was generated by the TurboFan optimizing compiler.
  inline bool is_turbofanned() const;

  // [can_have_weak_objects]: For kind OPTIMIZED_FUNCTION, tells whether the
  // embedded objects in code should be treated weakly.
  inline bool can_have_weak_objects() const;
  inline void set_can_have_weak_objects(bool value);

  // [is_construct_stub]: For kind BUILTIN, tells whether the code object
  // represents a hand-written construct stub
  // (e.g., NumberConstructor_ConstructStub).
  inline bool is_construct_stub() const;
  inline void set_is_construct_stub(bool value);

  // [builtin_index]: For builtins, tells which builtin index the code object
  // has. The builtin index is a non-negative integer for builtins, and -1
  // otherwise.
  inline int builtin_index() const;
  inline void set_builtin_index(int id);
  inline bool is_builtin() const;

  inline bool has_safepoint_info() const;

  // [stack_slots]: If {has_safepoint_info()}, the number of stack slots
  // reserved in the code prologue.
  inline int stack_slots() const;

  // [safepoint_table_offset]: If {has_safepoint_info()}, the offset in the
  // instruction stream where the safepoint table starts.
  inline int safepoint_table_offset() const;
  inline void set_safepoint_table_offset(int offset);

  // [handler_table_offset]: The offset in the instruction stream where the
  // exception handler table starts.
  inline int handler_table_offset() const;
  inline void set_handler_table_offset(int offset);

  // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION, tells whether
  // the code is going to be deoptimized because of dead embedded maps.
  inline bool marked_for_deoptimization() const;
  inline void set_marked_for_deoptimization(bool flag);

  // [deopt_already_counted]: For kind OPTIMIZED_FUNCTION, tells whether
  // the code was already deoptimized.
  inline bool deopt_already_counted() const;
  inline void set_deopt_already_counted(bool flag);

  // [is_promise_rejection]: For kind BUILTIN, tells whether an exception
  // thrown by the code will lead to promise rejection, or remain uncaught if
  // both this and is_exception_caught are set.
  // Use GetBuiltinCatchPrediction to access this.
  inline void set_is_promise_rejection(bool flag);

  // [is_exception_caught]: For kind BUILTIN, tells whether an exception
  // thrown by the code will be caught internally, or remain uncaught if both
  // this and is_promise_rejection are set.
  // Use GetBuiltinCatchPrediction to access this.
  inline void set_is_exception_caught(bool flag);

  // [constant_pool]: The constant pool for this function.
  inline Address constant_pool() const;

  // Get the safepoint entry for the given pc.
  SafepointEntry GetSafepointEntry(Address pc);

  // The entire code object including its header is copied verbatim to the
  // snapshot so that it can be written in one fast memcpy during
  // deserialization. The deserializer will overwrite some pointers, rather
  // like a runtime linker, but the random allocation addresses used in the
  // mksnapshot process would still be present in the unlinked snapshot data,
  // which would make snapshot production non-reproducible. This method wipes
  // out the to-be-overwritten header data for reproducible snapshots.
  inline void WipeOutHeader();

  // Clear uninitialized padding space. This ensures that the snapshot content
  // is deterministic.
  inline void clear_padding();
  // Initialize the flags field. Similar to clear_padding above, this ensures
  // that the snapshot content is deterministic.
  inline void initialize_flags(Kind kind, bool has_unwinding_info,
                               bool is_turbofanned, int stack_slots);

  // Convert a target address into a code object.
  static inline Code* GetCodeFromTargetAddress(Address address);

  // Convert an entry address into an object.
  static inline Object* GetObjectFromEntryAddress(Address location_of_address);

  // Convert a code entry into an object.
  static inline Object* GetObjectFromCodeEntry(Address code_entry);

  // Returns the address of the first instruction.
  inline Address raw_instruction_start() const;

  // Returns the address of the first instruction. For off-heap code objects
  // this differs from instruction_start (which would point to the off-heap
  // trampoline instead).
  inline Address InstructionStart() const;
#ifdef V8_EMBEDDED_BUILTINS
  Address OffHeapInstructionStart() const;
#endif

  // Returns the address right after the last instruction.
  inline Address raw_instruction_end() const;

  // Returns the address right after the last instruction. For off-heap code
  // objects this differs from instruction_end (which would point to the
  // off-heap trampoline instead).
  inline Address InstructionEnd() const;
#ifdef V8_EMBEDDED_BUILTINS
  Address OffHeapInstructionEnd() const;
#endif

  // Returns the size of the instructions, padding, relocation and unwinding
  // information.
  inline int body_size() const;

  // Returns the size of code and its metadata. This includes the size of code
  // relocation information, deoptimization data and handler table.
  inline int SizeIncludingMetadata() const;

  // Returns the address of the first relocation info (read backwards!).
  inline byte* relocation_start() const;

  // Returns the address right after the relocation info (read backwards!).
  inline byte* relocation_end() const;

  // [has_unwinding_info]: Whether this code object has unwinding information.
  // If it doesn't, unwinding_info_start() will point to invalid data.
  //
  // The body of all code objects has the following layout.
  //
  //  +--------------------------+  <-- raw_instruction_start()
  //  |       instructions       |
  //  |           ...            |
  //  +--------------------------+
  //  |      relocation info     |
  //  |           ...            |
  //  +--------------------------+  <-- raw_instruction_end()
  //
  // If has_unwinding_info() is false, raw_instruction_end() points to the
  // first memory location after the end of the code object. Otherwise, the
  // body continues as follows:
  //
  //  +--------------------------+
  //  |    padding to the next   |
  //  |  8-byte aligned address  |
  //  +--------------------------+  <-- raw_instruction_end()
  //  |   [unwinding_info_size]  |
  //  |        as uint64_t       |
  //  +--------------------------+  <-- unwinding_info_start()
  //  |       unwinding info     |
  //  |            ...           |
  //  +--------------------------+  <-- unwinding_info_end()
  //
  // and unwinding_info_end() points to the first memory location after the
  // end of the code object.
  //
  inline bool has_unwinding_info() const;

  // [unwinding_info_size]: Size of the unwinding information.
  inline int unwinding_info_size() const;
  inline void set_unwinding_info_size(int value);

  // Returns the address of the unwinding information, if any.
  inline Address unwinding_info_start() const;

  // Returns the address right after the end of the unwinding information.
  inline Address unwinding_info_end() const;
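
  // A sketch of the arithmetic implied by the layout diagram above
  // (illustrative only, not the actual accessor implementations; RoundUp and
  // kInt64Size are assumed helpers):
  //
  //   Address size_field = RoundUp(raw_instruction_end(), kInt64Size);
  //   Address info_start = size_field + kInt64Size;  // unwinding_info_start()
  //   Address info_end =
  //       info_start + unwinding_info_size();        // unwinding_info_end()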

  // Code entry point.
  inline Address entry() const;

  // Returns true if pc is inside this object's instructions.
  inline bool contains(Address pc);

  // Relocate the code by delta bytes. Called to signal that this code
  // object has been moved by delta bytes.
  void Relocate(intptr_t delta);

  // Migrate code described by desc.
  void CopyFrom(const CodeDesc& desc);

  // Migrate code from desc without flushing the instruction cache.
  void CopyFromNoFlush(const CodeDesc& desc);

  // Flushes the instruction cache for the executable instructions of this
  // code object.
  void FlushICache() const;

  // Returns the object size for a given body (used for allocation).
  static int SizeFor(int body_size) {
    DCHECK_SIZE_TAG_ALIGNED(body_size);
    return RoundUp(kHeaderSize + body_size, kCodeAlignment);
  }
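
  // For example, with a hypothetical kHeaderSize of 96 and kCodeAlignment of
  // 32 (both constants vary by configuration), SizeFor(100) returns
  // RoundUp(196, 32) == 224; only the rounding behavior is fixed.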

  // Calculate the size of the code object to report for log events. This
  // takes the layout of the code object into account.
  inline int ExecutableSize() const;

  DECL_CAST(Code)

  // Dispatched behavior.
  inline int CodeSize() const;

  DECL_PRINTER(Code)
  DECL_VERIFIER(Code)

  void PrintDeoptLocation(FILE* out, const char* str, Address pc);
  bool CanDeoptAt(Address pc);

  void SetMarkedForDeoptimization(const char* reason);

  inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();

#ifdef DEBUG
  enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
  void VerifyEmbeddedObjects(VerifyMode mode = kNoContextRetainingPointers);
#endif  // DEBUG

#ifdef V8_EMBEDDED_BUILTINS
  bool IsProcessIndependent();
#endif

  inline bool CanContainWeakObjects();

  inline bool IsWeakObject(Object* object);

  static inline bool IsWeakObjectInOptimizedCode(Object* object);

  static Handle<WeakCell> WeakCellFor(Handle<Code> code);
  WeakCell* CachedWeakCell();

  // Return true if the function is inlined in the code.
  bool Inlines(SharedFunctionInfo* sfi);

  class OptimizedCodeIterator {
   public:
    explicit OptimizedCodeIterator(Isolate* isolate);
    Code* Next();

   private:
    Context* next_context_;
    Code* current_code_;
    Isolate* isolate_;

    DisallowHeapAllocation no_gc;
    DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator)
  };

  static const int kConstantPoolSize =
      FLAG_enable_embedded_constant_pool ? kIntSize : 0;

  // Layout description.
  static const int kRelocationInfoOffset = HeapObject::kHeaderSize;
  static const int kDeoptimizationDataOffset =
      kRelocationInfoOffset + kPointerSize;
  static const int kSourcePositionTableOffset =
      kDeoptimizationDataOffset + kPointerSize;
  static const int kCodeDataContainerOffset =
      kSourcePositionTableOffset + kPointerSize;
  static const int kInstructionSizeOffset =
      kCodeDataContainerOffset + kPointerSize;
  static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
  static const int kSafepointTableOffsetOffset = kFlagsOffset + kIntSize;
  static const int kHandlerTableOffsetOffset =
      kSafepointTableOffsetOffset + kIntSize;
  static const int kStubKeyOffset = kHandlerTableOffsetOffset + kIntSize;
  static const int kConstantPoolOffset = kStubKeyOffset + kIntSize;
  static const int kBuiltinIndexOffset =
      kConstantPoolOffset + kConstantPoolSize;
  static const int kHeaderPaddingStart = kBuiltinIndexOffset + kIntSize;

  // Add padding to align the instruction start following right after
  // the Code object header.
  static const int kHeaderSize =
      (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
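
  // The expression above is the usual mask-based round-up: adding
  // kCodeAlignmentMask (i.e. kCodeAlignment - 1) and clearing the low bits
  // rounds kHeaderPaddingStart up to the next multiple of kCodeAlignment.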

  // Data or code not directly visited by GC starts here.
  // The serializer needs to copy bytes starting from here verbatim.
  // Objects embedded into code are visited via reloc info.
  static const int kDataStart = kInstructionSizeOffset;

  inline int GetUnwindingInfoSizeOffset() const;

  class BodyDescriptor;

  // Flags layout.  BitField<type, shift, size>.
#define CODE_FLAGS_BIT_FIELDS(V, _)    \
  V(HasUnwindingInfoField, bool, 1, _) \
  V(KindField, Kind, 5, _)             \
  V(IsTurbofannedField, bool, 1, _)    \
  V(StackSlotsField, int, 24, _)
  DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
#undef CODE_FLAGS_BIT_FIELDS
  static_assert(NUMBER_OF_KINDS <= KindField::kMax, "Code::KindField size");
  static_assert(StackSlotsField::kNext <= 32, "Code::flags field exhausted");
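
  // A minimal usage sketch (illustrative only; encode/decode are the standard
  // BitField helpers):
  //
  //   uint32_t flags = KindField::encode(OPTIMIZED_FUNCTION) |
  //                    IsTurbofannedField::encode(true) |
  //                    StackSlotsField::encode(4);
  //   Kind kind = KindField::decode(flags);  // == OPTIMIZED_FUNCTION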

  // KindSpecificFlags layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
#define CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \
  V(MarkedForDeoptimizationField, bool, 1, _)     \
  V(DeoptAlreadyCountedField, bool, 1, _)         \
  V(CanHaveWeakObjectsField, bool, 1, _)          \
  V(IsConstructStubField, bool, 1, _)             \
  V(IsPromiseRejectionField, bool, 1, _)          \
  V(IsExceptionCaughtField, bool, 1, _)
  DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
#undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS
  static_assert(IsExceptionCaughtField::kNext <= 32, "KindSpecificFlags full");

  // The {marked_for_deoptimization} field is accessed from generated code.
  static const int kMarkedForDeoptimizationBit =
      MarkedForDeoptimizationField::kShift;

  static const int kArgumentsBits = 16;
  static const int kMaxArguments = (1 << kArgumentsBits) - 1;

 private:
  friend class RelocIterator;

  bool is_promise_rejection() const;
  bool is_exception_caught() const;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};

// CodeDataContainer is a container for all mutable fields associated with its
// referencing {Code} object. Since {Code} objects reside on write-protected
// pages within the heap, their header fields need to be immutable. There is
// always a 1-to-1 relation between {Code} and {CodeDataContainer}, and the
// referencing field {Code::code_data_container} itself is immutable.
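//
// For example, mutating the next_code_link goes through the container rather
// than the write-protected {Code} header (illustrative sketch; assumes a
// Code* code and Object* link in scope):
//
//   code->code_data_container()->set_next_code_link(link);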
class CodeDataContainer : public HeapObject {
 public:
  DECL_ACCESSORS(next_code_link, Object)
  DECL_INT_ACCESSORS(kind_specific_flags)

  // Clear uninitialized padding space. This ensures that the snapshot content
  // is deterministic.
  inline void clear_padding();

  DECL_CAST(CodeDataContainer)

  // Dispatched behavior.
  DECL_PRINTER(CodeDataContainer)
  DECL_VERIFIER(CodeDataContainer)

  static const int kNextCodeLinkOffset = HeapObject::kHeaderSize;
  static const int kKindSpecificFlagsOffset =
      kNextCodeLinkOffset + kPointerSize;
  static const int kUnalignedSize = kKindSpecificFlagsOffset + kIntSize;
  static const int kSize = OBJECT_POINTER_ALIGN(kUnalignedSize);

  // During mark compact we need to take special care for weak fields.
  static const int kPointerFieldsStrongEndOffset = kNextCodeLinkOffset;
  static const int kPointerFieldsWeakEndOffset = kKindSpecificFlagsOffset;

  // Ignores weakness.
  typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
                              kPointerFieldsWeakEndOffset, kSize>
      BodyDescriptor;

  // Respects weakness.
  typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
                              kPointerFieldsStrongEndOffset, kSize>
      BodyDescriptorWeak;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(CodeDataContainer);
};

class AbstractCode : public HeapObject {
 public:
  // All code kinds and INTERPRETED_FUNCTION.
  enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
    CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
        INTERPRETED_FUNCTION,
    NUMBER_OF_KINDS
  };

  static const char* Kind2String(Kind kind);

  int SourcePosition(int offset);
  int SourceStatementPosition(int offset);

  // Returns the address of the first instruction.
  inline Address raw_instruction_start();

  // Returns the address of the first instruction. For off-heap code objects
  // this differs from instruction_start (which would point to the off-heap
  // trampoline instead).
  inline Address InstructionStart();

  // Returns the address right after the last instruction.
  inline Address raw_instruction_end();

  // Returns the address right after the last instruction. For off-heap code
  // objects this differs from instruction_end (which would point to the
  // off-heap trampoline instead).
  inline Address InstructionEnd();

  // Returns the size of the code instructions.
  inline int raw_instruction_size();

  // Returns the size of the native instructions, including embedded
  // data such as the safepoints table. For off-heap code objects
  // this may differ from instruction_size in that this will return the size
  // of the off-heap instruction stream rather than the on-heap trampoline
  // located at instruction_start.
  inline int InstructionSize();

  // Return the source position table.
  inline ByteArray* source_position_table();

  inline Object* stack_frame_cache();
  static void SetStackFrameCache(Handle<AbstractCode> abstract_code,
                                 Handle<SimpleNumberDictionary> cache);
  void DropStackFrameCache();

  // Returns the size of instructions and the metadata.
  inline int SizeIncludingMetadata();

  // Returns true if pc is inside this object's instructions.
  inline bool contains(Address pc);

  // Returns the AbstractCode::Kind of the code.
  inline Kind kind();

  // Calculate the size of the code object to report for log events. This
  // takes the layout of the code object into account.
  inline int ExecutableSize();

  DECL_CAST(AbstractCode)
  inline Code* GetCode();
  inline BytecodeArray* GetBytecodeArray();

  // Max loop nesting marker used to postpone OSR. We don't take loop
  // nesting that is deeper than 5 levels into account.
  static const int kMaxLoopNestingMarker = 6;
};

// Dependent code is a singly linked list of fixed arrays. Each array contains
// code objects in weak cells for one dependent group. The suffix of the array
// can be filled with the undefined value if the number of codes is less than
// the length of the array.
//
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
// | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... |
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
//    |
//    V
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
// | next | count & group 2 | code 1 | code 2 | ... | code m | undefined | ... |
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
//    |
//    V
// empty_fixed_array()
//
// The list of fixed arrays is ordered by dependency groups.
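//
// A traversal sketch using the low-level accessors declared below
// (illustrative only; the terminating empty_fixed_array() has length zero):
//
//   for (DependentCode* deps = entries; deps->length() > 0;
//        deps = deps->next_link()) {
//     for (int i = 0; i < deps->count(); i++) {
//       Object* object = deps->object_at(i);  // WeakCell or Foreign.
//     }
//   }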

class DependentCode : public FixedArray {
 public:
  enum DependencyGroup {
    // Group of code that embeds a transition to this map, and depends on
    // being deoptimized when the transition is replaced by a new version.
    kTransitionGroup,
    // Group of code that omits run-time prototype checks for prototypes
    // described by this map. The group is deoptimized whenever an object
    // described by this map changes shape (and transitions to a new map),
    // possibly invalidating the assumptions embedded in the code.
    kPrototypeCheckGroup,
    // Group of code that depends on global property values in property cells
    // not being changed.
    kPropertyCellChangedGroup,
    // Group of code that omits run-time checks for field(s) introduced by
    // this map, i.e. for the field type.
    kFieldOwnerGroup,
    // Group of code that omits run-time type checks for initial maps of
    // constructors.
    kInitialMapChangedGroup,
    // Group of code that depends on tenuring information in AllocationSites
    // not being changed.
    kAllocationSiteTenuringChangedGroup,
    // Group of code that depends on element transition information in
    // AllocationSites not being changed.
    kAllocationSiteTransitionChangedGroup
  };

  static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
  static const int kNextLinkIndex = 0;
  static const int kFlagsIndex = 1;
  static const int kCodesStartIndex = 2;

  bool Contains(DependencyGroup group, WeakCell* code_cell);
  bool IsEmpty(DependencyGroup group);

  static Handle<DependentCode> InsertCompilationDependencies(
      Handle<DependentCode> entries, DependencyGroup group,
      Handle<Foreign> info);

  static Handle<DependentCode> InsertWeakCode(Handle<DependentCode> entries,
                                              DependencyGroup group,
                                              Handle<WeakCell> code_cell);

  void UpdateToFinishedCode(DependencyGroup group, Foreign* info,
                            WeakCell* code_cell);

  void RemoveCompilationDependencies(DependentCode::DependencyGroup group,
                                     Foreign* info);

  void DeoptimizeDependentCodeGroup(Isolate* isolate,
                                    DependentCode::DependencyGroup group);

  bool MarkCodeForDeoptimization(Isolate* isolate,
                                 DependentCode::DependencyGroup group);

  // The following low-level accessors should only be used by this class
  // and the mark compact collector.
  inline DependentCode* next_link();
  inline void set_next_link(DependentCode* next);
  inline int count();
  inline void set_count(int value);
  inline DependencyGroup group();
  inline void set_group(DependencyGroup group);
  inline Object* object_at(int i);
  inline void set_object_at(int i, Object* object);
  inline void clear_at(int i);
  inline void copy(int from, int to);
  DECL_CAST(DependentCode)

  static const char* DependencyGroupName(DependencyGroup group);

 private:
  static Handle<DependentCode> Insert(Handle<DependentCode> entries,
                                      DependencyGroup group,
                                      Handle<Object> object);
  static Handle<DependentCode> New(DependencyGroup group, Handle<Object> object,
                                   Handle<DependentCode> next);
  static Handle<DependentCode> EnsureSpace(Handle<DependentCode> entries);
  // Compact by removing cleared weak cells; returns true if there was any
  // cleared weak cell.
  bool Compact();
  static int Grow(int number_of_entries) {
    if (number_of_entries < 5) return number_of_entries + 1;
    return number_of_entries * 5 / 4;
  }
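  // For fewer than five entries, Grow() adds one slot at a time; beyond that
  // it grows by 25%, e.g. Grow(8) == 10 and Grow(16) == 20.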
  inline int flags();
  inline void set_flags(int flags);
  class GroupField : public BitField<int, 0, 3> {};
  class CountField : public BitField<int, 3, 27> {};
  STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
};

// BytecodeArray represents a sequence of interpreter bytecodes.
class BytecodeArray : public FixedArrayBase {
 public:
  enum Age {
    kNoAgeBytecodeAge = 0,
    kQuadragenarianBytecodeAge,
    kQuinquagenarianBytecodeAge,
    kSexagenarianBytecodeAge,
    kSeptuagenarianBytecodeAge,
    kOctogenarianBytecodeAge,
    kAfterLastBytecodeAge,
    kFirstBytecodeAge = kNoAgeBytecodeAge,
    kLastBytecodeAge = kAfterLastBytecodeAge - 1,
    kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1,
    kIsOldBytecodeAge = kSexagenarianBytecodeAge
  };

  static int SizeFor(int length) {
    return OBJECT_POINTER_ALIGN(kHeaderSize + length);
  }
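
  // As with Code::SizeFor, this rounds kHeaderSize plus one byte per bytecode
  // up to pointer alignment, e.g. to a multiple of 8 on 64-bit targets.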

  // Setter and getter.
  inline byte get(int index);
  inline void set(int index, byte value);

  // Returns data start address.
  inline Address GetFirstBytecodeAddress();

  // Accessors for frame size.
  inline int frame_size() const;
  inline void set_frame_size(int frame_size);

  // Accessor for register count (derived from frame_size).
  inline int register_count() const;
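  // (Each interpreter register occupies one pointer-sized slot, so this is
  // expected to be frame_size() / kPointerSize.)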

  // Accessors for parameter count (including implicit 'this' receiver).
  inline int parameter_count() const;
  inline void set_parameter_count(int number_of_parameters);

  // Register used to pass the incoming new.target or generator object from
  // the function call.
  inline interpreter::Register incoming_new_target_or_generator_register()
      const;
  inline void set_incoming_new_target_or_generator_register(
      interpreter::Register incoming_new_target_or_generator_register);

  // Accessors for profiling count.
  inline int interrupt_budget() const;
  inline void set_interrupt_budget(int interrupt_budget);

  // Accessors for OSR loop nesting level.
  inline int osr_loop_nesting_level() const;
  inline void set_osr_loop_nesting_level(int depth);

  // Accessors for bytecode's code age.
  inline Age bytecode_age() const;
  inline void set_bytecode_age(Age age);

  // Accessors for the constant pool.
  DECL_ACCESSORS(constant_pool, FixedArray)

  // Accessors for handler table containing offsets of exception handlers.
  DECL_ACCESSORS(handler_table, ByteArray)

  // Accessors for source position table containing mappings between byte code
  // offset and source position or SourcePositionTableWithFrameCache.
  DECL_ACCESSORS(source_position_table, Object)

  inline ByteArray* SourcePositionTable();
  inline void ClearFrameCacheFromSourcePositionTable();

  DECL_CAST(BytecodeArray)

  // Dispatched behavior.
  inline int BytecodeArraySize();

  inline int raw_instruction_size();

  // Returns the size of bytecode and its metadata. This includes the size of
  // bytecode, constant pool, source position table, and handler table.
  inline int SizeIncludingMetadata();

  int SourcePosition(int offset);
  int SourceStatementPosition(int offset);

  DECL_PRINTER(BytecodeArray)
  DECL_VERIFIER(BytecodeArray)

  void Disassemble(std::ostream& os);

  void CopyBytecodesTo(BytecodeArray* to);

  // Bytecode aging.
  bool IsOld() const;
  void MakeOlder();

  // Clear uninitialized padding space. This ensures that the snapshot content
  // is deterministic.
  inline void clear_padding();

// Layout description.
#define BYTECODE_ARRAY_FIELDS(V)                           \
  /* Pointer fields. */                                    \
  V(kConstantPoolOffset, kPointerSize)                     \
  V(kHandlerTableOffset, kPointerSize)                     \
  V(kSourcePositionTableOffset, kPointerSize)              \
  V(kFrameSizeOffset, kIntSize)                            \
  V(kParameterSizeOffset, kIntSize)                        \
  V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
  V(kInterruptBudgetOffset, kIntSize)                      \
  V(kOSRNestingLevelOffset, kCharSize)                     \
  V(kBytecodeAgeOffset, kCharSize)                         \
  /* Total size. */                                        \
  V(kHeaderSize, 0)

  DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
                                BYTECODE_ARRAY_FIELDS)
#undef BYTECODE_ARRAY_FIELDS

  // Maximal memory consumption for a single BytecodeArray.
  static const int kMaxSize = 512 * MB;
  // Maximal length of a single BytecodeArray.
  static const int kMaxLength = kMaxSize - kHeaderSize;

  class BodyDescriptor;
  // No weak fields.
  typedef BodyDescriptor BodyDescriptorWeak;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
};

// DeoptimizationData is a fixed array used to hold the deoptimization data for
// optimized code.  It also contains information about functions that were
// inlined.  If N different functions were inlined then the first N elements of
// the literal array will contain these functions.
//
// It can be empty.
class DeoptimizationData : public FixedArray {
 public:
  // Layout description.  Indices in the array.
  static const int kTranslationByteArrayIndex = 0;
  static const int kInlinedFunctionCountIndex = 1;
  static const int kLiteralArrayIndex = 2;
  static const int kOsrBytecodeOffsetIndex = 3;
  static const int kOsrPcOffsetIndex = 4;
  static const int kOptimizationIdIndex = 5;
  static const int kSharedFunctionInfoIndex = 6;
  static const int kWeakCellCacheIndex = 7;
  static const int kInliningPositionsIndex = 8;
  static const int kFirstDeoptEntryIndex = 9;

  // Offsets of deopt entry elements relative to the start of the entry.
  static const int kBytecodeOffsetRawOffset = 0;
  static const int kTranslationIndexOffset = 1;
  static const int kPcOffset = 2;
  static const int kDeoptEntrySize = 3;
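
  // The flat array index of a field within entry {i} is therefore
  //
  //   kFirstDeoptEntryIndex + i * kDeoptEntrySize + <field offset>
  //
  // e.g. the pc of entry 2 lives at index 9 + 2 * 3 + 2 == 17.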

// Simple element accessors.
#define DECL_ELEMENT_ACCESSORS(name, type) \
  inline type* name();                     \
  inline void Set##name(type* value);

  DECL_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
  DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
  DECL_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
  DECL_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
  DECL_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
  DECL_ELEMENT_ACCESSORS(OptimizationId, Smi)
  DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
  DECL_ELEMENT_ACCESSORS(WeakCellCache, Object)
  DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)

#undef DECL_ELEMENT_ACCESSORS

// Accessors for elements of the ith deoptimization entry.
#define DECL_ENTRY_ACCESSORS(name, type) \
  inline type* name(int i);              \
  inline void Set##name(int i, type* value);

  DECL_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
  DECL_ENTRY_ACCESSORS(TranslationIndex, Smi)
  DECL_ENTRY_ACCESSORS(Pc, Smi)

#undef DECL_ENTRY_ACCESSORS

  inline BailoutId BytecodeOffset(int i);

  inline void SetBytecodeOffset(int i, BailoutId value);

  inline int DeoptCount();
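
  // A typical iteration sketch (illustrative only; assumes a
  // DeoptimizationData* data in scope):
  //
  //   for (int i = 0; i < data->DeoptCount(); i++) {
  //     BailoutId bytecode_offset = data->BytecodeOffset(i);
  //     int translation_index = data->TranslationIndex(i)->value();
  //   }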

  static const int kNotInlinedIndex = -1;

  // Returns the inlined function at the given position in LiteralArray, or the
  // outer function if index == kNotInlinedIndex.
  class SharedFunctionInfo* GetInlinedFunction(int index);

  // Allocates a DeoptimizationData.
  static Handle<DeoptimizationData> New(Isolate* isolate, int deopt_entry_count,
                                        PretenureFlag pretenure);

  // Return an empty DeoptimizationData.
  static Handle<DeoptimizationData> Empty(Isolate* isolate);

  DECL_CAST(DeoptimizationData)

#ifdef ENABLE_DISASSEMBLER
  void DeoptimizationDataPrint(std::ostream& os);  // NOLINT
#endif

 private:
  static int IndexForEntry(int i) {
    return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
  }

  static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
};

}  // namespace internal
}  // namespace v8

#include "src/objects/object-macros-undef.h"

#endif  // V8_OBJECTS_CODE_H_