1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // Note 1: Any file that includes this one should include object-macros-undef.h
6 // at the bottom.
7 
8 // Note 2: This file is deliberately missing the include guards (the undeffing
9 // approach wouldn't work otherwise).
10 //
11 // PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
12 
13 // The accessors with RELAXED_, ACQUIRE_, and RELEASE_ prefixes should be used
14 // for fields that can be written to and read from multiple threads at the same
// time. See comments in src/base/atomicops.h for the memory ordering semantics.
16 
// Declares an inline getter `name()` / setter `set_name(value)` pair for a
// primitive-typed field. Definitions are provided by the *_ACCESSORS macros.
#define DECL_PRIMITIVE_ACCESSORS(name, type) \
  inline type name() const;                  \
  inline void set_##name(type value);
20 
// Convenience wrappers around DECL_PRIMITIVE_ACCESSORS for common field types.
#define DECL_BOOLEAN_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, bool)

#define DECL_INT_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int)

#define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)
26 
// Declares a getter/setter pair for a heap-object (tagged pointer) field.
// The setter takes an optional WriteBarrierMode, defaulting to a full
// write barrier.
#define DECL_ACCESSORS(name, type)    \
  inline type* name() const;          \
  inline void set_##name(type* value, \
                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
31 
// Declares const and non-const `type::cast(Object*)` helpers; bodies are
// generated by CAST_ACCESSOR below.
#define DECL_CAST(type)                      \
  INLINE(static type* cast(Object* object)); \
  INLINE(static const type* cast(const Object* object));
35 
// Defines the `type::cast` helpers declared by DECL_CAST. The cast itself is
// a plain reinterpret_cast; the type is only validated via SLOW_DCHECK
// (Is##type()) in checked builds.
#define CAST_ACCESSOR(type)                       \
  type* type::cast(Object* object) {              \
    SLOW_DCHECK(object->Is##type());              \
    return reinterpret_cast<type*>(object);       \
  }                                               \
  const type* type::cast(const Object* object) {  \
    SLOW_DCHECK(object->Is##type());              \
    return reinterpret_cast<const type*>(object); \
  }
45 
// Defines getter/setter bodies for a raw (untagged) int field at `offset`.
#define INT_ACCESSORS(holder, name, offset)                         \
  int holder::name() const { return READ_INT_FIELD(this, offset); } \
  void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
49 
// Defines getter/setter bodies for a raw (untagged) int32_t field at `offset`.
#define INT32_ACCESSORS(holder, name, offset)                             \
  int32_t holder::name() const { return READ_INT32_FIELD(this, offset); } \
  void holder::set_##name(int32_t value) {                                \
    WRITE_INT32_FIELD(this, offset, value);                               \
  }
55 
// Defines getter/setter bodies for a tagged heap-object field. The getter
// DCHECKs `get_condition` after reading; the setter DCHECKs `set_condition`,
// performs the write, and then emits a write barrier according to `mode`.
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
                           set_condition)                             \
  type* holder::name() const {                                        \
    type* value = type::cast(READ_FIELD(this, offset));               \
    DCHECK(get_condition);                                            \
    return value;                                                     \
  }                                                                   \
  void holder::set_##name(type* value, WriteBarrierMode mode) {       \
    DCHECK(set_condition);                                            \
    WRITE_FIELD(this, offset, value);                                 \
    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);  \
  }
// Same condition used on both the read and the write path.
#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
  ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)

// Unconditional variant (no DCHECKed invariant).
#define ACCESSORS(holder, name, type, offset) \
  ACCESSORS_CHECKED(holder, name, type, offset, true)
73 
// Like ACCESSORS_CHECKED2, but for fields holding a MaybeObject (a value
// that may be a weak reference), going through the *_WEAK_* field and
// barrier macros.
#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition,      \
                                set_condition)                            \
  MaybeObject* holder::name() const {                                     \
    MaybeObject* value = READ_WEAK_FIELD(this, offset);                   \
    DCHECK(get_condition);                                                \
    return value;                                                         \
  }                                                                       \
  void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) {    \
    DCHECK(set_condition);                                                \
    WRITE_WEAK_FIELD(this, offset, value);                                \
    CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
  }

#define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
  WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, condition)

#define WEAK_ACCESSORS(holder, name, offset) \
  WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
92 
// Getter that returns a Smi as an int and writes an int as a Smi.
// `condition` is DCHECKed on both the read and the write path. No write
// barrier is needed since Smis are not heap-allocated.
#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
  int holder::name() const {                                   \
    DCHECK(condition);                                         \
    Object* value = READ_FIELD(this, offset);                  \
    return Smi::ToInt(value);                                  \
  }                                                            \
  void holder::set_##name(int value) {                         \
    DCHECK(condition);                                         \
    WRITE_FIELD(this, offset, Smi::FromInt(value));            \
  }

#define SMI_ACCESSORS(holder, name, offset) \
  SMI_ACCESSORS_CHECKED(holder, name, offset, true)
107 
// Smi accessors with acquire/release semantics: the getter acquire-loads and
// the setter release-stores, for fields read/written across threads.
#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset)    \
  int holder::synchronized_##name() const {                 \
    Object* value = ACQUIRE_READ_FIELD(this, offset);       \
    return Smi::ToInt(value);                               \
  }                                                         \
  void holder::synchronized_set_##name(int value) {         \
    RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }
116 
// Smi accessors using relaxed atomic loads/stores: atomic (no torn values)
// but with no ordering guarantees relative to other memory operations.
#define RELAXED_SMI_ACCESSORS(holder, name, offset)         \
  int holder::relaxed_read_##name() const {                 \
    Object* value = RELAXED_READ_FIELD(this, offset);       \
    return Smi::ToInt(value);                               \
  }                                                         \
  void holder::relaxed_write_##name(int value) {            \
    RELAXED_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }
125 
// Getter for a single bit (at bit position `offset`) inside the value
// returned by the existing accessor `field()`.
#define BOOL_GETTER(holder, field, name, offset) \
  bool holder::name() const { return BooleanBit::get(field(), offset); }

// Getter/setter pair for a single bit inside `field()`; the setter
// read-modify-writes the whole field via set_##field.
#define BOOL_ACCESSORS(holder, field, name, offset)                      \
  bool holder::name() const { return BooleanBit::get(field(), offset); } \
  void holder::set_##name(bool value) {                                  \
    set_##field(BooleanBit::set(field(), offset, value));                \
  }
134 
// Getter/setter pair for a BitField slice of the value returned by `field()`.
// The getter decodes the bits; the setter read-modify-writes the whole field
// through set_##field using BitField::update.
#define BIT_FIELD_ACCESSORS(holder, field, name, BitField)      \
  typename BitField::FieldType holder::name() const {           \
    return BitField::decode(field());                           \
  }                                                             \
  void holder::set_##name(typename BitField::FieldType value) { \
    set_##field(BitField::update(field(), value));              \
  }
142 
// Defines HeapObject::Is##type() by comparing the object's map instance type
// against a single expected instance type.
#define TYPE_CHECKER(type, instancetype)           \
  bool HeapObject::Is##type() const {              \
    return map()->instance_type() == instancetype; \
  }
147 
// Raw address of the field at `offset` within object `p`: the heap-object
// pointer is tagged, so kHeapObjectTag is subtracted to untag it.
#define FIELD_ADDR(p, offset) \
  (reinterpret_cast<Address>(p) + offset - kHeapObjectTag)
150 
// Tagged-pointer field reads. The plain READ_* forms are ordinary
// (non-atomic) loads; the ACQUIRE_/RELAXED_ forms go through base atomicops
// with the corresponding memory ordering.
#define READ_FIELD(p, offset) \
  (*reinterpret_cast<Object* const*>(FIELD_ADDR(p, offset)))

#define READ_WEAK_FIELD(p, offset) \
  (*reinterpret_cast<MaybeObject* const*>(FIELD_ADDR(p, offset)))

// Acquire-load; pairs with RELEASE_WRITE_FIELD.
#define ACQUIRE_READ_FIELD(p, offset)           \
  reinterpret_cast<Object*>(base::Acquire_Load( \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))

// Relaxed atomic load; pairs with RELAXED_WRITE_FIELD.
#define RELAXED_READ_FIELD(p, offset)           \
  reinterpret_cast<Object*>(base::Relaxed_Load( \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))

#define RELAXED_READ_WEAK_FIELD(p, offset)           \
  reinterpret_cast<MaybeObject*>(base::Relaxed_Load( \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
168 
// Tagged-pointer field writes. With concurrent marking enabled the store must
// be atomic (relaxed ordering); otherwise it is a plain store. Neither form
// emits a write barrier — callers use the *_WRITE_BARRIER macros for that.
//
// Fix: the atomic variants previously ended with a trailing `;` inside the
// macro, making WRITE_FIELD a statement in one #ifdef branch and an
// expression in the other; e.g. `if (c) WRITE_FIELD(...); else ...` compiled
// only without V8_CONCURRENT_MARKING. All write macros now expand to a single
// expression so call sites supply the semicolon in both configurations.
#ifdef V8_CONCURRENT_MARKING
#define WRITE_FIELD(p, offset, value)                             \
  base::Relaxed_Store(                                            \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      reinterpret_cast<base::AtomicWord>(value))
#define WRITE_WEAK_FIELD(p, offset, value)                        \
  base::Relaxed_Store(                                            \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      reinterpret_cast<base::AtomicWord>(value))
#else
#define WRITE_FIELD(p, offset, value) \
  (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
#define WRITE_WEAK_FIELD(p, offset, value) \
  (*reinterpret_cast<MaybeObject**>(FIELD_ADDR(p, offset)) = value)
#endif

// Release-store; pairs with ACQUIRE_READ_FIELD.
#define RELEASE_WRITE_FIELD(p, offset, value)                     \
  base::Release_Store(                                            \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      reinterpret_cast<base::AtomicWord>(value))

// Relaxed atomic store; pairs with RELAXED_READ_FIELD.
#define RELAXED_WRITE_FIELD(p, offset, value)                     \
  base::Relaxed_Store(                                            \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      reinterpret_cast<base::AtomicWord>(value))
194 
// Unconditionally records a tagged-pointer write for the GC: notifies the
// incremental marker, then the heap's write recording.
//
// Fix: this is a multi-statement macro; without a do/while(false) wrapper,
// `if (c) WRITE_BARRIER(...);` would execute only the first statement
// conditionally. The wrapper makes the expansion a single statement and
// swallows the call site's trailing semicolon.
#define WRITE_BARRIER(heap, object, offset, value)                          \
  do {                                                                      \
    heap->incremental_marking()->RecordWrite(                               \
        object, HeapObject::RawField(object, offset), value);               \
    heap->RecordWrite(object, HeapObject::RawField(object, offset), value); \
  } while (false)
199 
// Unconditional write barrier for a field that may hold a weak reference
// (MaybeObject): notifies the incremental marker, then the heap's write
// recording.
//
// Fix: wrapped in do/while(false) so the two statements behave as one
// statement at the call site (safe in unbraced if/else bodies).
#define WEAK_WRITE_BARRIER(heap, object, offset, value)                      \
  do {                                                                       \
    heap->incremental_marking()->RecordMaybeWeakWrite(                       \
        object, HeapObject::RawMaybeWeakField(object, offset), value);       \
    heap->RecordWrite(object, HeapObject::RawMaybeWeakField(object, offset), \
                      value);                                                \
  } while (false)
205 
// Write barrier honoring WriteBarrierMode: skipped entirely for
// SKIP_WRITE_BARRIER; the incremental-marking notification additionally
// requires UPDATE_WRITE_BARRIER.
//
// Fix: the bare `if` in the old expansion created a dangling-else hazard —
// in `if (c) CONDITIONAL_WRITE_BARRIER(...); else ...` the `else` would bind
// to the macro's inner `if`. do/while(false) makes the expansion a single
// statement immune to that.
#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode)          \
  do {                                                                        \
    if (mode != SKIP_WRITE_BARRIER) {                                         \
      if (mode == UPDATE_WRITE_BARRIER) {                                     \
        heap->incremental_marking()->RecordWrite(                             \
            object, HeapObject::RawField(object, offset), value);             \
      }                                                                       \
      heap->RecordWrite(object, HeapObject::RawField(object, offset), value); \
    }                                                                         \
  } while (false)
214 
// Weak-reference counterpart of CONDITIONAL_WRITE_BARRIER, honoring
// WriteBarrierMode for MaybeObject fields.
//
// Fix: wrapped in do/while(false) to remove the dangling-else hazard of the
// previous bare-`if` expansion and make the macro a single statement.
#define CONDITIONAL_WEAK_WRITE_BARRIER(heap, object, offset, value, mode)      \
  do {                                                                         \
    if (mode != SKIP_WRITE_BARRIER) {                                          \
      if (mode == UPDATE_WRITE_BARRIER) {                                      \
        heap->incremental_marking()->RecordMaybeWeakWrite(                     \
            object, HeapObject::RawMaybeWeakField(object, offset), value);     \
      }                                                                        \
      heap->RecordWrite(object, HeapObject::RawMaybeWeakField(object, offset), \
                        value);                                                \
    }                                                                          \
  } while (false)
224 
// Raw double field access, delegating to the ReadDoubleValue /
// WriteDoubleValue helpers (which handle any alignment concerns).
#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))

#define WRITE_DOUBLE_FIELD(p, offset, value) \
  WriteDoubleValue(FIELD_ADDR(p, offset), value)

// Raw (non-atomic) int field access.
#define READ_INT_FIELD(p, offset) \
  (*reinterpret_cast<const int*>(FIELD_ADDR(p, offset)))

#define WRITE_INT_FIELD(p, offset, value) \
  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
235 
// intptr_t-sized field access. RELAXED_* variants use relaxed atomic
// loads/stores via base atomicops; the plain variants are ordinary accesses.
//
// Fix: RELAXED_WRITE_INTPTR_FIELD previously ended with a trailing `;`,
// making it expand to statement + empty statement (unlike its plain sibling,
// which is an expression). The semicolon now comes from the call site.
#define RELAXED_READ_INTPTR_FIELD(p, offset) \
  static_cast<intptr_t>(base::Relaxed_Load(  \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))

#define READ_INTPTR_FIELD(p, offset) \
  (*reinterpret_cast<const intptr_t*>(FIELD_ADDR(p, offset)))

#define RELAXED_WRITE_INTPTR_FIELD(p, offset, value)              \
  base::Relaxed_Store(                                            \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      static_cast<base::AtomicWord>(value))

#define WRITE_INTPTR_FIELD(p, offset, value) \
  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
250 
// 8-bit field access. RELAXED_* variants use relaxed atomic ops on
// base::Atomic8; the plain variants are ordinary accesses.
//
// Fix: RELAXED_WRITE_INT8_FIELD previously carried a trailing `;` inside the
// macro; it now expands to an expression like the other write macros, with
// the semicolon supplied at the call site.
#define READ_UINT8_FIELD(p, offset) \
  (*reinterpret_cast<const uint8_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT8_FIELD(p, offset, value) \
  (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)

#define RELAXED_WRITE_INT8_FIELD(p, offset, value)                             \
  base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
                      static_cast<base::Atomic8>(value))

#define READ_INT8_FIELD(p, offset) \
  (*reinterpret_cast<const int8_t*>(FIELD_ADDR(p, offset)))

#define RELAXED_READ_INT8_FIELD(p, offset) \
  static_cast<int8_t>(base::Relaxed_Load(  \
      reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))

#define WRITE_INT8_FIELD(p, offset, value) \
  (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)
270 
// Raw (non-atomic) 16- and 32-bit field access.
#define READ_UINT16_FIELD(p, offset) \
  (*reinterpret_cast<const uint16_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT16_FIELD(p, offset, value) \
  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT16_FIELD(p, offset) \
  (*reinterpret_cast<const int16_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INT16_FIELD(p, offset, value) \
  (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT32_FIELD(p, offset) \
  (*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT32_FIELD(p, offset, value) \
  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT32_FIELD(p, offset) \
  (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INT32_FIELD(p, offset, value) \
  (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
294 
// Raw (non-atomic) float and 64-bit field access.
#define READ_FLOAT_FIELD(p, offset) \
  (*reinterpret_cast<const float*>(FIELD_ADDR(p, offset)))

#define WRITE_FLOAT_FIELD(p, offset, value) \
  (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT64_FIELD(p, offset) \
  (*reinterpret_cast<const uint64_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT64_FIELD(p, offset, value) \
  (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT64_FIELD(p, offset) \
  (*reinterpret_cast<const int64_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INT64_FIELD(p, offset, value) \
  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
312 
// Byte field access. RELAXED_* variants use relaxed atomic ops on
// base::Atomic8; the plain variants are ordinary accesses.
//
// Fix: RELAXED_WRITE_BYTE_FIELD previously carried a trailing `;` inside the
// macro; it now expands to an expression like WRITE_BYTE_FIELD, with the
// semicolon supplied at the call site.
#define READ_BYTE_FIELD(p, offset) \
  (*reinterpret_cast<const byte*>(FIELD_ADDR(p, offset)))

#define RELAXED_READ_BYTE_FIELD(p, offset) \
  static_cast<byte>(base::Relaxed_Load(    \
      reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))

#define WRITE_BYTE_FIELD(p, offset, value) \
  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)

#define RELAXED_WRITE_BYTE_FIELD(p, offset, value)                             \
  base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
                      static_cast<base::Atomic8>(value))
326 
// Declares a heap-verification hook; compiles away entirely when the build
// does not define VERIFY_HEAP.
#ifdef VERIFY_HEAP
#define DECL_VERIFIER(Name) void Name##Verify();
#else
#define DECL_VERIFIER(Name)
#endif
332 
// Typed accessors for a fixed slot of DeoptimizationData, addressed by the
// k##name##Index constant.
#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type)                             \
  type* DeoptimizationData::name() { return type::cast(get(k##name##Index)); } \
  void DeoptimizationData::Set##name(type* value) {                            \
    set(k##name##Index, value);                                                \
  }

// Typed accessors for a per-entry slot of DeoptimizationData: the slot index
// is IndexForEntry(i) plus the k##name##Offset constant.
#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type)                \
  type* DeoptimizationData::name(int i) {                       \
    return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
  }                                                             \
  void DeoptimizationData::Set##name(int i, type* value) {      \
    set(IndexForEntry(i) + k##name##Offset, value);             \
  }
346