// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SLOT_SET_H_
#define V8_HEAP_SLOT_SET_H_

#include <map>
#include <stack>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
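//
// Sample usage (a sketch; `page` and `offset` are illustrative values, and
// `offset` must be pointer-size aligned):
//   SlotSet* slots = new SlotSet();
//   slots->SetPageStart(page);
//   slots->Insert(offset);
//   if (slots->Lookup(offset)) { /* the slot is in the set */ }
//   slots->Remove(offset);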
class SlotSet : public Malloced {
 public:
  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,     // An empty bucket will be deallocated immediately.
    PREFREE_EMPTY_BUCKETS,  // An empty bucket will be unlinked from the slot
                            // set, but deallocated on demand by a sweeper
                            // thread.
    KEEP_EMPTY_BUCKETS      // An empty bucket will be kept.
  };

  SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      StoreBucket(&buckets_[i], nullptr);
    }
  }

  ~SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      ReleaseBucket(i);
    }
    FreeToBeFreedBuckets();
  }

  void SetPageStart(Address page_start) { page_start_ = page_start; }
  // The slot offset specifies a slot at address page_start_ + slot_offset.
  //
  // AccessMode defines whether the buckets may be accessed concurrently. In
  // ATOMIC mode, racing bucket allocations are resolved by the
  // compare-and-swap in SwapInNewBucket; NON_ATOMIC mode must only be used
  // while no other thread accesses the slot set.
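  //
  // Sample usage (a sketch; ATOMIC is the default, NON_ATOMIC is an opt-in
  // for phases with exclusive access):
  //   slots->Insert(offset);
  //   slots->Insert<AccessMode::NON_ATOMIC>(offset);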
  template <AccessMode access_mode = AccessMode::ATOMIC>
  void Insert(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
    if (bucket == nullptr) {
      bucket = AllocateBucket();
      if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
        DeleteArray<uint32_t>(bucket);
        bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
      }
    }
    // Check that monotonicity is preserved, i.e., once a bucket is set we do
    // not free it concurrently.
    DCHECK_NOT_NULL(bucket);
    DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
    uint32_t mask = 1u << bit_index;
    if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
      SetCellBits<access_mode>(&bucket[cell_index], mask);
    }
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // Returns true if the set contains the slot.
  bool Contains(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket == nullptr) return false;
    return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket != nullptr) {
      uint32_t cell = LoadCell(&bucket[cell_index]);
      uint32_t bit_mask = 1u << bit_index;
      if (cell & bit_mask) {
        ClearCellBits(&bucket[cell_index], bit_mask);
      }
    }
  }

  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
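  //
  // Sample usage (a sketch; the offsets are illustrative and pointer-size
  // aligned):
  //   slots->RemoveRange(0, kPointerSize * 100, SlotSet::KEEP_EMPTY_BUCKETS);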
  void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
    CHECK_LE(end_offset, 1 << kPageSizeBits);
    DCHECK_LE(start_offset, end_offset);
    int start_bucket, start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    int end_bucket, end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    Bucket bucket;
    if (start_bucket == end_bucket && start_cell == end_cell) {
      bucket = LoadBucket(&buckets_[start_bucket]);
      if (bucket != nullptr) {
        ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
      }
      return;
    }
    int current_bucket = start_bucket;
    int current_cell = start_cell;
    bucket = LoadBucket(&buckets_[current_bucket]);
    if (bucket != nullptr) {
      ClearCellBits(&bucket[current_cell], ~start_mask);
    }
    current_cell++;
    if (current_bucket < end_bucket) {
      if (bucket != nullptr) {
        ClearBucket(bucket, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared.
      // Move on to the next bucket.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == PREFREE_EMPTY_BUCKETS) {
        PreFreeEmptyBucket(current_bucket);
      } else if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket = LoadBucket(&buckets_[current_bucket]);
        if (bucket != nullptr) {
          ClearBucket(bucket, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are cleared.
    DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
    // Check the bucket index before loading it: end_bucket equals kBuckets
    // when end_offset is the page size, and buckets_[kBuckets] would be an
    // out-of-bounds read.
    if (current_bucket == kBuckets) {
      return;
    }
    bucket = LoadBucket(&buckets_[current_bucket]);
    if (bucket == nullptr) {
      return;
    }
    while (current_cell < end_cell) {
      StoreCell(&bucket[current_cell], 0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    ClearCellBits(&bucket[end_cell], ~end_mask);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket == nullptr) return false;
    return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  // This method should only be called on the main thread.
  //
  // Sample usage:
  // Iterate([](Address slot_address) {
  //    if (good(slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, EmptyBucketMode mode) {
    int new_count = 0;
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      if (bucket != nullptr) {
        int in_bucket_count = 0;
        int cell_offset = bucket_index * kBitsPerBucket;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          uint32_t cell = LoadCell(&bucket[i]);
          if (cell) {
            uint32_t old_cell = cell;
            uint32_t mask = 0;
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros(cell);
              uint32_t bit_mask = 1u << bit_offset;
              uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
              if (callback(page_start_ + slot) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              ClearCellBits(&bucket[i], mask);
            }
          }
        }
        if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
          PreFreeEmptyBucket(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }

  int NumberOfPreFreedEmptyBuckets() {
    base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
    return static_cast<int>(to_be_freed_buckets_.size());
  }

  void PreFreeEmptyBuckets() {
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      if (bucket != nullptr) {
        if (IsEmptyBucket(bucket)) {
          PreFreeEmptyBucket(bucket_index);
        }
      }
    }
  }

  void FreeEmptyBuckets() {
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      if (bucket != nullptr) {
        if (IsEmptyBucket(bucket)) {
          ReleaseBucket(bucket_index);
        }
      }
    }
  }

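  // Deallocates the buckets that were unlinked with PREFREE_EMPTY_BUCKETS.
  // The queue is mutex-guarded, so this can run on a sweeper thread (see the
  // EmptyBucketMode comment above).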
  void FreeToBeFreedBuckets() {
    base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
    while (!to_be_freed_buckets_.empty()) {
      Bucket top = to_be_freed_buckets_.top();
      to_be_freed_buckets_.pop();
      DeleteArray<uint32_t>(top);
    }
    DCHECK_EQ(0u, to_be_freed_buckets_.size());
  }

 private:
  typedef uint32_t* Bucket;
  static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
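  // Worked example (assuming 512 KB pages, i.e. kPageSizeBits == 19, and
  // 8-byte pointers): kMaxSlots == 65536; each bucket covers
  // kBitsPerBucket == 1024 slots, so kBuckets == 64.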

  Bucket AllocateBucket() {
    Bucket result = NewArray<uint32_t>(kCellsPerBucket);
    for (int i = 0; i < kCellsPerBucket; i++) {
      result[i] = 0;
    }
    return result;
  }

  void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < end_cell) {
      StoreCell(&bucket[current_cell], 0);
      current_cell++;
    }
  }

  void PreFreeEmptyBucket(int bucket_index) {
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket != nullptr) {
      base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
      to_be_freed_buckets_.push(bucket);
      StoreBucket(&buckets_[bucket_index], nullptr);
    }
  }

  void ReleaseBucket(int bucket_index) {
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    StoreBucket(&buckets_[bucket_index], nullptr);
    DeleteArray<uint32_t>(bucket);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket LoadBucket(Bucket* bucket) {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(bucket);
    return *bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(Bucket* bucket, Bucket value) {
    if (access_mode == AccessMode::ATOMIC) {
      base::AsAtomicPointer::Release_Store(bucket, value);
    } else {
      *bucket = value;
    }
  }

  bool IsEmptyBucket(Bucket bucket) {
    for (int i = 0; i < kCellsPerBucket; i++) {
      if (LoadCell(&bucket[i])) {
        return false;
      }
    }
    return true;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  bool SwapInNewBucket(Bucket* bucket, Bucket value) {
    if (access_mode == AccessMode::ATOMIC) {
      return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr,
                                                           value) == nullptr;
    } else {
      DCHECK_NULL(*bucket);
      *bucket = value;
      return true;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  uint32_t LoadCell(uint32_t* cell) {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomic32::Acquire_Load(cell);
    return *cell;
  }

  void StoreCell(uint32_t* cell, uint32_t value) {
    base::AsAtomic32::Release_Store(cell, value);
  }

  void ClearCellBits(uint32_t* cell, uint32_t mask) {
    base::AsAtomic32::SetBits(cell, 0u, mask);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void SetCellBits(uint32_t* cell, uint32_t mask) {
    if (access_mode == AccessMode::ATOMIC) {
      base::AsAtomic32::SetBits(cell, mask, mask);
    } else {
      *cell = (*cell & ~mask) | mask;
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
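  // E.g., with 8-byte pointers (kPointerSizeLog2 == 3), the illustrative
  // slot_offset 0x2048 maps to slot 0x409: bucket 1, cell 0, bit 9.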
  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
                     int* bit_index) {
    DCHECK_EQ(slot_offset % kPointerSize, 0);
    int slot = slot_offset >> kPointerSizeLog2;
    DCHECK(slot >= 0 && slot <= kMaxSlots);
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
    *bit_index = slot & (kBitsPerCell - 1);
  }

  Bucket buckets_[kBuckets];
  Address page_start_;
  base::Mutex to_be_freed_buckets_mutex_;
  std::stack<uint32_t*> to_be_freed_buckets_;
};

enum SlotType {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  CLEARED_SLOT
};

// Data structure for maintaining a multiset of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
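//
// Sample usage (a sketch; `page`, `host_offset`, and `offset` are
// illustrative values):
//   TypedSlotSet* typed_slots = new TypedSlotSet(page);
//   typed_slots->Insert(CODE_TARGET_SLOT, host_offset, offset);
//   typed_slots->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);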
class TypedSlotSet {
 public:
  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  typedef std::pair<SlotType, uint32_t> TypeAndOffset;

  struct TypedSlot {
    TypedSlot() : type_and_offset_(0), host_offset_(0) {}

    TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
        : type_and_offset_(TypeField::encode(type) |
                           OffsetField::encode(offset)),
          host_offset_(host_offset) {}

    bool operator==(const TypedSlot other) {
      return type_and_offset() == other.type_and_offset() &&
             host_offset() == other.host_offset();
    }

    bool operator!=(const TypedSlot other) { return !(*this == other); }

    SlotType type() const { return TypeField::decode(type_and_offset()); }

    uint32_t offset() const { return OffsetField::decode(type_and_offset()); }

    TypeAndOffset GetTypeAndOffset() const {
      uint32_t t_and_o = type_and_offset();
      return std::make_pair(TypeField::decode(t_and_o),
                            OffsetField::decode(t_and_o));
    }

    uint32_t type_and_offset() const {
      return base::AsAtomic32::Acquire_Load(&type_and_offset_);
    }

    uint32_t host_offset() const {
      return base::AsAtomic32::Acquire_Load(&host_offset_);
    }

    void Set(TypedSlot slot) {
      base::AsAtomic32::Release_Store(&type_and_offset_,
                                      slot.type_and_offset());
      base::AsAtomic32::Release_Store(&host_offset_, slot.host_offset());
    }

    void Clear() {
      base::AsAtomic32::Release_Store(
          &type_and_offset_,
          TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
      base::AsAtomic32::Release_Store(&host_offset_, 0);
    }

    uint32_t type_and_offset_;
    uint32_t host_offset_;
  };
  static const int kMaxOffset = 1 << 29;

  explicit TypedSlotSet(Address page_start)
      : page_start_(page_start), top_(new Chunk(nullptr, kInitialBufferSize)) {}

  ~TypedSlotSet() {
    Chunk* chunk = load_top();
    while (chunk != nullptr) {
      Chunk* n = chunk->next();
      delete chunk;
      chunk = n;
    }
    FreeToBeFreedChunks();
  }

  // The slot offset specifies a slot at address page_start_ + offset.
  // This method can only be called on the main thread.
  void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
    TypedSlot slot(type, host_offset, offset);
    Chunk* top_chunk = load_top();
    if (!top_chunk) {
      top_chunk = new Chunk(nullptr, kInitialBufferSize);
      set_top(top_chunk);
    }
    if (!top_chunk->AddSlot(slot)) {
      Chunk* new_top_chunk =
          new Chunk(top_chunk, NextCapacity(top_chunk->capacity()));
      bool added = new_top_chunk->AddSlot(slot);
      set_top(new_top_chunk);
      DCHECK(added);
      USE(added);
    }
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address host_addr, Address slot_address) {
  //    if (good(slot_type, slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    STATIC_ASSERT(CLEARED_SLOT < 8);
    Chunk* chunk = load_top();
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      TypedSlot* buf = chunk->buffer();
      bool empty = true;
      for (int i = 0; i < chunk->count(); i++) {
        // Order is important here. We have to read out the slot type last to
        // observe the concurrent removal case consistently.
        Address host_addr = page_start_ + buf[i].host_offset();
        TypeAndOffset type_and_offset = buf[i].GetTypeAndOffset();
        SlotType type = type_and_offset.first;
        if (type != CLEARED_SLOT) {
          Address addr = page_start_ + type_and_offset.second;
          if (callback(type, host_addr, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            buf[i].Clear();
          }
        }
      }

      Chunk* n = chunk->next();
      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point to its
        // next chunk to allow concurrent iteration.
        if (previous) {
          previous->set_next(n);
        } else {
          set_top(n);
        }
        base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
        to_be_freed_chunks_.push(chunk);
      } else {
        previous = chunk;
      }
      chunk = n;
    }
    return new_count;
  }

  void FreeToBeFreedChunks() {
    base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
    while (!to_be_freed_chunks_.empty()) {
      Chunk* top = to_be_freed_chunks_.top();
      to_be_freed_chunks_.pop();
      delete top;
    }
  }

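  // Clears every slot whose host offset falls into one of the given invalid
  // ranges. Each map entry encodes a half-open range [start, end) of host
  // offsets.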
  void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
    Chunk* chunk = load_top();
    while (chunk != nullptr) {
      TypedSlot* buf = chunk->buffer();
      for (int i = 0; i < chunk->count(); i++) {
        uint32_t host_offset = buf[i].host_offset();
        std::map<uint32_t, uint32_t>::iterator upper_bound =
            invalid_ranges.upper_bound(host_offset);
        if (upper_bound == invalid_ranges.begin()) continue;
        // upper_bound points to the invalid range after the given slot. Hence,
        // we have to go to the previous element.
        upper_bound--;
        DCHECK_LE(upper_bound->first, host_offset);
        if (upper_bound->second > host_offset) {
          buf[i].Clear();
        }
      }
      chunk = chunk->next();
    }
  }

 private:
  static const int kInitialBufferSize = 100;
  static const int kMaxBufferSize = 16 * KB;

  static int NextCapacity(int capacity) {
    return Min(kMaxBufferSize, capacity * 2);
  }

  class OffsetField : public BitField<int, 0, 29> {};
  class TypeField : public BitField<SlotType, 29, 3> {};
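  // A TypedSlot thus packs the offset into bits 0..28 and the type into bits
  // 29..31 of type_and_offset_; kMaxOffset == 1 << 29 matches the offset
  // field's width, and STATIC_ASSERT(CLEARED_SLOT < 8) in Iterate guards the
  // 3-bit type field.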

  struct Chunk : Malloced {
    explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
      next_ = next_chunk;
      buffer_ = NewArray<TypedSlot>(chunk_capacity);
      capacity_ = chunk_capacity;
      count_ = 0;
    }

    ~Chunk() { DeleteArray(buffer_); }

    bool AddSlot(TypedSlot slot) {
      int current_count = count();
      if (current_count == capacity()) return false;
      TypedSlot* current_buffer = buffer();
      // Order is important here. We have to write the slot first before
      // increasing the counter to guarantee that a consistent state is
      // observed by concurrent threads.
      current_buffer[current_count].Set(slot);
      set_count(current_count + 1);
      return true;
    }

    Chunk* next() const { return base::AsAtomicPointer::Acquire_Load(&next_); }

    void set_next(Chunk* n) {
      return base::AsAtomicPointer::Release_Store(&next_, n);
    }

    TypedSlot* buffer() const { return buffer_; }

    int32_t capacity() const { return capacity_; }

    int32_t count() const { return base::AsAtomic32::Acquire_Load(&count_); }

    void set_count(int32_t new_value) {
      base::AsAtomic32::Release_Store(&count_, new_value);
    }

   private:
    Chunk* next_;
    TypedSlot* buffer_;
    int32_t capacity_;
    int32_t count_;
  };

  Chunk* load_top() { return base::AsAtomicPointer::Acquire_Load(&top_); }

  void set_top(Chunk* c) { base::AsAtomicPointer::Release_Store(&top_, c); }

  Address page_start_;
  Chunk* top_;
  base::Mutex to_be_freed_chunks_mutex_;
  std::stack<Chunk*> to_be_freed_chunks_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SLOT_SET_H_