// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/base/allocator/partition_allocator/partition_alloc.h"

#include <string.h>

#include <memory>
#include <type_traits>

#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "third_party/base/allocator/partition_allocator/partition_oom.h"
#include "third_party/base/allocator/partition_allocator/partition_page.h"
#include "third_party/base/allocator/partition_allocator/spin_lock.h"

namespace pdfium {
namespace base {

// Two partition pages are used as guard / metadata pages, so make sure the
// super page size is bigger.
static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size");
static_assert(!(kSuperPageSize % kPartitionPageSize), "ok super page multiple");
// Four system pages give us room to hack out a still-guard-paged piece
// of metadata in the middle of a guard partition page.
static_assert(kSystemPageSize * 4 <= kPartitionPageSize,
              "ok partition page size");
static_assert(!(kPartitionPageSize % kSystemPageSize),
              "ok partition page multiple");
static_assert(sizeof(internal::PartitionPage) <= kPageMetadataSize,
              "PartitionPage should not be too big");
static_assert(sizeof(internal::PartitionBucket) <= kPageMetadataSize,
              "PartitionBucket should not be too big");
static_assert(sizeof(internal::PartitionSuperPageExtentEntry) <=
                  kPageMetadataSize,
              "PartitionSuperPageExtentEntry should not be too big");
static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
                  kSystemPageSize,
              "page metadata fits in hole");
// Limit to prevent callers from accidentally overflowing an int size.
static_assert(kGenericMaxDirectMapped <=
                  (1UL << 31) + kPageAllocationGranularity,
              "maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
static_assert(kGenericSmallestBucket == 8, "generic smallest bucket");
static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
              "System pages per slot span must be less than 256.");

internal::PartitionRootBase::PartitionRootBase() = default;
internal::PartitionRootBase::~PartitionRootBase() = default;
PartitionRoot::PartitionRoot() = default;
PartitionRoot::~PartitionRoot() = default;
PartitionRootGeneric::PartitionRootGeneric() = default;
PartitionRootGeneric::~PartitionRootGeneric() = default;
PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
PartitionAllocatorGeneric::~PartitionAllocatorGeneric() = default;

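// Returns the lazily-created (and intentionally leaked) spin lock guarding the
// one-time global initialization in PartitionAllocBaseInit().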
subtle::SpinLock* GetLock() {
  static subtle::SpinLock* s_initialized_lock = nullptr;
  if (!s_initialized_lock)
    s_initialized_lock = new subtle::SpinLock();
  return s_initialized_lock;
}

static bool g_initialized = false;

void (*internal::PartitionRootBase::gOomHandlingFunction)() = nullptr;
std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
subtle::SpinLock PartitionAllocHooks::set_hooks_lock_;
std::atomic<PartitionAllocHooks::AllocationObserverHook*>
    PartitionAllocHooks::allocation_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeObserverHook*>
    PartitionAllocHooks::free_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::AllocationOverrideHook*>
    PartitionAllocHooks::allocation_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeOverrideHook*>
    PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
    PartitionAllocHooks::realloc_override_hook_(nullptr);

void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
                                           FreeObserverHook* free_hook) {
  subtle::SpinLock::Guard guard(set_hooks_lock_);

  // Chained hooks are not supported. Registering a non-null hook when a
  // non-null hook is already registered indicates somebody is trying to
  // overwrite a hook.
  CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
        (!alloc_hook && !free_hook));
  allocation_observer_hook_ = alloc_hook;
  free_observer_hook_ = free_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}

void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                                           FreeOverrideHook* free_hook,
                                           ReallocOverrideHook realloc_hook) {
  subtle::SpinLock::Guard guard(set_hooks_lock_);

  CHECK((!allocation_override_hook_ && !free_override_hook_ &&
         !realloc_override_hook_) ||
        (!alloc_hook && !free_hook && !realloc_hook));
  allocation_override_hook_ = alloc_hook;
  free_override_hook_ = free_hook;
  realloc_override_hook_ = realloc_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}

void PartitionAllocHooks::AllocationObserverHookIfEnabled(
    void* address,
    size_t size,
    const char* type_name) {
  if (AllocationObserverHook* hook =
          allocation_observer_hook_.load(std::memory_order_relaxed)) {
    hook(address, size, type_name);
  }
}

bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
    void** out,
    int flags,
    size_t size,
    const char* type_name) {
  if (AllocationOverrideHook* hook =
          allocation_override_hook_.load(std::memory_order_relaxed)) {
    return hook(out, flags, size, type_name);
  }
  return false;
}

void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
  if (FreeObserverHook* hook =
          free_observer_hook_.load(std::memory_order_relaxed)) {
    hook(address);
  }
}

bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
  if (FreeOverrideHook* hook =
          free_override_hook_.load(std::memory_order_relaxed)) {
    return hook(address);
  }
  return false;
}

void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address,
                                                       void* new_address,
                                                       size_t size,
                                                       const char* type_name) {
  // Report a reallocation as a free followed by an allocation.
  AllocationObserverHook* allocation_hook =
      allocation_observer_hook_.load(std::memory_order_relaxed);
  FreeObserverHook* free_hook =
      free_observer_hook_.load(std::memory_order_relaxed);
  if (allocation_hook && free_hook) {
    free_hook(old_address);
    allocation_hook(new_address, size, type_name);
  }
}

bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
                                                       void* address) {
  if (ReallocOverrideHook* hook =
          realloc_override_hook_.load(std::memory_order_relaxed)) {
    return hook(out, address);
  }
  return false;
}

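// Per-root initialization shared by PartitionRoot and PartitionRootGeneric.
// The first call also sets up the global sentinel bucket/page used to
// terminate active page lists.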
static void PartitionAllocBaseInit(internal::PartitionRootBase* root) {
  DCHECK(!root->initialized);
  {
    subtle::SpinLock::Guard guard(*GetLock());
    if (!g_initialized) {
      g_initialized = true;
      // We mark the sentinel bucket/page as free to make sure it is skipped by
      // our logic to find a new active page.
      internal::PartitionBucket::get_sentinel_bucket()->active_pages_head =
          internal::PartitionPage::get_sentinel_page();
    }
  }

  root->initialized = true;

  // This is a "magic" value so we can test if a root pointer is valid.
  root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
}

void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
  DCHECK(oom_handling_function);
  internal::PartitionRootBase::gOomHandlingFunction = oom_handling_function;
}

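// Sets up a root with |bucket_count| fixed-size buckets: bucket i serves
// allocations of size i << kBucketShift (bucket 0 uses
// kAllocationGranularity).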
void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
  PartitionAllocBaseInit(this);

  num_buckets = bucket_count;
  max_allocation = maximum_allocation;
  for (size_t i = 0; i < num_buckets; ++i) {
    internal::PartitionBucket& bucket = buckets()[i];
    bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
  }
}

void PartitionRootGeneric::Init() {
  subtle::SpinLock::Guard guard(lock);

  PartitionAllocBaseInit(this);

  // Precalculate some shift and mask constants used in the hot path.
  // Example: malloc(41) == 101001 binary.
  // Order is 6; (1 << (6 - 1)) == 32 is the highest bit set.
  // order_index is the next three MSBs == 010 == 2.
  // sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
  // for the sub_order_index).
  size_t order;
  for (order = 0; order <= kBitsPerSizeT; ++order) {
    size_t order_index_shift;
    if (order < kGenericNumBucketsPerOrderBits + 1)
      order_index_shift = 0;
    else
      order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
    order_index_shifts[order] = order_index_shift;
    size_t sub_order_index_mask;
    if (order == kBitsPerSizeT) {
      // This avoids invoking undefined behavior for an excessive shift.
      sub_order_index_mask =
          static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
    } else {
      sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
                             (kGenericNumBucketsPerOrderBits + 1);
    }
    order_sub_index_masks[order] = sub_order_index_mask;
  }

  // Set up the actual usable buckets first.
  // Note that typical values (i.e. min allocation size of 8) will result in
  // pseudo buckets (size==9 etc. or more generally, size is not a multiple
  // of the smallest allocation granularity).
  // We avoid them in the bucket lookup map, but we tolerate them to keep the
  // code simpler and the structures more generic.
  size_t i, j;
  size_t current_size = kGenericSmallestBucket;
  size_t current_increment =
      kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
  internal::PartitionBucket* bucket = &buckets[0];
  for (i = 0; i < kGenericNumBucketedOrders; ++i) {
    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
      bucket->Init(current_size);
      // Disable pseudo buckets so that touching them faults.
      if (current_size % kGenericSmallestBucket)
        bucket->active_pages_head = nullptr;
      current_size += current_increment;
      ++bucket;
    }
    current_increment <<= 1;
  }
  DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);

  // Then set up the fast size -> bucket lookup table.
  bucket = &buckets[0];
  internal::PartitionBucket** bucket_ptr = &bucket_lookups[0];
  for (order = 0; order <= kBitsPerSizeT; ++order) {
    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
      if (order < kGenericMinBucketedOrder) {
        // Use the bucket of the finest granularity for malloc(0) etc.
        *bucket_ptr++ = &buckets[0];
      } else if (order > kGenericMaxBucketedOrder) {
        *bucket_ptr++ = internal::PartitionBucket::get_sentinel_bucket();
      } else {
        internal::PartitionBucket* valid_bucket = bucket;
        // Skip over invalid buckets.
        while (valid_bucket->slot_size % kGenericSmallestBucket)
          valid_bucket++;
        *bucket_ptr++ = valid_bucket;
        bucket++;
      }
    }
  }
  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
  DCHECK(bucket_ptr == &bucket_lookups[0] +
                           ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
  // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
  // which tries to overflow to a non-existent order.
  *bucket_ptr = internal::PartitionBucket::get_sentinel_bucket();
}

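// Attempts to resize a direct-mapped allocation in place by recommitting or
// decommitting its trailing system pages. Returns false when the request must
// fall back to an ordinary allocate-copy-free realloc.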
bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
                                         internal::PartitionPage* page,
                                         size_t raw_size) {
  DCHECK(page->bucket->is_direct_mapped());

  raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);

  // Note that the new size might be a bucketed size; this function is called
  // whenever we're reallocating a direct mapped allocation.
  size_t new_size = internal::PartitionBucket::get_direct_map_size(raw_size);
  if (new_size < kGenericMinDirectMappedDownsize)
    return false;

  // bucket->slot_size is the current size of the allocation.
  size_t current_size = page->bucket->slot_size;
  char* char_ptr = static_cast<char*>(internal::PartitionPage::ToPointer(page));
  if (new_size == current_size) {
    // No need to move any memory around, but update size and cookie below.
  } else if (new_size < current_size) {
    size_t map_size =
        internal::PartitionDirectMapExtent::FromPage(page)->map_size;

    // Don't reallocate in-place if the new size is less than 80% of the full
    // map size, to avoid holding on to too much unused address space.
    if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4)
      return false;

    // Shrink by decommitting unneeded pages and making them inaccessible.
    size_t decommit_size = current_size - new_size;
    root->DecommitSystemPages(char_ptr + new_size, decommit_size);
    SetSystemPagesAccess(char_ptr + new_size, decommit_size, PageInaccessible);
  } else if (new_size <=
             internal::PartitionDirectMapExtent::FromPage(page)->map_size) {
    // Grow within the actually allocated memory. Just need to make the
    // pages accessible again.
    size_t recommit_size = new_size - current_size;
    SetSystemPagesAccess(char_ptr + current_size, recommit_size, PageReadWrite);
    root->RecommitSystemPages(char_ptr + current_size, recommit_size);

#if DCHECK_IS_ON()
    memset(char_ptr + current_size, kUninitializedByte, recommit_size);
#endif
  } else {
    // We can't perform the realloc in-place.
    // TODO: support this too when possible.
    return false;
  }

#if DCHECK_IS_ON()
  // Write a new trailing cookie.
  internal::PartitionCookieWriteValue(char_ptr + raw_size -
                                      internal::kCookieSize);
#endif

  page->set_raw_size(raw_size);
  DCHECK(page->get_raw_size() == raw_size);

  page->bucket->slot_size = new_size;
  return true;
}

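// Generic realloc implementation. Handles the degenerate cases (null |ptr|,
// zero |new_size|), then tries to satisfy the request in place before falling
// back to allocate + memcpy + free.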
void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
                                   int flags,
                                   void* ptr,
                                   size_t new_size,
                                   const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
  void* result = realloc(ptr, new_size);
  CHECK(result || flags & PartitionAllocReturnNull);
  return result;
#else
  if (UNLIKELY(!ptr))
    return PartitionAllocGenericFlags(root, flags, new_size, type_name);
  if (UNLIKELY(!new_size)) {
    root->Free(ptr);
    return nullptr;
  }

  if (new_size > kGenericMaxDirectMapped) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize();
  }

  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  bool overridden = false;
  size_t actual_old_size;
  if (UNLIKELY(hooks_enabled)) {
    overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
        &actual_old_size, ptr);
  }
  if (LIKELY(!overridden)) {
    internal::PartitionPage* page = internal::PartitionPage::FromPointer(
        internal::PartitionCookieFreePointerAdjust(ptr));
    // TODO(palmer): See if we can afford to make this a CHECK.
    DCHECK(root->IsValidPage(page));

    if (UNLIKELY(page->bucket->is_direct_mapped())) {
      // We may be able to perform the realloc in place by changing the
      // accessibility of memory pages and, if reducing the size, decommitting
      // them.
      if (PartitionReallocDirectMappedInPlace(root, page, new_size)) {
        if (UNLIKELY(hooks_enabled)) {
          PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size,
                                                            type_name);
        }
        return ptr;
      }
    }

    const size_t actual_new_size = root->ActualSize(new_size);
    actual_old_size = PartitionAllocGetSize(ptr);

    // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
    // new size is a significant percentage smaller. We could do the same if we
    // determine it is a win.
    if (actual_new_size == actual_old_size) {
      // Trying to allocate a block of size |new_size| would give us a block of
      // the same size as the one we've already got, so re-use the allocation
      // after updating statistics (and cookies, if present).
      page->set_raw_size(internal::PartitionCookieSizeAdjustAdd(new_size));
#if DCHECK_IS_ON()
      // Write a new trailing cookie when it is possible to keep track of
      // |new_size| via the raw size pointer.
      if (page->get_raw_size_ptr())
        internal::PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
#endif
      return ptr;
    }
  }

  // This realloc cannot be resized in-place. Sadness.
  void* ret = PartitionAllocGenericFlags(root, flags, new_size, type_name);
  if (!ret) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize();
  }

  size_t copy_size = actual_old_size;
  if (new_size < copy_size)
    copy_size = new_size;

  memcpy(ret, ptr, copy_size);
  root->Free(ptr);
  return ret;
#endif
}

void* PartitionRootGeneric::Realloc(void* ptr,
                                    size_t new_size,
                                    const char* type_name) {
  return PartitionReallocGenericFlags(this, 0, ptr, new_size, type_name);
}

void* PartitionRootGeneric::TryRealloc(void* ptr,
                                       size_t new_size,
                                       const char* type_name) {
  return PartitionReallocGenericFlags(this, PartitionAllocReturnNull, ptr,
                                      new_size, type_name);
}

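// Returns the number of bytes in |page| that could be released back to the
// system, and releases them when |discard| is true. Slot spans smaller than a
// system page, and pages with no allocated slots, are skipped.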
static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) {
  const internal::PartitionBucket* bucket = page->bucket;
  size_t slot_size = bucket->slot_size;
  if (slot_size < kSystemPageSize || !page->num_allocated_slots)
    return 0;

  size_t bucket_num_slots = bucket->get_slots_per_span();
  size_t discardable_bytes = 0;

  size_t raw_size = page->get_raw_size();
  if (raw_size) {
    uint32_t used_bytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
    discardable_bytes = bucket->slot_size - used_bytes;
    if (discardable_bytes && discard) {
      char* ptr =
          reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
      ptr += used_bytes;
      DiscardSystemPages(ptr, discardable_bytes);
    }
    return discardable_bytes;
  }

  constexpr size_t kMaxSlotCount =
      (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
  DCHECK(bucket_num_slots <= kMaxSlotCount);
  DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
  size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
  char slot_usage[kMaxSlotCount];
#if !defined(OS_WIN)
  // The last freelist entry should not be discarded when using OS_WIN.
  // DiscardVirtualMemory makes the contents of discarded memory undefined.
  size_t last_slot = static_cast<size_t>(-1);
#endif
  memset(slot_usage, 1, num_slots);
  char* ptr = reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
  // First, walk the freelist for this page and make a bitmap of which slots
  // are not in use.
  for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
       /**/) {
    size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
    DCHECK(slot_index < num_slots);
    slot_usage[slot_index] = 0;
    entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
#if !defined(OS_WIN)
    // If we have a slot where the masked freelist entry is 0, we can actually
    // discard that freelist entry because touching a discarded page is
    // guaranteed to return original content or 0. (Note that this optimization
    // won't fire on big-endian machines because the masking function is
    // negation.)
    if (!internal::PartitionFreelistEntry::Encode(entry))
      last_slot = slot_index;
#endif
  }

  // If the slot(s) at the end of the slot span are not in use, we can truncate
  // them entirely and rewrite the freelist.
  size_t truncated_slots = 0;
  while (!slot_usage[num_slots - 1]) {
    truncated_slots++;
    num_slots--;
    DCHECK(num_slots);
  }
  // First, do the work of calculating the discardable bytes. Don't actually
  // discard anything unless the discard flag was passed in.
  if (truncated_slots) {
    size_t unprovisioned_bytes = 0;
    char* begin_ptr = ptr + (num_slots * slot_size);
    char* end_ptr = begin_ptr + (slot_size * truncated_slots);
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    // We round the end pointer here up and not down because we're at the end
    // of a slot span, so we "own" all the way up to the page boundary.
    end_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
    if (begin_ptr < end_ptr) {
      unprovisioned_bytes = end_ptr - begin_ptr;
      discardable_bytes += unprovisioned_bytes;
    }
    if (unprovisioned_bytes && discard) {
      DCHECK(truncated_slots > 0);
      size_t num_new_entries = 0;
      page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);

      // Rewrite the freelist.
      internal::PartitionFreelistEntry* head = nullptr;
      internal::PartitionFreelistEntry* back = head;
      for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
        if (slot_usage[slot_index])
          continue;

        auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
            ptr + (slot_size * slot_index));
        if (!head) {
          head = entry;
          back = entry;
        } else {
          back->next = internal::PartitionFreelistEntry::Encode(entry);
          back = entry;
        }
        num_new_entries++;
#if !defined(OS_WIN)
        last_slot = slot_index;
#endif
      }

      page->freelist_head = head;
      if (back)
        back->next = internal::PartitionFreelistEntry::Encode(nullptr);

      DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
      // Discard the memory.
      DiscardSystemPages(begin_ptr, unprovisioned_bytes);
    }
  }

  // Next, walk the slots and for any not in use, consider where the system page
  // boundaries occur. We can release any system pages back to the system as
  // long as we don't interfere with a freelist pointer or an adjacent slot.
  for (size_t i = 0; i < num_slots; ++i) {
    if (slot_usage[i])
      continue;
    // The first address we can safely discard is just after the freelist
    // pointer. There's one quirk: if the freelist pointer is actually NULL, we
    // can discard that pointer value too.
    char* begin_ptr = ptr + (i * slot_size);
    char* end_ptr = begin_ptr + slot_size;
#if !defined(OS_WIN)
    if (i != last_slot)
      begin_ptr += sizeof(internal::PartitionFreelistEntry);
#else
    begin_ptr += sizeof(internal::PartitionFreelistEntry);
#endif
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    end_ptr = reinterpret_cast<char*>(
        RoundDownToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    if (begin_ptr < end_ptr) {
      size_t partial_slot_bytes = end_ptr - begin_ptr;
      discardable_bytes += partial_slot_bytes;
      if (discard)
        DiscardSystemPages(begin_ptr, partial_slot_bytes);
    }
  }
  return discardable_bytes;
}

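// Discards unused system pages from every page on |bucket|'s active list.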
static void PartitionPurgeBucket(internal::PartitionBucket* bucket) {
  if (bucket->active_pages_head !=
      internal::PartitionPage::get_sentinel_page()) {
    for (internal::PartitionPage* page = bucket->active_pages_head; page;
         page = page->next_page) {
      DCHECK(page != internal::PartitionPage::get_sentinel_page());
      PartitionPurgePage(page, true);
    }
  }
}

void PartitionRoot::PurgeMemory(int flags) {
  if (flags & PartitionPurgeDecommitEmptyPages)
    DecommitEmptyPages();
  // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
  // here because that flag is only useful for allocations >= system page size.
  // We only have allocations that large inside generic partitions at the
  // moment.
}

void PartitionRootGeneric::PurgeMemory(int flags) {
  subtle::SpinLock::Guard guard(lock);
  if (flags & PartitionPurgeDecommitEmptyPages)
    DecommitEmptyPages();
  if (flags & PartitionPurgeDiscardUnusedSystemPages) {
    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      internal::PartitionBucket* bucket = &buckets[i];
      if (bucket->slot_size >= kSystemPageSize)
        PartitionPurgeBucket(bucket);
    }
  }
}

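// Accumulates |page|'s active, resident, decommittable and discardable byte
// counts into |stats_out| without discarding anything.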
static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
                                   internal::PartitionPage* page) {
  uint16_t bucket_num_slots = page->bucket->get_slots_per_span();

  if (page->is_decommitted()) {
    ++stats_out->num_decommitted_pages;
    return;
  }

  stats_out->discardable_bytes += PartitionPurgePage(page, false);

  size_t raw_size = page->get_raw_size();
  if (raw_size) {
    stats_out->active_bytes += static_cast<uint32_t>(raw_size);
  } else {
    stats_out->active_bytes +=
        (page->num_allocated_slots * stats_out->bucket_slot_size);
  }

  size_t page_bytes_resident =
      RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
                          stats_out->bucket_slot_size);
  stats_out->resident_bytes += page_bytes_resident;
  if (page->is_empty()) {
    stats_out->decommittable_bytes += page_bytes_resident;
    ++stats_out->num_empty_pages;
  } else if (page->is_full()) {
    ++stats_out->num_full_pages;
  } else {
    DCHECK(page->is_active());
    ++stats_out->num_active_pages;
  }
}

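// Fills |stats_out| for a normal (non-direct-mapped) bucket by walking its
// empty, decommitted and active page lists. Leaves |stats_out->is_valid| false
// when the bucket has nothing to report.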
static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
                                     const internal::PartitionBucket* bucket) {
  DCHECK(!bucket->is_direct_mapped());
  stats_out->is_valid = false;
  // If the active page list is empty (==
  // internal::PartitionPage::get_sentinel_page()), the bucket might still need
  // to be reported if it has a list of empty, decommitted or full pages.
  if (bucket->active_pages_head ==
          internal::PartitionPage::get_sentinel_page() &&
      !bucket->empty_pages_head && !bucket->decommitted_pages_head &&
      !bucket->num_full_pages)
    return;

  memset(stats_out, '\0', sizeof(*stats_out));
  stats_out->is_valid = true;
  stats_out->is_direct_map = false;
  stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
  stats_out->bucket_slot_size = bucket->slot_size;
  uint16_t bucket_num_slots = bucket->get_slots_per_span();
  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
  stats_out->allocated_page_size = bucket->get_bytes_per_span();
  stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
  stats_out->resident_bytes =
      bucket->num_full_pages * stats_out->allocated_page_size;

  for (internal::PartitionPage* page = bucket->empty_pages_head; page;
       page = page->next_page) {
    DCHECK(page->is_empty() || page->is_decommitted());
    PartitionDumpPageStats(stats_out, page);
  }
  for (internal::PartitionPage* page = bucket->decommitted_pages_head; page;
       page = page->next_page) {
    DCHECK(page->is_decommitted());
    PartitionDumpPageStats(stats_out, page);
  }

  if (bucket->active_pages_head !=
      internal::PartitionPage::get_sentinel_page()) {
    for (internal::PartitionPage* page = bucket->active_pages_head; page;
         page = page->next_page) {
      DCHECK(page != internal::PartitionPage::get_sentinel_page());
      PartitionDumpPageStats(stats_out, page);
    }
  }
}

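// Reports per-bucket and per-direct-map statistics for this generic root to
// |dumper|, followed by the partition-wide totals.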
void PartitionRootGeneric::DumpStats(const char* partition_name,
                                     bool is_light_dump,
                                     PartitionStatsDumper* dumper) {
  PartitionMemoryStats stats = {0};
  stats.total_mmapped_bytes =
      total_size_of_super_pages + total_size_of_direct_mapped_pages;
  stats.total_committed_bytes = total_size_of_committed_pages;

  size_t direct_mapped_allocations_total_size = 0;

  static const size_t kMaxReportableDirectMaps = 4096;

  // Allocate on the heap rather than on the stack to avoid stack overflow
  // skirmishes (on Windows, in particular).
  std::unique_ptr<uint32_t[]> direct_map_lengths = nullptr;
  if (!is_light_dump) {
    direct_map_lengths =
        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
  }

  PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
  size_t num_direct_mapped_allocations = 0;
  {
    subtle::SpinLock::Guard guard(lock);

    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      const internal::PartitionBucket* bucket = &buckets[i];
      // Don't report the pseudo buckets that the generic allocator sets up in
      // order to preserve a fast size->bucket map (see
      // PartitionRootGeneric::Init() for details).
      if (!bucket->active_pages_head)
        bucket_stats[i].is_valid = false;
      else
        PartitionDumpBucketStats(&bucket_stats[i], bucket);
      if (bucket_stats[i].is_valid) {
        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
        stats.total_active_bytes += bucket_stats[i].active_bytes;
        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
      }
    }

    for (internal::PartitionDirectMapExtent* extent = direct_map_list;
         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
         extent = extent->next_extent, ++num_direct_mapped_allocations) {
      DCHECK(!extent->next_extent ||
             extent->next_extent->prev_extent == extent);
      size_t slot_size = extent->bucket->slot_size;
      direct_mapped_allocations_total_size += slot_size;
      if (is_light_dump)
        continue;
      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
    }
  }

  if (!is_light_dump) {
    // Call |PartitionsDumpBucketStats| after collecting stats because it can
    // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
    // obtain the lock.
    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      if (bucket_stats[i].is_valid)
        dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
    }

    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
      uint32_t size = direct_map_lengths[i];

      PartitionBucketMemoryStats mapped_stats = {};
      mapped_stats.is_valid = true;
      mapped_stats.is_direct_map = true;
      mapped_stats.num_full_pages = 1;
      mapped_stats.allocated_page_size = size;
      mapped_stats.bucket_slot_size = size;
      mapped_stats.active_bytes = size;
      mapped_stats.resident_bytes = size;
      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
    }
  }

  stats.total_resident_bytes += direct_mapped_allocations_total_size;
  stats.total_active_bytes += direct_mapped_allocations_total_size;
  dumper->PartitionDumpTotals(partition_name, &stats);
}

void PartitionRoot::DumpStats(const char* partition_name,
                              bool is_light_dump,
                              PartitionStatsDumper* dumper) {
  PartitionMemoryStats stats = {0};
  stats.total_mmapped_bytes = total_size_of_super_pages;
  stats.total_committed_bytes = total_size_of_committed_pages;
  DCHECK(!total_size_of_direct_mapped_pages);

  static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
  std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
  if (!is_light_dump) {
    memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>(
        new PartitionBucketMemoryStats[kMaxReportableBuckets]);
  }

  const size_t partition_num_buckets = num_buckets;
  DCHECK(partition_num_buckets <= kMaxReportableBuckets);

  for (size_t i = 0; i < partition_num_buckets; ++i) {
    PartitionBucketMemoryStats bucket_stats = {0};
    PartitionDumpBucketStats(&bucket_stats, &buckets()[i]);
    if (bucket_stats.is_valid) {
      stats.total_resident_bytes += bucket_stats.resident_bytes;
      stats.total_active_bytes += bucket_stats.active_bytes;
      stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
      stats.total_discardable_bytes += bucket_stats.discardable_bytes;
    }
    if (!is_light_dump) {
      if (bucket_stats.is_valid)
        memory_stats[i] = bucket_stats;
      else
        memory_stats[i].is_valid = false;
    }
  }
  if (!is_light_dump) {
    // PartitionsDumpBucketStats is called after collecting stats because it
    // can use PartitionRoot::Alloc() to allocate and this can affect the
    // statistics.
    for (size_t i = 0; i < partition_num_buckets; ++i) {
      if (memory_stats[i].is_valid)
        dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
    }
  }
  dumper->PartitionDumpTotals(partition_name, &stats);
}

}  // namespace base
}  // namespace pdfium