// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_alloc.h"

#include <string.h>

#include <memory>
#include <type_traits>

#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/logging.h"
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"

namespace base {

// Two partition pages are used as guard / metadata pages, so make sure the
// super page size is big enough to hold them.
static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size");
static_assert(!(kSuperPageSize % kPartitionPageSize), "ok super page multiple");
// Four system pages gives us room to hack out a still-guard-paged piece
// of metadata in the middle of a guard partition page.
static_assert(kSystemPageSize * 4 <= kPartitionPageSize,
              "ok partition page size");
static_assert(!(kPartitionPageSize % kSystemPageSize),
              "ok partition page multiple");
static_assert(sizeof(internal::PartitionPage) <= kPageMetadataSize,
              "PartitionPage should not be too big");
static_assert(sizeof(internal::PartitionBucket) <= kPageMetadataSize,
              "PartitionBucket should not be too big");
static_assert(sizeof(internal::PartitionSuperPageExtentEntry) <=
                  kPageMetadataSize,
              "PartitionSuperPageExtentEntry should not be too big");
static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
                  kSystemPageSize,
              "page metadata fits in hole");
// Limit to prevent callers accidentally overflowing an int size.
static_assert(kGenericMaxDirectMapped <=
                  (1UL << 31) + kPageAllocationGranularity,
              "maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
static_assert(kGenericSmallestBucket == 8, "generic smallest bucket");
static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
              "System pages per slot span must be less than 256.");

internal::PartitionRootBase::PartitionRootBase() = default;
internal::PartitionRootBase::~PartitionRootBase() = default;
PartitionRoot::PartitionRoot() = default;
PartitionRoot::~PartitionRoot() = default;
PartitionRootGeneric::PartitionRootGeneric() = default;
PartitionRootGeneric::~PartitionRootGeneric() = default;
PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;

Lock& GetLock() {
  static NoDestructor<Lock> s_initialized_lock;
  return *s_initialized_lock;
}
static bool g_initialized = false;

Lock& GetHooksLock() {
  static NoDestructor<Lock> lock;
  return *lock;
}

OomFunction internal::PartitionRootBase::g_oom_handling_function = nullptr;
std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
std::atomic<PartitionAllocHooks::AllocationObserverHook*>
    PartitionAllocHooks::allocation_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeObserverHook*>
    PartitionAllocHooks::free_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::AllocationOverrideHook*>
    PartitionAllocHooks::allocation_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeOverrideHook*>
    PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
    PartitionAllocHooks::realloc_override_hook_(nullptr);

void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
                                           FreeObserverHook* free_hook) {
  AutoLock guard(GetHooksLock());

  // Chained hooks are not supported. Registering a non-null hook when a
  // non-null hook is already registered indicates somebody is trying to
  // overwrite a hook.
  CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
        (!alloc_hook && !free_hook))
      << "Overwriting already set observer hooks";
  allocation_observer_hook_ = alloc_hook;
  free_observer_hook_ = free_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}
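
// Usage sketch (illustrative only; MyAllocHook and MyFreeHook are
// hypothetical callbacks, not part of this file):
//
//   void MyAllocHook(void* address, size_t size, const char* type_name) {
//     // Record the allocation.
//   }
//   void MyFreeHook(void* address) {
//     // Record the free.
//   }
//   PartitionAllocHooks::SetObserverHooks(&MyAllocHook, &MyFreeHook);
//   ...
//   // Pass null for both to unregister; chaining hooks is not supported.
//   PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);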

void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                                           FreeOverrideHook* free_hook,
                                           ReallocOverrideHook realloc_hook) {
  AutoLock guard(GetHooksLock());

  CHECK((!allocation_override_hook_ && !free_override_hook_ &&
         !realloc_override_hook_) ||
        (!alloc_hook && !free_hook && !realloc_hook))
      << "Overwriting already set override hooks";
  allocation_override_hook_ = alloc_hook;
  free_override_hook_ = free_hook;
  realloc_override_hook_ = realloc_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}

void PartitionAllocHooks::AllocationObserverHookIfEnabled(
    void* address,
    size_t size,
    const char* type_name) {
  if (AllocationObserverHook* hook =
          allocation_observer_hook_.load(std::memory_order_relaxed)) {
    hook(address, size, type_name);
  }
}

bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
    void** out,
    int flags,
    size_t size,
    const char* type_name) {
  if (AllocationOverrideHook* hook =
          allocation_override_hook_.load(std::memory_order_relaxed)) {
    return hook(out, flags, size, type_name);
  }
  return false;
}

void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
  if (FreeObserverHook* hook =
          free_observer_hook_.load(std::memory_order_relaxed)) {
    hook(address);
  }
}

bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
  if (FreeOverrideHook* hook =
          free_override_hook_.load(std::memory_order_relaxed)) {
    return hook(address);
  }
  return false;
}

void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address,
                                                       void* new_address,
                                                       size_t size,
                                                       const char* type_name) {
  // Report a reallocation as a free followed by an allocation.
  AllocationObserverHook* allocation_hook =
      allocation_observer_hook_.load(std::memory_order_relaxed);
  FreeObserverHook* free_hook =
      free_observer_hook_.load(std::memory_order_relaxed);
  if (allocation_hook && free_hook) {
    free_hook(old_address);
    allocation_hook(new_address, size, type_name);
  }
}

bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
                                                       void* address) {
  if (ReallocOverrideHook* hook =
          realloc_override_hook_.load(std::memory_order_relaxed)) {
    return hook(out, address);
  }
  return false;
}

static void PartitionAllocBaseInit(internal::PartitionRootBase* root) {
  DCHECK(!root->initialized);
  {
    AutoLock guard(GetLock());
    if (!g_initialized) {
      g_initialized = true;
      // We mark the sentinel bucket/page as free to make sure it is skipped by
      // our logic to find a new active page.
      internal::PartitionBucket::get_sentinel_bucket()->active_pages_head =
          internal::PartitionPage::get_sentinel_page();
    }
  }

  root->initialized = true;

  // This is a "magic" value so we can test if a root pointer is valid.
  root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
}
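
// A sketch of how the magic value can be checked (illustrative; the actual
// validity check lives elsewhere, e.g. behind IsValidPage()):
//
//   bool IsValidRoot(internal::PartitionRootBase* root) {
//     return root &&
//            root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
//   }
//
// Storing the complement rather than the pointer itself means a stray copy of
// the pointer value elsewhere in memory will not pass for the magic.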

void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
  DCHECK(on_out_of_memory);
  internal::PartitionRootBase::g_oom_handling_function = on_out_of_memory;
}

void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
  PartitionAllocBaseInit(this);

  num_buckets = bucket_count;
  max_allocation = maximum_allocation;
  for (size_t i = 0; i < num_buckets; ++i) {
    internal::PartitionBucket& bucket = buckets()[i];
    bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
  }
}

void PartitionRootGeneric::Init() {
  subtle::SpinLock::Guard guard(lock);

  PartitionAllocBaseInit(this);

  // Precalculate some shift and mask constants used in the hot path.
  // Example: malloc(41) == 101001 binary.
  // Order is 6 (1 << (6 - 1) == 32 is the highest bit set).
  // order_index is the next three MSB == 010 == 2.
  // sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
  // for the sub_order_index).
  size_t order;
  for (order = 0; order <= kBitsPerSizeT; ++order) {
    size_t order_index_shift;
    if (order < kGenericNumBucketsPerOrderBits + 1)
      order_index_shift = 0;
    else
      order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
    order_index_shifts[order] = order_index_shift;
    size_t sub_order_index_mask;
    if (order == kBitsPerSizeT) {
      // This avoids invoking undefined behavior for an excessive shift.
      sub_order_index_mask =
          static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
    } else {
      sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
                             (kGenericNumBucketsPerOrderBits + 1);
    }
    order_sub_index_masks[order] = sub_order_index_mask;
  }
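
  // Worked example for the constants just computed, assuming
  // kGenericNumBucketsPerOrderBits == 3, i.e. 8 buckets per order (an
  // assumption about the configured constants, not something asserted here).
  // For size 41 (0b101001), order == 6:
  //   order_index_shifts[6]    == 6 - (3 + 1) == 2
  //   order_sub_index_masks[6] == ((1 << 6) - 1) >> (3 + 1) == 0b11
  // so order_index == (41 >> 2) & 0b111 == 2 and
  // sub_order_index == 41 & 0b11 == 1. A non-zero sub_order_index bumps the
  // lookup to the next bucket, rounding 41 up to the 44-byte size class.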

  // Set up the actual usable buckets first.
  // Note that typical values (i.e. min allocation size of 8) will result in
  // pseudo buckets (size==9 etc. or more generally, size is not a multiple
  // of the smallest allocation granularity).
  // We avoid them in the bucket lookup map, but we tolerate them to keep the
  // code simpler and the structures more generic.
  size_t i, j;
  size_t current_size = kGenericSmallestBucket;
  size_t current_increment =
      kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
  internal::PartitionBucket* bucket = &buckets[0];
  for (i = 0; i < kGenericNumBucketedOrders; ++i) {
    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
      bucket->Init(current_size);
      // Disable pseudo buckets so that touching them faults.
      if (current_size % kGenericSmallestBucket)
        bucket->active_pages_head = nullptr;
      current_size += current_increment;
      ++bucket;
    }
    current_increment <<= 1;
  }
  DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);

  // Then set up the fast size -> bucket lookup table.
  bucket = &buckets[0];
  internal::PartitionBucket** bucket_ptr = &bucket_lookups[0];
  for (order = 0; order <= kBitsPerSizeT; ++order) {
    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
      if (order < kGenericMinBucketedOrder) {
        // Use the bucket of the finest granularity for malloc(0) etc.
        *bucket_ptr++ = &buckets[0];
      } else if (order > kGenericMaxBucketedOrder) {
        *bucket_ptr++ = internal::PartitionBucket::get_sentinel_bucket();
      } else {
        internal::PartitionBucket* valid_bucket = bucket;
        // Skip over invalid buckets.
        while (valid_bucket->slot_size % kGenericSmallestBucket)
          valid_bucket++;
        *bucket_ptr++ = valid_bucket;
        bucket++;
      }
    }
  }
  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
  DCHECK(bucket_ptr == &bucket_lookups[0] +
                           ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
  // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
  // which tries to overflow to a non-existent order.
  *bucket_ptr = internal::PartitionBucket::get_sentinel_bucket();
}
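
// How a request size reaches a bucket at allocation time, sketched from the
// tables built above (illustrative pseudo-code; the real lookup lives in the
// header and its exact shape may differ):
//
//   size_t order = kBitsPerSizeT - CountLeadingZeroBits(size);
//   size_t order_index = (size >> root->order_index_shifts[order]) &
//                        (kGenericNumBucketsPerOrder - 1);
//   size_t sub_order_index = size & root->order_sub_index_masks[order];
//   internal::PartitionBucket* bucket =
//       root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
//                            order_index + !!sub_order_index];
//
// The "+ !!sub_order_index" is what rounds a size with leftover low bits up
// to the next size class, and the extra sentinel entry written above catches
// the overflow past the last order (e.g. malloc(-1)).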

bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
                                         internal::PartitionPage* page,
                                         size_t raw_size) {
  DCHECK(page->bucket->is_direct_mapped());

  raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);

  // Note that the new size might be a bucketed size; this function is called
  // whenever we're reallocating a direct mapped allocation.
  size_t new_size = internal::PartitionBucket::get_direct_map_size(raw_size);
  if (new_size < kGenericMinDirectMappedDownsize)
    return false;

  // bucket->slot_size is the current size of the allocation.
  size_t current_size = page->bucket->slot_size;
  char* char_ptr = static_cast<char*>(internal::PartitionPage::ToPointer(page));
  if (new_size == current_size) {
    // No need to move any memory around, but update size and cookie below.
  } else if (new_size < current_size) {
    size_t map_size =
        internal::PartitionDirectMapExtent::FromPage(page)->map_size;

    // Don't reallocate in-place if the new size is less than 80% of the full
    // map size, to avoid holding on to too much unused address space.
    if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4)
      return false;
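
    // For example, assuming 4 KiB system pages (platform-dependent), a
    // 1000-page mapping may shrink in place to 800 pages, since
    // 800 * 5 == 1000 * 4 is not below the threshold, but a request for
    // 799 pages (3995 < 4000) falls back to a fresh allocation in the caller.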

    // Shrink by decommitting unneeded pages and making them inaccessible.
    size_t decommit_size = current_size - new_size;
    root->DecommitSystemPages(char_ptr + new_size, decommit_size);
    SetSystemPagesAccess(char_ptr + new_size, decommit_size, PageInaccessible);
  } else if (new_size <=
             internal::PartitionDirectMapExtent::FromPage(page)->map_size) {
    // Grow within the actually allocated memory. Just need to make the
    // pages accessible again.
    size_t recommit_size = new_size - current_size;
    SetSystemPagesAccess(char_ptr + current_size, recommit_size, PageReadWrite);
    root->RecommitSystemPages(char_ptr + current_size, recommit_size);

#if DCHECK_IS_ON()
    memset(char_ptr + current_size, kUninitializedByte, recommit_size);
#endif
  } else {
    // We can't perform the realloc in-place.
    // TODO: support this too when possible.
    return false;
  }

#if DCHECK_IS_ON()
  // Write a new trailing cookie.
  internal::PartitionCookieWriteValue(char_ptr + raw_size -
                                      internal::kCookieSize);
#endif

  page->set_raw_size(raw_size);
  DCHECK(page->get_raw_size() == raw_size);

  page->bucket->slot_size = new_size;
  return true;
}

void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
                                   int flags,
                                   void* ptr,
                                   size_t new_size,
                                   const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
  void* result = realloc(ptr, new_size);
  CHECK(result || flags & PartitionAllocReturnNull);
  return result;
#else
  if (UNLIKELY(!ptr))
    return PartitionAllocGenericFlags(root, flags, new_size, type_name);
  if (UNLIKELY(!new_size)) {
    root->Free(ptr);
    return nullptr;
  }

  if (new_size > kGenericMaxDirectMapped) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize(new_size);
  }

  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  bool overridden = false;
  size_t actual_old_size;
  if (UNLIKELY(hooks_enabled)) {
    overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
        &actual_old_size, ptr);
  }
  if (LIKELY(!overridden)) {
    internal::PartitionPage* page = internal::PartitionPage::FromPointer(
        internal::PartitionCookieFreePointerAdjust(ptr));
    // TODO(palmer): See if we can afford to make this a CHECK.
    DCHECK(root->IsValidPage(page));

    if (UNLIKELY(page->bucket->is_direct_mapped())) {
      // We may be able to perform the realloc in place by changing the
      // accessibility of memory pages and, if reducing the size, decommitting
      // them.
      if (PartitionReallocDirectMappedInPlace(root, page, new_size)) {
        if (UNLIKELY(hooks_enabled)) {
          PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size,
                                                            type_name);
        }
        return ptr;
      }
    }

    const size_t actual_new_size = root->ActualSize(new_size);
    actual_old_size = PartitionAllocGetSize(ptr);

    // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
    // new size is a significant percentage smaller. We could do the same if we
    // determine it is a win.
    if (actual_new_size == actual_old_size) {
      // Trying to allocate a block of size |new_size| would give us a block of
      // the same size as the one we've already got, so re-use the allocation
      // after updating statistics (and cookies, if present).
      page->set_raw_size(internal::PartitionCookieSizeAdjustAdd(new_size));
#if DCHECK_IS_ON()
      // Write a new trailing cookie when it is possible to keep track of
      // |new_size| via the raw size pointer.
      if (page->get_raw_size_ptr())
        internal::PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
#endif
      return ptr;
    }
  }

  // This realloc cannot be resized in-place. Sadness.
  void* ret = PartitionAllocGenericFlags(root, flags, new_size, type_name);
  if (!ret) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize(new_size);
  }

  size_t copy_size = actual_old_size;
  if (new_size < copy_size)
    copy_size = new_size;

  memcpy(ret, ptr, copy_size);
  root->Free(ptr);
  return ret;
#endif
}

void* PartitionRootGeneric::Realloc(void* ptr,
                                    size_t new_size,
                                    const char* type_name) {
  return PartitionReallocGenericFlags(this, 0, ptr, new_size, type_name);
}

void* PartitionRootGeneric::TryRealloc(void* ptr,
                                       size_t new_size,
                                       const char* type_name) {
  return PartitionReallocGenericFlags(this, PartitionAllocReturnNull, ptr,
                                      new_size, type_name);
}
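
// Usage sketch contrasting the two entry points (illustrative; "MyType" and
// the sizes are made up):
//
//   void* p = root->Alloc(16, "MyType");
//   p = root->Realloc(p, 64, "MyType");  // Terminates the process on OOM.
//   if (void* q = root->TryRealloc(p, 1 << 20, "MyType"))
//     p = q;  // TryRealloc() returns nullptr on failure instead of crashing.
//   root->Free(p);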

static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) {
  const internal::PartitionBucket* bucket = page->bucket;
  size_t slot_size = bucket->slot_size;
  if (slot_size < kSystemPageSize || !page->num_allocated_slots)
    return 0;

  size_t bucket_num_slots = bucket->get_slots_per_span();
  size_t discardable_bytes = 0;

  size_t raw_size = page->get_raw_size();
  if (raw_size) {
    uint32_t used_bytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
    discardable_bytes = bucket->slot_size - used_bytes;
    if (discardable_bytes && discard) {
      char* ptr =
          reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
      ptr += used_bytes;
      DiscardSystemPages(ptr, discardable_bytes);
    }
    return discardable_bytes;
  }
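
  // Worked example for the raw-size branch above, assuming 4 KiB system pages
  // (platform-dependent): a single-slot span with slot_size == 12288 holding
  // an allocation with raw_size == 5000 has
  // used_bytes == RoundUpToSystemPage(5000) == 8192, so the final 4096 bytes
  // of the slot are discardable.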

  constexpr size_t kMaxSlotCount =
      (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
  DCHECK(bucket_num_slots <= kMaxSlotCount);
  DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
  size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
  char slot_usage[kMaxSlotCount];
#if !defined(OS_WIN)
  // On Windows, the last freelist entry must not be discarded, because
  // DiscardVirtualMemory makes the contents of discarded memory undefined;
  // hence |last_slot| is only tracked on other platforms.
  size_t last_slot = static_cast<size_t>(-1);
#endif
  memset(slot_usage, 1, num_slots);
  char* ptr = reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
  // First, walk the freelist for this page and make a bitmap of which slots
  // are not in use.
  for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
       /**/) {
    size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
    DCHECK(slot_index < num_slots);
    slot_usage[slot_index] = 0;
    entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
#if !defined(OS_WIN)
    // If we have a slot where the masked freelist entry is 0, we can actually
    // discard that freelist entry because touching a discarded page is
    // guaranteed to return original content or 0. (Note that this optimization
    // won't fire on big-endian machines because the masking function is
    // negation.)
    if (!internal::PartitionFreelistEntry::Encode(entry))
      last_slot = slot_index;
#endif
  }
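
  // For example, a span with num_slots == 6 whose freelist links slots 1 and 4
  // ends up with slot_usage == {1, 0, 1, 1, 0, 1}: slots 0, 2, 3 and 5 are
  // live, while slots 1 and 4 are candidates for discarding below.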

  // If the slot(s) at the end of the slot span are not in use, we can truncate
  // them entirely and rewrite the freelist.
  size_t truncated_slots = 0;
  while (!slot_usage[num_slots - 1]) {
    truncated_slots++;
    num_slots--;
    DCHECK(num_slots);
  }
  // First, do the work of calculating the discardable bytes. Don't actually
  // discard anything unless the discard flag was passed in.
  if (truncated_slots) {
    size_t unprovisioned_bytes = 0;
    char* begin_ptr = ptr + (num_slots * slot_size);
    char* end_ptr = begin_ptr + (slot_size * truncated_slots);
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    // We round the end pointer here up and not down because we're at the end
    // of a slot span, so we "own" all the way up to the page boundary.
    end_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
    if (begin_ptr < end_ptr) {
      unprovisioned_bytes = end_ptr - begin_ptr;
      discardable_bytes += unprovisioned_bytes;
    }
    if (unprovisioned_bytes && discard) {
      DCHECK(truncated_slots > 0);
      size_t num_new_entries = 0;
      page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);

      // Rewrite the freelist.
      internal::PartitionFreelistEntry* head = nullptr;
      internal::PartitionFreelistEntry* back = head;
      for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
        if (slot_usage[slot_index])
          continue;

        auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
            ptr + (slot_size * slot_index));
        if (!head) {
          head = entry;
          back = entry;
        } else {
          back->next = internal::PartitionFreelistEntry::Encode(entry);
          back = entry;
        }
        num_new_entries++;
#if !defined(OS_WIN)
        last_slot = slot_index;
#endif
      }

      page->freelist_head = head;
      if (back)
        back->next = internal::PartitionFreelistEntry::Encode(nullptr);

      DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
      // Discard the memory.
      DiscardSystemPages(begin_ptr, unprovisioned_bytes);
    }
  }

  // Next, walk the slots and for any not in use, consider where the system
  // page boundaries occur. We can release any system pages back to the system
  // as long as we don't interfere with a freelist pointer or an adjacent slot.
  for (size_t i = 0; i < num_slots; ++i) {
    if (slot_usage[i])
      continue;
    // The first address we can safely discard is just after the freelist
    // pointer. There's one quirk: if the freelist pointer is actually NULL, we
    // can discard that pointer value too.
    char* begin_ptr = ptr + (i * slot_size);
    char* end_ptr = begin_ptr + slot_size;
#if !defined(OS_WIN)
    if (i != last_slot)
      begin_ptr += sizeof(internal::PartitionFreelistEntry);
#else
    begin_ptr += sizeof(internal::PartitionFreelistEntry);
#endif
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    end_ptr = reinterpret_cast<char*>(
        RoundDownToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    if (begin_ptr < end_ptr) {
      size_t partial_slot_bytes = end_ptr - begin_ptr;
      discardable_bytes += partial_slot_bytes;
      if (discard)
        DiscardSystemPages(begin_ptr, partial_slot_bytes);
    }
  }
  return discardable_bytes;
}

static void PartitionPurgeBucket(internal::PartitionBucket* bucket) {
  if (bucket->active_pages_head !=
      internal::PartitionPage::get_sentinel_page()) {
    for (internal::PartitionPage* page = bucket->active_pages_head; page;
         page = page->next_page) {
      DCHECK(page != internal::PartitionPage::get_sentinel_page());
      PartitionPurgePage(page, true);
    }
  }
}

void PartitionRoot::PurgeMemory(int flags) {
  if (flags & PartitionPurgeDecommitEmptyPages)
    DecommitEmptyPages();
  // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
  // here because that flag is only useful for allocations >= system page size.
  // We only have allocations that large inside generic partitions at the
  // moment.
}

void PartitionRootGeneric::PurgeMemory(int flags) {
  subtle::SpinLock::Guard guard(lock);
  if (flags & PartitionPurgeDecommitEmptyPages)
    DecommitEmptyPages();
  if (flags & PartitionPurgeDiscardUnusedSystemPages) {
    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      internal::PartitionBucket* bucket = &buckets[i];
      if (bucket->slot_size >= kSystemPageSize)
        PartitionPurgeBucket(bucket);
    }
  }
}
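
// Usage sketch (illustrative): a caller that wants to release as much memory
// as possible from a generic partition can request both purge actions at once:
//
//   root->PurgeMemory(PartitionPurgeDecommitEmptyPages |
//                     PartitionPurgeDiscardUnusedSystemPages);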

static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
                                   internal::PartitionPage* page) {
  uint16_t bucket_num_slots = page->bucket->get_slots_per_span();

  if (page->is_decommitted()) {
    ++stats_out->num_decommitted_pages;
    return;
  }

  stats_out->discardable_bytes += PartitionPurgePage(page, false);

  size_t raw_size = page->get_raw_size();
  if (raw_size) {
    stats_out->active_bytes += static_cast<uint32_t>(raw_size);
  } else {
    stats_out->active_bytes +=
        (page->num_allocated_slots * stats_out->bucket_slot_size);
  }

  size_t page_bytes_resident =
      RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
                          stats_out->bucket_slot_size);
  stats_out->resident_bytes += page_bytes_resident;
  if (page->is_empty()) {
    stats_out->decommittable_bytes += page_bytes_resident;
    ++stats_out->num_empty_pages;
  } else if (page->is_full()) {
    ++stats_out->num_full_pages;
  } else {
    DCHECK(page->is_active());
    ++stats_out->num_active_pages;
  }
}

static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
                                     const internal::PartitionBucket* bucket) {
  DCHECK(!bucket->is_direct_mapped());
  stats_out->is_valid = false;
  // If the active page list is empty (==
  // internal::PartitionPage::get_sentinel_page()), the bucket might still need
  // to be reported if it has a list of empty, decommitted or full pages.
  if (bucket->active_pages_head ==
          internal::PartitionPage::get_sentinel_page() &&
      !bucket->empty_pages_head && !bucket->decommitted_pages_head &&
      !bucket->num_full_pages)
    return;

  memset(stats_out, '\0', sizeof(*stats_out));
  stats_out->is_valid = true;
  stats_out->is_direct_map = false;
  stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
  stats_out->bucket_slot_size = bucket->slot_size;
  uint16_t bucket_num_slots = bucket->get_slots_per_span();
  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
  stats_out->allocated_page_size = bucket->get_bytes_per_span();
  stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
  stats_out->resident_bytes =
      bucket->num_full_pages * stats_out->allocated_page_size;

  for (internal::PartitionPage* page = bucket->empty_pages_head; page;
       page = page->next_page) {
    DCHECK(page->is_empty() || page->is_decommitted());
    PartitionDumpPageStats(stats_out, page);
  }
  for (internal::PartitionPage* page = bucket->decommitted_pages_head; page;
       page = page->next_page) {
    DCHECK(page->is_decommitted());
    PartitionDumpPageStats(stats_out, page);
  }

  if (bucket->active_pages_head !=
      internal::PartitionPage::get_sentinel_page()) {
    for (internal::PartitionPage* page = bucket->active_pages_head; page;
         page = page->next_page) {
      DCHECK(page != internal::PartitionPage::get_sentinel_page());
      PartitionDumpPageStats(stats_out, page);
    }
  }
}

void PartitionRootGeneric::DumpStats(const char* partition_name,
                                     bool is_light_dump,
                                     PartitionStatsDumper* dumper) {
  PartitionMemoryStats stats = {0};
  stats.total_mmapped_bytes =
      total_size_of_super_pages + total_size_of_direct_mapped_pages;
  stats.total_committed_bytes = total_size_of_committed_pages;

  size_t direct_mapped_allocations_total_size = 0;

  static const size_t kMaxReportableDirectMaps = 4096;

  // Allocate on the heap rather than on the stack to avoid stack overflow
  // skirmishes (on Windows, in particular).
  std::unique_ptr<uint32_t[]> direct_map_lengths = nullptr;
  if (!is_light_dump) {
    direct_map_lengths =
        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
  }

  PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
  size_t num_direct_mapped_allocations = 0;
  {
    subtle::SpinLock::Guard guard(lock);

    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      const internal::PartitionBucket* bucket = &buckets[i];
      // Don't report the pseudo buckets that the generic allocator sets up in
      // order to preserve a fast size->bucket map (see
      // PartitionRootGeneric::Init() for details).
      if (!bucket->active_pages_head)
        bucket_stats[i].is_valid = false;
      else
        PartitionDumpBucketStats(&bucket_stats[i], bucket);
      if (bucket_stats[i].is_valid) {
        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
        stats.total_active_bytes += bucket_stats[i].active_bytes;
        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
      }
    }

    for (internal::PartitionDirectMapExtent* extent = direct_map_list;
         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
         extent = extent->next_extent, ++num_direct_mapped_allocations) {
      DCHECK(!extent->next_extent ||
             extent->next_extent->prev_extent == extent);
      size_t slot_size = extent->bucket->slot_size;
      direct_mapped_allocations_total_size += slot_size;
      if (is_light_dump)
        continue;
      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
    }
  }

  if (!is_light_dump) {
    // Call |PartitionsDumpBucketStats| after collecting stats because it can
    // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
    // obtain the lock.
    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      if (bucket_stats[i].is_valid)
        dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
    }

    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
      uint32_t size = direct_map_lengths[i];

      PartitionBucketMemoryStats mapped_stats = {};
      mapped_stats.is_valid = true;
      mapped_stats.is_direct_map = true;
      mapped_stats.num_full_pages = 1;
      mapped_stats.allocated_page_size = size;
      mapped_stats.bucket_slot_size = size;
      mapped_stats.active_bytes = size;
      mapped_stats.resident_bytes = size;
      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
    }
  }

  stats.total_resident_bytes += direct_mapped_allocations_total_size;
  stats.total_active_bytes += direct_mapped_allocations_total_size;
  dumper->PartitionDumpTotals(partition_name, &stats);
}
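
// A minimal PartitionStatsDumper implementation might look like this (a
// sketch; the virtual method signatures are inferred from the call sites
// above and could differ from the real interface):
//
//   class LoggingStatsDumper : public PartitionStatsDumper {
//    public:
//     void PartitionDumpTotals(const char* name,
//                              const PartitionMemoryStats* stats) override {
//       LOG(INFO) << name << ": resident=" << stats->total_resident_bytes
//                 << " active=" << stats->total_active_bytes;
//     }
//     void PartitionsDumpBucketStats(
//         const char* name,
//         const PartitionBucketMemoryStats* stats) override {
//       LOG(INFO) << name << ": slot_size=" << stats->bucket_slot_size;
//     }
//   };
//
//   LoggingStatsDumper dumper;
//   root->DumpStats("my_partition", /*is_light_dump=*/false, &dumper);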

void PartitionRoot::DumpStats(const char* partition_name,
                              bool is_light_dump,
                              PartitionStatsDumper* dumper) {
  PartitionMemoryStats stats = {0};
  stats.total_mmapped_bytes = total_size_of_super_pages;
  stats.total_committed_bytes = total_size_of_committed_pages;
  DCHECK(!total_size_of_direct_mapped_pages);

  static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
  std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
  if (!is_light_dump) {
    memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>(
        new PartitionBucketMemoryStats[kMaxReportableBuckets]);
  }

  const size_t partition_num_buckets = num_buckets;
  DCHECK(partition_num_buckets <= kMaxReportableBuckets);

  for (size_t i = 0; i < partition_num_buckets; ++i) {
    PartitionBucketMemoryStats bucket_stats = {0};
    PartitionDumpBucketStats(&bucket_stats, &buckets()[i]);
    if (bucket_stats.is_valid) {
      stats.total_resident_bytes += bucket_stats.resident_bytes;
      stats.total_active_bytes += bucket_stats.active_bytes;
      stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
      stats.total_discardable_bytes += bucket_stats.discardable_bytes;
    }
    if (!is_light_dump) {
      if (bucket_stats.is_valid)
        memory_stats[i] = bucket_stats;
      else
        memory_stats[i].is_valid = false;
    }
  }
  if (!is_light_dump) {
    // PartitionsDumpBucketStats is called after collecting stats because it
    // can use PartitionRoot::Alloc() to allocate and this can affect the
    // statistics.
    for (size_t i = 0; i < partition_num_buckets; ++i) {
      if (memory_stats[i].is_valid)
        dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
    }
  }
  dumper->PartitionDumpTotals(partition_name, &stats);
}

}  // namespace base