// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/base/allocator/partition_allocator/partition_bucket.h"

#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/oom.h"
#include "third_party/base/allocator/partition_allocator/page_allocator.h"
#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "third_party/base/allocator/partition_allocator/partition_oom.h"
#include "third_party/base/allocator/partition_allocator/partition_page.h"
#include "third_party/base/allocator/partition_allocator/partition_root_base.h"

namespace pdfium {
namespace base {
namespace internal {

namespace {
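
// Handles allocations too large for the normal buckets by mapping a dedicated
// region ("direct map"). The region is laid out to look like a super page so
// that the regular metadata machinery works on it, and a fake single-slot
// bucket is initialized for it. Returns the metadata PartitionPage for the
// new mapping, or nullptr if the reservation fails.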
ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
                                                int flags,
                                                size_t raw_size) {
  size_t size = PartitionBucket::get_direct_map_size(raw_size);

  // Because we need to fake looking like a super page, we need to allocate
  // a bunch of system pages more than "size":
  // - The first few system pages are the partition page in which the super
  // page metadata is stored. We fault just one system page out of a partition
  // page sized clump.
  // - We add a trailing guard page on 32-bit (on 64-bit we rely on the
  // massive address space plus randomization instead).
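  //
  // Assuming the default constants (4 KiB system pages, 16 KiB partition
  // pages; both are platform-dependent), the reservation therefore looks like:
  //   [ metadata partition page | slot of |size| bytes | guard page (32-bit) ]
  // rounded up to the page allocation granularity.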
  size_t map_size = size + kPartitionPageSize;
#if !defined(ARCH_CPU_64_BITS)
  map_size += kSystemPageSize;
#endif
  // Round up to the allocation granularity.
  map_size += kPageAllocationGranularityOffsetMask;
  map_size &= kPageAllocationGranularityBaseMask;

  char* ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size,
                                                 kSuperPageSize, PageReadWrite,
                                                 PageTag::kPartitionAlloc));
  if (UNLIKELY(!ptr))
    return nullptr;
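
  // Only the slot itself plus the single system page of metadata that gets
  // faulted in are counted as committed here; the rest of the metadata
  // partition page stays untouched (see the layout comment above).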
  size_t committed_page_size = size + kSystemPageSize;
  root->total_size_of_direct_mapped_pages += committed_page_size;
  root->IncreaseCommittedPages(committed_page_size);

  char* slot = ptr + kPartitionPageSize;
  SetSystemPagesAccess(ptr + (kSystemPageSize * 2),
                       kPartitionPageSize - (kSystemPageSize * 2),
                       PageInaccessible);
#if !defined(ARCH_CPU_64_BITS)
  SetSystemPagesAccess(ptr, kSystemPageSize, PageInaccessible);
  SetSystemPagesAccess(slot + size, kSystemPageSize, PageInaccessible);
#endif

  PartitionSuperPageExtentEntry* extent =
      reinterpret_cast<PartitionSuperPageExtentEntry*>(
          PartitionSuperPageToMetadataArea(ptr));
  extent->root = root;
  // The new structures are all located inside a fresh system page so they
  // will all be zeroed out. These DCHECKs are for documentation.
  DCHECK(!extent->super_page_base);
  DCHECK(!extent->super_pages_end);
  DCHECK(!extent->next);
  PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(slot);
  PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(
      reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
  DCHECK(!page->next_page);
  DCHECK(!page->num_allocated_slots);
  DCHECK(!page->num_unprovisioned_slots);
  DCHECK(!page->page_offset);
  DCHECK(!page->empty_cache_index);
  page->bucket = bucket;
  page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
  PartitionFreelistEntry* next_entry =
      reinterpret_cast<PartitionFreelistEntry*>(slot);
  next_entry->next = PartitionFreelistEntry::Encode(nullptr);

  DCHECK(!bucket->active_pages_head);
  DCHECK(!bucket->empty_pages_head);
  DCHECK(!bucket->decommitted_pages_head);
  DCHECK(!bucket->num_system_pages_per_slot_span);
  DCHECK(!bucket->num_full_pages);
  bucket->slot_size = size;

  PartitionDirectMapExtent* map_extent =
      PartitionDirectMapExtent::FromPage(page);
  map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
  map_extent->bucket = bucket;

  // Maintain the doubly-linked list of all direct mappings.
  map_extent->next_extent = root->direct_map_list;
  if (map_extent->next_extent)
    map_extent->next_extent->prev_extent = map_extent;
  map_extent->prev_extent = nullptr;
  root->direct_map_list = map_extent;

  return page;
}

}  // namespace

// static
PartitionBucket PartitionBucket::sentinel_bucket_;

PartitionBucket* PartitionBucket::get_sentinel_bucket() {
  return &sentinel_bucket_;
}

// TODO(ajwong): This seems to interact badly with
// get_pages_per_slot_span(), which rounds the value from this up to a
// multiple of kNumSystemPagesPerPartitionPage (aka 4) anyway.
// http://crbug.com/776537
//
// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
// both used and unused pages.
// http://crbug.com/776537
uint8_t PartitionBucket::get_system_pages_per_slot_span() {
  // This works out reasonably for the current bucket sizes of the generic
  // allocator, and the current values of partition page size and constants.
  // Specifically, we have enough room to always pack the slots perfectly into
  // some number of system pages. The only waste is the waste associated with
  // unfaulted pages (i.e. wasted address space).
  // TODO: we end up using a lot of system pages for very small sizes. For
  // example, we'll use 12 system pages for slot size 24. The slot size is
  // so small that the waste would be tiny with just 4, or 1, system pages.
  // Later, we can investigate whether there are anti-fragmentation benefits
  // to using fewer system pages.
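  //
  // To illustrate the loop below (assuming the usual 4 KiB kSystemPageSize):
  // for slot_size == 24, 3 system pages hold 512 slots with no fit waste but
  // leave one system page of the partition page unfaulted, whereas 12 system
  // pages hold 2048 slots with no waste at all, so 12 wins.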
  double best_waste_ratio = 1.0f;
  uint16_t best_pages = 0;
  if (slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
    // TODO(ajwong): Why is there a DCHECK here for this?
    // http://crbug.com/776537
    DCHECK(!(slot_size % kSystemPageSize));
    best_pages = static_cast<uint16_t>(slot_size / kSystemPageSize);
    // TODO(ajwong): Should this be checking against
    // kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
    // http://crbug.com/776537
    CHECK(best_pages < (1 << 8));
    return static_cast<uint8_t>(best_pages);
  }
  DCHECK(slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
  for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
       i <= kMaxSystemPagesPerSlotSpan; ++i) {
    size_t page_size = kSystemPageSize * i;
    size_t num_slots = page_size / slot_size;
    size_t waste = page_size - (num_slots * slot_size);
    // Leaving a page unfaulted is not free; the page will occupy an empty page
    // table entry.  Make a simple attempt to account for that.
    //
    // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
    // regardless of whether or not they are wasted. Should it just
    // be waste += i * sizeof(void*)?
    // http://crbug.com/776537
    size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1);
    size_t num_unfaulted_pages =
        num_remainder_pages
            ? (kNumSystemPagesPerPartitionPage - num_remainder_pages)
            : 0;
    waste += sizeof(void*) * num_unfaulted_pages;
    double waste_ratio =
        static_cast<double>(waste) / static_cast<double>(page_size);
    if (waste_ratio < best_waste_ratio) {
      best_waste_ratio = waste_ratio;
      best_pages = i;
    }
  }
  DCHECK(best_pages > 0);
  CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
  return static_cast<uint8_t>(best_pages);
}

void PartitionBucket::Init(uint32_t new_slot_size) {
  slot_size = new_slot_size;
  active_pages_head = PartitionPage::get_sentinel_page();
  empty_pages_head = nullptr;
  decommitted_pages_head = nullptr;
  num_full_pages = 0;
  num_system_pages_per_slot_span = get_system_pages_per_slot_span();
}

NOINLINE void PartitionBucket::OnFull() {
  OOM_CRASH();
}
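
// Hands out |num_partition_pages| contiguous, committed partition pages for a
// new slot span, carving them out of the current super page when it still has
// room and reserving a fresh super page (aligned to kSuperPageSize) from the
// system otherwise. Returns nullptr if the reservation fails.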
ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
    PartitionRootBase* root,
    int flags,
    uint16_t num_partition_pages) {
  DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
           kPartitionPageSize));
  DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
           kPartitionPageSize));
  DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
  size_t total_size = kPartitionPageSize * num_partition_pages;
  size_t num_partition_pages_left =
      (root->next_partition_page_end - root->next_partition_page) >>
      kPartitionPageShift;
  if (LIKELY(num_partition_pages_left >= num_partition_pages)) {
    // In this case, we can still hand out pages from the current super page
    // allocation.
    char* ret = root->next_partition_page;

    // Fresh system pages in the super page are decommitted. Commit them
    // before vending them back.
    SetSystemPagesAccess(ret, total_size, PageReadWrite);

    root->next_partition_page += total_size;
    root->IncreaseCommittedPages(total_size);
    return ret;
  }

  // Need a new super page. We want to allocate super pages in a contiguous
  // address region as much as possible. This is important for not causing
  // page table bloat and not fragmenting address spaces in 32-bit
  // architectures.
  char* requested_address = root->next_super_page;
  char* super_page = reinterpret_cast<char*>(
      AllocPages(requested_address, kSuperPageSize, kSuperPageSize,
                 PageReadWrite, PageTag::kPartitionAlloc));
  if (UNLIKELY(!super_page))
    return nullptr;

  root->total_size_of_super_pages += kSuperPageSize;
  root->IncreaseCommittedPages(total_size);

  // |total_size| MUST be less than kSuperPageSize - (kPartitionPageSize*2).
  // This is a trustworthy value because num_partition_pages is not user
  // controlled.
  //
  // TODO(ajwong): Introduce a DCHECK.
  root->next_super_page = super_page + kSuperPageSize;
  char* ret = super_page + kPartitionPageSize;
  root->next_partition_page = ret + total_size;
  root->next_partition_page_end = root->next_super_page - kPartitionPageSize;
  // Make the first partition page in the super page a guard page, but leave a
  // hole in the middle.
  // This is where we put page metadata and also a tiny amount of extent
  // metadata.
  SetSystemPagesAccess(super_page, kSystemPageSize, PageInaccessible);
  SetSystemPagesAccess(super_page + (kSystemPageSize * 2),
                       kPartitionPageSize - (kSystemPageSize * 2),
                       PageInaccessible);
  //  SetSystemPagesAccess(super_page + (kSuperPageSize -
  //  kPartitionPageSize),
  //                             kPartitionPageSize, PageInaccessible);
  // All remaining slotspans for the unallocated PartitionPages inside the
  // SuperPage are conceptually decommitted. Correctly set the state here
  // so they do not occupy resources.
  //
  // TODO(ajwong): Refactor Page Allocator API so the SuperPage comes in
  // decommitted initially.
  SetSystemPagesAccess(super_page + kPartitionPageSize + total_size,
                       (kSuperPageSize - kPartitionPageSize - total_size),
                       PageInaccessible);

  // If we were after a specific address, but didn't get it, assume that
  // the system chose a lousy address. Here most OSes have a default
  // algorithm that isn't randomized. For example, most Linux
  // distributions will allocate the mapping directly before the last
  // successful mapping, which is far from random. So we just get fresh
  // randomness for the next mapping attempt.
  if (requested_address && requested_address != super_page)
    root->next_super_page = nullptr;

  // We allocated a new super page so update super page metadata.
  // First check if this is a new extent or not.
  PartitionSuperPageExtentEntry* latest_extent =
      reinterpret_cast<PartitionSuperPageExtentEntry*>(
          PartitionSuperPageToMetadataArea(super_page));
  // By storing the root in every extent metadata object, we have a fast way
  // to go from a pointer within the partition to the root object.
  latest_extent->root = root;
  // Most new extents will be part of a larger extent, and these three fields
  // are unused, but we initialize them to 0 so that we get a clear signal
  // in case they are accidentally used.
  latest_extent->super_page_base = nullptr;
  latest_extent->super_pages_end = nullptr;
  latest_extent->next = nullptr;

  PartitionSuperPageExtentEntry* current_extent = root->current_extent;
  bool is_new_extent = (super_page != requested_address);
  if (UNLIKELY(is_new_extent)) {
    if (UNLIKELY(!current_extent)) {
      DCHECK(!root->first_extent);
      root->first_extent = latest_extent;
    } else {
      DCHECK(current_extent->super_page_base);
      current_extent->next = latest_extent;
    }
    root->current_extent = latest_extent;
    latest_extent->super_page_base = super_page;
    latest_extent->super_pages_end = super_page + kSuperPageSize;
  } else {
    // We allocated next to an existing extent so just nudge the size up a
    // little.
    DCHECK(current_extent->super_pages_end);
    current_extent->super_pages_end += kSuperPageSize;
    DCHECK(ret >= current_extent->super_page_base &&
           ret < current_extent->super_pages_end);
  }
  return ret;
}

ALWAYS_INLINE uint16_t PartitionBucket::get_pages_per_slot_span() {
  // Rounds up to nearest multiple of kNumSystemPagesPerPartitionPage.
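  // E.g. with kNumSystemPagesPerPartitionPage == 4, a slot span of 9 system
  // pages rounds up to 3 partition pages.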
  return (num_system_pages_per_slot_span +
          (kNumSystemPagesPerPartitionPage - 1)) /
         kNumSystemPagesPerPartitionPage;
}
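
// Sets up the metadata for a freshly allocated slot span: points the page at
// this bucket, resets its counters, and records each trailing PartitionPage's
// offset from the head of the span so metadata lookups on interior partition
// pages can find the first one.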
ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) {
  // The bucket never changes. We set it up once.
  page->bucket = this;
  page->empty_cache_index = -1;

  page->Reset();

  // If this page has just a single slot, do not set up page offsets for any
  // page metadata other than the first one. This ensures that attempts to
  // touch invalid page metadata fail.
  if (page->num_unprovisioned_slots == 1)
    return;

  uint16_t num_partition_pages = get_pages_per_slot_span();
  char* page_char_ptr = reinterpret_cast<char*>(page);
  for (uint16_t i = 1; i < num_partition_pages; ++i) {
    page_char_ptr += kPageMetadataSize;
    PartitionPage* secondary_page =
        reinterpret_cast<PartitionPage*>(page_char_ptr);
    secondary_page->page_offset = i;
  }
}
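
// Provisions slots from the unprovisioned tail of |page|: returns one slot to
// the caller and threads as many of the following slots as fit within the
// current system page onto the freelist, so that only system pages that are
// actually needed get faulted in.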
ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) {
  DCHECK(page != PartitionPage::get_sentinel_page());
  uint16_t num_slots = page->num_unprovisioned_slots;
  DCHECK(num_slots);
  // We should only get here when _every_ slot is either used or unprovisioned.
  // (The third state is "on the freelist". If we have a non-empty freelist, we
  // should not get here.)
  DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
  // Similarly, make explicitly sure that the freelist is empty.
  DCHECK(!page->freelist_head);
  DCHECK(page->num_allocated_slots >= 0);

  size_t size = slot_size;
  char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
  char* return_object = base + (size * page->num_allocated_slots);
  char* first_freelist_pointer = return_object + size;
  char* first_freelist_pointer_extent =
      first_freelist_pointer + sizeof(PartitionFreelistEntry*);
  // Our goal is to fault as few system pages as possible. We calculate the
  // page containing the "end" of the returned slot, and then allow freelist
  // pointers to be written up to the end of that page.
  char* sub_page_limit = reinterpret_cast<char*>(
      RoundUpToSystemPage(reinterpret_cast<size_t>(first_freelist_pointer)));
  char* slots_limit = return_object + (size * num_slots);
  char* freelist_limit = sub_page_limit;
  if (UNLIKELY(slots_limit < freelist_limit))
    freelist_limit = slots_limit;

  uint16_t num_new_freelist_entries = 0;
  if (LIKELY(first_freelist_pointer_extent <= freelist_limit)) {
    // Only consider used space in the slot span. If we consider wasted
    // space, we may get an off-by-one when a freelist pointer fits in the
    // wasted space, but a slot does not.
    // We know we can fit at least one freelist pointer.
    num_new_freelist_entries = 1;
    // Any further entries require space for the whole slot span.
    num_new_freelist_entries += static_cast<uint16_t>(
        (freelist_limit - first_freelist_pointer_extent) / size);
  }

  // We always return an object slot -- that's the +1 below.
  // We do not necessarily create any new freelist entries, because we cross
  // sub page boundaries frequently for large bucket sizes.
  DCHECK(num_new_freelist_entries + 1 <= num_slots);
  num_slots -= (num_new_freelist_entries + 1);
  page->num_unprovisioned_slots = num_slots;
  page->num_allocated_slots++;

  if (LIKELY(num_new_freelist_entries)) {
    char* freelist_pointer = first_freelist_pointer;
    PartitionFreelistEntry* entry =
        reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
    page->freelist_head = entry;
    while (--num_new_freelist_entries) {
      freelist_pointer += size;
      PartitionFreelistEntry* next_entry =
          reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
      entry->next = PartitionFreelistEntry::Encode(next_entry);
      entry = next_entry;
    }
    entry->next = PartitionFreelistEntry::Encode(nullptr);
  } else {
    page->freelist_head = nullptr;
  }
  return return_object;
}
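
// Walks the active page list looking for a page that can satisfy an
// allocation, moving empty and decommitted pages onto their respective lists
// and tagging full pages as it goes. Returns true if a usable active page was
// found, false if the list was exhausted.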
bool PartitionBucket::SetNewActivePage() {
  PartitionPage* page = active_pages_head;
  if (page == PartitionPage::get_sentinel_page())
    return false;

  PartitionPage* next_page;

  for (; page; page = next_page) {
    next_page = page->next_page;
    DCHECK(page->bucket == this);
    DCHECK(page != empty_pages_head);
    DCHECK(page != decommitted_pages_head);

    if (LIKELY(page->is_active())) {
      // This page is usable because it has freelist entries, or has
      // unprovisioned slots we can create freelist entries from.
      active_pages_head = page;
      return true;
    }

    // Deal with empty and decommitted pages.
    if (LIKELY(page->is_empty())) {
      page->next_page = empty_pages_head;
      empty_pages_head = page;
    } else if (LIKELY(page->is_decommitted())) {
      page->next_page = decommitted_pages_head;
      decommitted_pages_head = page;
    } else {
      DCHECK(page->is_full());
      // If we get here, we found a full page. Skip over it too, and also
      // tag it as full (via a negative value). We need it tagged so that
      // freeing can tell, and move it back into the active page list.
      page->num_allocated_slots = -page->num_allocated_slots;
      ++num_full_pages;
      // num_full_pages is a uint16_t for efficient packing so guard against
      // overflow to be safe.
      if (UNLIKELY(!num_full_pages))
        OnFull();
      // Not necessary but might help stop accidents.
      page->next_page = nullptr;
    }
  }

  active_pages_head = PartitionPage::get_sentinel_page();
  return false;
}
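
// The slow path of allocation: called when the active page's freelist is
// empty. Depending on the bucket and size it either sets up a direct mapping,
// finds another usable page (active, empty, or decommitted), or provisions a
// brand new slot span, and then returns a slot from whichever page it settled
// on.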
void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
                                     int flags,
                                     size_t size,
                                     bool* is_already_zeroed) {
  // The slow path is called when the freelist is empty.
  DCHECK(!active_pages_head->freelist_head);

  PartitionPage* new_page = nullptr;
  *is_already_zeroed = false;

  // For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets
  // marked as special cases. We bounce them through to the slow path so that
  // we can still have a blazing fast hot path due to lack of corner-case
  // branches.
  //
  // Note: The ordering of the conditionals matters! In particular,
  // SetNewActivePage() has a side effect even when returning
  // false, where it sweeps the active page list and may move things into
  // the empty or decommitted lists which affects the subsequent conditional.
  bool return_null = flags & PartitionAllocReturnNull;
  if (UNLIKELY(is_direct_mapped())) {
    DCHECK(size > kGenericMaxBucketed);
    DCHECK(this == get_sentinel_bucket());
    DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
    if (size > kGenericMaxDirectMapped) {
      if (return_null)
        return nullptr;
      PartitionExcessiveAllocationSize();
    }
    new_page = PartitionDirectMap(root, flags, size);
#if !defined(OS_MACOSX)
    // Turn off the optimization to see if it helps https://crbug.com/892550.
    *is_already_zeroed = true;
#endif
  } else if (LIKELY(SetNewActivePage())) {
    // First, did we find an active page in the active pages list?
    new_page = active_pages_head;
    DCHECK(new_page->is_active());
  } else if (LIKELY(empty_pages_head != nullptr) ||
             LIKELY(decommitted_pages_head != nullptr)) {
    // Second, look in our lists of empty and decommitted pages.
    // Check empty pages first, which are preferred, but beware that an
    // empty page might have been decommitted.
    while (LIKELY((new_page = empty_pages_head) != nullptr)) {
      DCHECK(new_page->bucket == this);
      DCHECK(new_page->is_empty() || new_page->is_decommitted());
      empty_pages_head = new_page->next_page;
      // Accept the empty page unless it got decommitted.
      if (new_page->freelist_head) {
        new_page->next_page = nullptr;
        break;
      }
      DCHECK(new_page->is_decommitted());
      new_page->next_page = decommitted_pages_head;
      decommitted_pages_head = new_page;
    }
    if (UNLIKELY(!new_page) && LIKELY(decommitted_pages_head != nullptr)) {
      new_page = decommitted_pages_head;
      DCHECK(new_page->bucket == this);
      DCHECK(new_page->is_decommitted());
      decommitted_pages_head = new_page->next_page;
      void* addr = PartitionPage::ToPointer(new_page);
      root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
      new_page->Reset();
      // TODO(https://crbug.com/890752): Optimizing here might cause pages to
      // not be zeroed.
      // *is_already_zeroed = true;
    }
    DCHECK(new_page);
  } else {
    // Third. If we get here, we need a brand new page.
    uint16_t num_partition_pages = get_pages_per_slot_span();
    void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages);
    if (LIKELY(raw_pages != nullptr)) {
      new_page = PartitionPage::FromPointerNoAlignmentCheck(raw_pages);
      InitializeSlotSpan(new_page);
      // TODO(https://crbug.com/890752): Optimizing here causes pages to not be
      // zeroed on at least macOS.
      // *is_already_zeroed = true;
    }
  }

  // Bail if we had a memory allocation failure.
  if (UNLIKELY(!new_page)) {
    DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
    if (return_null)
      return nullptr;
    root->OutOfMemory();
  }

  // TODO(ajwong): Is there a way to avoid the reading of bucket here?
  // It seems like in many of the conditional branches above, |this| ==
  // |new_page->bucket|. Maybe pull this into another function?
  PartitionBucket* bucket = new_page->bucket;
  DCHECK(bucket != get_sentinel_bucket());
  bucket->active_pages_head = new_page;
  new_page->set_raw_size(size);

  // If we found an active page with free slots, or an empty page, we have a
  // usable freelist head.
  if (LIKELY(new_page->freelist_head != nullptr)) {
    PartitionFreelistEntry* entry = new_page->freelist_head;
    PartitionFreelistEntry* new_head =
        EncodedPartitionFreelistEntry::Decode(entry->next);
    new_page->freelist_head = new_head;
    new_page->num_allocated_slots++;
    return entry;
  }
  // Otherwise, we need to build the freelist.
  DCHECK(new_page->num_unprovisioned_slots);
  return AllocAndFillFreelist(new_page);
}

}  // namespace internal
}  // namespace base
}  // namespace pdfium