// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/base/allocator/partition_allocator/partition_bucket.h"

#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/oom.h"
#include "third_party/base/allocator/partition_allocator/page_allocator.h"
#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "third_party/base/allocator/partition_allocator/partition_oom.h"
#include "third_party/base/allocator/partition_allocator/partition_page.h"
#include "third_party/base/allocator/partition_allocator/partition_root_base.h"

namespace pdfium {
namespace base {
namespace internal {

namespace {

ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
                                                int flags,
                                                size_t raw_size) {
  size_t size = PartitionBucket::get_direct_map_size(raw_size);

  // Because we need to fake looking like a super page, we need to allocate
  // a bunch of system pages more than "size":
  // - The first few system pages are the partition page in which the super
  // page metadata is stored. We fault just one system page out of a partition
  // page sized clump.
  // - We add a trailing guard page on 32-bit (on 64-bit we rely on the
  // massive address space plus randomization instead).
  size_t map_size = size + PartitionPageSize();
#if !defined(ARCH_CPU_64_BITS)
  map_size += SystemPageSize();
#endif
  // Round up to the allocation granularity.
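  // Adding PageAllocationGranularityOffsetMask() (granularity - 1) and then
  // masking with PageAllocationGranularityBaseMask() (~(granularity - 1))
  // rounds map_size up to the next granularity boundary, e.g. 64 KiB on
  // Windows.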
  map_size += PageAllocationGranularityOffsetMask();
  map_size &= PageAllocationGranularityBaseMask();

  char* ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size,
                                                 kSuperPageSize, PageReadWrite,
                                                 PageTag::kPartitionAlloc));
  if (UNLIKELY(!ptr))
    return nullptr;

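  // Accounting covers only what actually gets faulted in: the slot itself
  // plus the single system page of metadata. The rest of the mapping is made
  // inaccessible below.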
  size_t committed_page_size = size + SystemPageSize();
  root->total_size_of_direct_mapped_pages += committed_page_size;
  root->IncreaseCommittedPages(committed_page_size);

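  // |slot| starts one partition page in. Of the leading metadata partition
  // page, the first system page is the guard page handled below (32-bit
  // only) and the second holds the metadata area; everything past those two
  // is never touched, so protect it.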
  char* slot = ptr + PartitionPageSize();
  SetSystemPagesAccess(ptr + (SystemPageSize() * 2),
                       PartitionPageSize() - (SystemPageSize() * 2),
                       PageInaccessible);
#if !defined(ARCH_CPU_64_BITS)
  SetSystemPagesAccess(ptr, SystemPageSize(), PageInaccessible);
  SetSystemPagesAccess(slot + size, SystemPageSize(), PageInaccessible);
#endif

  PartitionSuperPageExtentEntry* extent =
      reinterpret_cast<PartitionSuperPageExtentEntry*>(
          PartitionSuperPageToMetadataArea(ptr));
  extent->root = root;
  // The new structures are all located inside a fresh system page so they
  // will all be zeroed out. These DCHECKs are for documentation.
  DCHECK(!extent->super_page_base);
  DCHECK(!extent->super_pages_end);
  DCHECK(!extent->next);
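  // Direct maps get a synthetic PartitionPage and PartitionBucket, carved
  // out of the metadata area, so that the rest of the allocator can treat
  // the mapping like an ordinary slot span with a single slot.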
  PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(slot);
  PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(
      reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
  DCHECK(!page->next_page);
  DCHECK(!page->num_allocated_slots);
  DCHECK(!page->num_unprovisioned_slots);
  DCHECK(!page->page_offset);
  DCHECK(!page->empty_cache_index);
  page->bucket = bucket;
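  // The freelist consists of exactly one entry, the slot itself, terminated
  // by an encoded nullptr.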
  page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
  PartitionFreelistEntry* next_entry =
      reinterpret_cast<PartitionFreelistEntry*>(slot);
  next_entry->next = PartitionFreelistEntry::Encode(nullptr);

  DCHECK(!bucket->active_pages_head);
  DCHECK(!bucket->empty_pages_head);
  DCHECK(!bucket->decommitted_pages_head);
  DCHECK(!bucket->num_system_pages_per_slot_span);
  DCHECK(!bucket->num_full_pages);
  bucket->slot_size = size;

  PartitionDirectMapExtent* map_extent =
      PartitionDirectMapExtent::FromPage(page);
  map_extent->map_size = map_size - PartitionPageSize() - SystemPageSize();
  map_extent->bucket = bucket;

  // Maintain the doubly-linked list of all direct mappings.
  map_extent->next_extent = root->direct_map_list;
  if (map_extent->next_extent)
    map_extent->next_extent->prev_extent = map_extent;
  map_extent->prev_extent = nullptr;
  root->direct_map_list = map_extent;

  return page;
}

}  // namespace

// static
PartitionBucket PartitionBucket::sentinel_bucket_;

PartitionBucket* PartitionBucket::get_sentinel_bucket() {
  return &sentinel_bucket_;
}

// TODO(ajwong): This seems to interact badly with
// get_pages_per_slot_span() which rounds the value from this up to a
// multiple of NumSystemPagesPerPartitionPage() (aka 4) anyway.
// http://crbug.com/776537
//
// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
// both used and unused pages.
// http://crbug.com/776537
uint8_t PartitionBucket::get_system_pages_per_slot_span() {
  // This works out reasonably for the current bucket sizes of the generic
  // allocator, and the current values of partition page size and constants.
  // Specifically, we have enough room to always pack the slots perfectly into
  // some number of system pages. The only waste is the waste associated with
  // unfaulted pages (i.e. wasted address space).
  // TODO: we end up using a lot of system pages for very small sizes. For
  // example, we'll use 12 system pages for slot size 24. The slot size is
  // so small that the waste would be tiny with just 4, or 1, system pages.
  // Later, we can investigate whether there are anti-fragmentation benefits
  // to using fewer system pages.
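  // Worked example, assuming 4 KiB system pages: a slot_size of 320 packs 64
  // slots into exactly five system pages (320 * 64 == 20480 == 5 * 4096), so
  // for that candidate the only waste charged below is the unfaulted-page
  // term.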
  double best_waste_ratio = 1.0;
  uint16_t best_pages = 0;
  if (slot_size > MaxSystemPagesPerSlotSpan() * SystemPageSize()) {
    // TODO(ajwong): Why is there a DCHECK here for this?
    // http://crbug.com/776537
    DCHECK(!(slot_size % SystemPageSize()));
    best_pages = static_cast<uint16_t>(slot_size / SystemPageSize());
    // TODO(ajwong): Should this be checking against
    // MaxSystemPagesPerSlotSpan() or numeric_limits<uint8_t>::max?
    // http://crbug.com/776537
    CHECK(best_pages < (1 << 8));
    return static_cast<uint8_t>(best_pages);
  }
  DCHECK(slot_size <= MaxSystemPagesPerSlotSpan() * SystemPageSize());
  for (uint16_t i = NumSystemPagesPerPartitionPage() - 1;
       i <= MaxSystemPagesPerSlotSpan(); ++i) {
    size_t page_size = SystemPageSize() * i;
    size_t num_slots = page_size / slot_size;
    size_t waste = page_size - (num_slots * slot_size);
    // Leaving a page unfaulted is not free; the page will occupy an empty page
    // table entry. Make a simple attempt to account for that.
    //
    // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
    // regardless of whether or not they are wasted. Should it just
    // be waste += i * sizeof(void*)?
    // http://crbug.com/776537
    size_t num_remainder_pages = i & (NumSystemPagesPerPartitionPage() - 1);
    size_t num_unfaulted_pages =
        num_remainder_pages
            ? (NumSystemPagesPerPartitionPage() - num_remainder_pages)
            : 0;
    waste += sizeof(void*) * num_unfaulted_pages;
    double waste_ratio =
        static_cast<double>(waste) / static_cast<double>(page_size);
    if (waste_ratio < best_waste_ratio) {
      best_waste_ratio = waste_ratio;
      best_pages = i;
    }
  }
  DCHECK(best_pages > 0);
  CHECK(best_pages <= MaxSystemPagesPerSlotSpan());
  return static_cast<uint8_t>(best_pages);
}

void PartitionBucket::Init(uint32_t new_slot_size) {
  slot_size = new_slot_size;
  active_pages_head = PartitionPage::get_sentinel_page();
  empty_pages_head = nullptr;
  decommitted_pages_head = nullptr;
  num_full_pages = 0;
  num_system_pages_per_slot_span = get_system_pages_per_slot_span();
}

NOINLINE void PartitionBucket::OnFull() {
  OOM_CRASH(0);
}

ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
    PartitionRootBase* root,
    int flags,
    uint16_t num_partition_pages) {
  DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
           PartitionPageSize()));
  DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
           PartitionPageSize()));
  DCHECK(num_partition_pages <= NumPartitionPagesPerSuperPage());
  size_t total_size = PartitionPageSize() * num_partition_pages;
  size_t num_partition_pages_left =
      (root->next_partition_page_end - root->next_partition_page) >>
      PartitionPageShift();
  if (LIKELY(num_partition_pages_left >= num_partition_pages)) {
    // In this case, we can still hand out pages from the current super page
    // allocation.
    char* ret = root->next_partition_page;

    // Fresh system pages in the super page are decommitted. Commit them
    // before vending them back.
    SetSystemPagesAccess(ret, total_size, PageReadWrite);

    root->next_partition_page += total_size;
    root->IncreaseCommittedPages(total_size);
    return ret;
  }


  // Need a new super page. We want to allocate super pages in a contiguous
  // address region as much as possible. This is important for not causing
  // page table bloat and not fragmenting address space on 32-bit
  // architectures.
  char* requested_address = root->next_super_page;
  char* super_page = reinterpret_cast<char*>(
      AllocPages(requested_address, kSuperPageSize, kSuperPageSize,
                 PageReadWrite, PageTag::kPartitionAlloc));
  if (UNLIKELY(!super_page))
    return nullptr;

  root->total_size_of_super_pages += kSuperPageSize;
  root->IncreaseCommittedPages(total_size);

  // |total_size| MUST be less than kSuperPageSize - (PartitionPageSize()*2).
  // This is a trustworthy value because num_partition_pages is not user
  // controlled.
  //
  // TODO(ajwong): Introduce a DCHECK.
  root->next_super_page = super_page + kSuperPageSize;
  char* ret = super_page + PartitionPageSize();
  root->next_partition_page = ret + total_size;
  root->next_partition_page_end = root->next_super_page - PartitionPageSize();
  // Make the first partition page in the super page a guard page, but leave a
  // hole in the middle.
  // This is where we put page metadata and also a tiny amount of extent
  // metadata.
  SetSystemPagesAccess(super_page, SystemPageSize(), PageInaccessible);
  SetSystemPagesAccess(super_page + (SystemPageSize() * 2),
                       PartitionPageSize() - (SystemPageSize() * 2),
                       PageInaccessible);
  //  SetSystemPagesAccess(super_page + (kSuperPageSize -
  //  PartitionPageSize()), PartitionPageSize(), PageInaccessible);
  // All remaining slot spans for the unallocated partition pages inside the
  // super page are conceptually decommitted. Correctly set the state here
  // so they do not occupy resources.
  //
  // TODO(ajwong): Refactor the page allocator API so the super page comes in
  // decommitted initially.
  SetSystemPagesAccess(super_page + PartitionPageSize() + total_size,
                       (kSuperPageSize - PartitionPageSize() - total_size),
                       PageInaccessible);

  // If we were after a specific address, but didn't get it, assume that
  // the system chose a lousy address. Here most OSes have a default
  // algorithm that isn't randomized. For example, most Linux
  // distributions will allocate the mapping directly before the last
  // successful mapping, which is far from random. So we just get fresh
  // randomness for the next mapping attempt.
  if (requested_address && requested_address != super_page)
    root->next_super_page = nullptr;

  // We allocated a new super page so update super page metadata.
  // First check if this is a new extent or not.
  PartitionSuperPageExtentEntry* latest_extent =
      reinterpret_cast<PartitionSuperPageExtentEntry*>(
          PartitionSuperPageToMetadataArea(super_page));
  // By storing the root in every extent metadata object, we have a fast way
  // to go from a pointer within the partition to the root object.
  latest_extent->root = root;
  // Most new extents will be part of a larger extent, and these three fields
  // are unused, but we initialize them to 0 so that we get a clear signal
  // in case they are accidentally used.
  latest_extent->super_page_base = nullptr;
  latest_extent->super_pages_end = nullptr;
  latest_extent->next = nullptr;

  PartitionSuperPageExtentEntry* current_extent = root->current_extent;
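  // Getting exactly the address we requested means the new super page abuts
  // the current extent; anything else (including a fresh, randomized request
  // with no hint) starts a new extent.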
  bool is_new_extent = (super_page != requested_address);
  if (UNLIKELY(is_new_extent)) {
    if (UNLIKELY(!current_extent)) {
      DCHECK(!root->first_extent);
      root->first_extent = latest_extent;
    } else {
      DCHECK(current_extent->super_page_base);
      current_extent->next = latest_extent;
    }
    root->current_extent = latest_extent;
    latest_extent->super_page_base = super_page;
    latest_extent->super_pages_end = super_page + kSuperPageSize;
  } else {
    // We allocated next to an existing extent so just nudge the size up a
    // little.
    DCHECK(current_extent->super_pages_end);
    current_extent->super_pages_end += kSuperPageSize;
    DCHECK(ret >= current_extent->super_page_base &&
           ret < current_extent->super_pages_end);
  }
  return ret;
}

ALWAYS_INLINE uint16_t PartitionBucket::get_pages_per_slot_span() {
  // Rounds up to nearest multiple of NumSystemPagesPerPartitionPage().
  return (num_system_pages_per_slot_span +
          (NumSystemPagesPerPartitionPage() - 1)) /
         NumSystemPagesPerPartitionPage();
}

ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) {
  // The bucket never changes. We set it up once.
  page->bucket = this;
  page->empty_cache_index = -1;

  page->Reset();

  // If this page has just a single slot, do not set up page offsets for any
  // page metadata other than the first one. This ensures that attempts to
  // touch invalid page metadata fail.
  if (page->num_unprovisioned_slots == 1)
    return;

  uint16_t num_partition_pages = get_pages_per_slot_span();
  char* page_char_ptr = reinterpret_cast<char*>(page);
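  // Tag each trailing metadata entry with its offset from the lead entry so
  // that PartitionPage::FromPointerNoAlignmentCheck() can map any address in
  // the slot span back to the lead PartitionPage.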
  for (uint16_t i = 1; i < num_partition_pages; ++i) {
    page_char_ptr += kPageMetadataSize;
    PartitionPage* secondary_page =
        reinterpret_cast<PartitionPage*>(page_char_ptr);
    secondary_page->page_offset = i;
  }
}

ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) {
  DCHECK(page != PartitionPage::get_sentinel_page());
  uint16_t num_slots = page->num_unprovisioned_slots;
  DCHECK(num_slots);
  // We should only get here when _every_ slot is either used or unprovisioned.
  // (The third state is "on the freelist". If we have a non-empty freelist, we
  // should not get here.)
  DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
  // Similarly, explicitly make sure that the freelist is empty.
  DCHECK(!page->freelist_head);
  DCHECK(page->num_allocated_slots >= 0);

  size_t size = slot_size;
  char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
  char* return_object = base + (size * page->num_allocated_slots);
  char* first_freelist_pointer = return_object + size;
  char* first_freelist_pointer_extent =
      first_freelist_pointer + sizeof(PartitionFreelistEntry*);
  // Our goal is to fault as few system pages as possible. We calculate the
  // page containing the "end" of the returned slot, and then allow freelist
  // pointers to be written up to the end of that page.
  char* sub_page_limit = reinterpret_cast<char*>(
      RoundUpToSystemPage(reinterpret_cast<size_t>(first_freelist_pointer)));
  char* slots_limit = return_object + (size * num_slots);
  char* freelist_limit = sub_page_limit;
  if (UNLIKELY(slots_limit < freelist_limit))
    freelist_limit = slots_limit;

  uint16_t num_new_freelist_entries = 0;
  if (LIKELY(first_freelist_pointer_extent <= freelist_limit)) {
    // Only consider used space in the slot span. If we consider wasted
    // space, we may get an off-by-one when a freelist pointer fits in the
    // wasted space, but a slot does not.
    // We know we can fit at least one freelist pointer.
    num_new_freelist_entries = 1;
    // Any further entries require space for the whole slot span.
    num_new_freelist_entries += static_cast<uint16_t>(
        (freelist_limit - first_freelist_pointer_extent) / size);
  }

  // We always return an object slot -- that's the +1 below.
  // We do not necessarily create any new freelist entries, because we cross
  // sub page boundaries frequently for large bucket sizes.
  DCHECK(num_new_freelist_entries + 1 <= num_slots);
  num_slots -= (num_new_freelist_entries + 1);
  page->num_unprovisioned_slots = num_slots;
  page->num_allocated_slots++;

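  // Thread the newly provisioned slots into a singly linked freelist. Next
  // pointers are stored in encoded form (PartitionFreelistEntry::Encode)
  // rather than as raw pointers.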
  if (LIKELY(num_new_freelist_entries)) {
    char* freelist_pointer = first_freelist_pointer;
    PartitionFreelistEntry* entry =
        reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
    page->freelist_head = entry;
    while (--num_new_freelist_entries) {
      freelist_pointer += size;
      PartitionFreelistEntry* next_entry =
          reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
      entry->next = PartitionFreelistEntry::Encode(next_entry);
      entry = next_entry;
    }
    entry->next = PartitionFreelistEntry::Encode(nullptr);
  } else {
    page->freelist_head = nullptr;
  }
  return return_object;
}

bool PartitionBucket::SetNewActivePage() {
  PartitionPage* page = active_pages_head;
  if (page == PartitionPage::get_sentinel_page())
    return false;

  PartitionPage* next_page;

  for (; page; page = next_page) {
    next_page = page->next_page;
    DCHECK(page->bucket == this);
    DCHECK(page != empty_pages_head);
    DCHECK(page != decommitted_pages_head);

    if (LIKELY(page->is_active())) {
      // This page is usable because it has freelist entries, or has
      // unprovisioned slots we can create freelist entries from.
      active_pages_head = page;
      return true;
    }

    // Deal with empty and decommitted pages.
    if (LIKELY(page->is_empty())) {
      page->next_page = empty_pages_head;
      empty_pages_head = page;
    } else if (LIKELY(page->is_decommitted())) {
      page->next_page = decommitted_pages_head;
      decommitted_pages_head = page;
    } else {
      DCHECK(page->is_full());
      // If we get here, we found a full page. Skip over it too, and also
      // tag it as full (via a negative value). We need it tagged so that
      // the free path can tell and move it back into the active page list.
      page->num_allocated_slots = -page->num_allocated_slots;
      ++num_full_pages;
      // num_full_pages is a uint16_t for efficient packing so guard against
      // overflow to be safe.
      if (UNLIKELY(!num_full_pages))
        OnFull();
      // Not necessary but might help stop accidents.
      page->next_page = nullptr;
    }
  }

  active_pages_head = PartitionPage::get_sentinel_page();
  return false;
}


void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
                                     int flags,
                                     size_t size,
                                     bool* is_already_zeroed) {
  // The slow path is called when the freelist is empty.
  DCHECK(!active_pages_head->freelist_head);

  PartitionPage* new_page = nullptr;
  *is_already_zeroed = false;

  // For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets
  // marked as special cases. We bounce them through to the slow path so that
  // we can still have a blazing fast hot path due to lack of corner-case
  // branches.
  //
  // Note: The ordering of the conditionals matters! In particular,
  // SetNewActivePage() has a side-effect even when returning
  // false where it sweeps the active page list and may move things into
  // the empty or decommitted lists which affects the subsequent conditional.
  bool return_null = flags & PartitionAllocReturnNull;
  if (UNLIKELY(is_direct_mapped())) {
    DCHECK(size > kGenericMaxBucketed);
    DCHECK(this == get_sentinel_bucket());
    DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
    if (size > GenericMaxDirectMapped()) {
      if (return_null)
        return nullptr;
      PartitionExcessiveAllocationSize(size);
    }
    new_page = PartitionDirectMap(root, flags, size);
    *is_already_zeroed = true;
  } else if (LIKELY(SetNewActivePage())) {
    // First, did we find an active page in the active pages list?
    new_page = active_pages_head;
    DCHECK(new_page->is_active());
  } else if (LIKELY(empty_pages_head != nullptr) ||
             LIKELY(decommitted_pages_head != nullptr)) {
    // Second, look in our lists of empty and decommitted pages.
    // Check empty pages first, which are preferred, but beware that an
    // empty page might have been decommitted.
    while (LIKELY((new_page = empty_pages_head) != nullptr)) {
      DCHECK(new_page->bucket == this);
      DCHECK(new_page->is_empty() || new_page->is_decommitted());
      empty_pages_head = new_page->next_page;
      // Accept the empty page unless it got decommitted.
      if (new_page->freelist_head) {
        new_page->next_page = nullptr;
        break;
      }
      DCHECK(new_page->is_decommitted());
      new_page->next_page = decommitted_pages_head;
      decommitted_pages_head = new_page;
    }
    if (UNLIKELY(!new_page) && LIKELY(decommitted_pages_head != nullptr)) {
      new_page = decommitted_pages_head;
      DCHECK(new_page->bucket == this);
      DCHECK(new_page->is_decommitted());
      decommitted_pages_head = new_page->next_page;
      void* addr = PartitionPage::ToPointer(new_page);
      root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
      new_page->Reset();
      // TODO(https://crbug.com/890752): Optimizing here might cause pages to
      // not be zeroed.
      // *is_already_zeroed = true;
    }
    DCHECK(new_page);
  } else {
    // Third. If we get here, we need a brand new page.
    uint16_t num_partition_pages = get_pages_per_slot_span();
    void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages);
    if (LIKELY(raw_pages != nullptr)) {
      new_page = PartitionPage::FromPointerNoAlignmentCheck(raw_pages);
      InitializeSlotSpan(new_page);
      // TODO(https://crbug.com/890752): Optimizing here causes pages to not be
      // zeroed on at least macOS.
      // *is_already_zeroed = true;
    }
  }


  // Bail if we had a memory allocation failure.
  if (UNLIKELY(!new_page)) {
    DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
    if (return_null)
      return nullptr;
    root->OutOfMemory(size);
  }

  // TODO(ajwong): Is there a way to avoid the reading of bucket here?
  // It seems like in many of the conditional branches above, |this| ==
  // |new_page->bucket|. Maybe pull this into another function?
  PartitionBucket* bucket = new_page->bucket;
  DCHECK(bucket != get_sentinel_bucket());
  bucket->active_pages_head = new_page;
  new_page->set_raw_size(size);

  // If we found an active page with free slots, or an empty page, we have a
  // usable freelist head.
  if (LIKELY(new_page->freelist_head != nullptr)) {
    PartitionFreelistEntry* entry = new_page->freelist_head;
    PartitionFreelistEntry* new_head =
        EncodedPartitionFreelistEntry::Decode(entry->next);
    new_page->freelist_head = new_head;
    new_page->num_allocated_slots++;
    return entry;
  }
  // Otherwise, we need to build the freelist.
  DCHECK(new_page->num_unprovisioned_slots);
  return AllocAndFillFreelist(new_page);
}

}  // namespace internal
}  // namespace base
}  // namespace pdfium