1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 /*
8 * Tenured heap management.
9 *
10 * This file contains method definitions for the following classes for code that
11 * is not specific to a particular phase of GC:
12 *
13 * - Arena
14 * - ArenaList
15 * - FreeLists
16 * - ArenaLists
17 * - TenuredChunk
18 * - ChunkPool
19 */
20
21 #include "gc/Heap-inl.h"
22
23 #include "gc/GCLock.h"
24 #include "jit/Assembler.h"
25 #include "vm/RegExpShared.h"
26
27 #include "gc/ArenaList-inl.h"
28 #include "gc/PrivateIterators-inl.h"
29
30 using namespace js;
31 using namespace js::gc;
32
// Check that reserved bits of a Cell are compatible with our typical allocators
// since most derived classes will store a pointer in the first word.
static const size_t MinFirstWordAlignment = 1u << CellFlagBitsReservedForGC;
static_assert(js::detail::LIFO_ALLOC_ALIGN >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support LifoAlloc");
static_assert(CellAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support gc::Cell");
static_assert(js::jit::CodeAlignment >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JIT code");
static_assert(js::gc::JSClassAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JSClass pointers");
static_assert(js::ScopeDataAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support scope data pointers");

// Compile-time size checks for every AllocKind's sized type: it must be big
// enough to hold a FreeSpan (free cells double as free-list entries), be a
// multiple of the cell alignment, and meet the minimum cell size.
#define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType, bgFinal,       \
                         nursery, compact)                                     \
  static_assert(sizeof(sizedType) >= SortedArenaList::MinThingSize,            \
                #sizedType " is smaller than SortedArenaList::MinThingSize!"); \
  static_assert(sizeof(sizedType) >= sizeof(FreeSpan),                         \
                #sizedType " is smaller than FreeSpan");                       \
  static_assert(sizeof(sizedType) % CellAlignBytes == 0,                       \
                "Size of " #sizedType " is not a multiple of CellAlignBytes"); \
  static_assert(sizeof(sizedType) >= MinCellSize,                              \
                "Size of " #sizedType " is smaller than the minimum size");
FOR_EACH_ALLOCKIND(CHECK_THING_SIZE);
#undef CHECK_THING_SIZE
59
60 FreeSpan FreeLists::emptySentinel;
61
// Compile-time arena layout for cells of type T: the cell size, how many
// cells fit in an arena after the header, and the offset of the first cell
// (any slack ends up between the header and the first cell).
template <typename T>
struct ArenaLayout {
  static constexpr size_t thingSize() { return sizeof(T); }
  static constexpr size_t thingsPerArena() {
    return (ArenaSize - ArenaHeaderSize) / thingSize();
  }
  static constexpr size_t firstThingOffset() {
    return ArenaSize - thingSize() * thingsPerArena();
  }
};
72
// Per-AllocKind lookup tables, expanded from FOR_EACH_ALLOCKIND so they stay
// in sync with the AllocKind list. The arrays are uint8_t, so every value
// must fit in a byte; coverage is asserted in Arena::staticAsserts and
// consistency in Arena::checkLookupTables.

const uint8_t Arena::ThingSizes[] = {
#define EXPAND_THING_SIZE(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingSize(),
    FOR_EACH_ALLOCKIND(EXPAND_THING_SIZE)
#undef EXPAND_THING_SIZE
};

const uint8_t Arena::FirstThingOffsets[] = {
#define EXPAND_FIRST_THING_OFFSET(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::firstThingOffset(),
    FOR_EACH_ALLOCKIND(EXPAND_FIRST_THING_OFFSET)
#undef EXPAND_FIRST_THING_OFFSET
};

const uint8_t Arena::ThingsPerArena[] = {
#define EXPAND_THINGS_PER_ARENA(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingsPerArena(),
    FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARENA)
#undef EXPAND_THINGS_PER_ARENA
};
93
unmarkAll()94 void Arena::unmarkAll() {
95 MarkBitmapWord* arenaBits = chunk()->markBits.arenaBits(this);
96 for (size_t i = 0; i < ArenaBitmapWords; i++) {
97 arenaBits[i] = 0;
98 }
99 }
100
unmarkPreMarkedFreeCells()101 void Arena::unmarkPreMarkedFreeCells() {
102 for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
103 MOZ_ASSERT(cell->isMarkedBlack());
104 cell->unmark();
105 }
106 }
107
108 #ifdef DEBUG
109
checkNoMarkedFreeCells()110 void Arena::checkNoMarkedFreeCells() {
111 for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
112 MOZ_ASSERT(!cell->isMarkedAny());
113 }
114 }
115
checkAllCellsMarkedBlack()116 void Arena::checkAllCellsMarkedBlack() {
117 for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
118 MOZ_ASSERT(cell->isMarkedBlack());
119 }
120 }
121
122 #endif
123
124 #if defined(DEBUG) || defined(JS_GC_ZEAL)
checkNoMarkedCells()125 void Arena::checkNoMarkedCells() {
126 for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
127 MOZ_ASSERT(!cell->isMarkedAny());
128 }
129 }
130 #endif
131
/* static */
void Arena::staticAsserts() {
  // Compile-time checks that AllocKind fits in a byte and that the lookup
  // tables above have one entry per AllocKind.
  static_assert(size_t(AllocKind::LIMIT) <= 255,
                "All AllocKinds and AllocKind::LIMIT must fit in a uint8_t.");
  static_assert(std::size(ThingSizes) == AllocKindCount,
                "We haven't defined all thing sizes.");
  static_assert(std::size(FirstThingOffsets) == AllocKindCount,
                "We haven't defined all offsets.");
  static_assert(std::size(ThingsPerArena) == AllocKindCount,
                "We haven't defined all counts.");
}
143
/* static */
void Arena::checkLookupTables() {
#ifdef DEBUG
  // For each kind, the first-thing offset plus the space occupied by the
  // things themselves must account for the entire arena.
  for (size_t i = 0; i < AllocKindCount; i++) {
    MOZ_ASSERT(
        FirstThingOffsets[i] + ThingsPerArena[i] * ThingSizes[i] == ArenaSize,
        "Inconsistent arena lookup table data");
  }
#endif
}
154
155 #ifdef DEBUG
dump()156 void js::gc::ArenaList::dump() {
157 fprintf(stderr, "ArenaList %p:", this);
158 if (cursorp_ == &head_) {
159 fprintf(stderr, " *");
160 }
161 for (Arena* arena = head(); arena; arena = arena->next) {
162 fprintf(stderr, " %p", arena);
163 if (cursorp_ == &arena->next) {
164 fprintf(stderr, " *");
165 }
166 }
167 fprintf(stderr, "\n");
168 }
169 #endif
170
// Unlink and return the tail of the list starting at *arenap. Debug builds
// assert the cursor does not point into the removed tail.
Arena* ArenaList::removeRemainingArenas(Arena** arenap) {
  // This is only ever called to remove arenas that are after the cursor, so
  // we don't need to update it.
#ifdef DEBUG
  for (Arena* arena = *arenap; arena; arena = arena->next) {
    MOZ_ASSERT(cursorp_ != &arena->next);
  }
#endif
  Arena* remainingArenas = *arenap;
  *arenap = nullptr;
  check();
  return remainingArenas;
}
184
FreeLists()185 FreeLists::FreeLists() {
186 for (auto i : AllAllocKinds()) {
187 freeLists_[i] = &emptySentinel;
188 }
189 }
190
ArenaLists::ArenaLists(Zone* zone)
    : zone_(zone),
      freeLists_(zone),
      arenaLists_(zone),
      collectingArenaLists_(zone),
      // AllocKind::LIMIT means "no kind is being incrementally swept".
      incrementalSweptArenaKind(zone, AllocKind::LIMIT),
      incrementalSweptArenas(zone),
      gcCompactPropMapArenasToUpdate(zone, nullptr),
      gcNormalPropMapArenasToUpdate(zone, nullptr),
      savedEmptyArenas(zone, nullptr) {
  // No kind starts out in concurrent use.
  for (auto i : AllAllocKinds()) {
    concurrentUse(i) = ConcurrentUse::None;
  }
}
205
ReleaseArenas(JSRuntime * rt,Arena * arena,const AutoLockGC & lock)206 void ReleaseArenas(JSRuntime* rt, Arena* arena, const AutoLockGC& lock) {
207 Arena* next;
208 for (; arena; arena = next) {
209 next = arena->next;
210 rt->gc.releaseArena(arena, lock);
211 }
212 }
213
// Release every arena in |arenaList| back to the GC, then empty the list.
void ReleaseArenaList(JSRuntime* rt, ArenaList& arenaList,
                      const AutoLockGC& lock) {
  ReleaseArenas(rt, arenaList.head(), lock);
  arenaList.clear();
}
219
ArenaLists::~ArenaLists() {
  AutoLockGC lock(runtime());

  for (auto i : AllAllocKinds()) {
    /*
     * We can only call this during the shutdown after the last GC when
     * the background finalization is disabled.
     */
    MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
    ReleaseArenaList(runtime(), arenaList(i), lock);
  }
  // Also free arenas left over from incremental sweeping and any saved
  // empty arenas.
  ReleaseArenaList(runtime(), incrementalSweptArenas.ref(), lock);

  ReleaseArenas(runtime(), savedEmptyArenas, lock);
}
235
// Move all arenas into the collecting lists, leaving the regular lists
// empty. The free lists must already be empty.
void ArenaLists::moveArenasToCollectingLists() {
  checkEmptyFreeLists();
  for (AllocKind kind : AllAllocKinds()) {
    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
    collectingArenaList(kind) = std::move(arenaList(kind));
    MOZ_ASSERT(arenaList(kind).isEmpty());
  }
}
244
// Merge each regular arena list into its collecting list (with the cursor at
// the end), then move the combined list back, leaving the collecting lists
// empty.
void ArenaLists::mergeArenasFromCollectingLists() {
  for (AllocKind kind : AllAllocKinds()) {
    collectingArenaList(kind).insertListWithCursorAtEnd(arenaList(kind));
    arenaList(kind) = std::move(collectingArenaList(kind));
    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
  }
}
252
takeSweptEmptyArenas()253 Arena* ArenaLists::takeSweptEmptyArenas() {
254 Arena* arenas = savedEmptyArenas;
255 savedEmptyArenas = nullptr;
256 return arenas;
257 }
258
// Record the arenas of |kind| produced by an incremental sweep, replacing
// any previously recorded set.
void ArenaLists::setIncrementalSweptArenas(AllocKind kind,
                                           SortedArenaList& arenas) {
  incrementalSweptArenaKind = kind;
  incrementalSweptArenas.ref().clear();
  incrementalSweptArenas = arenas.toArenaList();
}
265
// Forget the recorded incrementally swept arenas (LIMIT means "none").
void ArenaLists::clearIncrementalSweptArenas() {
  incrementalSweptArenaKind = AllocKind::LIMIT;
  incrementalSweptArenas.ref().clear();
}
270
void ArenaLists::checkGCStateNotInUse() {
  // Called before and after collection to check the state is as expected.
#ifdef DEBUG
  checkSweepStateNotInUse();
  for (auto i : AllAllocKinds()) {
    MOZ_ASSERT(collectingArenaList(i).isEmpty());
  }
#endif
}
280
// Debug check that no sweeping state is active: no arenas pending update,
// no incremental sweep record, no saved empty arenas and no concurrent use.
void ArenaLists::checkSweepStateNotInUse() {
#ifdef DEBUG
  checkNoArenasToUpdate();
  MOZ_ASSERT(incrementalSweptArenaKind == AllocKind::LIMIT);
  MOZ_ASSERT(incrementalSweptArenas.ref().isEmpty());
  MOZ_ASSERT(!savedEmptyArenas);
  for (auto i : AllAllocKinds()) {
    MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
  }
#endif
}
292
// Assert there are no property map arenas queued for update.
void ArenaLists::checkNoArenasToUpdate() {
  MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
  MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
}
297
checkNoArenasToUpdateForKind(AllocKind kind)298 void ArenaLists::checkNoArenasToUpdateForKind(AllocKind kind) {
299 #ifdef DEBUG
300 switch (kind) {
301 case AllocKind::COMPACT_PROP_MAP:
302 MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
303 break;
304 case AllocKind::NORMAL_PROP_MAP:
305 MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
306 break;
307 default:
308 break;
309 }
310 #endif
311 }
312
canDecommitPage(size_t pageIndex) const313 inline bool TenuredChunk::canDecommitPage(size_t pageIndex) const {
314 if (decommittedPages[pageIndex]) {
315 return false;
316 }
317
318 size_t arenaIndex = pageIndex * ArenasPerPage;
319 for (size_t i = 0; i < ArenasPerPage; i++) {
320 if (!freeCommittedArenas[arenaIndex + i]) {
321 return false;
322 }
323 }
324
325 return true;
326 }
327
// Decommit as many fully-free pages as possible, stopping early if |cancel|
// becomes true or a decommit attempt fails. decommitOneFreePage drops the
// lock around the actual system call.
void TenuredChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
                                      AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  for (size_t i = 0; i < PagesPerChunk; i++) {
    if (cancel) {
      break;
    }

    if (canDecommitPage(i) && !decommitOneFreePage(gc, i, lock)) {
      break;
    }
  }
}
342
// Reset |arena| to a fully unused state and insert it into |dest| at the
// position for |thingsPerArena| free things.
void TenuredChunk::recycleArena(Arena* arena, SortedArenaList& dest,
                                size_t thingsPerArena) {
  arena->setAsFullyUnused();
  dest.insertAt(arena, thingsPerArena);
}
348
// Return a no-longer-allocated arena to this chunk's free-committed set and
// update the chunk's position in the GC's chunk lists.
void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
                                const AutoLockGC& lock) {
  MOZ_ASSERT(!arena->allocated());
  MOZ_ASSERT(!freeCommittedArenas[arenaIndex(arena)]);

  freeCommittedArenas[arenaIndex(arena)] = true;
  ++info.numArenasFreeCommitted;
  ++info.numArenasFree;
  gc->updateOnArenaFree();

  verify();

  updateChunkListAfterFree(gc, 1, lock);
}
363
// Decommit the memory of one page whose arenas are all free. Returns false
// if decommit failed (including simulated OOM). The GC lock is dropped
// around the system call, so the page is temporarily accounted as allocated
// to keep other threads from using it in the meantime.
bool TenuredChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
                                       AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());
  MOZ_ASSERT(canDecommitPage(pageIndex));
  MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

  // Temporarily mark the page as allocated while we decommit.
  for (size_t i = 0; i < ArenasPerPage; i++) {
    size_t arenaIndex = pageIndex * ArenasPerPage + i;
    MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
    freeCommittedArenas[arenaIndex] = false;
  }
  info.numArenasFreeCommitted -= ArenasPerPage;
  info.numArenasFree -= ArenasPerPage;
  updateChunkListAfterAlloc(gc, lock);

  verify();

  bool ok;
  {
    // Drop the lock for the (possibly slow) system call.
    AutoUnlockGC unlock(lock);
    ok = !oom::ShouldFailWithOOM() &&
         MarkPagesUnusedSoft(pageAddress(pageIndex), PageSize);
  }

  // Mark the page as decommitted if successful or restore the original free
  // state.
  if (ok) {
    decommittedPages[pageIndex] = true;
  } else {
    for (size_t i = 0; i < ArenasPerPage; i++) {
      size_t arenaIndex = pageIndex * ArenasPerPage + i;
      MOZ_ASSERT(!freeCommittedArenas[arenaIndex]);
      freeCommittedArenas[arenaIndex] = true;
    }
    info.numArenasFreeCommitted += ArenasPerPage;
  }

  // Either way the page's arenas count as free again (decommitted or
  // committed).
  info.numArenasFree += ArenasPerPage;
  updateChunkListAfterFree(gc, ArenasPerPage, lock);

  verify();

  return ok;
}
409
// Decommit all fully-free pages while holding the GC lock throughout.
// Stops at the first failure (including simulated OOM).
void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  for (size_t i = 0; i < PagesPerChunk; i++) {
    if (!canDecommitPage(i)) {
      continue;
    }

    MOZ_ASSERT(!decommittedPages[i]);
    MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

    // NOTE(review): this passes SystemPageSize() where decommitOneFreePage
    // passes PageSize — confirm the two are intended to differ.
    if (js::oom::ShouldFailWithOOM() ||
        !MarkPagesUnusedSoft(pageAddress(i), SystemPageSize())) {
      break;
    }

    // Flip the whole page from free-committed to decommitted.
    decommittedPages[i] = true;
    for (size_t j = 0; j < ArenasPerPage; ++j) {
      size_t arenaIndex = i * ArenasPerPage + j;
      MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
      freeCommittedArenas[arenaIndex] = false;
    }
    info.numArenasFreeCommitted -= ArenasPerPage;
  }

  verify();
}
437
// After arenas are allocated from this chunk, move it from the available
// list to the full list if no free arenas remain.
void TenuredChunk::updateChunkListAfterAlloc(GCRuntime* gc,
                                             const AutoLockGC& lock) {
  if (MOZ_UNLIKELY(!hasAvailableArenas())) {
    gc->availableChunks(lock).remove(this);
    gc->fullChunks(lock).push(this);
  }
}
445
// Move this chunk between the full/available chunk lists after
// |numArenasFree| arenas were freed. info.numArenasFree has already been
// updated by the caller.
void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
                                            const AutoLockGC& lock) {
  if (info.numArenasFree == numArenasFree) {
    // The chunk was previously full: move it to the available list.
    gc->fullChunks(lock).remove(this);
    gc->availableChunks(lock).push(this);
  } else if (!unused()) {
    // Still partially used: it stays on the available list.
    MOZ_ASSERT(gc->availableChunks(lock).contains(this));
  } else {
    // Now completely unused: hand it back for recycling.
    MOZ_ASSERT(unused());
    gc->availableChunks(lock).remove(this);
    gc->recycleChunk(this, lock);
  }
}
459
pop()460 TenuredChunk* ChunkPool::pop() {
461 MOZ_ASSERT(bool(head_) == bool(count_));
462 if (!count_) {
463 return nullptr;
464 }
465 return remove(head_);
466 }
467
push(TenuredChunk * chunk)468 void ChunkPool::push(TenuredChunk* chunk) {
469 MOZ_ASSERT(!chunk->info.next);
470 MOZ_ASSERT(!chunk->info.prev);
471
472 chunk->info.next = head_;
473 if (head_) {
474 head_->info.prev = chunk;
475 }
476 head_ = chunk;
477 ++count_;
478 }
479
// Unlink |chunk| from the doubly-linked pool and return it.
TenuredChunk* ChunkPool::remove(TenuredChunk* chunk) {
  MOZ_ASSERT(count_ > 0);
  MOZ_ASSERT(contains(chunk));

  if (head_ == chunk) {
    head_ = chunk->info.next;
  }
  if (chunk->info.prev) {
    chunk->info.prev->info.next = chunk->info.next;
  }
  if (chunk->info.next) {
    chunk->info.next->info.prev = chunk->info.prev;
  }
  // Fully unlink the chunk so it can be pushed onto another pool.
  chunk->info.next = chunk->info.prev = nullptr;
  --count_;

  return chunk;
}
498
// We could keep the chunk pool sorted, but that's likely to be more expensive.
// This sort is nlogn, but keeping it sorted is likely to be m*n, with m being
// the number of operations (likely higher than n).
void ChunkPool::sort() {
  // Only sort if the list isn't already sorted.
  if (!isSorted()) {
    head_ = mergeSort(head(), count());

    // Fixup prev pointers; mergeSort only maintains the next links.
    TenuredChunk* prev = nullptr;
    for (TenuredChunk* cur = head_; cur; cur = cur->info.next) {
      cur->info.prev = prev;
      prev = cur;
    }
  }

  MOZ_ASSERT(verify());
  MOZ_ASSERT(isSorted());
}
518
// Recursive, stable merge sort of the next-linked list by ascending
// numArenasFree. prev pointers are left stale; the caller (sort) fixes them
// up afterwards.
TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
  MOZ_ASSERT(bool(list) == bool(count));

  if (count < 2) {
    return list;
  }

  size_t half = count / 2;

  // Split the list into halves of |half| and |count - half| chunks.
  TenuredChunk* front = list;
  TenuredChunk* back;
  {
    TenuredChunk* cur = list;
    for (size_t i = 0; i < half - 1; i++) {
      MOZ_ASSERT(cur);
      cur = cur->info.next;
    }
    back = cur->info.next;
    cur->info.next = nullptr;
  }

  // Sort each half recursively.
  front = mergeSort(front, half);
  back = mergeSort(back, count - half);

  // Merge the sorted halves back into one list.
  list = nullptr;
  TenuredChunk** cur = &list;
  while (front || back) {
    if (!front) {
      *cur = back;
      break;
    }
    if (!back) {
      *cur = front;
      break;
    }

    // Note that the sort is stable due to the <= here. Nothing depends on
    // this but it could.
    if (front->info.numArenasFree <= back->info.numArenasFree) {
      *cur = front;
      front = front->info.next;
      cur = &(*cur)->info.next;
    } else {
      *cur = back;
      back = back->info.next;
      cur = &(*cur)->info.next;
    }
  }

  return list;
}
572
isSorted() const573 bool ChunkPool::isSorted() const {
574 uint32_t last = 1;
575 for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
576 if (cursor->info.numArenasFree < last) {
577 return false;
578 }
579 last = cursor->info.numArenasFree;
580 }
581 return true;
582 }
583
584 #ifdef DEBUG
585
contains(TenuredChunk * chunk) const586 bool ChunkPool::contains(TenuredChunk* chunk) const {
587 verify();
588 for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
589 if (cursor == chunk) {
590 return true;
591 }
592 }
593 return false;
594 }
595
// Debug-only consistency check. Always returns true so it can be used
// directly inside MOZ_ASSERT.
bool ChunkPool::verify() const {
  MOZ_ASSERT(bool(head_) == bool(count_));
  uint32_t count = 0;
  for (TenuredChunk* cursor = head_; cursor;
       cursor = cursor->info.next, ++count) {
    // The doubly-linked structure must be consistent in both directions.
    MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
    MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
  }
  MOZ_ASSERT(count_ == count);
  return true;
}
607
// Run the per-chunk consistency checks on every chunk in the pool.
void ChunkPool::verifyChunks() const {
  for (TenuredChunk* chunk = head_; chunk; chunk = chunk->info.next) {
    chunk->verify();
  }
}
613
// Debug-only check that the chunk's free/decommit bookkeeping is consistent.
void TenuredChunk::verify() const {
  MOZ_ASSERT(info.numArenasFree <= ArenasPerChunk);
  MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

  // The counters must agree with the bitmaps they summarize.
  size_t decommittedCount = decommittedPages.Count() * ArenasPerPage;
  size_t freeCommittedCount = freeCommittedArenas.Count();
  size_t freeCount = freeCommittedCount + decommittedCount;

  MOZ_ASSERT(freeCount == info.numArenasFree);
  MOZ_ASSERT(freeCommittedCount == info.numArenasFreeCommitted);

  for (size_t i = 0; i < ArenasPerChunk; ++i) {
    // An arena cannot be both decommitted and free-committed, and a
    // free-committed arena must not be allocated.
    MOZ_ASSERT(!(decommittedPages[pageIndex(i)] && freeCommittedArenas[i]));
    MOZ_ASSERT_IF(freeCommittedArenas[i], !arenas[i].allocated());
  }
}
630
631 #endif
632
// Advance the iterator to the next chunk in the pool.
void ChunkPool::Iter::next() {
  MOZ_ASSERT(!done());
  current_ = current_->info.next;
}
637