// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>

#include <config.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>  // for PRIuPTR
#endif
#include <errno.h>     // for ENOMEM, errno
#include <gperftools/malloc_extension.h>  // for MallocRange, etc
#include "base/basictypes.h"
#include "base/commandlineflags.h"
#include "internal_logging.h"     // for ASSERT, TCMalloc_Printer, etc
#include "page_heap_allocator.h"  // for PageHeapAllocator
#include "static_vars.h"          // for Static
#include "system-alloc.h"         // for TCMalloc_SystemAlloc, etc

DEFINE_double(tcmalloc_release_rate,
              EnvToDouble("TCMALLOC_RELEASE_RATE", 1.0),
              "Rate at which we release unused memory to the system. "
              "Zero means we never release memory back to the system. "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower. Reasonable rates are in the "
              "range [0,10]");

DEFINE_int64(tcmalloc_heap_limit_mb,
             EnvToInt("TCMALLOC_HEAP_LIMIT_MB", 0),
             "Limit total size of the process heap to the "
             "specified number of MiB. "
             "When we approach the limit the memory is released "
             "to the system more aggressively (more minor page faults). "
             "Zero means to allocate as long as the system allows.");

namespace tcmalloc {

PageHeap::PageHeap()
    : pagemap_(MetaDataAlloc),
      pagemap_cache_(0),
      scavenge_counter_(0),
      // Start scavenging at kMaxPages list
      release_index_(kMaxPages),
      aggressive_decommit_(false) {
  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
  DLL_Init(&large_.normal);
  DLL_Init(&large_.returned);
  for (int i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i].normal);
    DLL_Init(&free_[i].returned);
  }
}

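// Search for a span of at least n pages: first the small free lists
// (normal, then returned) for each size >= n, then the large span
// lists.  Returns NULL if nothing suitable is found.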
Span* PageHeap::SearchFreeAndLargeLists(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);

  // Find first size >= n that has a non-empty list
  for (Length s = n; s < kMaxPages; s++) {
    Span* ll = &free_[s].normal;
    // If we're lucky, ll is non-empty, meaning it has a suitable span.
    if (!DLL_IsEmpty(ll)) {
      ASSERT(ll->next->location == Span::ON_NORMAL_FREELIST);
      return Carve(ll->next, n);
    }
    // Alternatively, maybe there's a usable returned span.
    ll = &free_[s].returned;
    if (!DLL_IsEmpty(ll)) {
      // We did not call EnsureLimit before, to avoid releasing a span
      // that would be taken back immediately.  Calling EnsureLimit here
      // is not very expensive: it fails only if there are no more
      // normal spans (and it fails quickly), or if SystemRelease does
      // not work (in which case there are probably no returned spans
      // either).
      if (EnsureLimit(n)) {
        // ll may have become empty due to coalescing
        if (!DLL_IsEmpty(ll)) {
          ASSERT(ll->next->location == Span::ON_RETURNED_FREELIST);
          return Carve(ll->next, n);
        }
      }
    }
  }
  // No luck in free lists, our last chance is in a larger size class.
  return AllocLarge(n);  // May be NULL
}

static const size_t kForcedCoalesceInterval = 128*1024*1024;

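// Allocate a run of "n" contiguous pages.  Falls back to forcing a
// coalesce of free and unmapped spans (see the comment inside) and
// then to growing the heap; returns NULL with errno set to ENOMEM if
// everything fails.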
Span* PageHeap::New(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);

  Span* result = SearchFreeAndLargeLists(n);
  if (result != NULL)
    return result;

  if (stats_.free_bytes != 0 && stats_.unmapped_bytes != 0
      && stats_.free_bytes + stats_.unmapped_bytes >= stats_.system_bytes / 4
      && (stats_.system_bytes / kForcedCoalesceInterval
          != (stats_.system_bytes + (n << kPageShift)) / kForcedCoalesceInterval)) {
    // We're about to grow the heap, but there are lots of free pages.
    // tcmalloc's design decision to keep unmapped and free spans
    // separate and never coalesce them means that sometimes there can
    // be a free run of pages of sufficient size, but it consists of
    // "segments" of different types, so the page heap search cannot
    // find it.  To avoid growing the heap and wasting memory in such
    // cases, we unmap all free pages, so that all free spans are
    // maximally coalesced.
    //
    // We also limit the rate of taking this path to at most once per
    // 128 MiB of heap growth.  Otherwise programs that grow the heap
    // frequently (which means by small amounts) could be penalized
    // with a higher count of minor page faults.
    //
    // See also large_heap_fragmentation_unittest.cc and
    // https://code.google.com/p/gperftools/issues/detail?id=368
    ReleaseAtLeastNPages(static_cast<Length>(0x7fffffff));

    // Then try again.  If we are forced to grow the heap because of
    // large-span fragmentation and not because of the problem described
    // above, then at the very least we've just unmapped free but
    // insufficiently big large spans back to the OS.  So in case of
    // really unlucky memory fragmentation we'll be consuming virtual
    // address space, but not real memory.
    result = SearchFreeAndLargeLists(n);
    if (result != NULL) return result;
  }

  // Grow the heap and try again.
  if (!GrowHeap(n)) {
    ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
    ASSERT(Check());
    // The underlying SysAllocator likely set ENOMEM, but we can get
    // here due to EnsureLimit, so we set it here too.
    //
    // Setting errno to ENOMEM here lets us avoid dealing with it on
    // the fast path.
    errno = ENOMEM;
    return NULL;
  }
  return SearchFreeAndLargeLists(n);
}

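// Address-ordered best-fit over the large span lists.  May return NULL
// if no large span is big enough, or if the heap limit forbids carving
// a span off the returned list.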
Span* PageHeap::AllocLarge(Length n) {
  // Find the best span (closest to n in size).
  // The following loops implement address-ordered best-fit.
  Span *best = NULL;

  // Search through normal list
  for (Span* span = large_.normal.next;
       span != &large_.normal;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        ASSERT(best->location == Span::ON_NORMAL_FREELIST);
      }
    }
  }

  Span *bestNormal = best;

  // Search through released list in case it has a better fit
  for (Span* span = large_.returned.next;
       span != &large_.returned;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        ASSERT(best->location == Span::ON_RETURNED_FREELIST);
      }
    }
  }

  if (best == bestNormal) {
    return best == NULL ? NULL : Carve(best, n);
  }

  // best comes from the returned list.

  if (EnsureLimit(n, false)) {
    return Carve(best, n);
  }

  if (EnsureLimit(n, true)) {
    // best could have been destroyed by coalescing.
    // bestNormal is not a best-fit, and it could have been destroyed as well.
    // We retry, with the limit already ensured:
    return AllocLarge(n);
  }

  // If bestNormal existed, EnsureLimit would have succeeded:
  ASSERT(bestNormal == NULL);
  // We are not allowed to take best from the returned list.
  return NULL;
}

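// Split an IN_USE span into a piece of length "n" (kept in "span") and
// a leftover piece, which is returned to the caller.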
Span* PageHeap::Split(Span* span, Length n) {
  ASSERT(0 < n);
  ASSERT(n < span->length);
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->sizeclass == 0);
  Event(span, 'T', n);

  const int extra = span->length - n;
  Span* leftover = NewSpan(span->start + n, extra);
  ASSERT(leftover->location == Span::IN_USE);
  Event(leftover, 'U', extra);
  RecordSpan(leftover);
  pagemap_.set(span->start + n - 1, span);  // Update map from pageid to span
  span->length = n;

  return leftover;
}

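// Commit the memory covered by "span" and update the commit stats.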
void PageHeap::CommitSpan(Span* span) {
  ++stats_.commit_count;

  TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
                        static_cast<size_t>(span->length << kPageShift));
  stats_.committed_bytes += span->length << kPageShift;
  stats_.total_commit_bytes += (span->length << kPageShift);
}

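// Release the memory covered by "span" back to the system and update
// the decommit stats.  Returns false if the system release failed
// (e.g. it is unsupported on this platform).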
bool PageHeap::DecommitSpan(Span* span) {
  ++stats_.decommit_count;

  bool rv = TCMalloc_SystemRelease(
      reinterpret_cast<void*>(span->start << kPageShift),
      static_cast<size_t>(span->length << kPageShift));
  if (rv) {
    stats_.committed_bytes -= span->length << kPageShift;
    stats_.total_decommit_bytes += (span->length << kPageShift);
  }

  return rv;
}

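// Take "span" off its free list, mark it IN_USE, and trim it down to
// exactly "n" pages, prepending any leftover pages back onto the free
// list the span came from.  Recommits the memory if it was unmapped.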
Span* PageHeap::Carve(Span* span, Length n) {
  ASSERT(n > 0);
  ASSERT(span->location != Span::IN_USE);
  const int old_location = span->location;
  RemoveFromFreeList(span);
  span->location = Span::IN_USE;
  Event(span, 'A', n);

  const int extra = span->length - n;
  ASSERT(extra >= 0);
  if (extra > 0) {
    Span* leftover = NewSpan(span->start + n, extra);
    leftover->location = old_location;
    Event(leftover, 'S', extra);
    RecordSpan(leftover);

    // The previous span of |leftover| was just split off -- no need to
    // coalesce them.  The next span of |leftover| was not previously
    // coalesced with |span|, i.e. it is NULL or has a location other
    // than |old_location|.
#ifndef NDEBUG
    const PageID p = leftover->start;
    const Length len = leftover->length;
    Span* next = GetDescriptor(p+len);
    ASSERT(next == NULL ||
           next->location == Span::IN_USE ||
           next->location != leftover->location);
#endif

    PrependToFreeList(leftover);  // Skip coalescing - no candidates possible
    span->length = n;
    pagemap_.set(span->start + n - 1, span);
  }
  ASSERT(Check());
  if (old_location == Span::ON_RETURNED_FREELIST) {
    // We need to recommit this address space.
    CommitSpan(span);
  }
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->length == n);
  ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
  return span;
}

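// Return an IN_USE span to the page heap: put it on the normal free
// list, coalesce it with its neighbors where possible, and maybe
// release some memory to the system via IncrementalScavenge().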
void PageHeap::Delete(Span* span) {
  ASSERT(Check());
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->length > 0);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start + span->length - 1) == span);
  const Length n = span->length;
  span->sizeclass = 0;
  span->sample = 0;
  span->location = Span::ON_NORMAL_FREELIST;
  Event(span, 'D', span->length);
  MergeIntoFreeList(span);  // Coalesces if possible
  IncrementalScavenge(n);
  ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
  ASSERT(Check());
}

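// In aggressive decommit mode any non-IN_USE neighbor may be coalesced
// (the merged result is decommitted afterwards); otherwise the two
// spans must live on the same kind of free list.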
bool PageHeap::MayMergeSpans(Span *span, Span *other) {
  if (aggressive_decommit_) {
    return other->location != Span::IN_USE;
  }
  return span->location == other->location;
}

void PageHeap::MergeIntoFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);

  // Coalesce -- we guarantee that "p" != 0, so no bounds checking
  // necessary.  We do not bother resetting the stale pagemap
  // entries for the pieces we are merging together because we only
  // care about the pagemap entries for the boundaries.
  //
  // Note: depending on aggressive_decommit_ mode we allow only
  // similar spans to be coalesced.
  //
  // The following applies if aggressive_decommit_ is enabled:
  //
  // Note that the adjacent spans we merge into "span" may come out of a
  // "normal" (committed) list, and cleanly merge with our IN_USE span,
  // which is implicitly committed.  If the adjacent spans are on the
  // "returned" (decommitted) list, then we must get both spans into the
  // same state before or after we coalesce them.  The current code
  // always decommits.  This is achieved by blindly decommitting the
  // entire coalesced region, which may include any combination of
  // committed and decommitted spans, at the end of the method.

  // TODO(jar): "Always decommit" causes some extra calls to commit when we are
  // called in GrowHeap() during an allocation :-/.  We need to eval the cost of
  // that oscillation, and possibly do something to reduce it.

  // TODO(jar): We need a better strategy for deciding to commit, or decommit,
  // based on memory usage and free heap sizes.

  uint64_t temp_committed = 0;

  const PageID p = span->start;
  const Length n = span->length;
  Span* prev = GetDescriptor(p-1);
  if (prev != NULL && MayMergeSpans(span, prev)) {
    // Merge preceding span into this span
    ASSERT(prev->start + prev->length == p);
    const Length len = prev->length;
    if (aggressive_decommit_ && prev->location == Span::ON_RETURNED_FREELIST) {
      // We're about to put the merged span onto the returned freelist
      // and call DecommitSpan() on it, which will mark the entire span,
      // including this piece, as released and decrease
      // stats_.committed_bytes by the size of the merged span.  To make
      // the math work out we temporarily increase the
      // stats_.committed_bytes amount.
      temp_committed = prev->length << kPageShift;
    }
    RemoveFromFreeList(prev);
    DeleteSpan(prev);
    span->start -= len;
    span->length += len;
    pagemap_.set(span->start, span);
    Event(span, 'L', len);
  }
  Span* next = GetDescriptor(p+n);
  if (next != NULL && MayMergeSpans(span, next)) {
    // Merge next span into this span
    ASSERT(next->start == p+n);
    const Length len = next->length;
    if (aggressive_decommit_ && next->location == Span::ON_RETURNED_FREELIST) {
      // See the comment in the 'prev' case above for an explanation.
      temp_committed += next->length << kPageShift;
    }
    RemoveFromFreeList(next);
    DeleteSpan(next);
    span->length += len;
    pagemap_.set(span->start + span->length - 1, span);
    Event(span, 'R', len);
  }

  if (aggressive_decommit_) {
    if (DecommitSpan(span)) {
      span->location = Span::ON_RETURNED_FREELIST;
      stats_.committed_bytes += temp_committed;
    } else {
      ASSERT(temp_committed == 0);
    }
  }
  PrependToFreeList(span);
}

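// Add "span" to the front of the free list matching its length and
// location, and update the free/unmapped byte counters.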
void PageHeap::PrependToFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);
  SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_;
  if (span->location == Span::ON_NORMAL_FREELIST) {
    stats_.free_bytes += (span->length << kPageShift);
    DLL_Prepend(&list->normal, span);
  } else {
    stats_.unmapped_bytes += (span->length << kPageShift);
    DLL_Prepend(&list->returned, span);
  }
}

void PageHeap::RemoveFromFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);
  if (span->location == Span::ON_NORMAL_FREELIST) {
    stats_.free_bytes -= (span->length << kPageShift);
  } else {
    stats_.unmapped_bytes -= (span->length << kPageShift);
  }
  DLL_Remove(span);
}

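// Called whenever a span of "n" pages is freed; releases memory back
// to the system at a rate controlled by FLAGS_tcmalloc_release_rate.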
void PageHeap::IncrementalScavenge(Length n) {
  // Fast path; not yet time to release memory
  scavenge_counter_ -= n;
  if (scavenge_counter_ >= 0) return;  // Not yet time to scavenge

  const double rate = FLAGS_tcmalloc_release_rate;
  if (rate <= 1e-6) {
    // Tiny release rate means that releasing is disabled.
    scavenge_counter_ = kDefaultReleaseDelay;
    return;
  }

  ++stats_.scavenge_count;

  Length released_pages = ReleaseAtLeastNPages(1);

  if (released_pages == 0) {
    // Nothing to scavenge, delay for a while.
    scavenge_counter_ = kDefaultReleaseDelay;
  } else {
    // Compute how long to wait until we return memory.
    // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
    // after releasing one page.
    const double mult = 1000.0 / rate;
    double wait = mult * static_cast<double>(released_pages);
    if (wait > kMaxReleaseDelay) {
      // Avoid overflow and bound to reasonable range.
      wait = kMaxReleaseDelay;
    }
    scavenge_counter_ = static_cast<int64_t>(wait);
  }
}

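// Decommit the span at the tail of slist's normal list and move it to
// the returned list.  Returns the number of pages released, or 0 if
// the release failed.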
Length PageHeap::ReleaseLastNormalSpan(SpanList* slist) {
  Span* s = slist->normal.prev;
  ASSERT(s->location == Span::ON_NORMAL_FREELIST);

  if (DecommitSpan(s)) {
    RemoveFromFreeList(s);
    const Length n = s->length;
    s->location = Span::ON_RETURNED_FREELIST;
    MergeIntoFreeList(s);  // Coalesces if possible.
    return n;
  }

  return 0;
}

Length PageHeap::ReleaseAtLeastNPages(Length num_pages) {
  Length released_pages = 0;

  // Round robin through the lists of free spans, releasing the last
  // span in each list.  Stop after releasing at least num_pages
  // or when there is nothing more to release.
  while (released_pages < num_pages && stats_.free_bytes > 0) {
    for (int i = 0; i < kMaxPages+1 && released_pages < num_pages;
         i++, release_index_++) {
      if (release_index_ > kMaxPages) release_index_ = 0;
      SpanList* slist = (release_index_ == kMaxPages) ?
          &large_ : &free_[release_index_];
      if (!DLL_IsEmpty(&slist->normal)) {
        Length released_len = ReleaseLastNormalSpan(slist);
        // Some systems do not support release
        if (released_len == 0) return released_pages;
        released_pages += released_len;
      }
    }
  }
  return released_pages;
}

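// Check that allocating n more pages would keep the heap within
// FLAGS_tcmalloc_heap_limit_mb, optionally (withRelease) decommitting
// free spans first to get back under the limit.  Trivially true when
// no limit is configured.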
bool PageHeap::EnsureLimit(Length n, bool withRelease)
{
  Length limit = (FLAGS_tcmalloc_heap_limit_mb*1024*1024) >> kPageShift;
  if (limit == 0) return true;  // there is no limit

  // We do not use stats_.system_bytes because it does not take
  // MetaDataAllocs into account.
  Length takenPages = TCMalloc_SystemTaken >> kPageShift;
  // XXX takenPages may be slightly bigger than limit for two reasons:
  // * MetaDataAllocs ignore the limit (it is not easy to handle
  //   out of memory there)
  // * sys_alloc may round an allocation up to the huge page size,
  //   even though a smaller limit was ensured

  ASSERT(takenPages >= stats_.unmapped_bytes >> kPageShift);
  takenPages -= stats_.unmapped_bytes >> kPageShift;

  if (takenPages + n > limit && withRelease) {
    takenPages -= ReleaseAtLeastNPages(takenPages + n - limit);
  }

  return takenPages + n <= limit;
}

void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
  // Associate span object with all interior pages as well
  ASSERT(span->location == Span::IN_USE);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start+span->length-1) == span);
  Event(span, 'C', sc);
  span->sizeclass = sc;
  for (Length i = 1; i < span->length-1; i++) {
    pagemap_.set(span->start+i, span);
  }
}

void PageHeap::GetSmallSpanStats(SmallSpanStats* result) {
  for (int s = 0; s < kMaxPages; s++) {
    result->normal_length[s] = DLL_Length(&free_[s].normal);
    result->returned_length[s] = DLL_Length(&free_[s].returned);
  }
}

void PageHeap::GetLargeSpanStats(LargeSpanStats* result) {
  result->spans = 0;
  result->normal_pages = 0;
  result->returned_pages = 0;
  for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
    result->normal_pages += s->length;
    result->spans++;
  }
  for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
    result->returned_pages += s->length;
    result->spans++;
  }
}

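// Fill in *r with the span containing or following page "start";
// returns false when the pagemap has no further spans.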
bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
  Span* span = reinterpret_cast<Span*>(pagemap_.Next(start));
  if (span == NULL) {
    return false;
  }
  r->address = span->start << kPageShift;
  r->length = span->length << kPageShift;
  r->fraction = 0;
  switch (span->location) {
    case Span::IN_USE:
      r->type = base::MallocRange::INUSE;
      r->fraction = 1;
      if (span->sizeclass > 0) {
        // Only some of the objects in this span may be in use.
        const size_t osize = Static::sizemap()->class_to_size(span->sizeclass);
        r->fraction = (1.0 * osize * span->refcount) / r->length;
      }
      break;
    case Span::ON_NORMAL_FREELIST:
      r->type = base::MallocRange::FREE;
      break;
    case Span::ON_RETURNED_FREELIST:
      r->type = base::MallocRange::UNMAPPED;
      break;
    default:
      r->type = base::MallocRange::UNKNOWN;
      break;
  }
  return true;
}

static void RecordGrowth(size_t growth) {
  StackTrace* t = Static::stacktrace_allocator()->New();
  t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
  t->size = growth;
  t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks());
  Static::set_growth_stacks(t);
}

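// Ask the system allocator for at least n pages (rounded up to
// kMinSystemAlloc), record the growth, and merge the new memory into
// the free lists by pretending it was allocated and then Delete()d.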
bool PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  if (n > kMaxValidPages) return false;
  Length ask = (n > kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
  size_t actual_size;
  void* ptr = NULL;
  if (EnsureLimit(ask)) {
    ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
  }
  if (ptr == NULL) {
    if (n < ask) {
      // Try growing just "n" pages
      ask = n;
      if (EnsureLimit(ask)) {
        ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
      }
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;
  RecordGrowth(ask << kPageShift);

  ++stats_.reserve_count;
  ++stats_.commit_count;

  uint64_t old_system_bytes = stats_.system_bytes;
  stats_.system_bytes += (ask << kPageShift);
  stats_.committed_bytes += (ask << kPageShift);

  stats_.total_commit_bytes += (ask << kPageShift);
  stats_.total_reserve_bytes += (ask << kPageShift);

  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // If we already have a lot of pages allocated, just preallocate a
  // bunch of memory for the page map.  This prevents fragmentation by
  // pagemap metadata when a program keeps allocating and freeing large
  // blocks.
  if (old_system_bytes < kPageMapBigAllocationThreshold
      && stats_.system_bytes >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to cause
    // any necessary coalescing to occur.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
    ASSERT(Check());
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}

bool PageHeap::Check() {
  ASSERT(free_[0].normal.next == &free_[0].normal);
  ASSERT(free_[0].returned.next == &free_[0].returned);
  return true;
}

bool PageHeap::CheckExpensive() {
  bool result = Check();
  CheckList(&large_.normal, kMaxPages, 1000000000, Span::ON_NORMAL_FREELIST);
  CheckList(&large_.returned, kMaxPages, 1000000000, Span::ON_RETURNED_FREELIST);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s].normal, s, s, Span::ON_NORMAL_FREELIST);
    CheckList(&free_[s].returned, s, s, Span::ON_RETURNED_FREELIST);
  }
  return result;
}

bool PageHeap::CheckList(Span* list, Length min_pages, Length max_pages,
                         int freelist) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->location == freelist);  // NORMAL or RETURNED
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}

}  // namespace tcmalloc