// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page allocator.
//
// The page allocator manages mapped pages (defined by pageSize, NOT
// physPageSize) for allocation and re-use. It is embedded into mheap.
//
// Pages are managed using a bitmap that is sharded into chunks.
// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the
// process's address space. Chunks are managed in a sparse-array-style structure
// similar to mheap.arenas, since the bitmap may be large on some systems.
//
// The bitmap is efficiently searched by using a radix tree in combination
// with fast bit-wise intrinsics. Allocation is performed using an address-ordered
// first-fit approach.
//
// Each entry in the radix tree is a summary that describes three properties of
// a particular region of the address space: the number of contiguous free pages
// at the start and end of the region it represents, and the maximum number of
// contiguous free pages found anywhere in that region.
//
// Each level of the radix tree is stored as one contiguous array, which represents
// a different granularity of subdivision of the process's address space. Thus, this
// radix tree is actually implicit in these large arrays, as opposed to having explicit
// dynamically-allocated pointer-based node structures. Naturally, these arrays may be
// quite large for systems with large address spaces, so in these cases they are mapped
// into memory as needed. The leaf summaries of the tree correspond to a bitmap chunk.
//
// The root level (referred to as L0 and index 0 in pageAlloc.summary) has each
// summary represent the largest section of address space (16 GiB on 64-bit systems),
// with each subsequent level representing successively smaller subsections until we
// reach the finest granularity at the leaves, a chunk.
//
// More specifically, each summary in each level (except for leaf summaries)
// represents some number of entries in the following level. For example, each
// summary in the root level may represent a 16 GiB region of address space,
// and in the next level there could be 8 corresponding entries which represent 2
// GiB subsections of that 16 GiB region, each of which could correspond to 8
// entries in the next level which each represent 256 MiB regions, and so on.
//
// Thus, this design only scales up to a bounded heap size, but it can always be
// extended to larger heaps by simply adding levels to the radix tree, which mostly
// costs additional virtual address space. The choice of managing large arrays also
// means that a large amount of virtual address space may be reserved by the runtime.
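//
// As a concrete example of the scheme above, consider a hypothetical 64-bit
// configuration with heapAddrBits = 48, an 8 KiB page size (pageShift = 13),
// and summaryLevels = 5 (see mpagealloc_64bit.go). Then:
//
//	logPallocChunkBytes = logPallocChunkPages + pageShift = 9 + 13 = 22 // 4 MiB chunks
//	summaryL0Bits       = 48 - 22 - (5-1)*3 = 14
//
//	level | entries | address space per entry
//	------+---------+------------------------
//	    0 |    2^14 | 2^34 bytes = 16 GiB
//	    1 |    2^17 | 2^31 bytes = 2 GiB
//	    2 |    2^20 | 2^28 bytes = 256 MiB
//	    3 |    2^23 | 2^25 bytes = 32 MiB
//	    4 |    2^26 | 2^22 bytes = 4 MiB (one chunk)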

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

const (
	// The size of a bitmap chunk, i.e. the number of bits (that is, pages) to consider
	// in the bitmap at once.
	pallocChunkPages    = 1 << logPallocChunkPages
	pallocChunkBytes    = pallocChunkPages * pageSize
	logPallocChunkPages = 9
	logPallocChunkBytes = logPallocChunkPages + pageShift

	// The number of radix bits for each level.
	//
	// The value of 3 is chosen such that the block of summaries we need to scan at
	// each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is
	// close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree
	// levels perfectly into the 21-bit pallocBits summary field at the root level.
	//
	// The following equation explains how each of the constants relate:
	// summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits
	//
	// summaryLevels is an architecture-dependent value defined in mpagealloc_*.go.
	summaryLevelBits = 3
	summaryL0Bits    = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits

	// pallocChunksL2Bits is the number of bits of the chunk index number
	// covered by the second level of the chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this change.
	pallocChunksL2Bits  = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
	pallocChunksL1Shift = pallocChunksL2Bits

	// Maximum searchAddr value, which indicates that the heap has no free space.
	//
	// We subtract arenaBaseOffset because we want this to represent the maximum
	// value in the shifted address space, but searchAddr is stored as a regular
	// memory address. See arenaBaseOffset for details.
	maxSearchAddr = ^uintptr(0) - arenaBaseOffset

	// Minimum scavAddr value, which indicates that the scavenger is done.
	//
	// minScavAddr + arenaBaseOffset == 0
	minScavAddr = (^arenaBaseOffset + 1) & uintptrMask
)
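
// As an illustrative check of the relationships above, take the same
// hypothetical 64-bit configuration (heapAddrBits = 48, logPallocChunkBytes = 22,
// summaryLevels = 5, pallocChunksL1Bits = 13):
//
//	summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes
//	  = 14 + 4*3 + 22 = 48 = heapAddrBits
//
//	pallocChunksL2Bits = 48 - 22 - 13 = 13
//
// so each L2 entry of the chunks map covers 2^13 chunks. At 128 bytes of
// bitmap state per chunk (two 512-bit bitmaps in pallocData), that is
// 2^13 * 128 B = 1 MiB per L2 entry, matching the table in the documentation
// for (*pageAlloc).chunks below.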

// Global chunk index.
//
// Represents an index into the leaf level of the radix tree.
// Similar to arenaIndex, except instead of arenas, it divides the address
// space into chunks.
type chunkIdx uint

// chunkIndex returns the global index of the palloc chunk containing the
// pointer p.
func chunkIndex(p uintptr) chunkIdx {
	return chunkIdx((p + arenaBaseOffset) / pallocChunkBytes)
}

// chunkBase returns the base address of the palloc chunk at index ci.
func chunkBase(ci chunkIdx) uintptr {
	return uintptr(ci)*pallocChunkBytes - arenaBaseOffset
}

// chunkPageIndex computes the index of the page that contains p,
// relative to the chunk which contains p.
func chunkPageIndex(p uintptr) uint {
	return uint(p % pallocChunkBytes / pageSize)
}
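
// exampleAddrDecomposition is an illustrative sketch (hypothetical; nothing
// in the runtime calls it) showing how the helpers above relate: assuming
// arenaBaseOffset is chunk-aligned (as is the case on all supported
// platforms), chunkBase inverts chunkIndex, and adding back the in-chunk
// page index recovers the page-aligned address containing p.
func exampleAddrDecomposition(p uintptr) bool {
	ci := chunkIndex(p)     // which chunk p falls in
	pi := chunkPageIndex(p) // which page within that chunk
	return chunkBase(ci)+uintptr(pi)*pageSize == alignDown(p, pageSize)
}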

// l1 returns the index into the first level of (*pageAlloc).chunks.
func (i chunkIdx) l1() uint {
	if pallocChunksL1Bits == 0 {
		// Let the compiler optimize this away if there's no
		// L1 map.
		return 0
	} else {
		return uint(i) >> pallocChunksL1Shift
	}
}

// l2 returns the index into the second level of (*pageAlloc).chunks.
func (i chunkIdx) l2() uint {
	if pallocChunksL1Bits == 0 {
		return uint(i)
	} else {
		return uint(i) & (1<<pallocChunksL2Bits - 1)
	}
}
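
// exampleChunkIdxSplit is an illustrative sketch (hypothetical; nothing in
// the runtime calls it) showing that l1 and l2 simply split a chunk index
// into its high and low bits: recombining them yields the original index.
func exampleChunkIdxSplit(i chunkIdx) bool {
	if pallocChunksL1Bits == 0 {
		// No L1 map: l2 is the whole index.
		return i.l2() == uint(i)
	}
	return (i.l1()<<pallocChunksL1Shift | i.l2()) == uint(i)
}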

// addrsToSummaryRange converts base and limit pointers into a range
// of entries for the given summary level.
//
// The returned range is inclusive on the lower bound and exclusive on
// the upper bound.
func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int) {
	// This is slightly more nuanced than just a shift for the exclusive
	// upper-bound. Note that the exclusive upper bound may be within a
	// summary at this level, meaning if we just do the obvious computation
	// hi will end up being an inclusive upper bound. Unfortunately, just
	// adding 1 to that is too broad since we might be on the very edge
	// of a summary's max page count boundary for this level
	// (1 << levelLogPages[level]). So, make limit an inclusive upper bound
	// then shift, then add 1, so we get an exclusive upper bound at the end.
	lo = int((base + arenaBaseOffset) >> levelShift[level])
	hi = int(((limit-1)+arenaBaseOffset)>>levelShift[level]) + 1
	return
}

// blockAlignSummaryRange aligns indices into the given level to that
// level's block width (1 << levelBits[level]). It assumes lo is inclusive
// and hi is exclusive, and so aligns them down and up respectively.
func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
	e := uintptr(1) << levelBits[level]
	return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
}
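
// For example (hypothetical values), suppose arenaBaseOffset = 0 and
// levelShift[level] = 22, i.e. each summary at this level covers a 4 MiB
// chunk. For the range [0x400000, 0xc00000), which covers chunks 1 and 2:
//
//	lo = 0x400000 >> 22       = 1
//	hi = (0xbfffff >> 22) + 1 = 2 + 1 = 3
//
// yielding the half-open range [1, 3). Naively computing hi as
// 0xc00000 >> 22 = 3 happens to work here, but for a limit that falls
// mid-summary, like 0x500000, it would produce an inclusive bound
// (0x500000 >> 22 = 1), which is why the limit is first made inclusive
// (limit-1) before shifting and adding 1.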

type pageAlloc struct {
	// Radix tree of summaries.
	//
	// Each slice's cap represents the whole memory reservation.
	// Each slice's len reflects the allocator's maximum known
	// mapped heap address for that level.
	//
	// The backing store of each summary level is reserved in init
	// and may or may not be committed in grow (small address spaces
	// may commit all the memory in init).
	//
	// The purpose of keeping len <= cap is to enforce bounds checks
	// on the top end of the slice so that instead of an unknown
	// runtime segmentation fault, we get a much friendlier out-of-bounds
	// error.
	//
	// To iterate over a summary level, use inUse to determine which ranges
	// are currently available. Otherwise one might try to access
	// memory which is only Reserved, which may result in a hard fault.
	//
	// We may still get segmentation faults at indices < len since some of
	// that memory may not be committed yet.
	summary [summaryLevels][]pallocSum

	// chunks is a slice of bitmap chunks.
	//
	// The total size of chunks is quite large on most 64-bit platforms
	// (O(GiB) or more) if flattened, so rather than making one large mapping
	// (which has problems on some platforms, even when PROT_NONE) we use a
	// two-level sparse array approach similar to the arena index in mheap.
	//
	// To find the chunk containing a memory address `a`, do:
	//   chunkOf(chunkIndex(a))
	//
	// Below is a table describing the configuration for chunks for various
	// heapAddrBits supported by the runtime.
	//
	// heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
	// ------------------------------------------------
	// 32           | 0       | 10      | 128 KiB
	// 33 (iOS)     | 0       | 11      | 256 KiB
	// 48           | 13      | 13      | 1 MiB
	//
	// There's no reason to use the L1 part of chunks on 32-bit: the
	// address space is small, so the L2 is small. For platforms with a
	// 48-bit address space, we pick the L1 such that the L2 is 1 MiB
	// in size, which is a good tradeoff between granularity and the
	// impact on BSS (note the L1 is stored directly in pageAlloc).
	//
	// To iterate over the bitmap, use inUse to determine which ranges
	// are currently available. Otherwise one might iterate over unused
	// ranges.
	//
	// TODO(mknyszek): Consider changing the definition of the bitmap
	// such that 1 means free and 0 means in-use so that summaries and
	// the bitmaps align better on zero-values.
	chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData

	// The address to start an allocation search with. It must never
	// point to any memory that is not contained in inUse, i.e.
	// inUse.contains(searchAddr) must always be true.
	//
	// In the linearized view (that is, with arenaBaseOffset added),
	// all valid heap addresses below this value are guaranteed to be
	// allocated and are therefore not worth searching.
	//
	// Note that adding in arenaBaseOffset transforms addresses
	// to a new address space with a linear view of the full address
	// space on architectures with segmented address spaces.
	searchAddr uintptr

	// The address to start a scavenge candidate search with. It
	// need not point to memory contained in inUse.
	scavAddr uintptr

	// The amount of memory scavenged since the last scavtrace print.
	//
	// Read and updated atomically.
	scavReleased uintptr

	// start and end represent the chunk indices
	// which pageAlloc knows about. It assumes
	// chunks in the range [start, end) are
	// currently ready to use.
	start, end chunkIdx

	// inUse is a slice of ranges of address space which are
	// known by the page allocator to be currently in-use (passed
	// to grow).
	//
	// This field is currently unused on 32-bit architectures but
	// is harmless to track. We care much more about having a
	// contiguous heap in these cases and take additional measures
	// to ensure that, so in nearly all cases this should have just
	// 1 element.
	//
	// All access is protected by the mheapLock.
	inUse addrRanges

	// mheap_.lock. This level of indirection makes it possible
	// to test pageAlloc independently of the runtime allocator.
	mheapLock *mutex

	// sysStat is the runtime memstat to update when new system
	// memory is committed by the pageAlloc for allocation metadata.
	sysStat *uint64

	// Whether or not this struct is being used in tests.
	test bool
}

func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
	if levelLogPages[0] > logMaxPackedValue {
		// We can't represent 1<<levelLogPages[0] pages, the maximum number
		// of pages we need to represent at the root level, in a summary, which
		// is a big problem. Throw.
		print("runtime: root level max pages = ", 1<<levelLogPages[0], "\n")
		print("runtime: summary max pages = ", maxPackedValue, "\n")
		throw("root level max pages doesn't fit in summary")
	}
	s.sysStat = sysStat

	// Initialize s.inUse.
	s.inUse.init(sysStat)

	// System-dependent initialization.
	s.sysInit()

	// Start with the searchAddr in a state indicating there's no free memory.
	s.searchAddr = maxSearchAddr

	// Start with the scavAddr in a state indicating there's nothing more to do.
	s.scavAddr = minScavAddr

	// Set the mheapLock.
	s.mheapLock = mheapLock
}

// compareSearchAddrTo compares an address against s.searchAddr in a linearized
// view of the address space on systems with discontinuous process address spaces.
// This linearized view is the same one generated by chunkIndex and arenaIndex,
// done by adding arenaBaseOffset.
//
// On systems without a discontinuous address space, it's just a normal comparison.
//
// Returns < 0 if addr is less than s.searchAddr in the linearized address space.
// Returns > 0 if addr is greater than s.searchAddr in the linearized address space.
// Returns 0 if addr and s.searchAddr are equal.
func (s *pageAlloc) compareSearchAddrTo(addr uintptr) int {
	// Compare with arenaBaseOffset added because it gives us a linear, contiguous view
	// of the heap on architectures with signed address spaces.
	lAddr := addr + arenaBaseOffset
	lSearchAddr := s.searchAddr + arenaBaseOffset
	if lAddr < lSearchAddr {
		return -1
	} else if lAddr > lSearchAddr {
		return 1
	}
	return 0
}
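
// As a concrete illustration of the linearization: on amd64, arenaBaseOffset
// is 1<<47, and heap addresses are sign-extended, so they live in
// [-2^47, 2^47) when viewed as signed values. Adding arenaBaseOffset (with
// uintptr wraparound) maps 0xffff800000000000 (-2^47) to 0 and 0x7fffffffffff
// (2^47 - 1) to 2^48 - 1, so an ordinary unsigned comparison of the shifted
// values matches address order in the contiguous, linearized space.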

// chunkOf returns the chunk at the given chunk index.
func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
	return &s.chunks[ci.l1()][ci.l2()]
}

// grow sets up the metadata for the address range [base, base+size).
// It may allocate metadata, in which case *s.sysStat will be updated.
//
// s.mheapLock must be held.
func (s *pageAlloc) grow(base, size uintptr) {
	// Round up to chunks, since we can't deal with increments smaller
	// than chunks. Also, sysGrow expects aligned values.
	limit := alignUp(base+size, pallocChunkBytes)
	base = alignDown(base, pallocChunkBytes)

	// Grow the summary levels in a system-dependent manner.
	// We just update a bunch of additional metadata here.
	s.sysGrow(base, limit)

	// Update s.start and s.end.
	// If no growth happened yet, start == 0. This is generally
	// safe since the zero page is unmapped.
	firstGrowth := s.start == 0
	start, end := chunkIndex(base), chunkIndex(limit)
	if firstGrowth || start < s.start {
		s.start = start
	}
	if end > s.end {
		s.end = end
	}
	// Note that [base, limit) will never overlap with any existing
	// range inUse because grow only ever adds never-used memory
	// regions to the page allocator.
	s.inUse.add(addrRange{base, limit})

	// A grow operation is a lot like a free operation, so if our
	// chunk ends up below the (linearized) s.searchAddr, update
	// s.searchAddr to the new address, just like in free.
	if s.compareSearchAddrTo(base) < 0 {
		s.searchAddr = base
	}

	// Add entries into chunks, which is sparse, if needed. Then,
	// initialize the bitmap.
	//
	// Newly-grown memory is always considered scavenged.
	// Set all the bits in the scavenged bitmaps high.
	for c := chunkIndex(base); c < chunkIndex(limit); c++ {
		if s.chunks[c.l1()] == nil {
			// Create the necessary l2 entry.
			//
			// Store it atomically to avoid races with readers which
			// don't acquire the heap lock.
			r := sysAlloc(unsafe.Sizeof(*s.chunks[0]), s.sysStat)
			atomic.StorepNoWB(unsafe.Pointer(&s.chunks[c.l1()]), r)
		}
		s.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
	}

	// Update summaries accordingly. The grow acts like a free, so
	// we need to ensure this newly-free memory is visible in the
	// summaries. Note that base was aligned down above, so use the
	// aligned extent (limit-base), not the original size.
	s.update(base, (limit-base)/pageSize, true, false)
}
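
// For example (hypothetical addresses, 4 MiB chunks, and assuming
// arenaBaseOffset = 0), a call like grow(0x7ff000, 0x2000) straddles the
// boundary between chunks 1 and 2:
//
//	limit = alignUp(0x7ff000+0x2000, 0x400000) = alignUp(0x801000, 0x400000) = 0xc00000
//	base  = alignDown(0x7ff000, 0x400000)      = 0x400000
//
// so the page allocator takes ownership of chunks 1 and 2 in their entirety,
// marks all of their pages free and scavenged, and then updates the summaries
// for that whole region.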

// update updates heap metadata. It must be called each time the bitmap
// is updated.
//
// If contig is true, update does some optimizations assuming that there was
// a contiguous allocation or free between addr and addr+npages. alloc indicates
// whether the operation performed was an allocation or a free.
//
// s.mheapLock must be held.
func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
	// base, limit, start, and end are inclusive.
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)

	// Handle updating the lowest level first.
	if sc == ec {
		// Fast path: the allocation doesn't span more than one chunk,
		// so update this one and if the summary didn't change, return.
		x := s.summary[len(s.summary)-1][sc]
		y := s.chunkOf(sc).summarize()
		if x == y {
			return
		}
		s.summary[len(s.summary)-1][sc] = y
	} else if contig {
		// Slow contiguous path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		summary := s.summary[len(s.summary)-1]

		// Update the summary for chunk sc.
		summary[sc] = s.chunkOf(sc).summarize()

		// Update the summaries for chunks in between, which are
		// either totally allocated or freed.
		whole := s.summary[len(s.summary)-1][sc+1 : ec]
		if alloc {
			// Should optimize into a memclr.
			for i := range whole {
				whole[i] = 0
			}
		} else {
			for i := range whole {
				whole[i] = freeChunkSum
			}
		}

		// Update the summary for chunk ec.
		summary[ec] = s.chunkOf(ec).summarize()
	} else {
		// Slow general path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		//
		// We can't assume a contiguous allocation happened, so walk over
		// every chunk in the range and manually recompute the summary.
		summary := s.summary[len(s.summary)-1]
		for c := sc; c <= ec; c++ {
			summary[c] = s.chunkOf(c).summarize()
		}
	}

	// Walk up the radix tree and update the summaries appropriately.
	changed := true
	for l := len(s.summary) - 2; l >= 0 && changed; l-- {
		// Update summaries at level l from summaries at level l+1.
		changed = false

		// "Constants" for level l+1, which we need in order to
		// compute the summaries at level l.
		logEntriesPerBlock := levelBits[l+1]
		logMaxPages := levelLogPages[l+1]

		// lo and hi describe all the parts of the level we need to look at.
		lo, hi := addrsToSummaryRange(l, base, limit+1)

		// Iterate over each block, updating the corresponding summary in the less-granular level.
		for i := lo; i < hi; i++ {
			children := s.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
			sum := mergeSummaries(children, logMaxPages)
			old := s.summary[l][i]
			if old != sum {
				changed = true
				s.summary[l][i] = sum
			}
		}
	}
}
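
// As a small worked example of the propagation loop above, suppose a free
// makes chunk c completely free, so its leaf summary becomes freeChunkSum =
// (512, 512, 512). The parent summary at level len(s.summary)-2 is then
// recomputed by merging the block of 8 child summaries containing c (see
// mergeSummaries below); if the merged value differs from the stored one,
// it is written back and the loop continues one level up. Otherwise the
// walk stops early, since no higher-level summary can have changed.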

// allocRange marks the range of memory [base, base+npages*pageSize) as
// allocated. It also updates the summaries to reflect the newly-updated
// bitmap.
//
// Returns the amount of scavenged memory in bytes present in the
// allocated range.
//
// s.mheapLock must be held.
func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)

	scav := uint(0)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		chunk := s.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, ei+1-si)
		chunk.allocRange(si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		chunk := s.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
		chunk.allocRange(si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			chunk := s.chunkOf(c)
			scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
			chunk.allocAll()
		}
		chunk = s.chunkOf(ec)
		scav += chunk.scavenged.popcntRange(0, ei+1)
		chunk.allocRange(0, ei+1)
	}
	s.update(base, npages, true, true)
	return uintptr(scav) * pageSize
}

// find searches for the first (address-ordered) contiguous free region of
// npages in size and returns a base address for that region.
//
// It uses s.searchAddr to prune its search and assumes that no palloc chunks
// below chunkIndex(s.searchAddr) contain any free memory at all.
//
// find also computes and returns a candidate s.searchAddr, which may or
// may not prune more of the address space than s.searchAddr already does.
//
// find represents the slow path and the full radix tree search.
//
// Returns a base address of 0 on failure, in which case the candidate
// searchAddr returned is invalid and must be ignored.
//
// s.mheapLock must be held.
func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
	// Search algorithm.
	//
	// This algorithm walks each level l of the radix tree from the root level
	// to the leaf level. It iterates over at most 1 << levelBits[l] entries
	// in a given level in the radix tree, and uses the summary information to
	// find either:
	//  1) That a given subtree contains a large enough contiguous region, at
	//     which point it continues iterating on the next level, or
	//  2) That there are enough contiguous boundary-crossing bits to satisfy
	//     the allocation, at which point it knows exactly where to start
	//     allocating from.
	//
	// i tracks the index into the current level l's structure for the
	// contiguous 1 << levelBits[l] entries we're actually interested in.
	//
	// NOTE: Technically this search could allocate a region which crosses
	// the arenaBaseOffset boundary, which when arenaBaseOffset != 0, is
	// a discontinuity. However, the only way this could happen is if the
	// page at the zero address is mapped, and this is impossible on
	// every system we support where arenaBaseOffset != 0. So, the
	// discontinuity is already encoded in the fact that the OS will never
	// map the zero page for us, and this function doesn't try to handle
	// this case in any way.

	// i is the beginning of the block of entries we're searching at the
	// current level.
	i := 0

	// firstFree is the region of address space within which we are certain
	// to find the first free page in the heap. base and bound are the inclusive
	// bounds of this window, and both are addresses in the linearized, contiguous
	// view of the address space (with arenaBaseOffset pre-added). At each level,
	// this window is narrowed as we find the memory region containing the
	// first free page of memory. To begin with, the range reflects the
	// full process address space.
	//
	// firstFree is updated by calling foundFree each time free space in the
	// heap is discovered.
	//
	// At the end of the search, base-arenaBaseOffset is the best new
	// searchAddr we could deduce in this search.
	firstFree := struct {
		base, bound uintptr
	}{
		base:  0,
		bound: (1<<heapAddrBits - 1),
	}
	// foundFree takes the given address range [addr, addr+size) and
	// updates firstFree if it is a narrower range. The input range must
	// either be fully contained within firstFree or not overlap with it
	// at all.
	//
	// This way, we'll record the first summary we find with any free
	// pages on the root level and narrow that down if we descend into
	// that summary. But as soon as we need to iterate beyond that summary
	// in a level to find a large enough range, we'll stop narrowing.
	foundFree := func(addr, size uintptr) {
		if firstFree.base <= addr && addr+size-1 <= firstFree.bound {
			// This range fits within the current firstFree window, so narrow
			// down the firstFree window to the base and bound of this range.
			firstFree.base = addr
			firstFree.bound = addr + size - 1
		} else if !(addr+size-1 < firstFree.base || addr > firstFree.bound) {
			// This range only partially overlaps with the firstFree range,
			// so throw.
			print("runtime: addr = ", hex(addr), ", size = ", size, "\n")
			print("runtime: base = ", hex(firstFree.base), ", bound = ", hex(firstFree.bound), "\n")
			throw("range partially overlaps")
		}
	}

	// lastSum is the summary which we saw on the previous level that made us
	// move on to the next level. Used to print additional information in the
	// case of a catastrophic failure.
	// lastSumIdx is that summary's index in the previous level.
	lastSum := packPallocSum(0, 0, 0)
	lastSumIdx := -1

nextLevel:
	for l := 0; l < len(s.summary); l++ {
		// For the root level, entriesPerBlock is the whole level.
		entriesPerBlock := 1 << levelBits[l]
		logMaxPages := levelLogPages[l]

		// We've moved into a new level, so let's update i to our new
		// starting index. This is a no-op for level 0.
		i <<= levelBits[l]

		// Slice out the block of entries we care about.
		entries := s.summary[l][i : i+entriesPerBlock]

		// Determine j0, the first index we should start iterating from.
		// The searchAddr may help us eliminate iterations if we followed the
		// searchAddr on the previous level or we're on the root level, in which
		// case the searchAddr should be the same as i after levelShift.
		j0 := 0
		if searchIdx := int((s.searchAddr + arenaBaseOffset) >> levelShift[l]); searchIdx&^(entriesPerBlock-1) == i {
			j0 = searchIdx & (entriesPerBlock - 1)
		}

		// Run over the level entries looking for
		// a contiguous run of at least npages either
		// within an entry or across entries.
		//
		// base contains the page index (relative to
		// the first entry's first page) of the currently
		// considered run of consecutive pages.
		//
		// size contains the size of the currently considered
		// run of consecutive pages.
		var base, size uint
		for j := j0; j < len(entries); j++ {
			sum := entries[j]
			if sum == 0 {
				// A full entry means we broke any streak and
				// that we should skip it altogether.
				size = 0
				continue
			}

			// We've encountered a non-zero summary which means
			// free memory, so update firstFree.
			foundFree(uintptr((i+j)<<levelShift[l]), (uintptr(1)<<logMaxPages)*pageSize)

			// st is the number of free pages at the start of this
			// entry. (Named st rather than s to avoid shadowing the
			// receiver.)
			st := sum.start()
			if size+st >= uint(npages) {
				// If size == 0 we don't have a run yet,
				// which means base isn't valid. So, set
				// base to the first page in this block.
				if size == 0 {
					base = uint(j) << logMaxPages
				}
				// We hit npages; we're done!
				size += st
				break
			}
			if sum.max() >= uint(npages) {
				// The entry itself contains npages contiguous
				// free pages, so continue on the next level
				// to find that run.
				i += j
				lastSumIdx = i
				lastSum = sum
				continue nextLevel
			}
			if size == 0 || st < 1<<logMaxPages {
				// We either don't have a current run started, or this entry
				// isn't totally free (meaning we can't continue the current
				// one), so try to begin a new run by setting size and base
				// based on sum.end.
				size = sum.end()
				base = uint(j+1)<<logMaxPages - size
				continue
			}
			// The entry is completely free, so continue the run.
			size += 1 << logMaxPages
		}
		if size >= uint(npages) {
			// We found a sufficiently large run of free pages straddling
			// some boundary, so compute the address and return it.
			addr := uintptr(i<<levelShift[l]) - arenaBaseOffset + uintptr(base)*pageSize
			return addr, firstFree.base - arenaBaseOffset
		}
		if l == 0 {
			// We're at level zero, so that means we've exhausted our search.
			return 0, maxSearchAddr
		}

		// We're not at level zero, and we exhausted the level we were looking in.
		// This means that either our calculations were wrong or the level above
		// lied to us. In either case, dump some useful state and throw.
		print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
		print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
		print("runtime: s.searchAddr = ", hex(s.searchAddr), ", i = ", i, "\n")
		print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
		for j := 0; j < len(entries); j++ {
			sum := entries[j]
			print("runtime: summary[", l, "][", i+j, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		}
		throw("bad summary data")
	}

	// Since we've gotten to this point, that means we haven't found a
	// sufficiently-sized free region straddling some boundary (chunk or larger).
	// This means the last summary we inspected must have had a large enough "max"
	// value, so look inside the chunk to find a suitable run.
	//
	// After iterating over all levels, i must contain a chunk index which
	// is what the final level represents.
	ci := chunkIdx(i)
	j, searchIdx := s.chunkOf(ci).find(npages, 0)
	if j < 0 {
		// We couldn't find any space in this chunk despite the summaries telling
		// us it should be there. There's likely a bug, so dump some state and throw.
		sum := s.summary[len(s.summary)-1][i]
		print("runtime: summary[", len(s.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		print("runtime: npages = ", npages, "\n")
		throw("bad summary data")
	}

	// Compute the address at which the free space starts.
	addr := chunkBase(ci) + uintptr(j)*pageSize

	// Since we actually searched the chunk, we may have
	// found an even narrower free window.
	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
	foundFree(searchAddr+arenaBaseOffset, chunkBase(ci+1)-searchAddr)
	return addr, firstFree.base - arenaBaseOffset
}
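
// To make the index arithmetic in find concrete: with 3 bits per level below
// the root, descending from entry j0 at the root into child j1, then j2, and
// so on, builds up i as ((j0*8+j1)*8+j2)..., so after the final level i is
// exactly the global index of a chunk, i.e. a chunkIdx. For instance,
// root entry 5, then child 3 within it, gives i = 5*8+3 = 43 at level 1,
// which indexes summary[1][43].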

// alloc allocates npages worth of memory from the page heap, returning the base
// address for the allocation and the amount of scavenged memory in bytes
// contained in the region [base address, base address + npages*pageSize).
//
// Returns a 0 base address on failure, in which case other returned values
// should be ignored.
//
// s.mheapLock must be held.
func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(s.searchAddr) >= s.end {
		return 0, 0
	}

	// If npages has a chance of fitting in the chunk where the searchAddr is,
	// search it directly.
	searchAddr := uintptr(0)
	if pallocChunkPages-chunkPageIndex(s.searchAddr) >= uint(npages) {
		// npages is guaranteed to be no greater than pallocChunkPages here.
		i := chunkIndex(s.searchAddr)
		if max := s.summary[len(s.summary)-1][i].max(); max >= uint(npages) {
			j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr))
			if j < 0 {
				print("runtime: max = ", max, ", npages = ", npages, "\n")
				print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr), ", s.searchAddr = ", hex(s.searchAddr), "\n")
				throw("bad summary data")
			}
			addr = chunkBase(i) + uintptr(j)*pageSize
			searchAddr = chunkBase(i) + uintptr(searchIdx)*pageSize
			goto Found
		}
	}
	// We failed to use a searchAddr for one reason or another, so try
	// the slow path.
	addr, searchAddr = s.find(npages)
	if addr == 0 {
		if npages == 1 {
			// We failed to find a single free page, the smallest unit
			// of allocation. This means we know the heap is completely
			// exhausted. Otherwise, the heap still might have free
			// space in it, just not enough contiguous space to
			// accommodate npages.
			s.searchAddr = maxSearchAddr
		}
		return 0, 0
	}
Found:
	// Go ahead and actually mark the bits now that we have an address.
	scav = s.allocRange(addr, npages)

	// If we found a higher (linearized) searchAddr, we know that all the
	// heap memory before that searchAddr in a linear address space is
	// allocated, so bump s.searchAddr up to the new one.
	if s.compareSearchAddrTo(searchAddr) > 0 {
		s.searchAddr = searchAddr
	}
	return addr, scav
}

// free returns npages worth of memory starting at base back to the page heap.
//
// s.mheapLock must be held.
func (s *pageAlloc) free(base, npages uintptr) {
	// If we're freeing pages below the (linearized) s.searchAddr, update searchAddr.
	if s.compareSearchAddrTo(base) < 0 {
		s.searchAddr = base
	}
	if npages == 1 {
		// Fast path: we're clearing a single bit, and we know exactly
		// where it is, so mark it directly.
		i := chunkIndex(base)
		s.chunkOf(i).free1(chunkPageIndex(base))
	} else {
		// Slow path: we're clearing more bits so we may need to iterate.
		limit := base + npages*pageSize - 1
		sc, ec := chunkIndex(base), chunkIndex(limit)
		si, ei := chunkPageIndex(base), chunkPageIndex(limit)

		if sc == ec {
			// The range doesn't cross any chunk boundaries.
			s.chunkOf(sc).free(si, ei+1-si)
		} else {
			// The range crosses at least one chunk boundary.
			s.chunkOf(sc).free(si, pallocChunkPages-si)
			for c := sc + 1; c < ec; c++ {
				s.chunkOf(c).freeAll()
			}
			s.chunkOf(ec).free(0, ei+1)
		}
	}
	s.update(base, npages, true, false)
}

const (
	pallocSumBytes = unsafe.Sizeof(pallocSum(0))

	// maxPackedValue is the maximum value that any of the three fields in
	// the pallocSum may take on.
	maxPackedValue    = 1 << logMaxPackedValue
	logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits

	freeChunkSum = pallocSum(uint64(pallocChunkPages) |
		uint64(pallocChunkPages<<logMaxPackedValue) |
		uint64(pallocChunkPages<<(2*logMaxPackedValue)))
)

// pallocSum is a packed summary type which packs three numbers: start, max,
// and end into a single 8-byte value. Each of these values is a count
// summarizing a bitmap, and each may take any value up to 2^21 - 1, or all
// three may be equal to 2^21. The latter case is represented by just setting
// the 64th bit.
type pallocSum uint64

// packPallocSum takes a start, max, and end value and produces a pallocSum.
func packPallocSum(start, max, end uint) pallocSum {
	if max == maxPackedValue {
		return pallocSum(uint64(1 << 63))
	}
	return pallocSum((uint64(start) & (maxPackedValue - 1)) |
		((uint64(max) & (maxPackedValue - 1)) << logMaxPackedValue) |
		((uint64(end) & (maxPackedValue - 1)) << (2 * logMaxPackedValue)))
}

// start extracts the start value from a packed sum.
func (p pallocSum) start() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1))
}

// max extracts the max value from a packed sum.
func (p pallocSum) max() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1))
}

// end extracts the end value from a packed sum.
func (p pallocSum) end() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}

// unpack unpacks all three values from the summary.
func (p pallocSum) unpack() (uint, uint, uint) {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue, maxPackedValue, maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1)),
		uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1)),
		uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}
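
// examplePallocSumRoundTrip is an illustrative sketch (hypothetical; nothing
// in the runtime calls it) showing that, for start, max, and end below
// maxPackedValue, packing and unpacking are inverses, and that a saturated
// max collapses the whole summary to the completely-free sentinel.
func examplePallocSumRoundTrip(start, max, end uint) bool {
	s, m, e := packPallocSum(start, max, end).unpack()
	if max == maxPackedValue {
		// The sentinel (bit 63) reads back as maxPackedValue in all
		// three fields, regardless of start and end.
		return s == maxPackedValue && m == maxPackedValue && e == maxPackedValue
	}
	// Otherwise each field must survive the pack/unpack round trip.
	return s == start && m == max && e == end
}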

// mergeSummaries merges consecutive summaries, each of which may represent
// at most 1 << logMaxPagesPerSum pages, into one.
func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
	// Merge the summaries in sums into one.
	//
	// We do this by keeping a running summary representing the merged
	// summaries of sums[:i] in start, max, and end.
	start, max, end := sums[0].unpack()
	for i := 1; i < len(sums); i++ {
		// Merge in sums[i].
		si, mi, ei := sums[i].unpack()

		// Merge in sums[i].start only if the running summary is
		// completely free, otherwise this summary's start
		// plays no role in the combined sum.
		if start == uint(i)<<logMaxPagesPerSum {
			start += si
		}

		// Recompute the max value of the running sum by looking
		// across the boundary between the running sum and sums[i]
		// and at the max of sums[i], taking the greatest of those two
		// and the max of the running sum.
		if end+si > max {
			max = end + si
		}
		if mi > max {
			max = mi
		}

		// Merge in end by checking if this new summary is totally
		// free. If it is, then we want to extend the running sum's
		// end by the new summary. If not, then we have some alloc'd
		// pages in there and we just want to take the end value in
		// sums[i].
		if ei == 1<<logMaxPagesPerSum {
			end += 1 << logMaxPagesPerSum
		} else {
			end = ei
		}
	}
	return packPallocSum(start, max, end)
}
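
// For example, merging two chunk summaries (logMaxPagesPerSum = 9, so each
// covers 512 pages) where sums[0] = (512, 512, 512) is completely free and
// sums[1] = (100, 300, 0):
//
//	start: the running sum is fully free through sums[0] (512 == 1<<9),
//	       so start extends across the boundary: 512 + 100 = 612.
//	max:   end + si = 512 + 100 = 612 beats both the old max (512) and
//	       mi (300), so max = 612.
//	end:   sums[1] is not fully free (0 != 512), so end = 0.
//
// The merged summary is therefore (612, 612, 0). This is exactly how update
// computes each parent summary from its 1<<summaryLevelBits children.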