// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)

// pageCache represents a per-p cache of pages the allocator can
// allocate from without a lock. More specifically, it represents
// a pageCachePages*pageSize chunk of memory with 0 or more free
// pages in it.
type pageCache struct {
	base  uintptr // base address of the chunk
	cache uint64  // 64-bit bitmap representing free pages (1 means free)
	scav  uint64  // 64-bit bitmap representing scavenged pages (1 means scavenged)
}
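
// To make the layout concrete (illustrative values): cache is a uint64, so
// pageCachePages is 8 bytes * 8 bits = 64 pages. With cache == 0b0110, only
// the pages at base+1*pageSize and base+2*pageSize are free; bit i always
// corresponds to the page at base + i*pageSize.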

// empty returns true if the pageCache has no free pages, and false
// otherwise.
func (c *pageCache) empty() bool {
	return c.cache == 0
}

// alloc allocates npages from the page cache and is the main entry
// point for allocation.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
//
// Returns a base address of zero on failure, in which case the
// amount of scavenged memory should be ignored.
func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
	if c.cache == 0 {
		return 0, 0
	}
	if npages == 1 {
		i := uintptr(sys.TrailingZeros64(c.cache))
		scav := (c.scav >> i) & 1
		c.cache &^= 1 << i // clear free bit to mark the page in-use
		c.scav &^= 1 << i  // clear scav bit to mark the page unscavenged
		return c.base + i*pageSize, uintptr(scav) * pageSize
	}
	return c.allocN(npages)
}
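
// A small worked example (illustrative values): with c.cache == 0b1010,
// sys.TrailingZeros64 yields i == 1, so alloc(1) returns c.base + pageSize
// and clears bit 1, leaving c.cache == 0b1000.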
51
52// allocN is a helper which attempts to allocate npages worth of pages
53// from the cache. It represents the general case for allocating from
54// the page cache.
55//
56// Returns a base address and the amount of scavenged memory in the
57// allocated region in bytes.
58func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
59	i := findBitRange64(c.cache, uint(npages))
60	if i >= 64 {
61		return 0, 0
62	}
63	mask := ((uint64(1) << npages) - 1) << i
64	scav := sys.OnesCount64(c.scav & mask)
65	c.cache &^= mask // mark in-use bits
66	c.scav &^= mask  // clear scavenged bits
67	return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
68}
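
// The mask arithmetic, concretely (illustrative values): for npages == 3
// and i == 5, (1<<3)-1 == 0b111, shifted left by 5 gives
// mask == 0b1110_0000, i.e. pages 5, 6, and 7 of the chunk.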

// flush empties out unallocated free pages in the given cache
// into s. Then, it clears the cache, such that empty returns
// true.
//
// s.mheapLock must be held or the world must be stopped.
func (c *pageCache) flush(s *pageAlloc) {
	if c.empty() {
		return
	}
	ci := chunkIndex(c.base)
	pi := chunkPageIndex(c.base)

	// This method is called very infrequently, so just do the
	// slower, safer thing by iterating over each bit individually.
	for i := uint(0); i < 64; i++ {
		if c.cache&(1<<i) != 0 {
			s.chunkOf(ci).free1(pi + i)
		}
		if c.scav&(1<<i) != 0 {
			s.chunkOf(ci).scavenged.setRange(pi+i, 1)
		}
	}
	// Since this is a lot like a free, we need to make sure
	// we update the searchAddr just like free does.
	if s.compareSearchAddrTo(c.base) < 0 {
		s.searchAddr = c.base
	}
	s.update(c.base, pageCachePages, false, false)
	*c = pageCache{}
}
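
// As a concrete illustration: a cache with bit 5 set in c.cache and in
// c.scav hands page pi+5 back to chunk ci via free1 and re-marks it as
// scavenged via setRange before the cache is zeroed.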

// allocToCache acquires a pageCachePages-aligned chunk of free pages which
// may not be contiguous, and returns a pageCache structure which owns the
// chunk.
//
// s.mheapLock must be held.
func (s *pageAlloc) allocToCache() pageCache {
	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(s.searchAddr) >= s.end {
		return pageCache{}
	}
	c := pageCache{}
	ci := chunkIndex(s.searchAddr) // chunk index
	if s.summary[len(s.summary)-1][ci] != 0 {
		// Fast path: there are free pages at or near the searchAddr address.
		chunk := s.chunkOf(ci)
		j, _ := chunk.find(1, chunkPageIndex(s.searchAddr))
		if j == ^uint(0) {
			throw("bad summary data")
		}
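		// pages64 returns the 64-bit block of the allocation bitmap that
		// contains page j, aligned to 64 pages, with 1 meaning in-use, so
		// its complement below is exactly the free-page bitmap for the
		// block; scavenged.block64 is the matching scavenged bitmap block.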
		c = pageCache{
			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
			cache: ^chunk.pages64(j),
			scav:  chunk.scavenged.block64(j),
		}
	} else {
		// Slow path: the searchAddr address had nothing there, so go find
		// the first free page the slow way.
		addr, _ := s.find(1)
		if addr == 0 {
			// We failed to find adequate free space, so mark the searchAddr as OoM
			// and return an empty pageCache.
			s.searchAddr = maxSearchAddr
			return pageCache{}
		}
		ci := chunkIndex(addr)
		chunk := s.chunkOf(ci)
		c = pageCache{
			base:  alignDown(addr, 64*pageSize),
			cache: ^chunk.pages64(chunkPageIndex(addr)),
			scav:  chunk.scavenged.block64(chunkPageIndex(addr)),
		}
	}

	// Set the bits as allocated and clear the scavenged bits.
	s.allocRange(c.base, pageCachePages)

	// Update as an allocation, but note that it's not contiguous.
	s.update(c.base, pageCachePages, false, true)

	// Set the search address to the last page represented by the cache.
	// Since all of the pages in this block are going to the cache, and we
	// searched for the first free page, we can confidently start at the
	// next page.
	//
	// However, s.searchAddr is not allowed to point into unmapped heap memory
	// unless it is maxSearchAddr, so make it the last page as opposed to
	// the page after.
	s.searchAddr = c.base + pageSize*(pageCachePages-1)
	return c
}
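
// Concretely, with pageCachePages == 64 the search address above lands on
// c.base + 63*pageSize: the last page of the cached block, which is known
// to be mapped, unlike the page immediately after it.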