// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// A gcSweepBuf is a set of *mspans.
//
// gcSweepBuf is safe for concurrent push operations *or* concurrent
// pop operations, but not both simultaneously.
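//
// A minimal usage sketch (illustrative only; s1, s2, and the
// surrounding phase discipline are hypothetical):
//
//	var buf gcSweepBuf
//	buf.push(s1) // pushes may run concurrently with each other
//	buf.push(s2)
//	// ... later, in a phase with no concurrent pushes ...
//	for s := buf.pop(); s != nil; s = buf.pop() {
//		// process s
//	}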
type gcSweepBuf struct {
	// A gcSweepBuf is a two-level data structure consisting of a
	// growable spine that points to fixed-sized blocks. The spine
	// can be accessed without locks, but adding a block or
	// growing the spine requires taking the spine lock.
	//
	// Because each mspan covers at least 8K of heap and takes at
	// most 8 bytes in the gcSweepBuf, the growth of the spine is
	// quite limited.
	//
	// The spine and all blocks are allocated off-heap, which
	// allows this to be used in the memory manager and avoids the
	// need for write barriers on all of these. We never release
	// this memory because there could be concurrent lock-free
	// access and we're likely to reuse it anyway. (In principle,
	// we could do this during STW.)

	spineLock mutex
	spine     unsafe.Pointer // *[N]*gcSweepBlock, accessed atomically
	spineLen  uintptr        // Spine array length, accessed atomically
	spineCap  uintptr        // Spine array cap, accessed under lock

	// index is the first unused slot in the logical concatenation
	// of all blocks. It is accessed atomically.
	index uint32
}

const (
	gcSweepBlockEntries    = 512 // 4KB on 64-bit
	gcSweepBufInitSpineCap = 256 // Enough for 1GB heap on 64-bit
)
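
// The inline sizes above follow from simple arithmetic: each block
// holds 512 8-byte pointers, i.e. 4KB, and a 256-entry spine addresses
// 256 * 512 spans, which at the minimum 8K span size covers
// 256 * 512 * 8KB = 1GB of heap.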

type gcSweepBlock struct {
	spans [gcSweepBlockEntries]*mspan
}

// push adds span s to buffer b. push is safe to call concurrently
// with other push operations, but NOT to call concurrently with pop.
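//
// For example (illustrative numbers only): with gcSweepBlockEntries =
// 512, the push that reserves cursor 999 stores into slot 999%512 = 487
// of block 999/512 = 1.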
func (b *gcSweepBuf) push(s *mspan) {
	// Obtain our slot.
	cursor := uintptr(atomic.Xadd(&b.index, +1) - 1)
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries

	// Do we need to add a block?
	spineLen := atomic.Loaduintptr(&b.spineLen)
	var block *gcSweepBlock
retry:
	if top < spineLen {
		spine := atomic.Loadp(unsafe.Pointer(&b.spine))
		blockp := add(spine, sys.PtrSize*top)
		block = (*gcSweepBlock)(atomic.Loadp(blockp))
	} else {
		// Add a new block to the spine, potentially growing
		// the spine.
		lock(&b.spineLock)
		// spineLen cannot change until we release the lock,
		// but may have changed while we were waiting.
		spineLen = atomic.Loaduintptr(&b.spineLen)
		if top < spineLen {
			unlock(&b.spineLock)
			goto retry
		}

		if spineLen == b.spineCap {
			// Grow the spine.
			newCap := b.spineCap * 2
			if newCap == 0 {
				newCap = gcSweepBufInitSpineCap
			}
			newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gc_sys)
			if b.spineCap != 0 {
				// Blocks are allocated off-heap, so
				// no write barriers.
				memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
			}
			// Spine is allocated off-heap, so no write barrier.
			atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
			b.spineCap = newCap
			// We can't immediately free the old spine
			// since a concurrent push with a lower index
			// could still be reading from it. We let it
			// leak because even a 1TB heap would waste
			// less than 2MB of memory on old spines. If
			// this is a problem, we could free old spines
			// during STW.
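			// (To check that claim: a 1TB heap has at
			// most 2^40/2^13 = 2^27 spans, which need
			// 2^27/512 = 2^18 spine slots, i.e. a 2MB
			// final spine; the discarded spines, each
			// half the size of the next, sum to less
			// than that.)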
		}

		// Allocate a new block and add it to the spine.
		block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), cpu.CacheLineSize, &memstats.gc_sys))
		blockp := add(b.spine, sys.PtrSize*top)
		// Blocks are allocated off-heap, so no write barrier.
		atomic.StorepNoWB(blockp, unsafe.Pointer(block))
		atomic.Storeuintptr(&b.spineLen, spineLen+1)
		unlock(&b.spineLock)
	}

	// We have a block. Insert the span atomically, since there may be
	// concurrent readers via the block API.
	atomic.StorepNoWB(unsafe.Pointer(&block.spans[bottom]), unsafe.Pointer(s))
}

// pop removes and returns a span from buffer b, or nil if b is empty.
// pop is safe to call concurrently with other pop operations, but NOT
// to call concurrently with push.
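//
// For example (illustrative): when the buffer is empty, index is 0 and
// the decrement below wraps to ^uint32(0); int32(cursor) is then -1,
// so pop restores the index and reports nil.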
func (b *gcSweepBuf) pop() *mspan {
	cursor := atomic.Xadd(&b.index, -1)
	if int32(cursor) < 0 {
		atomic.Xadd(&b.index, +1)
		return nil
	}

	// There are no concurrent spine or block modifications during
	// pop, so we can omit the atomics.
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	blockp := (**gcSweepBlock)(add(b.spine, sys.PtrSize*uintptr(top)))
	block := *blockp
	s := block.spans[bottom]
	// Clear the pointer for block(i).
	block.spans[bottom] = nil
	return s
}

// numBlocks returns the number of blocks in buffer b. numBlocks is
// safe to call concurrently with any other operation. Spans that have
// been pushed prior to the call to numBlocks are guaranteed to appear
// in some block in the range [0, numBlocks()), assuming there are no
// intervening pops. Spans that are pushed after the call may also
// appear in these blocks.
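//
// For example (illustrative): with gcSweepBlockEntries = 512, an index
// of 513 occupies two blocks, and the rounded-up division below gives
// (513 + 511) / 512 = 2.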
func (b *gcSweepBuf) numBlocks() int {
	return int((atomic.Load(&b.index) + gcSweepBlockEntries - 1) / gcSweepBlockEntries)
}

// block returns the spans in the i'th block of buffer b. block is
// safe to call concurrently with push. The block may contain nil
// pointers that must be ignored, and each entry in the block must be
// loaded atomically.
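//
// A sketch of a consumer loop satisfying those requirements
// (illustrative only; buf and the processing step are hypothetical):
//
//	for i := 0; i < buf.numBlocks(); i++ {
//		spans := buf.block(i)
//		for j := range spans {
//			s := (*mspan)(atomic.Loadp(unsafe.Pointer(&spans[j])))
//			if s == nil {
//				continue // slot not yet populated by push
//			}
//			// process s
//		}
//	}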
func (b *gcSweepBuf) block(i int) []*mspan {
	// Perform bounds check before loading spine address since
	// push ensures the allocated length is at least spineLen.
	if i < 0 || uintptr(i) >= atomic.Loaduintptr(&b.spineLen) {
		throw("block index out of range")
	}

	// Get block i.
	spine := atomic.Loadp(unsafe.Pointer(&b.spine))
	blockp := add(spine, sys.PtrSize*uintptr(i))
	block := (*gcSweepBlock)(atomic.Loadp(blockp))

	// Slice the block if necessary.
	cursor := uintptr(atomic.Load(&b.index))
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	var spans []*mspan
	if uintptr(i) < top {
		spans = block.spans[:]
	} else {
		spans = block.spans[:bottom]
	}
	return spans
}