// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	next_sample uintptr // trigger heap sample after allocating this many bytes
	local_scan  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.
	// (An illustrative sketch of the bump allocation appears after
	// this struct definition.)

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	tiny             uintptr
	tinyoffset       uintptr
	local_tinyallocs uintptr // number of tiny allocs not counted in other stats

	// The rest is not accessed on every malloc.

	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	// Local allocator stats, flushed during GC.
	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen uint32
}
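
// The tiny allocator packs several tiny, pointer-free allocations into one
// maxTinySize block by bumping tinyoffset. The following is a simplified,
// paraphrased sketch of that bump-and-align step (the real logic lives in
// mallocgc in malloc.go; size, align, and the rounding helper are stand-ins
// here, not exact runtime code):
//
//	off := round(c.tinyoffset, align) // align the object within the block
//	if off+size <= maxTinySize && c.tiny != 0 {
//		x := unsafe.Pointer(c.tiny + off) // carve the object out of c.tiny
//		c.tinyoffset = off + size         // bump for the next tiny object
//		c.local_tinyallocs++
//		return x
//	}
//	// Otherwise mallocgc starts a fresh maxTinySize block and resets
//	// c.tiny and c.tinyoffset.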

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}
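
// For example, pushing a block onto an intrusive free list while keeping the
// link invisible to the GC (illustrative only; obj and head are hypothetical
// gclinkptr-typed variables, not runtime globals):
//
//	obj.ptr().next = head // use ptr() only for the field access
//	head = obj            // store the opaque gclinkptr, not a *gclink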

// emptymspan is a dummy mspan that contains no free objects.
var emptymspan mspan

func allocmcache() *mcache {
	var c *mcache
	systemstack(func() {
		lock(&mheap_.lock)
		c = (*mcache)(mheap_.cachealloc.alloc())
		c.flushGen = mheap_.sweepgen
		unlock(&mheap_.lock)
	})
	for i := range c.alloc {
		c.alloc[i] = &emptymspan
	}
	c.next_sample = nextSample()
	return c
}
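
// allocmcache is typically called while a new P is being set up (for example
// from procresize); an illustrative, not exact, call site:
//
//	pp.mcache = allocmcache()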

func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		purgecachedstats(c)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
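//
// The sweepgen manipulation below relies on the encoding documented on
// mspan.sweepgen in mheap.go: sweepgen == mheap_.sweepgen means the span is
// swept and ready to use, while sweepgen == mheap_.sweepgen+3 means the span
// was swept and then cached and is still cached.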
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]

	if uintptr(s.allocCount) != s.nelems {
		throw("refill of span with free space remaining")
	}
	if s != &emptymspan {
		// Mark this span as no longer cached.
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}

	if uintptr(s.allocCount) == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	c.alloc[spc] = s
}

func (c *mcache) releaseAll() {
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0
}

// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
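//
// mheap_.sweepgen advances by two on each GC cycle, so flushGen == sg below
// means c was already flushed during this sweep phase, and flushGen == sg-2
// means it was last flushed during the previous one; any other value
// indicates a bug.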
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
	sg := mheap_.sweepgen
	if c.flushGen == sg {
		return
	} else if c.flushGen != sg-2 {
		println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart
}