// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"internal/race"
	"runtime"
	"sync/atomic"
	"unsafe"
)

// A Pool is a set of temporary objects that may be individually saved and
// retrieved.
//
// Any item stored in the Pool may be removed automatically at any time without
// notification. If the Pool holds the only reference when this happens, the
// item might be deallocated.
//
// A Pool is safe for use by multiple goroutines simultaneously.
//
// Pool's purpose is to cache allocated but unused items for later reuse,
// relieving pressure on the garbage collector. That is, it makes it easy to
// build efficient, thread-safe free lists. However, it is not suitable for all
// free lists.
//
// An appropriate use of a Pool is to manage a group of temporary items
// silently shared among and potentially reused by concurrent independent
// clients of a package. Pool provides a way to amortize allocation overhead
// across many clients.
//
// An example of good use of a Pool is in the fmt package, which maintains a
// dynamically-sized store of temporary output buffers. The store scales under
// load (when many goroutines are actively printing) and shrinks when
// quiescent.
//
// On the other hand, a free list maintained as part of a short-lived object is
// not a suitable use for a Pool, since the overhead does not amortize well in
// that scenario. It is more efficient to have such objects implement their own
// free list.
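//
// A minimal client-side usage sketch (bytes.Buffer here is only illustrative;
// any allocation-heavy type works the same way):
//
//	var bufPool = Pool{
//		New: func() interface{} { return new(bytes.Buffer) },
//	}
//
//	func process() {
//		buf := bufPool.Get().(*bytes.Buffer)
//		buf.Reset()
//		// ... use buf ...
//		bufPool.Put(buf)
//	}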
//
type Pool struct {
	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
}

// poolLocal is the per-P appendix of a Pool; each P works against its own
// slot to reduce contention.
type poolLocal struct {
	private interface{}   // Can be used only by the respective P.
	shared  []interface{} // Can be used by any P.
	Mutex                 // Protects shared.
	pad     [128]byte     // Pads poolLocal out to a cache-line multiple to prevent false sharing.
}

// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
	if race.Enabled {
		// Under the race detector the Pool degenerates into a no-op.
		// It's conforming, simple, and does not introduce excessive
		// happens-before edges between unrelated goroutines.
		return
	}
	if x == nil {
		return
	}
	l := p.pin()
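	// Fast path: stash x in the per-P private slot; no lock is needed because
	// only the current P (we are pinned) accesses private.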
	if l.private == nil {
		l.private = x
		x = nil
	}
	runtime_procUnpin()
	if x == nil {
		return
	}
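	// The private slot was occupied, so fall back to the shared list, which
	// other Ps may steal from and which is therefore protected by the mutex.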
	l.Lock()
	l.shared = append(l.shared, x)
	l.Unlock()
}

// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() interface{} {
	if race.Enabled {
		if p.New != nil {
			return p.New()
		}
		return nil
	}
	l := p.pin()
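	// Fast path: take from the per-P private slot without locking.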
	x := l.private
	l.private = nil
	runtime_procUnpin()
	if x != nil {
		return x
	}
	l.Lock()
	last := len(l.shared) - 1
	if last >= 0 {
		x = l.shared[last]
		l.shared = l.shared[:last]
	}
	l.Unlock()
	if x != nil {
		return x
	}
	return p.getSlow()
}

func (p *Pool) getSlow() (x interface{}) {
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	local := p.local                         // load-consume
	// Try to steal one element from other procs.
	pid := runtime_procPin()
	runtime_procUnpin()
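	// pid is only a hint for where to start the scan below; we can unpin
	// immediately because the shared lists are protected by their mutexes.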
	for i := 0; i < int(size); i++ {
		l := indexLocal(local, (pid+i+1)%int(size))
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
			l.Unlock()
			break
		}
		l.Unlock()
	}

	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}

// pin pins the current goroutine to P, disables preemption and returns the
// poolLocal for the P.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() *poolLocal {
	pid := runtime_procPin()
	// In pinSlow we store to localSize and then to local; here we load in
	// the opposite order. Since we've disabled preemption, GC cannot happen
	// in between. Thus we must observe local at least as large as localSize.
	// Observing a newer/larger local is fine (we will observe its
	// zero-initialized contents).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	return p.pinSlow()
}

func (p *Pool) pinSlow() *poolLocal {
	// Retry under the mutex.
	// We cannot lock the mutex while pinned.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	if p.local == nil {
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer((*unsafe.Pointer)(&p.local), unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))                            // store-release
	return &local[pid]
}

func poolCleanup() {
	// This function is called with the world stopped, at the beginning of a garbage collection.
	// It must not allocate and probably should not call any runtime functions.
	// Defensively zero out everything, for two reasons:
	// 1. To prevent false retention of whole Pools.
	// 2. If a GC happens while a goroutine is working with l.shared in Put/Get,
	//    it will retain the whole Pool, so the next cycle's memory consumption
	//    would be doubled.
	for i, p := range allPools {
		allPools[i] = nil
		for i := 0; i < int(p.localSize); i++ {
			l := indexLocal(p.local, i)
			l.private = nil
			for j := range l.shared {
				l.shared[j] = nil
			}
			l.shared = nil
		}
		p.local = nil
		p.localSize = 0
	}
	allPools = []*Pool{}
}

var (
	allPoolsMu Mutex
	allPools   []*Pool
)

func init() {
	runtime_registerPoolCleanup(poolCleanup)
}

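// indexLocal returns a pointer to the i-th poolLocal in the array at l.
// The oversized array type is never allocated; it only gives the compiler
// a type to index through, and l in fact points at a slice of localSize
// elements.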
func indexLocal(l unsafe.Pointer, i int) *poolLocal {
	return &(*[1000000]poolLocal)(l)[i]
}

// Implemented in runtime.
func runtime_registerPoolCleanup(cleanup func())
func runtime_procPin() int
func runtime_procUnpin()