// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
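//
// For example, a minimal sketch of the first (grow-only cache) use case as it
// might look from a client package; the cache and compiled names are
// illustrative:
//
//	var cache sync.Map // effectively a map[string]*regexp.Regexp
//
//	func compiled(pattern string) (*regexp.Regexp, error) {
//		if v, ok := cache.Load(pattern); ok {
//			return v.(*regexp.Regexp), nil
//		}
//		re, err := regexp.Compile(pattern)
//		if err != nil {
//			return nil, err
//		}
//		// LoadOrStore keeps the winner if another goroutine raced us here.
//		v, _ := cache.LoadOrStore(pattern, re)
//		return v.(*regexp.Regexp), nil
//	}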
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted, and either m.dirty == nil or
	// m.dirty[key] is e.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}
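
// Summarizing the comment on p above, the legal state transitions for an
// entry are:
//
//	valid    -> valid     tryStore / storeLocked
//	valid    -> nil       delete
//	nil      -> valid     tryStore / tryLoadOrStore
//	nil      -> expunged  tryExpungeLocked, while m.dirty is being built
//	expunged -> nil       unexpungeLocked, with mu held, just before m.dirty[key] = e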

func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
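//
// Because values are stored as interface{}, callers assert the concrete type
// on the way out, for example (a sketch; the key and type are illustrative):
//
//	if v, ok := m.Load("greeting"); ok {
//		s := v.(string) // panics if a different type was stored
//		_ = s
//	}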
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
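//
// For example (a sketch; newValue is an illustrative helper), LoadOrStore can
// deduplicate concurrent initialization, with the caveat that a losing
// goroutine still constructs a value that is then discarded:
//
//	v, loaded := m.LoadOrStore(key, newValue())
//	if loaded {
//		// Another goroutine stored first; this call's newValue() result
//		// was discarded in favor of v.
//	}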
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
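//
// At most one concurrent LoadAndDelete call receives a given stored value, so
// it can hand values off to exactly one goroutine, for example (a sketch;
// consume is an illustrative helper):
//
//	if v, loaded := m.LoadAndDelete(key); loaded {
//		consume(v) // no other LoadAndDelete call returned this value
//	}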
func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return nil, false
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	m.LoadAndDelete(key)
}

func (e *entry) delete() (value interface{}, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return nil, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return *(*interface{})(p), true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
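//
// A typical call (a sketch; the concrete types are illustrative) asserts the
// key and value types inside f:
//
//	m.Range(func(k, v interface{}) bool {
//		fmt.Printf("%s = %d\n", k.(string), v.(int))
//		return true // continue iterating
//	})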
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

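// missLocked records a miss against the read map. Once the misses accumulated
// since the last promotion reach len(m.dirty), the read map has cost at least
// as many locked lookups as copying the dirty map would, so the dirty map is
// promoted wholesale (in the unamended state).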
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

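// dirtyLocked ensures that m.dirty is non-nil, initializing it if necessary as
// a copy of the non-expunged entries in the read map. Deleted (nil) entries
// are expunged instead of copied, which is how stale entries are dropped.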
func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

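// tryExpungeLocked attempts to mark a deleted (nil) entry as expunged while
// m.dirty is being built, retrying if a concurrent store revives the entry.
// It reports whether the entry is expunged when it returns.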
func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}