// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package sync provides basic synchronization primitives such as mutual
// exclusion locks. Other than the Once and WaitGroup types, most are intended
// for use by low-level library routines. Higher-level synchronization is
// better done via channels and communication.
//
// Values containing the types defined in this package should not be copied.
package sync

import (
	"internal/race"
	"sync/atomic"
	"unsafe"
)

func throw(string) // provided by runtime

// A Mutex is a mutual exclusion lock.
// The zero value for a Mutex is an unlocked mutex.
//
// A Mutex must not be copied after first use.
type Mutex struct {
	state int32
	sema  uint32
}
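// Example usage (an illustrative sketch, not part of the original file): a
// Mutex is typically embedded by value and the enclosing struct is shared by
// pointer, since a Mutex must not be copied after first use. The counter type
// below is hypothetical.
//
//	type counter struct {
//		mu Mutex
//		n  int64
//	}
//
//	func (c *counter) Inc() {
//		c.mu.Lock()
//		c.n++
//		c.mu.Unlock()
//	}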

// A Locker represents an object that can be locked and unlocked.
type Locker interface {
	Lock()
	Unlock()
}
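// A *Mutex satisfies Locker, so callers can be written against the interface
// rather than a concrete lock type. A minimal sketch (the withLock helper is
// hypothetical, not part of this package):
//
//	func withLock(l Locker, f func()) {
//		l.Lock()
//		defer l.Unlock()
//		f()
//	}
//
//	var mu Mutex
//	withLock(&mu, func() { /* critical section */ })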

const (
	mutexLocked = 1 << iota // mutex is locked
	mutexWoken
	mutexStarving
	mutexWaiterShift = iota

	// Mutex fairness.
	//
	// A Mutex can be in one of two modes of operation: normal and starvation.
	// In normal mode waiters are queued in FIFO order, but a woken-up waiter
	// does not own the mutex and competes with newly arriving goroutines for
	// ownership. Newly arriving goroutines have an advantage -- they are
	// already running on a CPU and there can be many of them -- so a woken-up
	// waiter has a good chance of losing. In that case it is queued at the
	// front of the wait queue. If a waiter fails to acquire the mutex for more
	// than 1 ms, it switches the mutex to starvation mode.
	//
	// In starvation mode ownership of the mutex is directly handed off from
	// the unlocking goroutine to the waiter at the front of the queue.
	// Newly arriving goroutines don't try to acquire the mutex even if it
	// appears to be unlocked, and don't try to spin. Instead they queue
	// themselves at the tail of the wait queue.
	//
	// If a waiter receives ownership of the mutex and sees that either
	// (1) it is the last waiter in the queue, or (2) it waited for less than
	// 1 ms, it switches the mutex back to normal mode.
	//
	// Normal mode has considerably better performance, as a goroutine can
	// acquire a mutex several times in a row even if there are blocked waiters.
	// Starvation mode is important to prevent pathological cases of tail latency.
	//
	// The layout of the state word built from the flags above is sketched in
	// the illustration after this const block.
	starvationThresholdNs = 1e6
)
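// As an illustration of the state word (a sketch, not used by the
// implementation): the low three bits are the flags defined above and the
// remaining bits count blocked waiters. A hypothetical decoder would be:
//
//	func describeState(state int32) (locked, woken, starving bool, waiters int32) {
//		locked = state&mutexLocked != 0
//		woken = state&mutexWoken != 0
//		starving = state&mutexStarving != 0
//		waiters = state >> mutexWaiterShift
//		return
//	}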

// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
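//
// A typical pattern (an illustrative sketch) pairs Lock with a deferred
// Unlock so the mutex is released on every return path:
//
//	m.Lock()
//	defer m.Unlock()
//	// ... critical section ...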
func (m *Mutex) Lock() {
	// Fast path: grab unlocked mutex.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
		if race.Enabled {
			race.Acquire(unsafe.Pointer(m))
		}
		return
	}

	var waitStartTime int64
	starving := false
	awoke := false
	iter := 0
	old := m.state
	for {
		// Don't spin in starvation mode; ownership is handed off to waiters,
		// so we won't be able to acquire the mutex anyway.
		if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
			// Active spinning makes sense.
			// Try to set the mutexWoken flag to inform Unlock
			// not to wake other blocked goroutines.
			if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
				atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
				awoke = true
			}
			runtime_doSpin()
			iter++
			old = m.state
			continue
		}
		new := old
		// Don't try to acquire a starving mutex; newly arriving goroutines must queue.
		if old&mutexStarving == 0 {
			new |= mutexLocked
		}
		if old&(mutexLocked|mutexStarving) != 0 {
			new += 1 << mutexWaiterShift
		}
		// The current goroutine switches the mutex to starvation mode.
		// But if the mutex is currently unlocked, don't do the switch:
		// Unlock expects that a starving mutex has waiters, which will not
		// be true in this case.
		if starving && old&mutexLocked != 0 {
			new |= mutexStarving
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				throw("sync: inconsistent mutex state")
			}
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&(mutexLocked|mutexStarving) == 0 {
				break // locked the mutex with CAS
			}
			// If we were already waiting before, queue at the front of the queue.
			queueLifo := waitStartTime != 0
			if waitStartTime == 0 {
				waitStartTime = runtime_nanotime()
			}
			runtime_SemacquireMutex(&m.sema, queueLifo)
			starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
			old = m.state
			if old&mutexStarving != 0 {
				// If this goroutine was woken and the mutex is in starvation mode,
				// ownership was handed off to us, but the mutex is in a somewhat
				// inconsistent state: mutexLocked is not set and we are still
				// counted as a waiter. Fix that.
				if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
					throw("sync: inconsistent mutex state")
				}
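				// Worked example (illustration only, using the constant values
				// defined above): suppose state = mutexStarving|1<<mutexWaiterShift
				// = 0b1100, i.e. starving, one waiter, and unlocked because the
				// unlocker already cleared mutexLocked. The delta below,
				// mutexLocked - 1<<mutexWaiterShift = -7, sets the locked bit and
				// removes us from the waiter count in one atomic add; being the
				// last waiter, we also subtract mutexStarving, leaving state =
				// 0b1100 - 11 = 0b0001: locked, no waiters, normal mode.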
				delta := int32(mutexLocked - 1<<mutexWaiterShift)
				if !starving || old>>mutexWaiterShift == 1 {
					// Exit starvation mode.
					// It is critical to do it here and to consider the wait time.
					// Starvation mode is so inefficient that two goroutines
					// can go in lock-step indefinitely once they switch the
					// mutex to starvation mode.
					delta -= mutexStarving
				}
				atomic.AddInt32(&m.state, delta)
				break
			}
			awoke = true
			iter = 0
		} else {
			old = m.state
		}
	}

	if race.Enabled {
		race.Acquire(unsafe.Pointer(m))
	}
}

// Unlock unlocks m.
// It is a run-time error if m is not locked on entry to Unlock.
//
// A locked Mutex is not associated with a particular goroutine.
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
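//
// For example (an illustrative sketch), one goroutine may acquire the mutex
// and hand responsibility for releasing it to another:
//
//	m.Lock()
//	go func() {
//		// ... finish the work guarded by m ...
//		m.Unlock()
//	}()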
func (m *Mutex) Unlock() {
	if race.Enabled {
		_ = m.state
		race.Release(unsafe.Pointer(m))
	}

	// Fast path: drop lock bit.
	new := atomic.AddInt32(&m.state, -mutexLocked)
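	// Sanity check, worked through for illustration: if the mutex was not
	// locked (the mutexLocked bit was clear), adding the bit back still leaves
	// it clear. E.g. state 0 -> new = -1, and (-1+mutexLocked)&mutexLocked == 0,
	// which is reported as a fatal error below.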
	if (new+mutexLocked)&mutexLocked == 0 {
		throw("sync: unlock of unlocked mutex")
	}
	if new&mutexStarving == 0 {
		old := new
		for {
			// If there are no waiters, or a goroutine has already
			// been woken or has grabbed the lock, there is no need to wake anyone.
			// In starvation mode ownership is directly handed off from the unlocking
			// goroutine to the next waiter. We are not part of this chain,
			// since we did not observe mutexStarving when we unlocked the mutex above.
			// So get out of the way.
			if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
				return
			}
			// Grab the right to wake someone.
			new = (old - 1<<mutexWaiterShift) | mutexWoken
			if atomic.CompareAndSwapInt32(&m.state, old, new) {
				runtime_Semrelease(&m.sema, false)
				return
			}
			old = m.state
		}
	} else {
		// Starving mode: hand off mutex ownership to the next waiter.
		// Note: mutexLocked is not set; the waiter will set it after it wakes up.
		// But the mutex is still considered locked if mutexStarving is set,
		// so newly arriving goroutines won't acquire it.
		runtime_Semrelease(&m.sema, true)
	}
}