// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.
//
//go:linkname lock runtime.lock
//go:linkname unlock runtime.unlock
//go:linkname noteclear runtime.noteclear
//go:linkname notewakeup runtime.notewakeup
//go:linkname notesleep runtime.notesleep
//go:linkname notetsleep runtime.notetsleep
//go:linkname notetsleepg runtime.notetsleepg

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
//
const (
	// The low bit of a mutex key marks it as held; the remaining bits
	// hold a pointer to the head of the list of waiting M's.
	mutex_locked uintptr = 1

	active_spin     = 4  // attempts to spin with procyield before yielding the OS thread
	active_spin_cnt = 30 // iterations passed to procyield per active spin
	passive_spin    = 1  // attempts to osyield before queuing on the semaphore
)

func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, mutex_locked) {
		return
	}
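	// Slow path: make sure this M has a semaphore to sleep on
	// before it can block below.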
	semacreate(gp.m)

	// On uniprocessors, there is no point in spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&mutex_locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l.key points to a linked list of M's waiting
			// for this lock, chained through m.nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = muintptr(v &^ mutex_locked)
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|mutex_locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
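				// The lock was released while we were trying to queue;
				// go back and try to grab it again.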
				if v&mutex_locked == 0 {
					continue Loop
				}
			}
			if v&mutex_locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

//go:nowritebarrier
// We might not be holding a p in this code.
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == mutex_locked {
			if atomic.Casuintptr(&l.key, mutex_locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = muintptr(v &^ mutex_locked).ptr()
			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	//	gp.stackguard0 = stackPreempt
	// }
}

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	var v uintptr
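	// Atomically mark the note as woken, remembering the previous key value.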
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, mutex_locked) {
			break
		}
	}

	// Successfully set the key to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == mutex_locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
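	// If a cgo yield hook is installed, sleep in bounded intervals and
	// call the hook so that libc interceptors get a chance to run.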
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}

//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n.key.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

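	// Timed sleep: compute the absolute deadline, then sleep, retrying
	// after interrupts (in bounded chunks when a cgo yield hook is present)
	// until woken or the deadline passes.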
	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case mutex_locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// notetsleepg is the same as runtime·notetsleep, but is called on a user g (not g0).
// It calls only nosplit functions between entersyscallblock and exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock(0)
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall(0)
	return ok
}