// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin hurd netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// export some functions.
//
//go:linkname lock
//go:linkname unlock
//go:linkname noteclear
//go:linkname notewakeup
//go:linkname notesleep
//go:linkname notetsleep
//go:linkname notetsleepg

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
//
const (
	mutex_locked uintptr = 1 // low bit of mutex.key and note.key

	active_spin     = 4  // spin iterations before falling back to osyield
	active_spin_cnt = 30 // procyield count per active-spin iteration
	passive_spin    = 1  // osyield attempts before queuing to sleep
)

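// lock acquires l. The value of l.key encodes the lock state:
// 0 means unlocked; mutex_locked with no other bits set means locked
// with no waiters; an M pointer with the mutex_locked bit set means
// locked, with the waiting M's chained through m.nextwaitm.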
func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, mutex_locked) {
		return
	}
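	// The speculative grab failed, so this M may have to sleep.
	// Make sure it has a semaphore to sleep on.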
	semacreate(gp.m)

	// On uniprocessors, there is no point spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&mutex_locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
				return
			}
			i = 0
		}
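		// The lock is held. Back off in three stages:
		// spin with procyield, then yield the OS thread,
		// then queue this M on the waiter list and sleep.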
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l.key points to a linked list of M's waiting
			// for this lock, chained through m.nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = muintptr(v &^ mutex_locked)
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|mutex_locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&mutex_locked == 0 {
					continue Loop
				}
			}
			if v&mutex_locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

//go:nowritebarrier
// We might not be holding a p in this code.
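// (Write barriers require a p, so go:nowritebarrier makes the compiler
// verify that unlock introduces none.)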
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == mutex_locked {
			if atomic.Casuintptr(&l.key, mutex_locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = muintptr(v &^ mutex_locked).ptr()
			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	//	gp.stackguard0 = stackPreempt
	// }
}

// One-time notifications. A note's key holds 0 (cleared), mutex_locked
// (wakeup delivered), or a pointer to the single M sleeping on the note.
func noteclear(n *note) {
	if GOOS == "aix" {
		// On AIX, semaphores might not synchronize the memory in some
		// rare cases. See issue #30189.
		atomic.Storeuintptr(&n.key, 0)
	} else {
		n.key = 0
	}
}

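// notewakeup marks n as woken and, if an M is already sleeping on n,
// wakes that M. At most one notewakeup is allowed per noteclear.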
func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, mutex_locked) {
			break
		}
	}

	// Successfully set n.key to mutex_locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == mutex_locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

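// notesleep blocks the calling M on its semaphore until notewakeup(n)
// runs. It must be called on g0; user goroutines use notetsleepg, which
// tells the scheduler that the M is about to block.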
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6 // 10ms
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}

//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n.key.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6 // 10ms; shadows the parameter ns, which is < 0 here
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

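	// Timed sleep: sleep in bounded increments until the wakeup
	// arrives or the deadline passes.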
	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case mutex_locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// Same as runtime·notetsleep, but called on a user g (not g0).
// Calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock()
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall()
	return ok
}

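// beforeIdle is a no-op in this implementation; ports with their own
// idle handling (js/wasm in the main runtime) supply a real version
// that runs before an M parks.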
func beforeIdle(int64) bool {
	return false
}

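// checkTimeouts is likewise a no-op here; the js/wasm port uses it to
// wake goroutines whose note deadlines have passed.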
func checkTimeouts() {}