// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// export some functions.
//
//go:linkname lock
//go:linkname unlock
//go:linkname noteclear
//go:linkname notewakeup
//go:linkname notesleep
//go:linkname notetsleep
//go:linkname notetsleepg

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.

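// For illustration only: on Linux, futexsleep and futexwakeup are thin
// wrappers around the futex(2) system call, defined in the OS-specific
// files (e.g. os_linux.go). A rough sketch with approximate identifiers,
// not the code in this package:
//
//	func futexsleep(addr *uint32, val uint32, ns int64) {
//		// FUTEX_WAIT blocks only if *addr still equals val,
//		// which closes the race with a concurrent wakeup.
//		if ns < 0 {
//			futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, nil, nil, 0)
//			return
//		}
//		var ts timespec
//		ts.setNsec(ns)
//		futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, unsafe.Pointer(&ts), nil, 0)
//	}
//
//	func futexwakeup(addr *uint32, cnt uint32) {
//		futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
//	}
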
const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
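//
// In terms of transitions, lock moves unlocked -> locked on the fast path,
// falls back to unlocked -> locked/sleeping while spinning, and marks the
// state sleeping before calling futexsleep; unlock moves any state back to
// unlocked and, if the old state was sleeping, wakes one waiter.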

// We use the uintptr mutex.key and note.key as a uint32.
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

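// lock acquires l for the calling M, spinning briefly on multiprocessors
// in the hope that the holder releases it soon, and otherwise sleeping
// on the futex until woken by unlock.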
func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

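// unlock releases l. If the old state was mutex_sleeping, at least one
// thread is presumably parked in futexsleep, so wake one of them.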
func unlock(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	//	gp.stackguard0 = stackPreempt
	// }
}

// One-time notifications.
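// A note must be initialized with noteclear before use. After that,
// notewakeup may be called at most once, and notesleep/notetsleep return
// once it has been called (notetsleep also returns when its timeout
// expires). An illustrative one-shot handoff, not code from this file:
//
//	var done note
//	noteclear(&done)
//	// on g0 of the waiting M:
//	notesleep(&done)
//	// on some other M, once the event has happened:
//	notewakeup(&done)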
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}

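// notetsleep_internal waits for the note to be signaled, giving up after
// ns nanoseconds if ns >= 0 (ns < 0 means wait forever). It reports
// whether the note was signaled.
//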
// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}

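// beforeIdle is called by the scheduler before an M blocks waiting for
// work. On futex-based platforms there is nothing to do, so it reports
// false; it exists for lock implementations (notably js/wasm) that
// schedule their own wakeups.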
func beforeIdle(int64) bool {
	return false
}

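// checkTimeouts is a no-op here; it exists for lock implementations
// (notably js/wasm) that must themselves resume goroutines whose sleep
// deadline has passed.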
func checkTimeouts() {}