1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 // +build darwin nacl netbsd openbsd plan9 solaris windows
6
7 #include "runtime.h"
8
9 // This implementation depends on OS-specific implementations of
10 //
11 // uintptr runtime_semacreate(void)
12 // Create a semaphore, which will be assigned to m->waitsema.
13 // The zero value is treated as absence of any semaphore,
14 // so be sure to return a non-zero value.
15 //
16 // int32 runtime_semasleep(int64 ns)
17 // If ns < 0, acquire m->waitsema and return 0.
18 // If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
19 // Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
20 //
21 // int32 runtime_semawakeup(M *mp)
22 // Wake up mp, which is or will soon be sleeping on mp->waitsema.
23 //
24
enum
{
	// Low bit of Lock.key / Note.key: set while the lock is held
	// (or the note has been signaled). The remaining bits of Lock.key
	// hold the head of the waiting-M list.
	LOCKED = 1,

	ACTIVE_SPIN = 4,	// spin attempts using runtime_procyield (multiprocessor only)
	ACTIVE_SPIN_CNT = 30,	// iterations passed to runtime_procyield per attempt
	PASSIVE_SPIN = 1,	// additional attempts using runtime_osyield before blocking
};
33
// runtime_lock acquires l. Fast path is a single CAS; otherwise the M
// spins briefly (on multiprocessors), then queues itself on the lock's
// wait list and sleeps on its per-M semaphore until woken by
// runtime_unlock.
void
runtime_lock(Lock *l)
{
	M *m;
	uintptr v;
	uint32 i, spin;

	m = runtime_m();
	// Sanity-check the per-M lock count; it is decremented in
	// runtime_unlock and must never go negative.
	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	if(runtime_casp((void**)&l->key, nil, (void*)LOCKED))
		return;

	// Make sure this M has a semaphore to sleep on if it must block.
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if((v&LOCKED) == 0) {
		unlocked:
			// Lock appears free: try to take it while preserving
			// the wait list stored in the upper bits of key.
			if(runtime_casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime_procyield(ACTIVE_SPIN_CNT);	// processor-level busy spin
		else if(i<spin+PASSIVE_SPIN)
			runtime_osyield();	// give up the CPU once before blocking
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				// CAS lost a race: reload and, if the lock was
				// released meanwhile, retry the acquire path.
				v = (uintptr)runtime_atomicloadp((void**)&l->key);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued. Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}
91
// runtime_unlock releases l. If other M's are queued on the lock,
// the M at the head of the wait list is dequeued and woken via its
// semaphore; the dequeued M then retries acquisition in runtime_lock.
void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;

	for(;;) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			// No waiters: just clear the key.
			if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			// Replace the head with its successor; note the new key
			// has the LOCKED bit clear, so the woken M must re-acquire.
			if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				// Dequeued an M. Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}

	// Balance the increment done in runtime_lock.
	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");
}
118
// One-time notifications.

// runtime_noteclear resets n so it can carry a new notification.
// Must not race with a sleep or wakeup on the same note.
void
runtime_noteclear(Note *n)
{
	n->key = 0;	// 0 = no waiter registered, no wakeup delivered
}
125
// runtime_notewakeup signals n, waking the M sleeping on it (if any).
// At most one wakeup is allowed per noteclear; a second one throws.
void
runtime_notewakeup(Note *n)
{
	M *mp;

	// Atomically mark the note as signaled, capturing the previous key.
	do
		mp = runtime_atomicloadp((void**)&n->key);
	while(!runtime_casp((void**)&n->key, mp, (void*)LOCKED));

	// Successfully set waitm to LOCKED.
	// What was it before?
	if(mp == nil) {
		// Nothing was waiting. Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups! Not allowed.
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m. Wake it up.
		runtime_semawakeup(mp);
	}
}
147
// runtime_notesleep blocks the current M until n is signaled by
// runtime_notewakeup. Returns immediately if the wakeup already arrived.
void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();

	/* For gccgo it's OK to sleep in non-g0, and it happens in
	   stoptheworld because we have not implemented preemption.

	if(runtime_g() != m->g0)
		runtime_throw("notesleep not on g0");
	*/

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	// Register this M as the waiter. The CAS fails only if a wakeup
	// already landed, in which case the key must be LOCKED.
	if(!runtime_casp((void**)&n->key, nil, m)) { // must be LOCKED (got wakeup)
		if(n->key != LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued. Sleep.
	m->blocked = true;
	runtime_semasleep(-1);
	m->blocked = false;
}
174
// notetsleep is the common implementation behind runtime_notetsleep and
// runtime_notetsleepg: sleep on n for at most ns nanoseconds (forever if
// ns < 0). Returns true if the note was signaled, false if the deadline
// passed first.
static bool
notetsleep(Note *n, int64 ns, int64 deadline, M *mp)
{
	M *m;

	m = runtime_m();

	// Conceptually, deadline and mp are local variables.
	// They are passed as arguments so that the space for them
	// does not count against our nosplit stack sequence.

	// Register for wakeup on n->waitm.
	if(!runtime_casp((void**)&n->key, nil, m)) { // must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return true;
	}

	if(ns < 0) {
		// Queued. Sleep.
		m->blocked = true;
		runtime_semasleep(-1);
		m->blocked = false;
		return true;
	}

	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered. Sleep.
		m->blocked = true;
		if(runtime_semasleep(ns) >= 0) {
			m->blocked = false;
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true;
		}
		m->blocked = false;

		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - runtime_nanotime();
		if(ns <= 0)
			break;
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp((void**)&n->key, mp, nil))
				return false;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			m->blocked = true;
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			m->blocked = false;
			return true;
		} else
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
	}
}
242
243 bool
runtime_notetsleep(Note * n,int64 ns)244 runtime_notetsleep(Note *n, int64 ns)
245 {
246 M *m;
247 bool res;
248
249 m = runtime_m();
250
251 if(runtime_g() != m->g0 && !m->gcing)
252 runtime_throw("notetsleep not on g0");
253
254 if(m->waitsema == 0)
255 m->waitsema = runtime_semacreate();
256
257 res = notetsleep(n, ns, 0, nil);
258 return res;
259 }
260
261 // same as runtime_notetsleep, but called on user g (not g0)
262 // calls only nosplit functions between entersyscallblock/exitsyscall
263 bool
runtime_notetsleepg(Note * n,int64 ns)264 runtime_notetsleepg(Note *n, int64 ns)
265 {
266 M *m;
267 bool res;
268
269 m = runtime_m();
270
271 if(runtime_g() == m->g0)
272 runtime_throw("notetsleepg on g0");
273
274 if(m->waitsema == 0)
275 m->waitsema = runtime_semacreate();
276
277 runtime_entersyscallblock();
278 res = notetsleep(n, ns, 0, nil);
279 runtime_exitsyscall();
280 return res;
281 }
282