// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.

import (
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname selectgo
//go:linkname block

const debugSelect = false

// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/compile/internal/walk/select.go's scasetype.
type scase struct {
	c    *hchan         // chan
	elem unsafe.Pointer // data element
}

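// sellock locks every channel involved in the select, in lockorder,
// skipping consecutive duplicates so each channel is locked only once.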
func sellock(scases []scase, lockorder []uint16) {
	var c *hchan
	for _, o := range lockorder {
		c0 := scases[o].c
		if c0 != c {
			c = c0
			lock(&c.lock)
		}
	}
}

func selunlock(scases []scase, lockorder []uint16) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	for i := len(lockorder) - 1; i >= 0; i-- {
		c := scases[lockorder[i]].c
		if i > 0 && c == scases[lockorder[i-1]].c {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}

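// selparkcommit is the unlock function passed to gopark when a select
// blocks: it marks gp as parked on channels and then releases every
// channel lock exactly once by walking gp.waiting, which is in lock order.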
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// There are unlocked sudogs that point into gp's stack. Stack
	// copying must lock the channels of those sudogs.
	// Set activeStackChans here instead of before we try parking
	// because we could self-deadlock in stack growth on a
	// channel lock.
	gp.activeStackChans = true
	// Mark that it's safe for stack shrinking to occur now,
	// because any thread acquiring this G's stack for shrinking
	// is guaranteed to observe activeStackChans after this store.
	atomic.Store8(&gp.parkingOnChan, 0)
	// Make sure we unlock after setting activeStackChans and
	// unsetting parkingOnChan. The moment we unlock any of the
	// channel locks we risk gp getting readied by a channel operation
	// and so gp could continue running before everything before the
	// unlock is visible (even to gp itself).

	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	return true
}

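// block implements a select statement with no cases: the goroutine
// parks forever and is never made runnable again.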
func block() {
	gopark(nil, nil, waitReasonSelectNoCases, traceEvGoStop, 1) // forever
}

// selectgo implements the select statement.
//
// cas0 points to an array of type [ncases]scase, and order0 points to
// an array of type [2*ncases]uint16 where ncases must be <= 65536.
// Both reside on the goroutine's stack (regardless of any escaping in
// selectgo).
//
// For race detector builds, pc0 points to an array of type
// [ncases]uintptr (also on the stack); for other builds, it's set to
// nil.
//
// selectgo returns the index of the chosen scase, which matches the
// ordinal position of its respective select{recv,send} call. If the
// select has a default case (block is false) and no case is ready,
// selectgo returns -1. Also, if the chosen scase was a receive
// operation, it reports whether a value was received.
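//
// Callers lay out the scases with all nsends send cases first, followed
// by all nrecvs receive cases, so an index below nsends is always a send
// and an index at or above nsends is always a receive. As a rough,
// illustrative sketch (the slice names here are made up), a statement
// such as
//
//	select {
//	case c1 <- v:
//	case x := <-c2:
//	default:
//	}
//
// corresponds to a call like
//
//	chosen, recvOK := selectgo(&cases[0], &order[0], 1, 1, false)
//
// where cases[0] is the send on c1, cases[1] is the receive from c2,
// and a result of -1 selects the default branch.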
func selectgo(cas0 *scase, order0 *uint16, nsends, nrecvs int, block bool) (int, bool) {
	if debugSelect {
		print("select: cas0=", cas0, "\n")
	}

	// NOTE: In order to maintain a lean stack size, the number of scases
	// is capped at 65536.
	cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0))
	order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0))

	ncases := nsends + nrecvs
	scases := cas1[:ncases:ncases]
	pollorder := order1[:ncases:ncases]
	lockorder := order1[ncases:][:ncases:ncases]
	// NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler.

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// needed for gccgo, which doesn't zero pollorder
	if ncases > 0 {
		pollorder[0] = 0
	}

	// generate permuted order
	norder := 0
	for i := range scases {
		cas := &scases[i]

		// Omit cases without channels from the poll and lock orders.
		if cas.c == nil {
			cas.elem = nil // allow GC
			continue
		}

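		// Insert case i at a random slot among the cases kept so far
		// (an inside-out Fisher-Yates shuffle), so pollorder ends up a
		// uniform random permutation of the non-nil cases.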
		j := fastrandn(uint32(norder + 1))
		pollorder[norder] = pollorder[j]
		pollorder[j] = uint16(i)
		norder++
	}
	pollorder = pollorder[:norder]
	lockorder = lockorder[:norder]

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	for i := range lockorder {
		j := i
		// Start with the pollorder to permute cases on the same channel.
		c := scases[pollorder[i]].c
		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = pollorder[i]
	}
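	// Repeatedly pop the maximum: swap the heap's root with the last
	// element of the shrinking heap, then sift the displaced element
	// back down, leaving lockorder sorted by sortkey (channel address).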
	for i := len(lockorder) - 1; i >= 0; i-- {
		o := lockorder[i]
		c := scases[o].c
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
				k++
			}
			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = o
	}

	if debugSelect {
		for i := 0; i+1 < len(lockorder); i++ {
			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	}

	// lock all the channels involved in the select
	sellock(scases, lockorder)

	var (
		gp     *g
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
		nextp  **sudog
	)

	// pass 1 - look for something already waiting
	var casi int
	var cas *scase
	var caseSuccess bool
	var caseReleaseTime int64 = -1
	var recvOK bool
	for _, casei := range pollorder {
		casi = int(casei)
		cas = &scases[casi]
		c = cas.c

		if casi >= nsends {
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}
		} else {
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}
		}
	}

	if !block {
		selunlock(scases, lockorder)
		casi = -1
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	nextp = &gp.waiting
	for _, casei := range lockorder {
		casi = int(casei)
		cas = &scases[casi]
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		sg.isSelect = true
		// No stack splits between assigning elem and enqueuing
		// sg on gp.waiting where copystack can find it.
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.c = c
		// Construct waiting list in lock order.
		*nextp = sg
		nextp = &sg.waitlink

		if casi < nsends {
			c.sendq.enqueue(sg)
		} else {
			c.recvq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	atomic.Store8(&gp.parkingOnChan, 1)
	gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
	gp.activeStackChans = false

	sellock(scases, lockorder)

	gp.selectDone = 0
	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in lock order.
	casi = -1
	cas = nil
	caseSuccess = false
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.isSelect = false
		sg1.elem = nil
		sg1.c = nil
	}
	gp.waiting = nil

	for _, casei := range lockorder {
		k = &scases[casei]
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			casi = int(casei)
			cas = k
			caseSuccess = sglist.success
			if sglist.releasetime > 0 {
				caseReleaseTime = sglist.releasetime
			}
		} else {
			c = k.c
			if int(casei) < nsends {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		throw("selectgo: bad wakeup")
	}

	c = cas.c

	if debugSelect {
		print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n")
	}

	if casi < nsends {
		if !caseSuccess {
			goto sclose
		}
	} else {
		recvOK = caseSuccess
	}

	selunlock(scases, lockorder)
	goto retc

bufrecv:
	// can receive from buffer
	recvOK = true
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	typedmemclr(c.elemtype, qp)
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(scases, lockorder)
	goto retc

bufsend:
	// can send to buffer
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(scases, lockorder)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncrecv: cas0=", cas0, " c=", c, "\n")
	}
	recvOK = true
	goto retc

rclose:
	// read at end of closed channel
	selunlock(scases, lockorder)
	recvOK = false
	if cas.elem != nil {
		typedmemclr(c.elemtype, cas.elem)
	}
	if raceenabled {
		raceacquire(c.raceaddr())
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncsend: cas0=", cas0, " c=", c, "\n")
	}
	goto retc

retc:
	if caseReleaseTime > 0 {
		blockevent(caseReleaseTime-t0, 1)
	}

	// Check preemption, since unlike gc we don't check on every call.
	// A test case for this one is BenchmarkPingPongHog in proc_test.go.
	if block && getg().preempt {
		checkPreempt()
	}

	return casi, recvOK

sclose:
	// send on closed channel
	selunlock(scases, lockorder)
	panic(plainError("send on closed channel"))
}

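// sortkey returns a stable key (the channel's address) used to sort
// the channels of a select into a consistent locking order.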
func (c *hchan) sortkey() uintptr {
	return uintptr(unsafe.Pointer(c))
}

// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)

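// reflect_rselect implements reflect.Select. It packs the reflect cases
// into scases with sends first and receives last (the layout selectgo
// expects), runs selectgo, and translates the chosen index back to the
// caller's original ordering (or to the default case when selectgo
// returns -1).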
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (int, bool) {
	if len(cases) == 0 {
		block()
	}
	sel := make([]scase, len(cases))
	orig := make([]int, len(cases))
	nsends, nrecvs := 0, 0
	dflt := -1
	for i, rc := range cases {
		var j int
		switch rc.dir {
		case selectDefault:
			dflt = i
			continue
		case selectSend:
			j = nsends
			nsends++
		case selectRecv:
			nrecvs++
			j = len(cases) - nrecvs
		}

		sel[j] = scase{c: rc.ch, elem: rc.val}
		orig[j] = i
	}

	// Only a default case.
	if nsends+nrecvs == 0 {
		return dflt, false
	}

	// Compact sel and orig if necessary.
	if nsends+nrecvs < len(cases) {
		copy(sel[nsends:], sel[len(cases)-nrecvs:])
		copy(orig[nsends:], orig[len(cases)-nrecvs:])
	}

	order := make([]uint16, 2*(nsends+nrecvs))

	chosen, recvOK := selectgo(&sel[0], &order[0], nsends, nrecvs, dflt == -1)

	// Translate chosen back to caller's ordering.
	if chosen < 0 {
		chosen = dflt
	} else {
		chosen = orig[chosen]
	}
	return chosen, recvOK
}

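// dequeueSudoG removes sgp from q, whether sgp sits in the middle of
// the queue, at either end, or is its only element. If sgp is no
// longer in the queue at all, this is a no-op.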
func (q *waitq) dequeueSudoG(sgp *sudog) {
	x := sgp.prev
	y := sgp.next
	if x != nil {
		if y != nil {
			// middle of queue
			x.next = y
			y.prev = x
			sgp.next = nil
			sgp.prev = nil
			return
		}
		// end of queue
		x.next = nil
		q.last = x
		sgp.prev = nil
		return
	}
	if y != nil {
		// start of queue
		y.prev = nil
		q.first = y
		sgp.next = nil
		return
	}

	// x==y==nil. Either sgp is the only element in the queue,
	// or it has already been removed. Use q.first to disambiguate.
	if q.first == sgp {
		q.first = nil
		q.last = nil
	}
}