// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.

import (
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
//
//go:linkname newselect runtime.newselect
//go:linkname selectdefault runtime.selectdefault
//go:linkname selectsend runtime.selectsend
//go:linkname selectrecv runtime.selectrecv
//go:linkname selectgo runtime.selectgo

// debugSelect enables the tracing prints scattered through this file.
const debugSelect = false

// Values for scase.kind. caseNil (the zero value) marks a case whose
// channel was nil; newselect zeroes the scase array, so an untouched
// slot reads as caseNil.
const (
	// scase.kind
	caseNil = iota
	caseRecv
	caseSend
	caseDefault
)

// Select statement header.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
//
// The header is followed in memory by tcase scase slots (the first is
// the embedded scase array below), then tcase lockorder entries, then
// tcase pollorder entries — see selectsize and newselect.
type hselect struct {
	tcase     uint16   // total count of scase[]
	ncase     uint16   // currently filled scase[]
	pollorder *uint16  // case poll order
	lockorder *uint16  // channel lock order
	scase     [1]scase // one per case (in order of appearance)
}

// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type scase struct {
	elem        unsafe.Pointer // data element
	c           *hchan         // chan
	pc          uintptr        // return pc (for race detector / msan)
	kind        uint16         // one of caseNil/caseRecv/caseSend/caseDefault
	receivedp   *bool          // pointer to received bool, if any
	releasetime int64
}

var (
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)

// selectsize returns the number of bytes needed to hold an hselect
// with size cases: the header (which already embeds one scase), the
// remaining size-1 scase slots, and size entries each for lockorder
// and pollorder, rounded up to int64 alignment.
func selectsize(size uintptr) uintptr {
	selsize := unsafe.Sizeof(hselect{}) +
		(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
		size*unsafe.Sizeof(*hselect{}.lockorder) +
		size*unsafe.Sizeof(*hselect{}.pollorder)
	return round(selsize, sys.Int64Align)
}

// newselect initializes the select header sel for size cases. The
// compiler allocates selsize bytes for sel; that size is cross-checked
// against selectsize. The lockorder and pollorder arrays live directly
// after the scase array inside the same allocation.
func newselect(sel *hselect, selsize int64, size int32) {
	if selsize != int64(selectsize(uintptr(size))) {
		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
		throw("bad select size")
	}
	// tcase is a uint16, so the case count must fit in 16 bits.
	if size != int32(uint16(size)) {
		throw("select size too large")
	}
	sel.tcase = uint16(size)
	sel.ncase = 0
	sel.lockorder = (*uint16)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])))
	sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)))

	// For gccgo the temporary variable will not have been zeroed.
	memclrNoHeapPointers(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])+uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)+uintptr(size)*unsafe.Sizeof(*hselect{}.pollorder))

	if debugSelect {
		print("newselect s=", sel, " size=", size, "\n")
	}
}

// selectsend registers a send case (c <- elem) as the next case of sel.
// A nil channel still consumes a case slot but leaves it zeroed
// (caseNil), so selectgo will skip it.
func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer) {
	pc := getcallerpc()
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectsend: too many cases")
	}
	sel.ncase = i + 1
	if c == nil {
		return
	}
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = c
	cas.kind = caseSend
	cas.elem = elem

	if debugSelect {
		print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, "\n")
	}
}

// selectrecv registers a receive case (elem = <-c, received reporting
// into *received if non-nil) as the next case of sel. As with
// selectsend, a nil channel leaves the slot as caseNil.
func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool) {
	pc := getcallerpc()
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectrecv: too many cases")
	}
	sel.ncase = i + 1
	if c == nil {
		return
	}
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = c
	cas.kind = caseRecv
	cas.elem = elem
	cas.receivedp = received

	if debugSelect {
		print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, "\n")
	}
}

// selectdefault registers the default case as the next case of sel.
func selectdefault(sel *hselect) {
	pc := getcallerpc()
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectdefault: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = nil
	cas.kind = caseDefault

	if debugSelect {
		print("selectdefault s=", sel, " pc=", hex(cas.pc), "\n")
	}
}

// sellock locks every distinct channel in the select, in lockorder.
// lockorder sorts cases by channel address, so duplicates of the same
// channel are adjacent and the c0 != c check skips repeat locks.
func sellock(scases []scase, lockorder []uint16) {
	var c *hchan
	for _, o := range lockorder {
		c0 := scases[o].c
		if c0 != nil && c0 != c {
			c = c0
			lock(&c.lock)
		}
	}
}
// selunlock unlocks every distinct channel in the select, in reverse
// lock order, skipping adjacent duplicates of the same channel.
func selunlock(scases []scase, lockorder []uint16) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	for i := len(scases) - 1; i >= 0; i-- {
		c := scases[lockorder[i]].c
		if c == nil {
			// Channels are sorted with nil last, so once we see a
			// nil channel there are no more locked channels below.
			break
		}
		if i > 0 && c == scases[lockorder[i-1]].c {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}

// selparkcommit is the gopark unlock function for selectgo: it releases
// every channel lock by walking gp.waiting (which holds the select's
// sudogs in lock order) rather than touching the stack-resident hselect.
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	return true
}

// block parks the goroutine forever; used for a select with no cases.
func block() {
	gopark(nil, nil, "select (no cases)", traceEvGoStop, 1) // forever
}

// selectgo implements the select statement.
//
// *sel is on the current goroutine's stack (regardless of any
// escaping in selectgo).
//
// selectgo returns the index of the chosen scase, which matches the
// ordinal position of its respective select{recv,send,default} call.
func selectgo(sel *hselect) int {
	if debugSelect {
		print("select: sel=", sel, "\n")
	}
	if sel.ncase != sel.tcase {
		throw("selectgo: case count mismatch")
	}

	// View the in-line scase array as a slice via a manufactured
	// slice header.
	scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
	scases := *(*[]scase)(unsafe.Pointer(&scaseslice))

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
		for i := 0; i < int(sel.ncase); i++ {
			scases[i].releasetime = -1
		}
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
	pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
	for i := 1; i < int(sel.ncase); i++ {
		j := fastrandn(uint32(i + 1))
		pollorder[i] = pollorder[j]
		pollorder[j] = uint16(i)
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]uint16)(unsafe.Pointer(&lockslice))
	// Build the max-heap (sift-up each element).
	for i := 0; i < int(sel.ncase); i++ {
		j := i
		// Start with the pollorder to permute cases on the same channel.
		c := scases[pollorder[i]].c
		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = pollorder[i]
	}
	// Pop the heap repeatedly (sift-down) to sort ascending.
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		o := lockorder[i]
		c := scases[o].c
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
				k++
			}
			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = o
	}
	/*
		for i := 0; i+1 < int(sel.ncase); i++ {
			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	*/

	// lock all the channels involved in the select
	sellock(scases, lockorder)

	var (
		gp     *g
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
		nextp  **sudog
	)

loop:
	// pass 1 - look for something already waiting
	var dfli int
	var dfl *scase
	var casi int
	var cas *scase
	for i := 0; i < int(sel.ncase); i++ {
		casi = int(pollorder[i])
		cas = &scases[casi]
		c = cas.c

		switch cas.kind {
		case caseNil:
			continue

		case caseRecv:
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}

		case caseSend:
			if raceenabled {
				racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}

		case caseDefault:
			dfli = casi
			dfl = cas
		}
	}

	if dfl != nil {
		// Nothing was ready and there is a default case: take it.
		selunlock(scases, lockorder)
		casi = dfli
		cas = dfl
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	nextp = &gp.waiting
	for _, casei := range lockorder {
		casi = int(casei)
		cas = &scases[casi]
		if cas.kind == caseNil {
			continue
		}
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		sg.isSelect = true
		// No stack splits between assigning elem and enqueuing
		// sg on gp.waiting where copystack can find it.
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.c = c
		// Construct waiting list in lock order.
		*nextp = sg
		nextp = &sg.waitlink

		switch cas.kind {
		case caseRecv:
			c.recvq.enqueue(sg)

		case caseSend:
			c.sendq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 1)

	// The goroutine that woke us stored the winning sudog in
	// gp.param. Re-lock all channels before cleaning up.
	sellock(scases, lockorder)

	gp.selectDone = 0
	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in lock order.
	casi = -1
	cas = nil
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.isSelect = false
		sg1.elem = nil
		sg1.c = nil
	}
	gp.waiting = nil

	// sglist walks the sudogs in lock order, in lockstep with this
	// loop over lockorder (caseNil slots produced no sudog).
	for _, casei := range lockorder {
		k = &scases[casei]
		if k.kind == caseNil {
			continue
		}
		if sglist.releasetime > 0 {
			k.releasetime = sglist.releasetime
		}
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			casi = int(casei)
			cas = k
		} else {
			c = k.c
			if k.kind == caseSend {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		// We can wake up with gp.param == nil (so cas == nil)
		// when a channel involved in the select has been closed.
		// It is easiest to loop and re-run the operation;
		// we'll see that it's now closed.
		// Maybe some day we can signal the close explicitly,
		// but we'd have to distinguish close-on-reader from close-on-writer.
		// It's easiest not to duplicate the code and just recheck above.
		// We know that something closed, and things never un-close,
		// so we won't block again.
		goto loop
	}

	c = cas.c

	if debugSelect {
		print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
	}

	if cas.kind == caseRecv && cas.receivedp != nil {
		*cas.receivedp = true
	}

	if raceenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		} else if cas.kind == caseSend {
			raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		}
	}
	if msanenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			msanwrite(cas.elem, c.elemtype.size)
		} else if cas.kind == caseSend {
			msanread(cas.elem, c.elemtype.size)
		}
	}

	selunlock(scases, lockorder)
	goto retc

bufrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		raceacquire(chanbuf(c, c.recvx))
		racerelease(chanbuf(c, c.recvx))
	}
	if msanenabled && cas.elem != nil {
		msanwrite(cas.elem, c.elemtype.size)
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	typedmemclr(c.elemtype, qp)
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(scases, lockorder)
	goto retc

bufsend:
	// can send to buffer
	if raceenabled {
		raceacquire(chanbuf(c, c.sendx))
		racerelease(chanbuf(c, c.sendx))
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(scases, lockorder)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncrecv: sel=", sel, " c=", c, "\n")
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	goto retc

rclose:
	// read at end of closed channel
	selunlock(scases, lockorder)
	if cas.receivedp != nil {
		*cas.receivedp = false
	}
	if cas.elem != nil {
		typedmemclr(c.elemtype, cas.elem)
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(c))
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncsend: sel=", sel, " c=", c, "\n")
	}
	goto retc

retc:
	if cas.releasetime > 0 {
		blockevent(cas.releasetime-t0, 1)
	}

	// Check preemption, since unlike gc we don't check on every call.
	// A test case for this one is BenchmarkPingPongHog in proc_test.go.
	if dfl != nil && getg().preempt {
		checkPreempt()
	}

	return casi

sclose:
	// send on closed channel
	selunlock(scases, lockorder)
	panic(plainError("send on closed channel"))
}

// sortkey returns the value used to order channels when computing the
// select lock order: the channel's address.
func (c *hchan) sortkey() uintptr {
	// TODO(khr): if we have a moving garbage collector, we'll need to
	// change this function.
	return uintptr(unsafe.Pointer(c))
}

// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)

// reflect_rselect implements reflect.Select: it heap-allocates an
// hselect, registers each case, and runs selectgo. recvOK reports
// whether the chosen receive case got a real value (shared across all
// receive cases via r, which is valid because only the chosen case
// writes it).
//
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
	// flagNoScan is safe here, because all objects are also referenced from cases.
	size := selectsize(uintptr(len(cases)))
	sel := (*hselect)(mallocgc(size, nil, true))
	newselect(sel, int64(size), int32(len(cases)))
	r := new(bool)
	for i := range cases {
		rc := &cases[i]
		switch rc.dir {
		case selectDefault:
			selectdefault(sel)
		case selectSend:
			selectsend(sel, rc.ch, rc.val)
		case selectRecv:
			selectrecv(sel, rc.ch, rc.val, r)
		}
	}

	chosen = selectgo(sel)
	recvOK = *r
	return
}

// dequeueSudoG removes sgp from the wait queue q, wherever it sits
// (middle, tail, head, or sole element). It is a no-op if sgp has
// already been removed.
func (q *waitq) dequeueSudoG(sgp *sudog) {
	x := sgp.prev
	y := sgp.next
	if x != nil {
		if y != nil {
			// middle of queue
			x.next = y
			y.prev = x
			sgp.next = nil
			sgp.prev = nil
			return
		}
		// end of queue
		x.next = nil
		q.last = x
		sgp.prev = nil
		return
	}
	if y != nil {
		// start of queue
		y.prev = nil
		q.first = y
		sgp.next = nil
		return
	}

	// x==y==nil. Either sgp is the only element in the queue,
	// or it has already been removed. Use q.first to disambiguate.
	if q.first == sgp {
		q.first = nil
		q.last = nil
	}
}