// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Transport code.

package http2

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"context"
	"crypto/rand"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math"
	mathrand "math/rand"
	"net"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/http/httpguts"
	"golang.org/x/net/http2/hpack"
	"golang.org/x/net/idna"
)

const (
	// transportDefaultConnFlow is how many connection-level flow control
	// tokens we give the server at start-up, past the default 64k.
	transportDefaultConnFlow = 1 << 30

	// transportDefaultStreamFlow is how many stream-level flow
	// control tokens we announce to the peer, and how many bytes
	// we buffer per stream.
	transportDefaultStreamFlow = 4 << 20

	// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
	// a stream-level WINDOW_UPDATE for at a time.
	transportDefaultStreamMinRefresh = 4 << 10

	defaultUserAgent = "Go-http-client/2.0"

	// initialMaxConcurrentStreams is a connection's maxConcurrentStreams until
	// it's received the server's initial SETTINGS frame, which corresponds with
	// the spec's minimum recommended value.
	initialMaxConcurrentStreams = 100

	// defaultMaxConcurrentStreams is a connection's default maxConcurrentStreams
	// if the server doesn't include one in its initial SETTINGS frame.
	defaultMaxConcurrentStreams = 1000
)

// Transport is an HTTP/2 Transport.
//
// A Transport internally caches connections to servers. It is safe
// for concurrent use by multiple goroutines.
type Transport struct {
	// DialTLS specifies an optional dial function for creating
	// TLS connections for requests.
	//
	// If DialTLS is nil, tls.Dial is used.
	//
	// If the returned net.Conn has a ConnectionState method like tls.Conn,
	// it will be used to set http.Response.TLS.
	DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)

	// TLSClientConfig specifies the TLS configuration to use with
	// tls.Client. If nil, the default configuration is used.
	TLSClientConfig *tls.Config

	// ConnPool optionally specifies an alternate connection pool to use.
	// If nil, the default is used.
	ConnPool ClientConnPool

	// DisableCompression, if true, prevents the Transport from
	// requesting compression with an "Accept-Encoding: gzip"
	// request header when the Request contains no existing
	// Accept-Encoding value. If the Transport requests gzip on
	// its own and gets a gzipped response, it's transparently
	// decoded in the Response.Body. However, if the user
	// explicitly requested gzip it is not automatically
	// uncompressed.
	DisableCompression bool

	// AllowHTTP, if true, permits HTTP/2 requests using the insecure,
	// plain-text "http" scheme. Note that this does not enable h2c support.
	AllowHTTP bool

	// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
	// send in the initial settings frame. It is how many bytes
	// of response headers are allowed. Unlike the http2 spec, zero here
	// means to use a default limit (currently 10MB). If you actually
	// want to advertise an unlimited value to the peer, Transport
	// interprets the highest possible value here (0xffffffff or 1<<32-1)
	// to mean no limit.
	MaxHeaderListSize uint32

	// StrictMaxConcurrentStreams controls whether the server's
	// SETTINGS_MAX_CONCURRENT_STREAMS should be respected
	// globally. If false, new TCP connections are created to the
	// server as needed to keep each under the per-connection
	// SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
	// server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
	// a global limit and callers of RoundTrip block when needed,
	// waiting for their turn.
	StrictMaxConcurrentStreams bool

	// ReadIdleTimeout is the timeout after which a health check using ping
	// frame will be carried out if no frame is received on the connection.
	// Note that a ping response is considered a received frame, so if
	// there is no other traffic on the connection, the health check will
	// be performed every ReadIdleTimeout interval.
	// If zero, no health check is performed.
	ReadIdleTimeout time.Duration

	// PingTimeout is the timeout after which the connection will be closed
	// if a response to Ping is not received.
	// Defaults to 15s.
	PingTimeout time.Duration

	// CountError, if non-nil, is called on HTTP/2 transport errors.
	// It's intended to increment a metric for monitoring, such
	// as an expvar or Prometheus metric.
	// The errType consists of only ASCII word characters.
	CountError func(errType string)

	// t1, if non-nil, is the standard library Transport using
	// this transport. Its settings are used (but not its
	// RoundTrip method, etc).
	t1 *http.Transport

	connPoolOnce  sync.Once
	connPoolOrDef ClientConnPool // non-nil version of ConnPool
}

// maxHeaderListSize returns the effective SETTINGS_MAX_HEADER_LIST_SIZE to
// advertise: 10MB when unset, 0 (no limit) when the caller set the sentinel
// 0xffffffff, and the configured value otherwise.
func (t *Transport) maxHeaderListSize() uint32 {
	if t.MaxHeaderListSize == 0 {
		return 10 << 20
	}
	if t.MaxHeaderListSize == 0xffffffff {
		return 0
	}
	return t.MaxHeaderListSize
}

// disableCompression reports whether transparent gzip is disabled, honoring
// the setting on either this Transport or the wrapping http.Transport.
func (t *Transport) disableCompression() bool {
	return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}

// pingTimeout returns PingTimeout, defaulting to 15s when unset.
func (t *Transport) pingTimeout() time.Duration {
	if t.PingTimeout == 0 {
		return 15 * time.Second
	}
	return t.PingTimeout
}

// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled.
//
// Use ConfigureTransports instead to configure the HTTP/2 Transport.
func ConfigureTransport(t1 *http.Transport) error {
	_, err := ConfigureTransports(t1)
	return err
}

// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns a new HTTP/2 Transport for further configuration.
// It returns an error if t1 has already been HTTP/2-enabled.
func ConfigureTransports(t1 *http.Transport) (*Transport, error) {
	return configureTransports(t1)
}

// configureTransports wires an HTTP/2 Transport into t1: it registers the
// "https" round tripper, ensures "h2" and "http/1.1" are in NextProtos, and
// installs the TLSNextProto upgrade hook that adopts connections whose ALPN
// negotiation selected "h2".
func configureTransports(t1 *http.Transport) (*Transport, error) {
	connPool := new(clientConnPool)
	t2 := &Transport{
		ConnPool: noDialClientConnPool{connPool},
		t1:       t1,
	}
	connPool.t = t2
	if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
		return nil, err
	}
	if t1.TLSClientConfig == nil {
		t1.TLSClientConfig = new(tls.Config)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
		t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
		t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
	}
	upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
		addr := authorityAddr("https", authority)
		if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
			go c.Close()
			return erringRoundTripper{err}
		} else if !used {
			// Turns out we don't need this c.
			// For example, two goroutines made requests to the same host
			// at the same time, both kicking off TCP dials. (since protocol
			// was unknown)
			go c.Close()
		}
		return t2
	}
	if m := t1.TLSNextProto; len(m) == 0 {
		t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
			"h2": upgradeFn,
		}
	} else {
		m["h2"] = upgradeFn
	}
	return t2, nil
}

// connPool returns the connection pool to use, lazily initializing the
// default one on first use.
func (t *Transport) connPool() ClientConnPool {
	t.connPoolOnce.Do(t.initConnPool)
	return t.connPoolOrDef
}

// initConnPool sets connPoolOrDef from ConnPool, or creates the default pool.
// Called exactly once via connPoolOnce.
func (t *Transport) initConnPool() {
	if t.ConnPool != nil {
		t.connPoolOrDef = t.ConnPool
	} else {
		t.connPoolOrDef = &clientConnPool{t: t}
	}
}

// ClientConn is the state of a single HTTP/2 client connection to an
// HTTP/2 server.
type ClientConn struct {
	t        *Transport
	tconn    net.Conn             // usually *tls.Conn, except specialized impls
	tlsState *tls.ConnectionState // nil only for specialized impls
	reused   uint32               // whether conn is being reused; atomic
	singleUse bool                // whether being used for a single http.Request

	// readLoop goroutine fields:
	readerDone chan struct{} // closed on error
	readerErr  error         // set before readerDone is closed

	idleTimeout time.Duration // or 0 for never
	idleTimer   *time.Timer

	mu              sync.Mutex // guards following
	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
	inflow          flow       // peer's conn-level flow control
	doNotReuse      bool       // whether conn is marked to not be reused for any future requests
	closing         bool
	closed          bool
	seenSettings    bool                     // true if we've seen a settings frame, false otherwise
	wantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back
	goAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received
	goAwayDebug     string                   // goAway frame's debug data, retained as a string
	streams         map[uint32]*clientStream // client-initiated
	nextStreamID    uint32
	pendingRequests int                       // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
	pings           map[[8]byte]chan struct{} // in flight ping data to notification channel
	br              *bufio.Reader
	lastActive      time.Time
	lastIdle        time.Time // time last idle
	// Settings from peer: (also guarded by wmu)
	maxFrameSize          uint32
	maxConcurrentStreams  uint32
	peerMaxHeaderListSize uint64
	initialWindowSize     uint32

	// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
	// Write to reqHeaderMu to lock it, read from it to unlock.
	// Lock reqHeaderMu BEFORE mu or wmu.
	reqHeaderMu chan struct{}

	// wmu is held while writing.
	// Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
	// Only acquire both at the same time when changing peer settings.
	wmu  sync.Mutex
	bw   *bufio.Writer
	fr   *Framer
	werr error        // first write error that has occurred
	hbuf bytes.Buffer // HPACK encoder writes into this
	henc *hpack.Encoder
}

// clientStream is the state for a single HTTP/2 stream. One of these
// is created for each Transport.RoundTrip call.
type clientStream struct {
	cc            *ClientConn
	req           *http.Request
	trace         *httptrace.ClientTrace // or nil
	ID            uint32
	resc          chan resAndError
	bufPipe       pipe   // buffered pipe with the flow-controlled response payload
	startedWrite  bool   // started request body write; guarded by cc.mu
	requestedGzip bool
	on100         func() // optional code to run if get a 100 continue response

	flow        flow  // guarded by cc.mu
	inflow      flow  // guarded by cc.mu
	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
	readErr     error // sticky read error; owned by transportResponseBody.Read
	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu

	peerReset chan struct{} // closed on peer reset
	resetErr  error         // populated before peerReset is closed

	done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu

	// owned by clientConnReadLoop:
	firstByte    bool  // got the first response byte
	pastHeaders  bool  // got first MetaHeadersFrame (actual headers)
	pastTrailers bool  // got optional second MetaHeadersFrame (trailers)
	num1xx       uint8 // number of 1xx responses seen

	trailer    http.Header  // accumulated trailers
	resTrailer *http.Header // client's Response.Trailer
}

// awaitRequestCancel waits for the user to cancel a request or for the done
// channel to be signaled. A non-nil error is returned only if the request was
// canceled.
336func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { 337 ctx := req.Context() 338 if req.Cancel == nil && ctx.Done() == nil { 339 return nil 340 } 341 select { 342 case <-req.Cancel: 343 return errRequestCanceled 344 case <-ctx.Done(): 345 return ctx.Err() 346 case <-done: 347 return nil 348 } 349} 350 351var got1xxFuncForTests func(int, textproto.MIMEHeader) error 352 353// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, 354// if any. It returns nil if not set or if the Go version is too old. 355func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { 356 if fn := got1xxFuncForTests; fn != nil { 357 return fn 358 } 359 return traceGot1xxResponseFunc(cs.trace) 360} 361 362// awaitRequestCancel waits for the user to cancel a request, its context to 363// expire, or for the request to be done (any way it might be removed from the 364// cc.streams map: peer reset, successful completion, TCP connection breakage, 365// etc). If the request is canceled, then cs will be canceled and closed. 366func (cs *clientStream) awaitRequestCancel(req *http.Request) { 367 if err := awaitRequestCancel(req, cs.done); err != nil { 368 cs.cancelStream() 369 cs.bufPipe.CloseWithError(err) 370 } 371} 372 373func (cs *clientStream) cancelStream() { 374 cc := cs.cc 375 cc.mu.Lock() 376 didReset := cs.didReset 377 cs.didReset = true 378 cc.mu.Unlock() 379 380 if !didReset { 381 cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) 382 cc.forgetStreamID(cs.ID) 383 } 384} 385 386// checkResetOrDone reports any error sent in a RST_STREAM frame by the 387// server, or errStreamClosed if the stream is complete. 
// checkResetOrDone reports, without blocking, a RST_STREAM error from the
// server, errStreamClosed if the stream has completed, or nil otherwise.
func (cs *clientStream) checkResetOrDone() error {
	select {
	case <-cs.peerReset:
		return cs.resetErr
	case <-cs.done:
		return errStreamClosed
	default:
		return nil
	}
}

// getStartedWrite reports whether the request body write has started,
// reading startedWrite under cc.mu.
func (cs *clientStream) getStartedWrite() bool {
	cc := cs.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cs.startedWrite
}

// abortRequestBodyWrite records err as the reason to stop writing the
// request body (first error wins) and wakes any writer blocked on cc.cond.
// err must be non-nil.
func (cs *clientStream) abortRequestBodyWrite(err error) {
	if err == nil {
		panic("nil error")
	}
	cc := cs.cc
	cc.mu.Lock()
	if cs.stopReqBody == nil {
		cs.stopReqBody = err
		cc.cond.Broadcast()
		// Close the body after releasing the mutex, in case it blocks.
		// (The defer runs after cc.mu.Unlock below.)
		if body := cs.req.Body; body != nil {
			defer body.Close()
		}
	}
	cc.mu.Unlock()
}

// stickyErrWriter wraps an io.Writer and remembers the first write error in
// *err; once an error has occurred, all subsequent Writes fail with it
// without touching the underlying writer.
type stickyErrWriter struct {
	w   io.Writer
	err *error
}

func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
	if *sew.err != nil {
		return 0, *sew.err
	}
	n, err = sew.w.Write(p)
	*sew.err = err
	return
}

// noCachedConnError is the concrete type of ErrNoCachedConn, which
// needs to be detected by net/http regardless of whether it's its
// bundled version (in h2_bundle.go with a rewritten type name) or
// from a user's x/net/http2. As such, as it has a unique method name
// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
// isNoCachedConnError.
type noCachedConnError struct{}

func (noCachedConnError) IsHTTP2NoCachedConnError() {}
func (noCachedConnError) Error() string             { return "http2: no cached connection was available" }

// isNoCachedConnError reports whether err is of type noCachedConnError
// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
// may coexist in the same running program.
451func isNoCachedConnError(err error) bool { 452 _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) 453 return ok 454} 455 456var ErrNoCachedConn error = noCachedConnError{} 457 458// RoundTripOpt are options for the Transport.RoundTripOpt method. 459type RoundTripOpt struct { 460 // OnlyCachedConn controls whether RoundTripOpt may 461 // create a new TCP connection. If set true and 462 // no cached connection is available, RoundTripOpt 463 // will return ErrNoCachedConn. 464 OnlyCachedConn bool 465} 466 467func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { 468 return t.RoundTripOpt(req, RoundTripOpt{}) 469} 470 471// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) 472// and returns a host:port. The port 443 is added if needed. 473func authorityAddr(scheme string, authority string) (addr string) { 474 host, port, err := net.SplitHostPort(authority) 475 if err != nil { // authority didn't have a port 476 port = "443" 477 if scheme == "http" { 478 port = "80" 479 } 480 host = authority 481 } 482 if a, err := idna.ToASCII(host); err == nil { 483 host = a 484 } 485 // IPv6 address literal, without a port: 486 if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { 487 return host + ":" + port 488 } 489 return net.JoinHostPort(host, port) 490} 491 492// RoundTripOpt is like RoundTrip, but takes options. 
493func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { 494 if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { 495 return nil, errors.New("http2: unsupported scheme") 496 } 497 498 addr := authorityAddr(req.URL.Scheme, req.URL.Host) 499 for retry := 0; ; retry++ { 500 cc, err := t.connPool().GetClientConn(req, addr) 501 if err != nil { 502 t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) 503 return nil, err 504 } 505 reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) 506 traceGotConn(req, cc, reused) 507 body := req.Body 508 res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) 509 if err != nil && retry <= 6 { 510 if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { 511 // After the first retry, do exponential backoff with 10% jitter. 512 if retry == 0 { 513 continue 514 } 515 backoff := float64(uint(1) << (uint(retry) - 1)) 516 backoff += backoff * (0.1 * mathrand.Float64()) 517 select { 518 case <-time.After(time.Second * time.Duration(backoff)): 519 continue 520 case <-req.Context().Done(): 521 err = req.Context().Err() 522 } 523 } 524 } 525 if err != nil { 526 t.vlogf("RoundTrip failure: %v", err) 527 // If the error occurred after the body write started, 528 // the body writer will close the body. Otherwise, do so here. 529 if body != nil && !gotErrAfterReqBodyWrite { 530 body.Close() 531 } 532 return nil, err 533 } 534 return res, nil 535 } 536} 537 538// CloseIdleConnections closes any connections which were previously 539// connected from previous requests but are now sitting idle. 540// It does not interrupt any connections currently in use. 
func (t *Transport) CloseIdleConnections() {
	if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
		cp.closeIdleConnections()
	}
}

var (
	errClientConnClosed    = errors.New("http2: client conn is closed")
	errClientConnUnusable  = errors.New("http2: client conn not usable")
	errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
)

// shouldRetryRequest is called by RoundTrip when a request fails to get
// response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a
// modified clone), or an error if the request can't be replayed.
func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
	if !canRetryError(err) {
		return nil, err
	}
	// If the Body is nil (or http.NoBody), it's safe to reuse
	// this request and its Body.
	if req.Body == nil || req.Body == http.NoBody {
		return req, nil
	}

	// If the request body can be reset back to its original
	// state via the optional req.GetBody, do that.
	if req.GetBody != nil {
		req.Body.Close()
		body, err := req.GetBody()
		if err != nil {
			return nil, err
		}
		newReq := *req
		newReq.Body = body
		return &newReq, nil
	}

	// The Request.Body can't reset back to the beginning, but we
	// don't seem to have started to read from it yet, so reuse
	// the request directly. The "afterBodyWrite" means the
	// bodyWrite process has started, which becomes true before
	// the first Read.
	if !afterBodyWrite {
		return req, nil
	}

	return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
}

// canRetryError reports whether err is one of the errors after which it is
// safe to replay a request: an unusable conn, a graceful GOAWAY, a refused
// stream, or a peer-sent protocol error (see golang/go#47635, golang/go#42777).
func canRetryError(err error) bool {
	if err == errClientConnUnusable || err == errClientConnGotGoAway {
		return true
	}
	if se, ok := err.(StreamError); ok {
		if se.Code == ErrCodeProtocol && se.Cause == errFromPeer {
			// See golang/go#47635, golang/go#42777
			return true
		}
		return se.Code == ErrCodeRefusedStream
	}
	return false
}

// dialClientConn dials addr over TLS and wraps the connection in a new
// ClientConn.
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, err
	}
	tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host))
	if err != nil {
		return nil, err
	}
	return t.newClientConn(tconn, singleUse)
}

// newTLSConfig clones the configured TLS config (or creates a fresh one),
// ensuring NextProtoTLS is advertised and ServerName is set to host.
func (t *Transport) newTLSConfig(host string) *tls.Config {
	cfg := new(tls.Config)
	if t.TLSClientConfig != nil {
		*cfg = *t.TLSClientConfig.Clone()
	}
	if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
		cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
	}
	if cfg.ServerName == "" {
		cfg.ServerName = host
	}
	return cfg
}

// dialTLS returns the dial function to use: the user-provided DialTLS if
// set, otherwise a default dialer that verifies the "h2" ALPN result.
func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) {
	if t.DialTLS != nil {
		return t.DialTLS
	}
	return func(network, addr string, cfg *tls.Config) (net.Conn, error) {
		tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg)
		if err != nil {
			return nil, err
		}
		state := tlsCn.ConnectionState()
		if p := state.NegotiatedProtocol; p != NextProtoTLS {
			return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
		}
		if !state.NegotiatedProtocolIsMutual {
			return nil, errors.New("http2: could not negotiate protocol mutually")
		}
		return tlsCn, nil
	}
}

// disableKeepAlives reports whether connections should be closed as
// soon as possible after handling the first request.
func (t *Transport) disableKeepAlives() bool {
	return t.t1 != nil && t.t1.DisableKeepAlives
}

// expectContinueTimeout returns the wrapping http.Transport's
// ExpectContinueTimeout, or 0 when there is no http.Transport.
func (t *Transport) expectContinueTimeout() time.Duration {
	if t.t1 == nil {
		return 0
	}
	return t.t1.ExpectContinueTimeout
}

// NewClientConn creates a new ClientConn on an already-established
// connection c. The connection is single-use when keep-alives are disabled
// on the wrapping http.Transport.
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
	return t.newClientConn(c, t.disableKeepAlives())
}

// newClientConn initializes a ClientConn over c, writes the client preface,
// initial SETTINGS, and conn-level WINDOW_UPDATE, and starts the readLoop
// goroutine.
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
	cc := &ClientConn{
		t:                     t,
		tconn:                 c,
		readerDone:            make(chan struct{}),
		nextStreamID:          1,
		maxFrameSize:          16 << 10,                    // spec default
		initialWindowSize:     65535,                       // spec default
		maxConcurrentStreams:  initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
		peerMaxHeaderListSize: 0xffffffffffffffff,          // "infinite", per spec. Use 2^64-1 instead.
		streams:               make(map[uint32]*clientStream),
		singleUse:             singleUse,
		wantSettingsAck:       true,
		pings:                 make(map[[8]byte]chan struct{}),
		reqHeaderMu:           make(chan struct{}, 1),
	}
	if d := t.idleConnTimeout(); d != 0 {
		cc.idleTimeout = d
		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
	}
	if VerboseLogs {
		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
	}

	cc.cond = sync.NewCond(&cc.mu)
	cc.flow.add(int32(initialWindowSize))

	// TODO: adjust this writer size to account for frame size +
	// MTU + crypto/tls record padding.
	cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
	cc.br = bufio.NewReader(c)
	cc.fr = NewFramer(cc.bw, cc.br)
	cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
	cc.fr.MaxHeaderListSize = t.maxHeaderListSize()

	// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
	// henc in response to SETTINGS frames?
	cc.henc = hpack.NewEncoder(&cc.hbuf)

	if t.AllowHTTP {
		// h2c: client-initiated streams start at 3 (1 is reserved
		// for the upgraded request).
		cc.nextStreamID = 3
	}

	if cs, ok := c.(connectionStater); ok {
		state := cs.ConnectionState()
		cc.tlsState = &state
	}

	initialSettings := []Setting{
		{ID: SettingEnablePush, Val: 0},
		{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
	}
	if max := t.maxHeaderListSize(); max != 0 {
		initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
	}

	cc.bw.Write(clientPreface)
	cc.fr.WriteSettings(initialSettings...)
	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
	cc.bw.Flush()
	if cc.werr != nil {
		cc.Close()
		return nil, cc.werr
	}

	go cc.readLoop()
	return cc, nil
}

// healthCheck sends a ping with a deadline of pingTimeout; on failure the
// connection is closed and marked dead in the pool.
func (cc *ClientConn) healthCheck() {
	pingTimeout := cc.t.pingTimeout()
	// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
	// trigger the healthCheck again if there is no frame received.
	ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
	defer cancel()
	err := cc.Ping(ctx)
	if err != nil {
		cc.closeForLostPing()
		cc.t.connPool().MarkDead(cc)
		return
	}
}

// SetDoNotReuse marks cc as not reusable for future HTTP requests.
func (cc *ClientConn) SetDoNotReuse() {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	cc.doNotReuse = true
}

// setGoAway records a received GOAWAY frame and fails any stream whose ID is
// above the frame's LastStreamID with errClientConnGotGoAway.
func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
	cc.mu.Lock()
	defer cc.mu.Unlock()

	old := cc.goAway
	cc.goAway = f

	// Merge the previous and current GoAway error frames.
	if cc.goAwayDebug == "" {
		cc.goAwayDebug = string(f.DebugData())
	}
	if old != nil && old.ErrCode != ErrCodeNo {
		cc.goAway.ErrCode = old.ErrCode
	}
	last := f.LastStreamID
	for streamID, cs := range cc.streams {
		if streamID > last {
			// Non-blocking send: a stream that isn't waiting on
			// resc will learn of the error another way.
			select {
			case cs.resc <- resAndError{err: errClientConnGotGoAway}:
			default:
			}
		}
	}
}

// CanTakeNewRequest reports whether the connection can take a new request,
// meaning it has not been closed or received or sent a GOAWAY.
func (cc *ClientConn) CanTakeNewRequest() bool {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.canTakeNewRequestLocked()
}

// clientConnIdleState describes the suitability of a client
// connection to initiate a new RoundTrip request.
type clientConnIdleState struct {
	canTakeNewRequest bool
	freshConn         bool // whether it's unused by any previous request
}

// idleState returns the connection's suitability for a new request,
// taking cc.mu.
func (cc *ClientConn) idleState() clientConnIdleState {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.idleStateLocked()
}

// idleStateLocked computes the idle state. cc.mu must be held.
func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
	if cc.singleUse && cc.nextStreamID > 1 {
		return
	}
	var maxConcurrentOkay bool
	if cc.t.StrictMaxConcurrentStreams {
		// We'll tell the caller we can take a new request to
		// prevent the caller from dialing a new TCP
		// connection, but then we'll block later before
		// writing it.
		maxConcurrentOkay = true
	} else {
		maxConcurrentOkay = int64(len(cc.streams)+1) <= int64(cc.maxConcurrentStreams)
	}

	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
		!cc.doNotReuse &&
		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
		!cc.tooIdleLocked()
	st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
	return
}

// canTakeNewRequestLocked reports whether a new request may be started on
// this connection. cc.mu must be held.
func (cc *ClientConn) canTakeNewRequestLocked() bool {
	st := cc.idleStateLocked()
	return st.canTakeNewRequest
}

// tooIdleLocked reports whether this connection has been sitting idle
// for too much wall time.
func (cc *ClientConn) tooIdleLocked() bool {
	// The Round(0) strips the monotonic clock reading so the
	// times are compared based on their wall time. We don't want
	// to reuse a connection that's been sitting idle during
	// VM/laptop suspend if monotonic time was also frozen.
	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
}

// onIdleTimeout is called from a time.AfterFunc goroutine.
// It will only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
// connection. The timer could just call closeIfIdle, but this is more
// clear.
func (cc *ClientConn) onIdleTimeout() {
	cc.closeIfIdle()
}

// closeIfIdle closes the connection, but only if no streams are active.
func (cc *ClientConn) closeIfIdle() {
	cc.mu.Lock()
	if len(cc.streams) > 0 {
		cc.mu.Unlock()
		return
	}
	cc.closed = true
	nextID := cc.nextStreamID
	// TODO: do clients send GOAWAY too? maybe? Just Close:
	cc.mu.Unlock()

	if VerboseLogs {
		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
	}
	cc.tconn.Close()
}

// shutdownEnterWaitStateHook is a test hook, run just before Shutdown
// starts waiting for in-flight streams to drain.
var shutdownEnterWaitStateHook = func() {}

// Shutdown gracefully close the client connection, waiting for running streams to complete.
func (cc *ClientConn) Shutdown(ctx context.Context) error {
	if err := cc.sendGoAway(); err != nil {
		return err
	}
	// Wait for all in-flight streams to complete or connection to close
	done := make(chan error, 1)
	cancelled := false // guarded by cc.mu
	go func() {
		cc.mu.Lock()
		defer cc.mu.Unlock()
		for {
			if len(cc.streams) == 0 || cc.closed {
				cc.closed = true
				done <- cc.tconn.Close()
				break
			}
			if cancelled {
				// ctx expired; the Shutdown caller has given up.
				break
			}
			cc.cond.Wait()
		}
	}()
	shutdownEnterWaitStateHook()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		cc.mu.Lock()
		// Free the goroutine above
		cancelled = true
		cc.cond.Broadcast()
		cc.mu.Unlock()
		return ctx.Err()
	}
}

// sendGoAway writes a graceful-shutdown GOAWAY frame once; subsequent calls
// are no-ops. It also sets cc.closing so no new requests are accepted.
func (cc *ClientConn) sendGoAway() error {
	cc.mu.Lock()
	closing := cc.closing
	cc.closing = true
	maxStreamID := cc.nextStreamID
	cc.mu.Unlock()
	if closing {
		// GOAWAY sent already
		return nil
	}

	cc.wmu.Lock()
	defer cc.wmu.Unlock()
	// Send a graceful shutdown frame to server
	if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
		return err
	}
	if err := cc.bw.Flush(); err != nil {
		return err
	}
	// Prevent new requests
	return nil
}

// closes the client connection immediately. In-flight requests are interrupted.
// err is sent to streams.
func (cc *ClientConn) closeForError(err error) error {
	cc.mu.Lock()
	streams := cc.streams
	cc.streams = nil
	cc.closed = true
	cc.mu.Unlock()

	for _, cs := range streams {
		// Non-blocking send: streams not waiting on resc will observe
		// the pipe error instead.
		select {
		case cs.resc <- resAndError{err: err}:
		default:
		}
		cs.bufPipe.CloseWithError(err)
	}

	cc.mu.Lock()
	defer cc.cond.Broadcast()
	defer cc.mu.Unlock()
	return cc.tconn.Close()
}

// Close closes the client connection immediately.
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
	err := errors.New("http2: client connection force closed via ClientConn.Close")
	return cc.closeForError(err)
}

// closes the client connection immediately. In-flight requests are interrupted.
func (cc *ClientConn) closeForLostPing() error {
	err := errors.New("http2: client connection lost")
	if f := cc.t.CountError; f != nil {
		f("conn_close_lost_ping")
	}
	return cc.closeForError(err)
}

// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
976var errRequestCanceled = errors.New("net/http: request canceled") 977 978func commaSeparatedTrailers(req *http.Request) (string, error) { 979 keys := make([]string, 0, len(req.Trailer)) 980 for k := range req.Trailer { 981 k = http.CanonicalHeaderKey(k) 982 switch k { 983 case "Transfer-Encoding", "Trailer", "Content-Length": 984 return "", fmt.Errorf("invalid Trailer key %q", k) 985 } 986 keys = append(keys, k) 987 } 988 if len(keys) > 0 { 989 sort.Strings(keys) 990 return strings.Join(keys, ","), nil 991 } 992 return "", nil 993} 994 995func (cc *ClientConn) responseHeaderTimeout() time.Duration { 996 if cc.t.t1 != nil { 997 return cc.t.t1.ResponseHeaderTimeout 998 } 999 // No way to do this (yet?) with just an http2.Transport. Probably 1000 // no need. Request.Cancel this is the new way. We only need to support 1001 // this for compatibility with the old http.Transport fields when 1002 // we're doing transparent http2. 1003 return 0 1004} 1005 1006// checkConnHeaders checks whether req has any invalid connection-level headers. 1007// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. 1008// Certain headers are special-cased as okay but not transmitted later. 
1009func checkConnHeaders(req *http.Request) error { 1010 if v := req.Header.Get("Upgrade"); v != "" { 1011 return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) 1012 } 1013 if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { 1014 return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) 1015 } 1016 if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { 1017 return fmt.Errorf("http2: invalid Connection request header: %q", vv) 1018 } 1019 return nil 1020} 1021 1022// actualContentLength returns a sanitized version of 1023// req.ContentLength, where 0 actually means zero (not unknown) and -1 1024// means unknown. 1025func actualContentLength(req *http.Request) int64 { 1026 if req.Body == nil || req.Body == http.NoBody { 1027 return 0 1028 } 1029 if req.ContentLength != 0 { 1030 return req.ContentLength 1031 } 1032 return -1 1033} 1034 1035func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { 1036 resp, _, err := cc.roundTrip(req) 1037 return resp, err 1038} 1039 1040func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { 1041 ctx := req.Context() 1042 if err := checkConnHeaders(req); err != nil { 1043 return nil, false, err 1044 } 1045 if cc.idleTimer != nil { 1046 cc.idleTimer.Stop() 1047 } 1048 1049 trailers, err := commaSeparatedTrailers(req) 1050 if err != nil { 1051 return nil, false, err 1052 } 1053 hasTrailers := trailers != "" 1054 1055 // Acquire the new-request lock by writing to reqHeaderMu. 1056 // This lock guards the critical section covering allocating a new stream ID 1057 // (requires mu) and creating the stream (requires wmu). 
1058 if cc.reqHeaderMu == nil { 1059 panic("RoundTrip on initialized ClientConn") // for tests 1060 } 1061 select { 1062 case cc.reqHeaderMu <- struct{}{}: 1063 case <-req.Cancel: 1064 return nil, false, errRequestCanceled 1065 case <-ctx.Done(): 1066 return nil, false, ctx.Err() 1067 } 1068 reqHeaderMuNeedsUnlock := true 1069 defer func() { 1070 if reqHeaderMuNeedsUnlock { 1071 <-cc.reqHeaderMu 1072 } 1073 }() 1074 1075 cc.mu.Lock() 1076 if err := cc.awaitOpenSlotForRequest(req); err != nil { 1077 cc.mu.Unlock() 1078 return nil, false, err 1079 } 1080 1081 body := req.Body 1082 contentLen := actualContentLength(req) 1083 hasBody := contentLen != 0 1084 1085 // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? 1086 var requestedGzip bool 1087 if !cc.t.disableCompression() && 1088 req.Header.Get("Accept-Encoding") == "" && 1089 req.Header.Get("Range") == "" && 1090 req.Method != "HEAD" { 1091 // Request gzip only, not deflate. Deflate is ambiguous and 1092 // not as universally supported anyway. 1093 // See: https://zlib.net/zlib_faq.html#faq39 1094 // 1095 // Note that we don't request this for HEAD requests, 1096 // due to a bug in nginx: 1097 // http://trac.nginx.org/nginx/ticket/358 1098 // https://golang.org/issue/5522 1099 // 1100 // We don't request gzip if the request is for a range, since 1101 // auto-decoding a portion of a gzipped document will just fail 1102 // anyway. 
See https://golang.org/issue/8923 1103 requestedGzip = true 1104 } 1105 1106 cs := cc.newStream() 1107 cs.req = req 1108 cs.trace = httptrace.ContextClientTrace(req.Context()) 1109 cs.requestedGzip = requestedGzip 1110 bodyWriter := cc.t.getBodyWriterState(cs, body) 1111 cs.on100 = bodyWriter.on100 1112 cc.mu.Unlock() 1113 1114 // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is 1115 // sent by writeRequestBody below, along with any Trailers, 1116 // again in form HEADERS{1}, CONTINUATION{0,}) 1117 cc.wmu.Lock() 1118 hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) 1119 if err != nil { 1120 cc.wmu.Unlock() 1121 return nil, false, err 1122 } 1123 1124 defer func() { 1125 cc.wmu.Lock() 1126 werr := cc.werr 1127 cc.wmu.Unlock() 1128 if werr != nil { 1129 cc.Close() 1130 } 1131 }() 1132 1133 endStream := !hasBody && !hasTrailers 1134 err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) 1135 cc.wmu.Unlock() 1136 <-cc.reqHeaderMu // release the new-request lock 1137 reqHeaderMuNeedsUnlock = false 1138 traceWroteHeaders(cs.trace) 1139 1140 if err != nil { 1141 if hasBody { 1142 bodyWriter.cancel() 1143 } 1144 cc.forgetStreamID(cs.ID) 1145 // Don't bother sending a RST_STREAM (our write already failed; 1146 // no need to keep writing) 1147 traceWroteRequest(cs.trace, err) 1148 // TODO(dneil): An error occurred while writing the headers. 1149 // Should we return an error indicating that this request can be retried? 
1150 return nil, false, err 1151 } 1152 1153 var respHeaderTimer <-chan time.Time 1154 if hasBody { 1155 bodyWriter.scheduleBodyWrite() 1156 } else { 1157 traceWroteRequest(cs.trace, nil) 1158 if d := cc.responseHeaderTimeout(); d != 0 { 1159 timer := time.NewTimer(d) 1160 defer timer.Stop() 1161 respHeaderTimer = timer.C 1162 } 1163 } 1164 1165 readLoopResCh := cs.resc 1166 bodyWritten := false 1167 1168 handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { 1169 res := re.res 1170 if re.err != nil || res.StatusCode > 299 { 1171 // On error or status code 3xx, 4xx, 5xx, etc abort any 1172 // ongoing write, assuming that the server doesn't care 1173 // about our request body. If the server replied with 1xx or 1174 // 2xx, however, then assume the server DOES potentially 1175 // want our body (e.g. full-duplex streaming: 1176 // golang.org/issue/13444). If it turns out the server 1177 // doesn't, they'll RST_STREAM us soon enough. This is a 1178 // heuristic to avoid adding knobs to Transport. Hopefully 1179 // we can keep it. 
1180 bodyWriter.cancel() 1181 cs.abortRequestBodyWrite(errStopReqBodyWrite) 1182 if hasBody && !bodyWritten { 1183 <-bodyWriter.resc 1184 } 1185 } 1186 if re.err != nil { 1187 cc.forgetStreamID(cs.ID) 1188 return nil, cs.getStartedWrite(), re.err 1189 } 1190 res.Request = req 1191 res.TLS = cc.tlsState 1192 return res, false, nil 1193 } 1194 1195 handleError := func(err error) (*http.Response, bool, error) { 1196 if !hasBody || bodyWritten { 1197 cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) 1198 } else { 1199 bodyWriter.cancel() 1200 cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) 1201 <-bodyWriter.resc 1202 } 1203 cc.forgetStreamID(cs.ID) 1204 return nil, cs.getStartedWrite(), err 1205 } 1206 1207 for { 1208 select { 1209 case re := <-readLoopResCh: 1210 return handleReadLoopResponse(re) 1211 case <-respHeaderTimer: 1212 return handleError(errTimeout) 1213 case <-ctx.Done(): 1214 return handleError(ctx.Err()) 1215 case <-req.Cancel: 1216 return handleError(errRequestCanceled) 1217 case <-cs.peerReset: 1218 // processResetStream already removed the 1219 // stream from the streams map; no need for 1220 // forgetStreamID. 1221 return nil, cs.getStartedWrite(), cs.resetErr 1222 case err := <-bodyWriter.resc: 1223 bodyWritten = true 1224 // Prefer the read loop's response, if available. Issue 16102. 1225 select { 1226 case re := <-readLoopResCh: 1227 return handleReadLoopResponse(re) 1228 default: 1229 } 1230 if err != nil { 1231 cc.forgetStreamID(cs.ID) 1232 return nil, cs.getStartedWrite(), err 1233 } 1234 if d := cc.responseHeaderTimeout(); d != 0 { 1235 timer := time.NewTimer(d) 1236 defer timer.Stop() 1237 respHeaderTimer = timer.C 1238 } 1239 } 1240 } 1241} 1242 1243// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. 1244// Must hold cc.mu. 
// awaitOpenSlotForRequest blocks until this connection has a free stream
// slot (len(streams) < maxConcurrentStreams), the request is canceled, or
// the connection becomes unusable. Caller must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
	var waitingForConn chan struct{}
	var waitingForConnErr error // guarded by cc.mu
	for {
		cc.lastActive = time.Now()
		if cc.closed || !cc.canTakeNewRequestLocked() {
			if waitingForConn != nil {
				close(waitingForConn)
			}
			return errClientConnUnusable
		}
		cc.lastIdle = time.Time{}
		if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
			if waitingForConn != nil {
				// Stop the cancellation-watcher goroutine below.
				close(waitingForConn)
			}
			return nil
		}
		// Unfortunately, we cannot wait on a condition variable and channel at
		// the same time, so instead, we spin up a goroutine to check if the
		// request is canceled while we wait for a slot to open in the connection.
		if waitingForConn == nil {
			waitingForConn = make(chan struct{})
			go func() {
				if err := awaitRequestCancel(req, waitingForConn); err != nil {
					cc.mu.Lock()
					waitingForConnErr = err
					cc.cond.Broadcast()
					cc.mu.Unlock()
				}
			}()
		}
		cc.pendingRequests++
		cc.cond.Wait()
		cc.pendingRequests--
		if waitingForConnErr != nil {
			return waitingForConnErr
		}
	}
}

// writeHeaders writes hdrs as a HEADERS frame followed by zero or more
// CONTINUATION frames, each at most maxFrameSize bytes.
// requires cc.wmu be held
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
	first := true // first frame written (HEADERS is first, then CONTINUATION)
	for len(hdrs) > 0 && cc.werr == nil {
		chunk := hdrs
		if len(chunk) > maxFrameSize {
			chunk = chunk[:maxFrameSize]
		}
		hdrs = hdrs[len(chunk):]
		endHeaders := len(hdrs) == 0
		if first {
			cc.fr.WriteHeaders(HeadersFrameParam{
				StreamID:      streamID,
				BlockFragment: chunk,
				EndStream:     endStream,
				EndHeaders:    endHeaders,
			})
			first = false
		} else {
			cc.fr.WriteContinuation(streamID, endHeaders, chunk)
		}
	}
	// TODO(bradfitz): this Flush could potentially block (as
	// could the WriteHeaders call(s) above), which means they
	// wouldn't respond to Request.Cancel being readable. That's
	// rare, but this should probably be in a goroutine.
	cc.bw.Flush()
	return cc.werr
}

// internal error values; they don't escape to callers
var (
	// abort request body write; don't send cancel
	errStopReqBodyWrite = errors.New("http2: aborting request body write")

	// abort request body write, but send stream reset of cancel.
	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")

	errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
)

// frameScratchBufferLen returns the length of a buffer to use for
// outgoing request bodies to read/write to/from.
//
// It returns max(1, min(peer's advertised max frame size,
// Request.ContentLength+1, 512KB)).
func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
	const max = 512 << 10
	n := int64(maxFrameSize)
	if n > max {
		n = max
	}
	if cl := actualContentLength(cs.req); cl != -1 && cl+1 < n {
		// Add an extra byte past the declared content-length to
		// give the caller's Request.Body io.Reader a chance to
		// give us more bytes than they declared, so we can catch it
		// early.
		n = cl + 1
	}
	if n < 1 {
		return 1
	}
	return int(n) // doesn't truncate; max is 512K
}

// bufPool holds scratch buffers for writeRequestBody.
var bufPool sync.Pool // of *[]byte

// writeRequestBody reads from body and writes it to the stream as DATA
// frames, respecting flow control, then sends END_STREAM either on the
// final DATA frame, via trailers, or with an empty DATA frame.
// bodyCloser is closed exactly once (guarded by cs.stopReqBody).
func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
	cc := cs.cc
	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM

	defer func() {
		traceWroteRequest(cs.trace, err)
		// TODO: write h12Compare test showing whether
		// Request.Body is closed by the Transport,
		// and in multiple cases: server replies <=299 and >299
		// while still writing request body
		var cerr error
		cc.mu.Lock()
		if cs.stopReqBody == nil {
			cs.stopReqBody = errStopReqBodyWrite
			cerr = bodyCloser.Close()
		}
		cc.mu.Unlock()
		if err == nil {
			err = cerr
		}
	}()

	req := cs.req
	hasTrailers := req.Trailer != nil
	remainLen := actualContentLength(req)
	hasContentLen := remainLen != -1

	cc.mu.Lock()
	maxFrameSize := int(cc.maxFrameSize)
	cc.mu.Unlock()

	// Scratch buffer for reading into & writing from.
	scratchLen := cs.frameScratchBufferLen(maxFrameSize)
	var buf []byte
	if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
		// Reuse a pooled buffer that's big enough.
		defer bufPool.Put(bp)
		buf = *bp
	} else {
		buf = make([]byte, scratchLen)
		defer bufPool.Put(&buf)
	}

	var sawEOF bool
	for !sawEOF {
		n, err := body.Read(buf[:len(buf)])
		if hasContentLen {
			remainLen -= int64(n)
			if remainLen == 0 && err == nil {
				// The request body's Content-Length was predeclared and
				// we just finished reading it all, but the underlying io.Reader
				// returned the final chunk with a nil error (which is one of
				// the two valid things a Reader can do at EOF). Because we'd prefer
				// to send the END_STREAM bit early, double-check that we're actually
				// at EOF. Subsequent reads should return (0, EOF) at this point.
				// If either value is different, we return an error in one of two ways below.
				var scratch [1]byte
				var n1 int
				n1, err = body.Read(scratch[:])
				remainLen -= int64(n1)
			}
			if remainLen < 0 {
				err = errReqBodyTooLong
				cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
				return err
			}
		}
		if err == io.EOF {
			sawEOF = true
			err = nil
		} else if err != nil {
			cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
			return err
		}

		remain := buf[:n]
		for len(remain) > 0 && err == nil {
			var allowed int32
			allowed, err = cs.awaitFlowControl(len(remain))
			switch {
			case err == errStopReqBodyWrite:
				return err
			case err == errStopReqBodyWriteAndCancel:
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
				return err
			case err != nil:
				return err
			}
			cc.wmu.Lock()
			data := remain[:allowed]
			remain = remain[allowed:]
			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
			err = cc.fr.WriteData(cs.ID, sentEnd, data)
			if err == nil {
				// TODO(bradfitz): this flush is for latency, not bandwidth.
				// Most requests won't need this. Make this opt-in or
				// opt-out? Use some heuristic on the body type? Nagel-like
				// timers? Based on 'n'? Only last chunk of this for loop,
				// unless flow control tokens are low? For now, always.
				// If we change this, see comment below.
				err = cc.bw.Flush()
			}
			cc.wmu.Unlock()
		}
		if err != nil {
			return err
		}
	}

	if sentEnd {
		// Already sent END_STREAM (which implies we have no
		// trailers) and flushed, because currently all
		// WriteData frames above get a flush. So we're done.
		return nil
	}

	cc.wmu.Lock()
	var trls []byte
	if hasTrailers {
		trls, err = cc.encodeTrailers(req)
		if err != nil {
			cc.wmu.Unlock()
			cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
			cc.forgetStreamID(cs.ID)
			return err
		}
	}
	defer cc.wmu.Unlock()

	// Two ways to send END_STREAM: either with trailers, or
	// with an empty DATA frame.
	if len(trls) > 0 {
		err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
	} else {
		err = cc.fr.WriteData(cs.ID, true, nil)
	}
	if ferr := cc.bw.Flush(); ferr != nil && err == nil {
		err = ferr
	}
	return err
}

// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
// control tokens from the server.
// It returns either the non-zero number of tokens taken or an error
// if the stream is dead.
func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
	cc := cs.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()
	for {
		if cc.closed {
			return 0, errClientConnClosed
		}
		if cs.stopReqBody != nil {
			return 0, cs.stopReqBody
		}
		if err := cs.checkResetOrDone(); err != nil {
			return 0, err
		}
		if a := cs.flow.available(); a > 0 {
			take := a
			if int(take) > maxBytes {
				take = int32(maxBytes) // can't truncate int; take is int32
			}
			if take > int32(cc.maxFrameSize) {
				take = int32(cc.maxFrameSize)
			}
			cs.flow.take(take)
			return take, nil
		}
		cc.cond.Wait()
	}
}

// requires cc.wmu be held.
// encodeHeaders encodes req into cc.hbuf as an HPACK block: pseudo-headers
// first, then regular headers (with connection-specific fields stripped and
// cookies split), plus content-length/accept-encoding/user-agent as needed.
// It does a size-counting pass before encoding so an oversized header list
// never pollutes the hpack encoder state.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
	cc.hbuf.Reset()

	host := req.Host
	if host == "" {
		host = req.URL.Host
	}
	host, err := httpguts.PunycodeHostPort(host)
	if err != nil {
		return nil, err
	}

	var path string
	if req.Method != "CONNECT" {
		path = req.URL.RequestURI()
		if !validPseudoPath(path) {
			orig := path
			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
			if !validPseudoPath(path) {
				if req.URL.Opaque != "" {
					return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
				} else {
					return nil, fmt.Errorf("invalid request :path %q", orig)
				}
			}
		}
	}

	// Check for any invalid headers and return an error before we
	// potentially pollute our hpack state. (We want to be able to
	// continue to reuse the hpack encoder for future requests)
	for k, vv := range req.Header {
		if !httpguts.ValidHeaderFieldName(k) {
			return nil, fmt.Errorf("invalid HTTP header name %q", k)
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
			}
		}
	}

	// enumerateHeaders calls f for every (name, value) pair that would be
	// sent, in order. It is invoked twice: once to count, once to encode.
	enumerateHeaders := func(f func(name, value string)) {
		// 8.1.2.3 Request Pseudo-Header Fields
		// The :path pseudo-header field includes the path and query parts of the
		// target URI (the path-absolute production and optionally a '?' character
		// followed by the query production (see Sections 3.3 and 3.4 of
		// [RFC3986]).
		f(":authority", host)
		m := req.Method
		if m == "" {
			m = http.MethodGet
		}
		f(":method", m)
		if req.Method != "CONNECT" {
			f(":path", path)
			f(":scheme", req.URL.Scheme)
		}
		if trailers != "" {
			f("trailer", trailers)
		}

		var didUA bool
		for k, vv := range req.Header {
			if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
				// Host is :authority, already sent.
				// Content-Length is automatic, set below.
				continue
			} else if asciiEqualFold(k, "connection") ||
				asciiEqualFold(k, "proxy-connection") ||
				asciiEqualFold(k, "transfer-encoding") ||
				asciiEqualFold(k, "upgrade") ||
				asciiEqualFold(k, "keep-alive") {
				// Per 8.1.2.2 Connection-Specific Header
				// Fields, don't send connection-specific
				// fields. We have already checked if any
				// are error-worthy so just ignore the rest.
				continue
			} else if asciiEqualFold(k, "user-agent") {
				// Match Go's http1 behavior: at most one
				// User-Agent. If set to nil or empty string,
				// then omit it. Otherwise if not mentioned,
				// include the default (below).
				didUA = true
				if len(vv) < 1 {
					continue
				}
				vv = vv[:1]
				if vv[0] == "" {
					continue
				}
			} else if asciiEqualFold(k, "cookie") {
				// Per 8.1.2.5 To allow for better compression efficiency, the
				// Cookie header field MAY be split into separate header fields,
				// each with one or more cookie-pairs.
				for _, v := range vv {
					for {
						p := strings.IndexByte(v, ';')
						if p < 0 {
							break
						}
						f("cookie", v[:p])
						p++
						// strip space after semicolon if any.
						for p+1 <= len(v) && v[p] == ' ' {
							p++
						}
						v = v[p:]
					}
					if len(v) > 0 {
						f("cookie", v)
					}
				}
				continue
			}

			for _, v := range vv {
				f(k, v)
			}
		}
		if shouldSendReqContentLength(req.Method, contentLength) {
			f("content-length", strconv.FormatInt(contentLength, 10))
		}
		if addGzipHeader {
			f("accept-encoding", "gzip")
		}
		if !didUA {
			f("user-agent", defaultUserAgent)
		}
	}

	// Do a first pass over the headers counting bytes to ensure
	// we don't exceed cc.peerMaxHeaderListSize. This is done as a
	// separate pass before encoding the headers to prevent
	// modifying the hpack state.
	hlSize := uint64(0)
	enumerateHeaders(func(name, value string) {
		hf := hpack.HeaderField{Name: name, Value: value}
		hlSize += uint64(hf.Size())
	})

	if hlSize > cc.peerMaxHeaderListSize {
		return nil, errRequestHeaderListSize
	}

	trace := httptrace.ContextClientTrace(req.Context())
	traceHeaders := traceHasWroteHeaderField(trace)

	// Header list size is ok. Write the headers.
	enumerateHeaders(func(name, value string) {
		name, ascii := asciiToLower(name)
		if !ascii {
			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
			// field names have to be ASCII characters (just as in HTTP/1.x).
			return
		}
		cc.writeHeader(name, value)
		if traceHeaders {
			traceWroteHeaderField(trace, name, value)
		}
	})

	return cc.hbuf.Bytes(), nil
}

// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
1699func shouldSendReqContentLength(method string, contentLength int64) bool { 1700 if contentLength > 0 { 1701 return true 1702 } 1703 if contentLength < 0 { 1704 return false 1705 } 1706 // For zero bodies, whether we send a content-length depends on the method. 1707 // It also kinda doesn't matter for http2 either way, with END_STREAM. 1708 switch method { 1709 case "POST", "PUT", "PATCH": 1710 return true 1711 default: 1712 return false 1713 } 1714} 1715 1716// requires cc.wmu be held. 1717func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { 1718 cc.hbuf.Reset() 1719 1720 hlSize := uint64(0) 1721 for k, vv := range req.Trailer { 1722 for _, v := range vv { 1723 hf := hpack.HeaderField{Name: k, Value: v} 1724 hlSize += uint64(hf.Size()) 1725 } 1726 } 1727 if hlSize > cc.peerMaxHeaderListSize { 1728 return nil, errRequestHeaderListSize 1729 } 1730 1731 for k, vv := range req.Trailer { 1732 lowKey, ascii := asciiToLower(k) 1733 if !ascii { 1734 // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header 1735 // field names have to be ASCII characters (just as in HTTP/1.x). 1736 continue 1737 } 1738 // Transfer-Encoding, etc.. have already been filtered at the 1739 // start of RoundTrip 1740 for _, v := range vv { 1741 cc.writeHeader(lowKey, v) 1742 } 1743 } 1744 return cc.hbuf.Bytes(), nil 1745} 1746 1747func (cc *ClientConn) writeHeader(name, value string) { 1748 if VerboseLogs { 1749 log.Printf("http2: Transport encoding header %q = %q", name, value) 1750 } 1751 cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) 1752} 1753 1754type resAndError struct { 1755 _ incomparable 1756 res *http.Response 1757 err error 1758} 1759 1760// requires cc.mu be held. 
// newStream allocates the next odd stream ID, registers a new clientStream
// in cc.streams, and seeds its send/receive flow-control windows.
// requires cc.mu be held.
func (cc *ClientConn) newStream() *clientStream {
	cs := &clientStream{
		cc:        cc,
		ID:        cc.nextStreamID,
		resc:      make(chan resAndError, 1),
		peerReset: make(chan struct{}),
		done:      make(chan struct{}),
	}
	cs.flow.add(int32(cc.initialWindowSize))
	cs.flow.setConnFlow(&cc.flow)
	cs.inflow.add(transportDefaultStreamFlow)
	cs.inflow.setConnFlow(&cc.inflow)
	cc.nextStreamID += 2
	cc.streams[cs.ID] = cs
	return cs
}

// forgetStreamID removes the stream from the connection's stream map.
func (cc *ClientConn) forgetStreamID(id uint32) {
	cc.streamByID(id, true)
}

// streamByID returns the stream with the given ID, or nil. If andRemove is
// true it also deletes the stream, closes its done channel, restarts the
// idle timer when this was the last stream, and broadcasts on cc.cond.
func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	cs := cc.streams[id]
	if andRemove && cs != nil && !cc.closed {
		cc.lastActive = time.Now()
		delete(cc.streams, id)
		if len(cc.streams) == 0 && cc.idleTimer != nil {
			cc.idleTimer.Reset(cc.idleTimeout)
			cc.lastIdle = time.Now()
		}
		close(cs.done)
		// Wake up checkResetOrDone via clientStream.awaitFlowControl and
		// wake up RoundTrip if there is a pending request.
		cc.cond.Broadcast()
	}
	return cs
}

// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
	_             incomparable
	cc            *ClientConn
	closeWhenIdle bool
}

// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
	rl := &clientConnReadLoop{cc: cc}
	defer rl.cleanup()
	cc.readerErr = rl.run()
	if ce, ok := cc.readerErr.(ConnectionError); ok {
		// Tell the peer why we're tearing the connection down.
		cc.wmu.Lock()
		cc.fr.WriteGoAway(0, ErrCode(ce), nil)
		cc.wmu.Unlock()
	}
}

// GoAwayError is returned by the Transport when the server closes the
// TCP connection after sending a GOAWAY frame.
type GoAwayError struct {
	LastStreamID uint32
	ErrCode      ErrCode
	DebugData    string
}

// Error implements the error interface.
func (e GoAwayError) Error() string {
	return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
		e.LastStreamID, e.ErrCode, e.DebugData)
}

// isEOFOrNetReadError reports whether err is io.EOF or a net.OpError
// from a read, i.e. the kinds of errors seen when the peer closes the
// connection.
func isEOFOrNetReadError(err error) bool {
	if err == io.EOF {
		return true
	}
	ne, ok := err.(*net.OpError)
	return ok && ne.Op == "read"
}

// cleanup tears the connection down after the read loop exits: it marks the
// conn dead in the pool, fails every remaining stream with the reader error
// (upgraded to a GoAwayError when the server sent GOAWAY), and closes the
// underlying conn. The defers run in reverse order: readerDone is closed
// first, then the pool is notified, then tconn is closed.
func (rl *clientConnReadLoop) cleanup() {
	cc := rl.cc
	defer cc.tconn.Close()
	defer cc.t.connPool().MarkDead(cc)
	defer close(cc.readerDone)

	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	// Close any response bodies if the server closes prematurely.
	// TODO: also do this if we've written the headers but not
	// gotten a response yet.
	err := cc.readerErr
	cc.mu.Lock()
	if cc.goAway != nil && isEOFOrNetReadError(err) {
		err = GoAwayError{
			LastStreamID: cc.goAway.LastStreamID,
			ErrCode:      cc.goAway.ErrCode,
			DebugData:    cc.goAwayDebug,
		}
	} else if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	cc.closed = true
	streams := cc.streams
	cc.streams = nil
	cc.mu.Unlock()
	for _, cs := range streams {
		cs.bufPipe.CloseWithError(err) // no-op if already closed
		select {
		case cs.resc <- resAndError{err: err}:
		default:
		}
		close(cs.done)
	}
	cc.mu.Lock()
	cc.cond.Broadcast()
	cc.mu.Unlock()
}

// countReadFrameError calls Transport.CountError with a string
// representing err.
func (cc *ClientConn) countReadFrameError(err error) {
	f := cc.t.CountError
	if f == nil || err == nil {
		return
	}
	if ce, ok := err.(ConnectionError); ok {
		errCode := ErrCode(ce)
		f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken()))
		return
	}
	if errors.Is(err, io.EOF) {
		f("read_frame_eof")
		return
	}
	if errors.Is(err, io.ErrUnexpectedEOF) {
		f("read_frame_unexpected_eof")
		return
	}
	if errors.Is(err, ErrFrameTooLarge) {
		f("read_frame_too_large")
		return
	}
	f("read_frame_other")
}

// run is the body of the read loop: it reads frames until a connection-level
// error occurs, dispatching each frame to its process* handler. Stream-level
// read errors reset just that stream and the loop continues. A non-nil return
// becomes cc.readerErr.
func (rl *clientConnReadLoop) run() error {
	cc := rl.cc
	rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
	gotReply := false // ever saw a HEADERS reply
	gotSettings := false
	readIdleTimeout := cc.t.ReadIdleTimeout
	var t *time.Timer
	if readIdleTimeout != 0 {
		// Health-check ping fires if no frame arrives within the timeout.
		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
		defer t.Stop()
	}
	for {
		f, err := cc.fr.ReadFrame()
		if t != nil {
			t.Reset(readIdleTimeout)
		}
		if err != nil {
			cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
		}
		if se, ok := err.(StreamError); ok {
			// Stream-level error: reset that one stream and keep reading.
			if cs := cc.streamByID(se.StreamID, false); cs != nil {
				cs.cc.writeStreamReset(cs.ID, se.Code, err)
				cs.cc.forgetStreamID(cs.ID)
				if se.Cause == nil {
					se.Cause = cc.fr.errDetail
				}
				rl.endStreamError(cs, se)
			}
			continue
		} else if err != nil {
			cc.countReadFrameError(err)
			return err
		}
		if VerboseLogs {
			cc.vlogf("http2: Transport received %s", summarizeFrame(f))
		}
		if !gotSettings {
			if _, ok := f.(*SettingsFrame); !ok {
				cc.logf("protocol error: received %T before a SETTINGS frame", f)
				return ConnectionError(ErrCodeProtocol)
			}
			gotSettings = true
		}
		maybeIdle := false // whether frame might transition us to idle

		switch f := f.(type) {
		case *MetaHeadersFrame:
			err = rl.processHeaders(f)
			maybeIdle = true
			gotReply = true
		case *DataFrame:
			err = rl.processData(f)
			maybeIdle = true
		case *GoAwayFrame:
			err = rl.processGoAway(f)
			maybeIdle = true
		case *RSTStreamFrame:
			err = rl.processResetStream(f)
			maybeIdle = true
		case *SettingsFrame:
			err = rl.processSettings(f)
		case *PushPromiseFrame:
			err = rl.processPushPromise(f)
		case *WindowUpdateFrame:
			err = rl.processWindowUpdate(f)
		case *PingFrame:
			err = rl.processPing(f)
		default:
			cc.logf("Transport: unhandled response frame type %T", f)
		}
		if err != nil {
			if VerboseLogs {
				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
			}
			return err
		}
		if rl.closeWhenIdle && gotReply && maybeIdle {
			cc.closeIfIdle()
		}
	}
}

// processHeaders handles a HEADERS (or trailers) frame for a stream,
// building the http.Response and delivering it to the waiting RoundTrip.
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, false)
	if cs == nil {
		// We'd get here if we canceled a request while the
		// server had its response still in flight. So if this
		// was just something we canceled, ignore it.
		return nil
	}
	if f.StreamEnded() {
		// Issue 20521: If the stream has ended, streamByID() causes
		// clientStream.done to be closed, which causes the request's bodyWriter
		// to be closed with an errStreamClosed, which may be received by
		// clientConn.RoundTrip before the result of processing these headers.
		// Deferring stream closure allows the header processing to occur first.
		// clientConn.RoundTrip may still receive the bodyWriter error first, but
		// the fix for issue 16102 prioritises any response.
		//
		// Issue 22413: If there is no request body, we should close the
		// stream before writing to cs.resc so that the stream is closed
		// immediately once RoundTrip returns.
		if cs.req.Body != nil {
			defer cc.forgetStreamID(f.StreamID)
		} else {
			cc.forgetStreamID(f.StreamID)
		}
	}
	if !cs.firstByte {
		if cs.trace != nil {
			// TODO(bradfitz): move first response byte earlier,
			// when we first read the 9 byte header, not waiting
			// until all the HEADERS+CONTINUATION frames have been
			// merged. This works for now.
			traceFirstResponseByte(cs.trace)
		}
		cs.firstByte = true
	}
	if !cs.pastHeaders {
		cs.pastHeaders = true
	} else {
		// Second (or later) HEADERS block on this stream: trailers.
		return rl.processTrailers(cs, f)
	}

	res, err := rl.handleResponse(cs, f)
	if err != nil {
		if _, ok := err.(ConnectionError); ok {
			return err
		}
		// Any other error type is a stream error.
		cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
		cc.forgetStreamID(cs.ID)
		cs.resc <- resAndError{err: err}
		return nil // return nil from process* funcs to keep conn alive
	}
	if res == nil {
		// (nil, nil) special case. See handleResponse docs.
		return nil
	}
	cs.resTrailer = &res.Trailer
	cs.resc <- resAndError{res: res}
	return nil
}

// may return error types nil, or ConnectionError. Any other error value
// is a StreamError of type ErrCodeProtocol. The returned error in that case
// is the detail.
//
// As a special case, handleResponse may return (nil, nil) to skip the
// frame (currently only used for 1xx responses).
// handleResponse converts a decoded HEADERS frame into an *http.Response:
// it validates the :status pseudo-header, builds the Header map (declaring
// trailers as it goes), bounds and traces 1xx informational responses, sets
// ContentLength, and wires up the response body (noBody, a flow-controlled
// transportResponseBody, or a gzipReader when we asked for gzip ourselves).
// See the contract documented on the comment immediately above.
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
	if f.Truncated {
		return nil, errResponseHeaderListSize
	}

	status := f.PseudoValue("status")
	if status == "" {
		return nil, errors.New("malformed response from server: missing status pseudo header")
	}
	statusCode, err := strconv.Atoi(status)
	if err != nil {
		return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
	}

	regularFields := f.RegularFields()
	// strs is a single backing array for all single-value headers below,
	// so the common case (each header appears once) costs one allocation.
	strs := make([]string, len(regularFields))
	header := make(http.Header, len(regularFields))
	res := &http.Response{
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		Header:     header,
		StatusCode: statusCode,
		Status:     status + " " + http.StatusText(statusCode),
	}
	for _, hf := range regularFields {
		key := http.CanonicalHeaderKey(hf.Name)
		if key == "Trailer" {
			// Record declared trailer names with nil values; actual
			// values are filled in later by copyTrailers.
			t := res.Trailer
			if t == nil {
				t = make(http.Header)
				res.Trailer = t
			}
			foreachHeaderElement(hf.Value, func(v string) {
				t[http.CanonicalHeaderKey(v)] = nil
			})
		} else {
			vv := header[key]
			if vv == nil && len(strs) > 0 {
				// More than likely this will be a single-element key.
				// Most headers aren't multi-valued.
				// Set the capacity on strs[0] to 1, so any future append
				// won't extend the slice into the other strings.
				vv, strs = strs[:1:1], strs[1:]
				vv[0] = hf.Value
				header[key] = vv
			} else {
				header[key] = append(vv, hf.Value)
			}
		}
	}

	if statusCode >= 100 && statusCode <= 199 {
		cs.num1xx++
		const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
		if cs.num1xx > max1xxResponses {
			return nil, errors.New("http2: too many 1xx informational responses")
		}
		if fn := cs.get1xxTraceFunc(); fn != nil {
			if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
				return nil, err
			}
		}
		if statusCode == 100 {
			traceGot100Continue(cs.trace)
			if cs.on100 != nil {
				cs.on100() // forces any write delay timer to fire
			}
		}
		cs.pastHeaders = false // do it all again
		return nil, nil
	}

	streamEnded := f.StreamEnded()
	isHead := cs.req.Method == "HEAD"
	if !streamEnded || isHead {
		res.ContentLength = -1
		if clens := res.Header["Content-Length"]; len(clens) == 1 {
			if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
				res.ContentLength = int64(cl)
			} else {
				// TODO: care? unlike http/1, it won't mess up our framing, so it's
				// more safe smuggling-wise to ignore.
			}
		} else if len(clens) > 1 {
			// TODO: care? unlike http/1, it won't mess up our framing, so it's
			// more safe smuggling-wise to ignore.
		}
	}

	if streamEnded || isHead {
		res.Body = noBody
		return res, nil
	}

	cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
	cs.bytesRemain = res.ContentLength
	res.Body = transportResponseBody{cs}
	go cs.awaitRequestCancel(cs.req)

	if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
		// We requested gzip ourselves, so decode transparently and
		// hide the encoding headers, matching net/http behavior.
		res.Header.Del("Content-Encoding")
		res.Header.Del("Content-Length")
		res.ContentLength = -1
		res.Body = &gzipReader{body: res.Body}
		res.Uncompressed = true
	}
	return res, nil
}

// processTrailers handles the second HEADERS frame on a stream, which must
// carry trailers: it must have END_STREAM set, contain no pseudo-header
// fields, and appear at most once. The trailer fields are stashed on the
// stream for copyTrailers to publish when the stream ends.
func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
	if cs.pastTrailers {
		// Too many HEADERS frames for this stream.
		return ConnectionError(ErrCodeProtocol)
	}
	cs.pastTrailers = true
	if !f.StreamEnded() {
		// We expect that any headers for trailers also
		// has END_STREAM.
		return ConnectionError(ErrCodeProtocol)
	}
	if len(f.PseudoFields()) > 0 {
		// No pseudo header fields are defined for trailers.
		// TODO: ConnectionError might be overly harsh? Check.
		return ConnectionError(ErrCodeProtocol)
	}

	trailer := make(http.Header)
	for _, hf := range f.RegularFields() {
		key := http.CanonicalHeaderKey(hf.Name)
		trailer[key] = append(trailer[key], hf.Value)
	}
	cs.trailer = trailer

	rl.endStream(cs)
	return nil
}

// transportResponseBody is the concrete type of Transport.RoundTrip's
// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body.
// On Close it sends RST_STREAM if EOF wasn't already seen.
type transportResponseBody struct {
	cs *clientStream
}

// Read reads response body bytes from the stream's buffered pipe, enforces
// the declared Content-Length (truncating and resetting the stream on
// overrun, returning io.ErrUnexpectedEOF on underrun), and returns consumed
// flow-control tokens to the peer with WINDOW_UPDATE frames when the
// connection- or stream-level window runs low.
func (b transportResponseBody) Read(p []byte) (n int, err error) {
	cs := b.cs
	cc := cs.cc

	if cs.readErr != nil {
		// Sticky error from a previous Read.
		return 0, cs.readErr
	}
	n, err = b.cs.bufPipe.Read(p)
	if cs.bytesRemain != -1 {
		// -1 means unknown length; otherwise police the declared
		// Content-Length in both directions.
		if int64(n) > cs.bytesRemain {
			n = int(cs.bytesRemain)
			if err == nil {
				err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
				cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
			}
			cs.readErr = err
			return int(cs.bytesRemain), err
		}
		cs.bytesRemain -= int64(n)
		if err == io.EOF && cs.bytesRemain > 0 {
			err = io.ErrUnexpectedEOF
			cs.readErr = err
			return n, err
		}
	}
	if n == 0 {
		// No flow control tokens to send back.
		return
	}

	// Decide, under cc.mu, how much window to hand back; the actual
	// frame writes happen afterwards under cc.wmu (lock order: mu
	// before wmu, never held together here).
	cc.mu.Lock()
	var connAdd, streamAdd int32
	// Check the conn-level first, before the stream-level.
	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
		connAdd = transportDefaultConnFlow - v
		cc.inflow.add(connAdd)
	}
	if err == nil { // No need to refresh if the stream is over or failed.
		// Consider any buffered body data (read from the conn but not
		// consumed by the client) when computing flow control for this
		// stream.
		v := int(cs.inflow.available()) + cs.bufPipe.Len()
		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
			streamAdd = int32(transportDefaultStreamFlow - v)
			cs.inflow.add(streamAdd)
		}
	}
	cc.mu.Unlock()

	if connAdd != 0 || streamAdd != 0 {
		cc.wmu.Lock()
		defer cc.wmu.Unlock()
		if connAdd != 0 {
			cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
		}
		if streamAdd != 0 {
			cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
		}
		cc.bw.Flush()
	}
	return
}

var errClosedResponseBody = errors.New("http2: response body closed")

// Close terminates the response body. If the server hasn't already ended
// the stream, it sends RST_STREAM(CANCEL); any unread buffered bytes are
// returned to the connection-level flow-control window. Close always
// returns nil.
func (b transportResponseBody) Close() error {
	cs := b.cs
	cc := cs.cc

	serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
	unread := cs.bufPipe.Len()

	if unread > 0 || !serverSentStreamEnd {
		cc.mu.Lock()
		if !serverSentStreamEnd {
			cs.didReset = true
		}
		// Return connection-level flow control.
		if unread > 0 {
			cc.inflow.add(int32(unread))
		}
		cc.mu.Unlock()

		cc.wmu.Lock()
		if !serverSentStreamEnd {
			cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
		}
		// Return connection-level flow control.
		if unread > 0 {
			cc.fr.WriteWindowUpdate(0, uint32(unread))
		}
		cc.bw.Flush()
		cc.wmu.Unlock()
	}

	cs.bufPipe.BreakWithError(errClosedResponseBody)
	cc.forgetStreamID(cs.ID)
	return nil
}

// processData handles a DATA frame: it validates the frame against protocol
// rules (no DATA before HEADERS, no DATA body on HEAD, stream flow-control
// window not exceeded), refunds flow control for padding and for data on
// already-reset or unknown-but-previously-asked-for streams, and delivers
// the payload to the stream's buffered pipe.
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, f.StreamEnded())
	data := f.Data()
	if cs == nil {
		cc.mu.Lock()
		neverSent := cc.nextStreamID
		cc.mu.Unlock()
		if f.StreamID >= neverSent {
			// We never asked for this.
			cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
			return ConnectionError(ErrCodeProtocol)
		}
		// We probably did ask for this, but canceled. Just ignore it.
		// TODO: be stricter here? only silently ignore things which
		// we canceled, but not things which were closed normally
		// by the peer? Tough without accumulating too much state.

		// But at least return their flow control:
		if f.Length > 0 {
			cc.mu.Lock()
			cc.inflow.add(int32(f.Length))
			cc.mu.Unlock()

			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(f.Length))
			cc.bw.Flush()
			cc.wmu.Unlock()
		}
		return nil
	}
	if !cs.firstByte {
		cc.logf("protocol error: received DATA before a HEADERS frame")
		rl.endStreamError(cs, StreamError{
			StreamID: f.StreamID,
			Code:     ErrCodeProtocol,
		})
		return nil
	}
	if f.Length > 0 {
		if cs.req.Method == "HEAD" && len(data) > 0 {
			cc.logf("protocol error: received DATA on a HEAD request")
			rl.endStreamError(cs, StreamError{
				StreamID: f.StreamID,
				Code:     ErrCodeProtocol,
			})
			return nil
		}
		// Check connection-level flow control.
		cc.mu.Lock()
		if cs.inflow.available() >= int32(f.Length) {
			cs.inflow.take(int32(f.Length))
		} else {
			cc.mu.Unlock()
			return ConnectionError(ErrCodeFlowControl)
		}
		// Return any padded flow control now, since we won't
		// refund it later on body reads.
		var refund int
		if pad := int(f.Length) - len(data); pad > 0 {
			refund += pad
		}
		// Return len(data) now if the stream is already closed,
		// since data will never be read.
		didReset := cs.didReset
		if didReset {
			refund += len(data)
		}
		if refund > 0 {
			cc.inflow.add(int32(refund))
			if !didReset {
				cs.inflow.add(int32(refund))
			}
		}
		cc.mu.Unlock()

		if refund > 0 {
			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(refund))
			if !didReset {
				cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
			}
			cc.bw.Flush()
			cc.wmu.Unlock()
		}

		if len(data) > 0 && !didReset {
			if _, err := cs.bufPipe.Write(data); err != nil {
				rl.endStreamError(cs, err)
				return err
			}
		}
	}

	if f.StreamEnded() {
		rl.endStream(cs)
	}
	return nil
}

// endStream marks cs as cleanly ended (body readers see io.EOF and any
// received trailers are published).
func (rl *clientConnReadLoop) endStream(cs *clientStream) {
	// TODO: check that any declared content-length matches, like
	// server.go's (*stream).endStream method.
	rl.endStreamError(cs, nil)
}

// endStreamError ends cs with err (nil means a clean EOF, in which case
// trailers are copied), closes the stream's pipe, and makes a non-blocking
// attempt to notify any RoundTrip waiting on cs.resc.
func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
	var code func()
	if err == nil {
		err = io.EOF
		code = cs.copyTrailers
	}
	if isConnectionCloseRequest(cs.req) {
		// Honor "Connection: close" by retiring the conn once idle.
		rl.closeWhenIdle = true
	}
	cs.bufPipe.closeWithErrorAndCode(err, code)

	select {
	case cs.resc <- resAndError{err: err}:
	default:
	}
}

// copyTrailers publishes the received trailer fields into the response's
// Trailer map (via the cs.resTrailer pointer set in processHeaders).
func (cs *clientStream) copyTrailers() {
	for k, vv := range cs.trailer {
		t := cs.resTrailer
		if *t == nil {
			*t = make(http.Header)
		}
		(*t)[k] = vv
	}
}

// processGoAway handles a GOAWAY frame: the connection is marked dead in
// the pool (no new requests), the error code is counted/logged, and the
// GOAWAY state is recorded on the ClientConn.
func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
	cc := rl.cc
	cc.t.connPool().MarkDead(cc)
	if f.ErrCode != 0 {
		// TODO: deal with GOAWAY more. particularly the error code
		cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
		if fn := cc.t.CountError; fn != nil {
			fn("recv_goaway_" + f.ErrCode.stringToken())
		}

	}
	cc.setGoAway(f)
	return nil
}

// processSettings applies a SETTINGS frame and, when it isn't an ack,
// writes the required SETTINGS ack back to the server.
func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
	cc := rl.cc
	// Locking both mu and wmu here allows frame encoding to read settings with only wmu held.
	// Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless.
	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	if err := rl.processSettingsNoWrite(f); err != nil {
		return err
	}
	if !f.IsAck() {
		cc.fr.WriteSettingsAck()
		cc.bw.Flush()
	}
	return nil
}

// processSettingsNoWrite applies the settings in f to the ClientConn under
// cc.mu without writing any frames. An unexpected SETTINGS ack, or an
// INITIAL_WINDOW_SIZE above 2^31-1, is a connection error.
func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
	cc := rl.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()

	if f.IsAck() {
		if cc.wantSettingsAck {
			cc.wantSettingsAck = false
			return nil
		}
		return ConnectionError(ErrCodeProtocol)
	}

	var seenMaxConcurrentStreams bool
	err := f.ForeachSetting(func(s Setting) error {
		switch s.ID {
		case SettingMaxFrameSize:
			cc.maxFrameSize = s.Val
		case SettingMaxConcurrentStreams:
			cc.maxConcurrentStreams = s.Val
			seenMaxConcurrentStreams = true
		case SettingMaxHeaderListSize:
			cc.peerMaxHeaderListSize = uint64(s.Val)
		case SettingInitialWindowSize:
			// Values above the maximum flow-control
			// window size of 2^31-1 MUST be treated as a
			// connection error (Section 5.4.1) of type
			// FLOW_CONTROL_ERROR.
			if s.Val > math.MaxInt32 {
				return ConnectionError(ErrCodeFlowControl)
			}

			// Adjust flow control of currently-open
			// frames by the difference of the old initial
			// window size and this one.
			delta := int32(s.Val) - int32(cc.initialWindowSize)
			for _, cs := range cc.streams {
				cs.flow.add(delta)
			}
			cc.cond.Broadcast()

			cc.initialWindowSize = s.Val
		default:
			// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
			cc.vlogf("Unhandled Setting: %v", s)
		}
		return nil
	})
	if err != nil {
		return err
	}

	if !cc.seenSettings {
		if !seenMaxConcurrentStreams {
			// This was the servers initial SETTINGS frame and it
			// didn't contain a MAX_CONCURRENT_STREAMS field so
			// increase the number of concurrent streams this
			// connection can establish to our default.
			cc.maxConcurrentStreams = defaultMaxConcurrentStreams
		}
		cc.seenSettings = true
	}

	return nil
}

// processWindowUpdate credits the connection-level (stream 0) or a
// stream-level flow-control window and wakes any writers blocked on it.
// An increment that would overflow the window is a connection error.
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, false)
	if f.StreamID != 0 && cs == nil {
		return nil
	}

	cc.mu.Lock()
	defer cc.mu.Unlock()

	fl := &cc.flow
	if cs != nil {
		fl = &cs.flow
	}
	if !fl.add(int32(f.Increment)) {
		return ConnectionError(ErrCodeFlowControl)
	}
	cc.cond.Broadcast()
	return nil
}

// processResetStream handles a server-sent RST_STREAM: the stream is
// failed with a StreamError carrying the peer's code, its pipe is closed,
// and blocked writers are woken. A PROTOCOL_ERROR reset additionally marks
// the connection as not reusable.
func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
	cs := rl.cc.streamByID(f.StreamID, true)
	if cs == nil {
		// TODO: return error if server tries to RST_STEAM an idle stream
		return nil
	}
	select {
	case <-cs.peerReset:
		// Already reset.
		// This is the only goroutine
		// which closes this, so there
		// isn't a race.
	default:
		serr := streamError(cs.ID, f.ErrCode)
		if f.ErrCode == ErrCodeProtocol {
			rl.cc.SetDoNotReuse()
			serr.Cause = errFromPeer
		}
		if fn := cs.cc.t.CountError; fn != nil {
			fn("recv_rststream_" + f.ErrCode.stringToken())
		}
		cs.resetErr = serr
		close(cs.peerReset)
		cs.bufPipe.CloseWithError(serr)
		cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
	}
	return nil
}

// Ping sends a PING frame to the server and waits for the ack.
func (cc *ClientConn) Ping(ctx context.Context) error {
	c := make(chan struct{})
	// Generate a random payload
	var p [8]byte
	for {
		if _, err := rand.Read(p[:]); err != nil {
			return err
		}
		cc.mu.Lock()
		// check for dup before insert
		if _, found := cc.pings[p]; !found {
			cc.pings[p] = c
			cc.mu.Unlock()
			break
		}
		cc.mu.Unlock()
	}
	cc.wmu.Lock()
	if err := cc.fr.WritePing(false, p); err != nil {
		cc.wmu.Unlock()
		return err
	}
	if err := cc.bw.Flush(); err != nil {
		cc.wmu.Unlock()
		return err
	}
	cc.wmu.Unlock()
	select {
	case <-c:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-cc.readerDone:
		// connection closed
		return cc.readerErr
	}
}

// processPing completes a pending Ping call when the frame is an ack,
// and otherwise echoes the payload back to the server as a PING ack.
func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
	if f.IsAck() {
		cc := rl.cc
		cc.mu.Lock()
		defer cc.mu.Unlock()
		// If ack, notify listener if any
		if c, ok := cc.pings[f.Data]; ok {
			close(c)
			delete(cc.pings, f.Data)
		}
		return nil
	}
	cc := rl.cc
	cc.wmu.Lock()
	defer cc.wmu.Unlock()
	if err := cc.fr.WritePing(true, f.Data); err != nil {
		return err
	}
	return cc.bw.Flush()
}

// processPushPromise rejects server push: we advertise ENABLE_PUSH=0, so
// any PUSH_PROMISE is a protocol violation.
func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
	// We told the peer we don't want them.
	// Spec says:
	// "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
	// setting of the peer endpoint is set to 0. An endpoint that
	// has set this setting and has received acknowledgement MUST
	// treat the receipt of a PUSH_PROMISE frame as a connection
	// error (Section 5.4.1) of type PROTOCOL_ERROR."
	return ConnectionError(ErrCodeProtocol)
}

// writeStreamReset sends RST_STREAM for streamID with the given code.
// The err argument is currently unused (see TODO below).
func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
	// TODO: map err to more interesting error codes, once the
	// HTTP community comes up with some. But currently for
	// RST_STREAM there's no equivalent to GOAWAY frame's debug
	// data, and the error codes are all pretty vague ("cancel").
	cc.wmu.Lock()
	cc.fr.WriteRSTStream(streamID, code)
	cc.bw.Flush()
	cc.wmu.Unlock()
}

var (
	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
	errRequestHeaderListSize  = errors.New("http2: request header list larger than peer's advertised limit")
)

// logf logs via the owning Transport's logger.
func (cc *ClientConn) logf(format string, args ...interface{}) {
	cc.t.logf(format, args...)
}

// vlogf logs via the owning Transport's verbose logger.
func (cc *ClientConn) vlogf(format string, args ...interface{}) {
	cc.t.vlogf(format, args...)
}

// vlogf logs only when VerboseLogs is enabled.
func (t *Transport) vlogf(format string, args ...interface{}) {
	if VerboseLogs {
		t.logf(format, args...)
	}
}

func (t *Transport) logf(format string, args ...interface{}) {
	log.Printf(format, args...)
}

// noBody is a reusable, always-empty response body.
var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))

// strSliceContains reports whether ss contains s.
func strSliceContains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}

// erringRoundTripper is a RoundTripper that always fails with a fixed error.
type erringRoundTripper struct{ err error }

func (rt erringRoundTripper) RoundTripErr() error                             { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }

// gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read
type gzipReader struct {
	_    incomparable
	body io.ReadCloser // underlying Response.Body
	zr   *gzip.Reader  // lazily-initialized gzip reader
	zerr error         // sticky error
}

func (gz *gzipReader) Read(p []byte) (n int, err error) {
	if gz.zerr != nil {
		return 0, gz.zerr
	}
	if gz.zr == nil {
		gz.zr, err = gzip.NewReader(gz.body)
		if err != nil {
			gz.zerr = err
			return 0, err
		}
	}
	return gz.zr.Read(p)
}

// Close closes the underlying body; the gzip reader itself holds no
// resources that need closing.
func (gz *gzipReader) Close() error {
	return gz.body.Close()
}

// errorReader is an io.Reader that always fails with a fixed error.
type errorReader struct{ err error }

func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }

// bodyWriterState encapsulates various state around the Transport's writing
// of the request body, particularly regarding doing delayed writes of the body
// when the request contains "Expect: 100-continue".
type bodyWriterState struct {
	cs     *clientStream
	timer  *time.Timer   // if non-nil, we're doing a delayed write
	fnonce *sync.Once    // to call fn with
	fn     func()        // the code to run in the goroutine, writing the body
	resc   chan error    // result of fn's execution
	delay  time.Duration // how long we should delay a delayed write for
}

// getBodyWriterState prepares (but does not start) the goroutine that will
// write the request body for cs. For a nil body it returns an empty state.
// When the request carries "Expect: 100-continue" and an expect-continue
// timeout is configured, the write is deferred behind a timer (armed for
// real by scheduleBodyWrite) and guarded by a sync.Once so that the timer
// firing and an early 100-continue (on100) cannot both run fn.
func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
	s.cs = cs
	if body == nil {
		return
	}
	resc := make(chan error, 1)
	s.resc = resc
	s.fn = func() {
		cs.cc.mu.Lock()
		cs.startedWrite = true
		cs.cc.mu.Unlock()
		resc <- cs.writeRequestBody(body, cs.req.Body)
	}
	s.delay = t.expectContinueTimeout()
	if s.delay == 0 ||
		!httpguts.HeaderValuesContainsToken(
			cs.req.Header["Expect"],
			"100-continue") {
		return
	}
	s.fnonce = new(sync.Once)

	// Arm the timer with a very large duration, which we'll
	// intentionally lower later. It has to be large now because
	// we need a handle to it before writing the headers, but the
	// s.delay value is defined to not start until after the
	// request headers were written.
	const hugeDuration = 365 * 24 * time.Hour
	s.timer = time.AfterFunc(hugeDuration, func() {
		s.fnonce.Do(s.fn)
	})
	return
}

// cancel stops a pending delayed body write. If the timer was stopped
// before it fired, a nil result is delivered on resc so a waiter isn't
// left blocked waiting for a write that will never happen.
func (s bodyWriterState) cancel() {
	if s.timer != nil {
		if s.timer.Stop() {
			s.resc <- nil
		}
	}
}

// on100 is invoked when the server sends 100 Continue: it cancels the
// delay timer and starts the body write immediately (at most once, via
// fnonce).
func (s bodyWriterState) on100() {
	if s.timer == nil {
		// If we didn't do a delayed write, ignore the server's
		// bogus 100 continue response.
		return
	}
	s.timer.Stop()
	go func() { s.fnonce.Do(s.fn) }()
}

// scheduleBodyWrite starts writing the body, either immediately (in
// the common case) or after the delay timeout. It should not be
// called until after the headers have been written.
2804func (s bodyWriterState) scheduleBodyWrite() { 2805 if s.timer == nil { 2806 // We're not doing a delayed write (see 2807 // getBodyWriterState), so just start the writing 2808 // goroutine immediately. 2809 go s.fn() 2810 return 2811 } 2812 traceWait100Continue(s.cs.trace) 2813 if s.timer.Stop() { 2814 s.timer.Reset(s.delay) 2815 } 2816} 2817 2818// isConnectionCloseRequest reports whether req should use its own 2819// connection for a single request and then close the connection. 2820func isConnectionCloseRequest(req *http.Request) bool { 2821 return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") 2822} 2823 2824// registerHTTPSProtocol calls Transport.RegisterProtocol but 2825// converting panics into errors. 2826func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { 2827 defer func() { 2828 if e := recover(); e != nil { 2829 err = fmt.Errorf("%v", e) 2830 } 2831 }() 2832 t.RegisterProtocol("https", rt) 2833 return nil 2834} 2835 2836// noDialH2RoundTripper is a RoundTripper which only tries to complete the request 2837// if there's already has a cached connection to the host. 
2838// (The field is exported so it can be accessed via reflect from net/http; tested 2839// by TestNoDialH2RoundTripperType) 2840type noDialH2RoundTripper struct{ *Transport } 2841 2842func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { 2843 res, err := rt.Transport.RoundTrip(req) 2844 if isNoCachedConnError(err) { 2845 return nil, http.ErrSkipAltProtocol 2846 } 2847 return res, err 2848} 2849 2850func (t *Transport) idleConnTimeout() time.Duration { 2851 if t.t1 != nil { 2852 return t.t1.IdleConnTimeout 2853 } 2854 return 0 2855} 2856 2857func traceGetConn(req *http.Request, hostPort string) { 2858 trace := httptrace.ContextClientTrace(req.Context()) 2859 if trace == nil || trace.GetConn == nil { 2860 return 2861 } 2862 trace.GetConn(hostPort) 2863} 2864 2865func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { 2866 trace := httptrace.ContextClientTrace(req.Context()) 2867 if trace == nil || trace.GotConn == nil { 2868 return 2869 } 2870 ci := httptrace.GotConnInfo{Conn: cc.tconn} 2871 ci.Reused = reused 2872 cc.mu.Lock() 2873 ci.WasIdle = len(cc.streams) == 0 && reused 2874 if ci.WasIdle && !cc.lastActive.IsZero() { 2875 ci.IdleTime = time.Now().Sub(cc.lastActive) 2876 } 2877 cc.mu.Unlock() 2878 2879 trace.GotConn(ci) 2880} 2881 2882func traceWroteHeaders(trace *httptrace.ClientTrace) { 2883 if trace != nil && trace.WroteHeaders != nil { 2884 trace.WroteHeaders() 2885 } 2886} 2887 2888func traceGot100Continue(trace *httptrace.ClientTrace) { 2889 if trace != nil && trace.Got100Continue != nil { 2890 trace.Got100Continue() 2891 } 2892} 2893 2894func traceWait100Continue(trace *httptrace.ClientTrace) { 2895 if trace != nil && trace.Wait100Continue != nil { 2896 trace.Wait100Continue() 2897 } 2898} 2899 2900func traceWroteRequest(trace *httptrace.ClientTrace, err error) { 2901 if trace != nil && trace.WroteRequest != nil { 2902 trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) 2903 } 2904} 2905 2906func 
traceFirstResponseByte(trace *httptrace.ClientTrace) { 2907 if trace != nil && trace.GotFirstResponseByte != nil { 2908 trace.GotFirstResponseByte() 2909 } 2910} 2911