1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 *     http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package test
20
21import (
22	"bufio"
23	"bytes"
24	"compress/gzip"
25	"context"
26	"crypto/tls"
27	"errors"
28	"flag"
29	"fmt"
30	"io"
31	"math"
32	"net"
33	"net/http"
34	"os"
35	"reflect"
36	"runtime"
37	"strings"
38	"sync"
39	"sync/atomic"
40	"syscall"
41	"testing"
42	"time"
43
44	"github.com/golang/protobuf/proto"
45	anypb "github.com/golang/protobuf/ptypes/any"
46	"golang.org/x/net/http2"
47	"golang.org/x/net/http2/hpack"
48	spb "google.golang.org/genproto/googleapis/rpc/status"
49	"google.golang.org/grpc"
50	"google.golang.org/grpc/codes"
51	"google.golang.org/grpc/connectivity"
52	"google.golang.org/grpc/credentials"
53	"google.golang.org/grpc/encoding"
54	_ "google.golang.org/grpc/encoding/gzip"
55	"google.golang.org/grpc/health"
56	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
57	healthpb "google.golang.org/grpc/health/grpc_health_v1"
58	"google.golang.org/grpc/internal"
59	"google.golang.org/grpc/internal/channelz"
60	"google.golang.org/grpc/internal/grpcsync"
61	"google.golang.org/grpc/internal/grpctest"
62	"google.golang.org/grpc/internal/stubserver"
63	"google.golang.org/grpc/internal/testutils"
64	"google.golang.org/grpc/internal/transport"
65	"google.golang.org/grpc/keepalive"
66	"google.golang.org/grpc/metadata"
67	"google.golang.org/grpc/peer"
68	"google.golang.org/grpc/resolver"
69	"google.golang.org/grpc/resolver/manual"
70	"google.golang.org/grpc/serviceconfig"
71	"google.golang.org/grpc/stats"
72	"google.golang.org/grpc/status"
73	"google.golang.org/grpc/tap"
74	"google.golang.org/grpc/test/bufconn"
75	testpb "google.golang.org/grpc/test/grpc_testing"
76	"google.golang.org/grpc/testdata"
77)
78
79const defaultHealthService = "grpc.health.v1.Health"
80
81func init() {
82	channelz.TurnOn()
83}
84
85type s struct {
86	grpctest.Tester
87}
88
89func Test(t *testing.T) {
90	grpctest.RunSubTests(t, s{})
91}
92
93var (
94	// For headers:
95	testMetadata = metadata.MD{
96		"key1":     []string{"value1"},
97		"key2":     []string{"value2"},
98		"key3-bin": []string{"binvalue1", string([]byte{1, 2, 3})},
99	}
100	testMetadata2 = metadata.MD{
101		"key1": []string{"value12"},
102		"key2": []string{"value22"},
103	}
104	// For trailers:
105	testTrailerMetadata = metadata.MD{
106		"tkey1":     []string{"trailerValue1"},
107		"tkey2":     []string{"trailerValue2"},
108		"tkey3-bin": []string{"trailerbinvalue1", string([]byte{3, 2, 1})},
109	}
110	testTrailerMetadata2 = metadata.MD{
111		"tkey1": []string{"trailerValue12"},
112		"tkey2": []string{"trailerValue22"},
113	}
114	// capital "Key" is illegal in HTTP/2.
115	malformedHTTP2Metadata = metadata.MD{
116		"Key": []string{"foo"},
117	}
118	testAppUA     = "myApp1/1.0 myApp2/0.9"
119	failAppUA     = "fail-this-RPC"
120	detailedError = status.ErrorProto(&spb.Status{
121		Code:    int32(codes.DataLoss),
122		Message: "error for testing: " + failAppUA,
123		Details: []*anypb.Any{{
124			TypeUrl: "url",
125			Value:   []byte{6, 0, 0, 6, 1, 3},
126		}},
127	})
128)
129
130var raceMode bool // set by race.go in race mode
131
132type testServer struct {
133	testpb.UnimplementedTestServiceServer
134
135	security           string // indicate the authentication protocol used by this server.
136	earlyFail          bool   // whether to error out the execution of a service handler prematurely.
137	setAndSendHeader   bool   // whether to call setHeader and sendHeader.
138	setHeaderOnly      bool   // whether to only call setHeader, not sendHeader.
139	multipleSetTrailer bool   // whether to call setTrailer multiple times.
140	unaryCallSleepTime time.Duration
141}
142
143func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
144	if md, ok := metadata.FromIncomingContext(ctx); ok {
145		// For testing purpose, returns an error if user-agent is failAppUA.
146		// To test that client gets the correct error.
147		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
148			return nil, detailedError
149		}
150		var str []string
151		for _, entry := range md["user-agent"] {
152			str = append(str, "ua", entry)
153		}
154		grpc.SendHeader(ctx, metadata.Pairs(str...))
155	}
156	return new(testpb.Empty), nil
157}
158
159func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) {
160	if size < 0 {
161		return nil, fmt.Errorf("requested a response with invalid length %d", size)
162	}
163	body := make([]byte, size)
164	switch t {
165	case testpb.PayloadType_COMPRESSABLE:
166	case testpb.PayloadType_UNCOMPRESSABLE:
167		return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported")
168	default:
169		return nil, fmt.Errorf("unsupported payload type: %d", t)
170	}
171	return &testpb.Payload{
172		Type: t,
173		Body: body,
174	}, nil
175}
176
177func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
178	md, ok := metadata.FromIncomingContext(ctx)
179	if ok {
180		if _, exists := md[":authority"]; !exists {
181			return nil, status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
182		}
183		if s.setAndSendHeader {
184			if err := grpc.SetHeader(ctx, md); err != nil {
185				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
186			}
187			if err := grpc.SendHeader(ctx, testMetadata2); err != nil {
188				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", testMetadata2, err)
189			}
190		} else if s.setHeaderOnly {
191			if err := grpc.SetHeader(ctx, md); err != nil {
192				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
193			}
194			if err := grpc.SetHeader(ctx, testMetadata2); err != nil {
195				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", testMetadata2, err)
196			}
197		} else {
198			if err := grpc.SendHeader(ctx, md); err != nil {
199				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", md, err)
200			}
201		}
202		if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil {
203			return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata, err)
204		}
205		if s.multipleSetTrailer {
206			if err := grpc.SetTrailer(ctx, testTrailerMetadata2); err != nil {
207				return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata2, err)
208			}
209		}
210	}
211	pr, ok := peer.FromContext(ctx)
212	if !ok {
213		return nil, status.Error(codes.DataLoss, "failed to get peer from ctx")
214	}
215	if pr.Addr == net.Addr(nil) {
216		return nil, status.Error(codes.DataLoss, "failed to get peer address")
217	}
218	if s.security != "" {
219		// Check Auth info
220		var authType, serverName string
221		switch info := pr.AuthInfo.(type) {
222		case credentials.TLSInfo:
223			authType = info.AuthType()
224			serverName = info.State.ServerName
225		default:
226			return nil, status.Error(codes.Unauthenticated, "Unknown AuthInfo type")
227		}
228		if authType != s.security {
229			return nil, status.Errorf(codes.Unauthenticated, "Wrong auth type: got %q, want %q", authType, s.security)
230		}
231		if serverName != "x.test.example.com" {
232			return nil, status.Errorf(codes.Unauthenticated, "Unknown server name %q", serverName)
233		}
234	}
235	// Simulate some service delay.
236	time.Sleep(s.unaryCallSleepTime)
237
238	payload, err := newPayload(in.GetResponseType(), in.GetResponseSize())
239	if err != nil {
240		return nil, err
241	}
242
243	return &testpb.SimpleResponse{
244		Payload: payload,
245	}, nil
246}
247
248func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
249	if md, ok := metadata.FromIncomingContext(stream.Context()); ok {
250		if _, exists := md[":authority"]; !exists {
251			return status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
252		}
253		// For testing purpose, returns an error if user-agent is failAppUA.
254		// To test that client gets the correct error.
255		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
256			return status.Error(codes.DataLoss, "error for testing: "+failAppUA)
257		}
258	}
259	cs := args.GetResponseParameters()
260	for _, c := range cs {
261		if us := c.GetIntervalUs(); us > 0 {
262			time.Sleep(time.Duration(us) * time.Microsecond)
263		}
264
265		payload, err := newPayload(args.GetResponseType(), c.GetSize())
266		if err != nil {
267			return err
268		}
269
270		if err := stream.Send(&testpb.StreamingOutputCallResponse{
271			Payload: payload,
272		}); err != nil {
273			return err
274		}
275	}
276	return nil
277}
278
279func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
280	var sum int
281	for {
282		in, err := stream.Recv()
283		if err == io.EOF {
284			return stream.SendAndClose(&testpb.StreamingInputCallResponse{
285				AggregatedPayloadSize: int32(sum),
286			})
287		}
288		if err != nil {
289			return err
290		}
291		p := in.GetPayload().GetBody()
292		sum += len(p)
293		if s.earlyFail {
294			return status.Error(codes.NotFound, "not found")
295		}
296	}
297}
298
299func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
300	md, ok := metadata.FromIncomingContext(stream.Context())
301	if ok {
302		if s.setAndSendHeader {
303			if err := stream.SetHeader(md); err != nil {
304				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
305			}
306			if err := stream.SendHeader(testMetadata2); err != nil {
307				return status.Errorf(status.Code(err), "%v.SendHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
308			}
309		} else if s.setHeaderOnly {
310			if err := stream.SetHeader(md); err != nil {
311				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
312			}
313			if err := stream.SetHeader(testMetadata2); err != nil {
314				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
315			}
316		} else {
317			if err := stream.SendHeader(md); err != nil {
318				return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil)
319			}
320		}
321		stream.SetTrailer(testTrailerMetadata)
322		if s.multipleSetTrailer {
323			stream.SetTrailer(testTrailerMetadata2)
324		}
325	}
326	for {
327		in, err := stream.Recv()
328		if err == io.EOF {
329			// read done.
330			return nil
331		}
332		if err != nil {
333			// to facilitate testSvrWriteStatusEarlyWrite
334			if status.Code(err) == codes.ResourceExhausted {
335				return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
336			}
337			return err
338		}
339		cs := in.GetResponseParameters()
340		for _, c := range cs {
341			if us := c.GetIntervalUs(); us > 0 {
342				time.Sleep(time.Duration(us) * time.Microsecond)
343			}
344
345			payload, err := newPayload(in.GetResponseType(), c.GetSize())
346			if err != nil {
347				return err
348			}
349
350			if err := stream.Send(&testpb.StreamingOutputCallResponse{
351				Payload: payload,
352			}); err != nil {
353				// to facilitate testSvrWriteStatusEarlyWrite
354				if status.Code(err) == codes.ResourceExhausted {
355					return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
356				}
357				return err
358			}
359		}
360	}
361}
362
363func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error {
364	var msgBuf []*testpb.StreamingOutputCallRequest
365	for {
366		in, err := stream.Recv()
367		if err == io.EOF {
368			// read done.
369			break
370		}
371		if err != nil {
372			return err
373		}
374		msgBuf = append(msgBuf, in)
375	}
376	for _, m := range msgBuf {
377		cs := m.GetResponseParameters()
378		for _, c := range cs {
379			if us := c.GetIntervalUs(); us > 0 {
380				time.Sleep(time.Duration(us) * time.Microsecond)
381			}
382
383			payload, err := newPayload(m.GetResponseType(), c.GetSize())
384			if err != nil {
385				return err
386			}
387
388			if err := stream.Send(&testpb.StreamingOutputCallResponse{
389				Payload: payload,
390			}); err != nil {
391				return err
392			}
393		}
394	}
395	return nil
396}
397
398type env struct {
399	name         string
400	network      string // The type of network such as tcp, unix, etc.
401	security     string // The security protocol such as TLS, SSH, etc.
402	httpHandler  bool   // whether to use the http.Handler ServerTransport; requires TLS
403	balancer     string // One of "round_robin", "pick_first", or "".
404	customDialer func(string, string, time.Duration) (net.Conn, error)
405}
406
407func (e env) runnable() bool {
408	if runtime.GOOS == "windows" && e.network == "unix" {
409		return false
410	}
411	return true
412}
413
414func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) {
415	if e.customDialer != nil {
416		return e.customDialer(e.network, addr, timeout)
417	}
418	return net.DialTimeout(e.network, addr, timeout)
419}
420
421var (
422	tcpClearEnv   = env{name: "tcp-clear-v1-balancer", network: "tcp"}
423	tcpTLSEnv     = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls"}
424	tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "round_robin"}
425	tcpTLSRREnv   = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "round_robin"}
426	handlerEnv    = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "round_robin"}
427	noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls"}
428	allEnv        = []env{tcpClearEnv, tcpTLSEnv, tcpClearRREnv, tcpTLSRREnv, handlerEnv, noBalancerEnv}
429)
430
431var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear', 'tcp-tls', 'unix-clear', 'unix-tls', or 'handler-tls' to only run the tests for that environment. Empty means all.")
432
433func listTestEnv() (envs []env) {
434	if *onlyEnv != "" {
435		for _, e := range allEnv {
436			if e.name == *onlyEnv {
437				if !e.runnable() {
438					panic(fmt.Sprintf("--only_env environment %q does not run on %s", *onlyEnv, runtime.GOOS))
439				}
440				return []env{e}
441			}
442		}
443		panic(fmt.Sprintf("invalid --only_env value %q", *onlyEnv))
444	}
445	for _, e := range allEnv {
446		if e.runnable() {
447			envs = append(envs, e)
448		}
449	}
450	return envs
451}
452
453// test is an end-to-end test. It should be created with the newTest
454// func, modified as needed, and then started with its startServer method.
455// It should be cleaned up with the tearDown method.
456type test struct {
457	// The following are setup in newTest().
458	t      *testing.T
459	e      env
460	ctx    context.Context // valid for life of test, before tearDown
461	cancel context.CancelFunc
462
463	// The following knobs are for the server-side, and should be set after
464	// calling newTest() and before calling startServer().
465
466	// whether or not to expose the server's health via the default health
467	// service implementation.
468	enableHealthServer bool
469	// In almost all cases, one should set the 'enableHealthServer' flag above to
470	// expose the server's health using the default health service
471	// implementation. This should only be used when a non-default health service
472	// implementation is required.
473	healthServer            healthpb.HealthServer
474	maxStream               uint32
475	tapHandle               tap.ServerInHandle
476	maxServerMsgSize        *int
477	maxServerReceiveMsgSize *int
478	maxServerSendMsgSize    *int
479	maxServerHeaderListSize *uint32
480	// Used to test the deprecated API WithCompressor and WithDecompressor.
481	serverCompression           bool
482	unknownHandler              grpc.StreamHandler
483	unaryServerInt              grpc.UnaryServerInterceptor
484	streamServerInt             grpc.StreamServerInterceptor
485	serverInitialWindowSize     int32
486	serverInitialConnWindowSize int32
487	customServerOptions         []grpc.ServerOption
488
489	// The following knobs are for the client-side, and should be set after
490	// calling newTest() and before calling clientConn().
491	maxClientMsgSize        *int
492	maxClientReceiveMsgSize *int
493	maxClientSendMsgSize    *int
494	maxClientHeaderListSize *uint32
495	userAgent               string
496	// Used to test the deprecated API WithCompressor and WithDecompressor.
497	clientCompression bool
498	// Used to test the new compressor registration API UseCompressor.
499	clientUseCompression bool
500	// clientNopCompression is set to create a compressor whose type is not supported.
501	clientNopCompression        bool
502	unaryClientInt              grpc.UnaryClientInterceptor
503	streamClientInt             grpc.StreamClientInterceptor
504	sc                          <-chan grpc.ServiceConfig
505	customCodec                 encoding.Codec
506	clientInitialWindowSize     int32
507	clientInitialConnWindowSize int32
508	perRPCCreds                 credentials.PerRPCCredentials
509	customDialOptions           []grpc.DialOption
510	resolverScheme              string
511
512	// All test dialing is blocking by default. Set this to true if dial
513	// should be non-blocking.
514	nonBlockingDial bool
515
516	// These are are set once startServer is called. The common case is to have
517	// only one testServer.
518	srv     stopper
519	hSrv    healthpb.HealthServer
520	srvAddr string
521
522	// These are are set once startServers is called.
523	srvs     []stopper
524	hSrvs    []healthpb.HealthServer
525	srvAddrs []string
526
527	cc          *grpc.ClientConn // nil until requested via clientConn
528	restoreLogs func()           // nil unless declareLogNoise is used
529}
530
531type stopper interface {
532	Stop()
533	GracefulStop()
534}
535
536func (te *test) tearDown() {
537	if te.cancel != nil {
538		te.cancel()
539		te.cancel = nil
540	}
541
542	if te.cc != nil {
543		te.cc.Close()
544		te.cc = nil
545	}
546
547	if te.restoreLogs != nil {
548		te.restoreLogs()
549		te.restoreLogs = nil
550	}
551
552	if te.srv != nil {
553		te.srv.Stop()
554	}
555	for _, s := range te.srvs {
556		s.Stop()
557	}
558}
559
560// newTest returns a new test using the provided testing.T and
561// environment.  It is returned with default values. Tests should
562// modify it before calling its startServer and clientConn methods.
563func newTest(t *testing.T, e env) *test {
564	te := &test{
565		t:         t,
566		e:         e,
567		maxStream: math.MaxUint32,
568	}
569	te.ctx, te.cancel = context.WithCancel(context.Background())
570	return te
571}
572
573func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener {
574	te.t.Helper()
575	te.t.Logf("Running test in %s environment...", te.e.name)
576	sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)}
577	if te.maxServerMsgSize != nil {
578		sopts = append(sopts, grpc.MaxMsgSize(*te.maxServerMsgSize))
579	}
580	if te.maxServerReceiveMsgSize != nil {
581		sopts = append(sopts, grpc.MaxRecvMsgSize(*te.maxServerReceiveMsgSize))
582	}
583	if te.maxServerSendMsgSize != nil {
584		sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize))
585	}
586	if te.maxServerHeaderListSize != nil {
587		sopts = append(sopts, grpc.MaxHeaderListSize(*te.maxServerHeaderListSize))
588	}
589	if te.tapHandle != nil {
590		sopts = append(sopts, grpc.InTapHandle(te.tapHandle))
591	}
592	if te.serverCompression {
593		sopts = append(sopts,
594			grpc.RPCCompressor(grpc.NewGZIPCompressor()),
595			grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
596		)
597	}
598	if te.unaryServerInt != nil {
599		sopts = append(sopts, grpc.UnaryInterceptor(te.unaryServerInt))
600	}
601	if te.streamServerInt != nil {
602		sopts = append(sopts, grpc.StreamInterceptor(te.streamServerInt))
603	}
604	if te.unknownHandler != nil {
605		sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler))
606	}
607	if te.serverInitialWindowSize > 0 {
608		sopts = append(sopts, grpc.InitialWindowSize(te.serverInitialWindowSize))
609	}
610	if te.serverInitialConnWindowSize > 0 {
611		sopts = append(sopts, grpc.InitialConnWindowSize(te.serverInitialConnWindowSize))
612	}
613	la := "localhost:0"
614	switch te.e.network {
615	case "unix":
616		la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano())
617		syscall.Unlink(la)
618	}
619	lis, err := listen(te.e.network, la)
620	if err != nil {
621		te.t.Fatalf("Failed to listen: %v", err)
622	}
623	if te.e.security == "tls" {
624		creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
625		if err != nil {
626			te.t.Fatalf("Failed to generate credentials %v", err)
627		}
628		sopts = append(sopts, grpc.Creds(creds))
629	}
630	sopts = append(sopts, te.customServerOptions...)
631	s := grpc.NewServer(sopts...)
632	if ts != nil {
633		testpb.RegisterTestServiceServer(s, ts)
634	}
635
636	// Create a new default health server if enableHealthServer is set, or use
637	// the provided one.
638	hs := te.healthServer
639	if te.enableHealthServer {
640		hs = health.NewServer()
641	}
642	if hs != nil {
643		healthgrpc.RegisterHealthServer(s, hs)
644	}
645
646	addr := la
647	switch te.e.network {
648	case "unix":
649	default:
650		_, port, err := net.SplitHostPort(lis.Addr().String())
651		if err != nil {
652			te.t.Fatalf("Failed to parse listener address: %v", err)
653		}
654		addr = "localhost:" + port
655	}
656
657	te.srv = s
658	te.hSrv = hs
659	te.srvAddr = addr
660
661	if te.e.httpHandler {
662		if te.e.security != "tls" {
663			te.t.Fatalf("unsupported environment settings")
664		}
665		cert, err := tls.LoadX509KeyPair(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
666		if err != nil {
667			te.t.Fatal("tls.LoadX509KeyPair(server1.pem, server1.key) failed: ", err)
668		}
669		hs := &http.Server{
670			Handler:   s,
671			TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
672		}
673		if err := http2.ConfigureServer(hs, &http2.Server{MaxConcurrentStreams: te.maxStream}); err != nil {
674			te.t.Fatal("http2.ConfigureServer(_, _) failed: ", err)
675		}
676		te.srv = wrapHS{hs}
677		tlsListener := tls.NewListener(lis, hs.TLSConfig)
678		go hs.Serve(tlsListener)
679		return lis
680	}
681
682	go s.Serve(lis)
683	return lis
684}
685
686type wrapHS struct {
687	s *http.Server
688}
689
690func (w wrapHS) GracefulStop() {
691	w.s.Shutdown(context.Background())
692}
693
694func (w wrapHS) Stop() {
695	w.s.Close()
696}
697
698func (te *test) startServerWithConnControl(ts testpb.TestServiceServer) *listenerWrapper {
699	l := te.listenAndServe(ts, listenWithConnControl)
700	return l.(*listenerWrapper)
701}
702
703// startServer starts a gRPC server exposing the provided TestService
704// implementation. Callers should defer a call to te.tearDown to clean up
705func (te *test) startServer(ts testpb.TestServiceServer) {
706	te.t.Helper()
707	te.listenAndServe(ts, net.Listen)
708}
709
710// startServers starts 'num' gRPC servers exposing the provided TestService.
711func (te *test) startServers(ts testpb.TestServiceServer, num int) {
712	for i := 0; i < num; i++ {
713		te.startServer(ts)
714		te.srvs = append(te.srvs, te.srv.(*grpc.Server))
715		te.hSrvs = append(te.hSrvs, te.hSrv)
716		te.srvAddrs = append(te.srvAddrs, te.srvAddr)
717		te.srv = nil
718		te.hSrv = nil
719		te.srvAddr = ""
720	}
721}
722
723// setHealthServingStatus is a helper function to set the health status.
724func (te *test) setHealthServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
725	hs, ok := te.hSrv.(*health.Server)
726	if !ok {
727		panic(fmt.Sprintf("SetServingStatus(%v, %v) called for health server of type %T", service, status, hs))
728	}
729	hs.SetServingStatus(service, status)
730}
731
732type nopCompressor struct {
733	grpc.Compressor
734}
735
736// NewNopCompressor creates a compressor to test the case that type is not supported.
737func NewNopCompressor() grpc.Compressor {
738	return &nopCompressor{grpc.NewGZIPCompressor()}
739}
740
741func (c *nopCompressor) Type() string {
742	return "nop"
743}
744
745type nopDecompressor struct {
746	grpc.Decompressor
747}
748
749// NewNopDecompressor creates a decompressor to test the case that type is not supported.
750func NewNopDecompressor() grpc.Decompressor {
751	return &nopDecompressor{grpc.NewGZIPDecompressor()}
752}
753
754func (d *nopDecompressor) Type() string {
755	return "nop"
756}
757
758func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) {
759	opts = append(opts, grpc.WithDialer(te.e.dialer), grpc.WithUserAgent(te.userAgent))
760
761	if te.sc != nil {
762		opts = append(opts, grpc.WithServiceConfig(te.sc))
763	}
764
765	if te.clientCompression {
766		opts = append(opts,
767			grpc.WithCompressor(grpc.NewGZIPCompressor()),
768			grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
769		)
770	}
771	if te.clientUseCompression {
772		opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip")))
773	}
774	if te.clientNopCompression {
775		opts = append(opts,
776			grpc.WithCompressor(NewNopCompressor()),
777			grpc.WithDecompressor(NewNopDecompressor()),
778		)
779	}
780	if te.unaryClientInt != nil {
781		opts = append(opts, grpc.WithUnaryInterceptor(te.unaryClientInt))
782	}
783	if te.streamClientInt != nil {
784		opts = append(opts, grpc.WithStreamInterceptor(te.streamClientInt))
785	}
786	if te.maxClientMsgSize != nil {
787		opts = append(opts, grpc.WithMaxMsgSize(*te.maxClientMsgSize))
788	}
789	if te.maxClientReceiveMsgSize != nil {
790		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientReceiveMsgSize)))
791	}
792	if te.maxClientSendMsgSize != nil {
793		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize)))
794	}
795	if te.maxClientHeaderListSize != nil {
796		opts = append(opts, grpc.WithMaxHeaderListSize(*te.maxClientHeaderListSize))
797	}
798	switch te.e.security {
799	case "tls":
800		creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com")
801		if err != nil {
802			te.t.Fatalf("Failed to load credentials: %v", err)
803		}
804		opts = append(opts, grpc.WithTransportCredentials(creds))
805	case "empty":
806		// Don't add any transport creds option.
807	default:
808		opts = append(opts, grpc.WithInsecure())
809	}
810	// TODO(bar) switch balancer case "pick_first".
811	var scheme string
812	if te.resolverScheme == "" {
813		scheme = "passthrough:///"
814	} else {
815		scheme = te.resolverScheme + ":///"
816	}
817	if te.e.balancer != "" {
818		opts = append(opts, grpc.WithBalancerName(te.e.balancer))
819	}
820	if te.clientInitialWindowSize > 0 {
821		opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize))
822	}
823	if te.clientInitialConnWindowSize > 0 {
824		opts = append(opts, grpc.WithInitialConnWindowSize(te.clientInitialConnWindowSize))
825	}
826	if te.perRPCCreds != nil {
827		opts = append(opts, grpc.WithPerRPCCredentials(te.perRPCCreds))
828	}
829	if te.customCodec != nil {
830		opts = append(opts, grpc.WithDefaultCallOptions(grpc.ForceCodec(te.customCodec)))
831	}
832	if !te.nonBlockingDial && te.srvAddr != "" {
833		// Only do a blocking dial if server is up.
834		opts = append(opts, grpc.WithBlock())
835	}
836	if te.srvAddr == "" {
837		te.srvAddr = "client.side.only.test"
838	}
839	opts = append(opts, te.customDialOptions...)
840	return opts, scheme
841}
842
843func (te *test) clientConnWithConnControl() (*grpc.ClientConn, *dialerWrapper) {
844	if te.cc != nil {
845		return te.cc, nil
846	}
847	opts, scheme := te.configDial()
848	dw := &dialerWrapper{}
849	// overwrite the dialer before
850	opts = append(opts, grpc.WithDialer(dw.dialer))
851	var err error
852	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
853	if err != nil {
854		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
855	}
856	return te.cc, dw
857}
858
859func (te *test) clientConn(opts ...grpc.DialOption) *grpc.ClientConn {
860	if te.cc != nil {
861		return te.cc
862	}
863	var scheme string
864	opts, scheme = te.configDial(opts...)
865	var err error
866	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
867	if err != nil {
868		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
869	}
870	return te.cc
871}
872
873func (te *test) declareLogNoise(phrases ...string) {
874	te.restoreLogs = declareLogNoise(te.t, phrases...)
875}
876
877func (te *test) withServerTester(fn func(st *serverTester)) {
878	c, err := te.e.dialer(te.srvAddr, 10*time.Second)
879	if err != nil {
880		te.t.Fatal(err)
881	}
882	defer c.Close()
883	if te.e.security == "tls" {
884		c = tls.Client(c, &tls.Config{
885			InsecureSkipVerify: true,
886			NextProtos:         []string{http2.NextProtoTLS},
887		})
888	}
889	st := newServerTesterFromConn(te.t, c)
890	st.greet()
891	fn(st)
892}
893
894type lazyConn struct {
895	net.Conn
896	beLazy int32
897}
898
899func (l *lazyConn) Write(b []byte) (int, error) {
900	if atomic.LoadInt32(&(l.beLazy)) == 1 {
901		time.Sleep(time.Second)
902	}
903	return l.Conn.Write(b)
904}
905
906func (s) TestContextDeadlineNotIgnored(t *testing.T) {
907	e := noBalancerEnv
908	var lc *lazyConn
909	e.customDialer = func(network, addr string, timeout time.Duration) (net.Conn, error) {
910		conn, err := net.DialTimeout(network, addr, timeout)
911		if err != nil {
912			return nil, err
913		}
914		lc = &lazyConn{Conn: conn}
915		return lc, nil
916	}
917
918	te := newTest(t, e)
919	te.startServer(&testServer{security: e.security})
920	defer te.tearDown()
921
922	cc := te.clientConn()
923	tc := testpb.NewTestServiceClient(cc)
924	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
925	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
926		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
927	}
928	cancel()
929	atomic.StoreInt32(&(lc.beLazy), 1)
930	ctx, cancel = context.WithTimeout(context.Background(), 50*time.Millisecond)
931	defer cancel()
932	t1 := time.Now()
933	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
934		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, context.DeadlineExceeded", err)
935	}
936	if time.Since(t1) > 2*time.Second {
937		t.Fatalf("TestService/EmptyCall(_, _) ran over the deadline")
938	}
939}
940
941func (s) TestTimeoutOnDeadServer(t *testing.T) {
942	for _, e := range listTestEnv() {
943		testTimeoutOnDeadServer(t, e)
944	}
945}
946
947func testTimeoutOnDeadServer(t *testing.T, e env) {
948	te := newTest(t, e)
949	te.userAgent = testAppUA
950	te.declareLogNoise(
951		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
952		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
953		"grpc: addrConn.resetTransport failed to create client transport: connection error",
954	)
955	te.startServer(&testServer{security: e.security})
956	defer te.tearDown()
957
958	cc := te.clientConn()
959	tc := testpb.NewTestServiceClient(cc)
960	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
961	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
962		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
963	}
964	te.srv.Stop()
965	cancel()
966
967	// Wait for the client to notice the connection is gone.
968	ctx, cancel = context.WithTimeout(context.Background(), 500*time.Millisecond)
969	state := cc.GetState()
970	for ; state == connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
971	}
972	cancel()
973	if state == connectivity.Ready {
974		t.Fatalf("Timed out waiting for non-ready state")
975	}
976	ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond)
977	_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
978	cancel()
979	if e.balancer != "" && status.Code(err) != codes.DeadlineExceeded {
980		// If e.balancer == nil, the ac will stop reconnecting because the dialer returns non-temp error,
981		// the error will be an internal error.
982		t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded)
983	}
984	awaitNewConnLogOutput()
985}
986
987func (s) TestServerGracefulStopIdempotent(t *testing.T) {
988	for _, e := range listTestEnv() {
989		if e.name == "handler-tls" {
990			continue
991		}
992		testServerGracefulStopIdempotent(t, e)
993	}
994}
995
996func testServerGracefulStopIdempotent(t *testing.T, e env) {
997	te := newTest(t, e)
998	te.userAgent = testAppUA
999	te.startServer(&testServer{security: e.security})
1000	defer te.tearDown()
1001
1002	for i := 0; i < 3; i++ {
1003		te.srv.GracefulStop()
1004	}
1005}
1006
1007func (s) TestServerGoAway(t *testing.T) {
1008	for _, e := range listTestEnv() {
1009		if e.name == "handler-tls" {
1010			continue
1011		}
1012		testServerGoAway(t, e)
1013	}
1014}
1015
1016func testServerGoAway(t *testing.T, e env) {
1017	te := newTest(t, e)
1018	te.userAgent = testAppUA
1019	te.startServer(&testServer{security: e.security})
1020	defer te.tearDown()
1021
1022	cc := te.clientConn()
1023	tc := testpb.NewTestServiceClient(cc)
1024	// Finish an RPC to make sure the connection is good.
1025	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
1026	defer cancel()
1027	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1028		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
1029	}
1030	ch := make(chan struct{})
1031	go func() {
1032		te.srv.GracefulStop()
1033		close(ch)
1034	}()
1035	// Loop until the server side GoAway signal is propagated to the client.
1036	for {
1037		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
1038		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded {
1039			cancel()
1040			break
1041		}
1042		cancel()
1043	}
1044	// A new RPC should fail.
1045	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
1046	defer cancel()
1047	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal {
1048		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal)
1049	}
1050	<-ch
1051	awaitNewConnLogOutput()
1052}
1053
1054func (s) TestServerGoAwayPendingRPC(t *testing.T) {
1055	for _, e := range listTestEnv() {
1056		if e.name == "handler-tls" {
1057			continue
1058		}
1059		testServerGoAwayPendingRPC(t, e)
1060	}
1061}
1062
1063func testServerGoAwayPendingRPC(t *testing.T, e env) {
1064	te := newTest(t, e)
1065	te.userAgent = testAppUA
1066	te.declareLogNoise(
1067		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1068		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1069		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1070	)
1071	te.startServer(&testServer{security: e.security})
1072	defer te.tearDown()
1073
1074	cc := te.clientConn()
1075	tc := testpb.NewTestServiceClient(cc)
1076	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
1077	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
1078	if err != nil {
1079		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1080	}
1081	// Finish an RPC to make sure the connection is good.
1082	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1083		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
1084	}
1085	ch := make(chan struct{})
1086	go func() {
1087		te.srv.GracefulStop()
1088		close(ch)
1089	}()
1090	// Loop until the server side GoAway signal is propagated to the client.
1091	start := time.Now()
1092	errored := false
1093	for time.Since(start) < time.Second {
1094		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
1095		_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
1096		cancel()
1097		if err != nil {
1098			errored = true
1099			break
1100		}
1101	}
1102	if !errored {
1103		t.Fatalf("GoAway never received by client")
1104	}
1105	respParam := []*testpb.ResponseParameters{{Size: 1}}
1106	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
1107	if err != nil {
1108		t.Fatal(err)
1109	}
1110	req := &testpb.StreamingOutputCallRequest{
1111		ResponseType:       testpb.PayloadType_COMPRESSABLE,
1112		ResponseParameters: respParam,
1113		Payload:            payload,
1114	}
1115	// The existing RPC should be still good to proceed.
1116	if err := stream.Send(req); err != nil {
1117		t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
1118	}
1119	if _, err := stream.Recv(); err != nil {
1120		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
1121	}
1122	// The RPC will run until canceled.
1123	cancel()
1124	<-ch
1125	awaitNewConnLogOutput()
1126}
1127
1128func (s) TestServerMultipleGoAwayPendingRPC(t *testing.T) {
1129	for _, e := range listTestEnv() {
1130		if e.name == "handler-tls" {
1131			continue
1132		}
1133		testServerMultipleGoAwayPendingRPC(t, e)
1134	}
1135}
1136
1137func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) {
1138	te := newTest(t, e)
1139	te.userAgent = testAppUA
1140	te.declareLogNoise(
1141		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1142		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1143		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1144	)
1145	te.startServer(&testServer{security: e.security})
1146	defer te.tearDown()
1147
1148	cc := te.clientConn()
1149	tc := testpb.NewTestServiceClient(cc)
1150	ctx, cancel := context.WithCancel(context.Background())
1151	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
1152	if err != nil {
1153		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1154	}
1155	// Finish an RPC to make sure the connection is good.
1156	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1157		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
1158	}
1159	ch1 := make(chan struct{})
1160	go func() {
1161		te.srv.GracefulStop()
1162		close(ch1)
1163	}()
1164	ch2 := make(chan struct{})
1165	go func() {
1166		te.srv.GracefulStop()
1167		close(ch2)
1168	}()
1169	// Loop until the server side GoAway signal is propagated to the client.
1170	for {
1171		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
1172		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1173			cancel()
1174			break
1175		}
1176		cancel()
1177	}
1178	select {
1179	case <-ch1:
1180		t.Fatal("GracefulStop() terminated early")
1181	case <-ch2:
1182		t.Fatal("GracefulStop() terminated early")
1183	default:
1184	}
1185	respParam := []*testpb.ResponseParameters{
1186		{
1187			Size: 1,
1188		},
1189	}
1190	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
1191	if err != nil {
1192		t.Fatal(err)
1193	}
1194	req := &testpb.StreamingOutputCallRequest{
1195		ResponseType:       testpb.PayloadType_COMPRESSABLE,
1196		ResponseParameters: respParam,
1197		Payload:            payload,
1198	}
1199	// The existing RPC should be still good to proceed.
1200	if err := stream.Send(req); err != nil {
1201		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
1202	}
1203	if _, err := stream.Recv(); err != nil {
1204		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
1205	}
1206	if err := stream.CloseSend(); err != nil {
1207		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
1208	}
1209	<-ch1
1210	<-ch2
1211	cancel()
1212	awaitNewConnLogOutput()
1213}
1214
1215func (s) TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) {
1216	for _, e := range listTestEnv() {
1217		if e.name == "handler-tls" {
1218			continue
1219		}
1220		testConcurrentClientConnCloseAndServerGoAway(t, e)
1221	}
1222}
1223
1224func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) {
1225	te := newTest(t, e)
1226	te.userAgent = testAppUA
1227	te.declareLogNoise(
1228		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1229		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1230		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1231	)
1232	te.startServer(&testServer{security: e.security})
1233	defer te.tearDown()
1234
1235	cc := te.clientConn()
1236	tc := testpb.NewTestServiceClient(cc)
1237	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
1238	defer cancel()
1239	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1240		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
1241	}
1242	ch := make(chan struct{})
1243	// Close ClientConn and Server concurrently.
1244	go func() {
1245		te.srv.GracefulStop()
1246		close(ch)
1247	}()
1248	go func() {
1249		cc.Close()
1250	}()
1251	<-ch
1252}
1253
1254func (s) TestConcurrentServerStopAndGoAway(t *testing.T) {
1255	for _, e := range listTestEnv() {
1256		if e.name == "handler-tls" {
1257			continue
1258		}
1259		testConcurrentServerStopAndGoAway(t, e)
1260	}
1261}
1262
1263func testConcurrentServerStopAndGoAway(t *testing.T, e env) {
1264	te := newTest(t, e)
1265	te.userAgent = testAppUA
1266	te.declareLogNoise(
1267		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1268		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1269		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1270	)
1271	te.startServer(&testServer{security: e.security})
1272	defer te.tearDown()
1273
1274	cc := te.clientConn()
1275	tc := testpb.NewTestServiceClient(cc)
1276	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
1277	defer cancel()
1278	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
1279	if err != nil {
1280		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1281	}
1282
1283	// Finish an RPC to make sure the connection is good.
1284	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1285		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
1286	}
1287
1288	ch := make(chan struct{})
1289	go func() {
1290		te.srv.GracefulStop()
1291		close(ch)
1292	}()
1293	// Loop until the server side GoAway signal is propagated to the client.
1294	for {
1295		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
1296		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1297			cancel()
1298			break
1299		}
1300		cancel()
1301	}
1302	// Stop the server and close all the connections.
1303	te.srv.Stop()
1304	respParam := []*testpb.ResponseParameters{
1305		{
1306			Size: 1,
1307		},
1308	}
1309	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
1310	if err != nil {
1311		t.Fatal(err)
1312	}
1313	req := &testpb.StreamingOutputCallRequest{
1314		ResponseType:       testpb.PayloadType_COMPRESSABLE,
1315		ResponseParameters: respParam,
1316		Payload:            payload,
1317	}
1318	sendStart := time.Now()
1319	for {
1320		if err := stream.Send(req); err == io.EOF {
1321			// stream.Send should eventually send io.EOF
1322			break
1323		} else if err != nil {
1324			// Send should never return a transport-level error.
1325			t.Fatalf("stream.Send(%v) = %v; want <nil or io.EOF>", req, err)
1326		}
1327		if time.Since(sendStart) > 2*time.Second {
1328			t.Fatalf("stream.Send(_) did not return io.EOF after 2s")
1329		}
1330		time.Sleep(time.Millisecond)
1331	}
1332	if _, err := stream.Recv(); err == nil || err == io.EOF {
1333		t.Fatalf("%v.Recv() = _, %v, want _, <non-nil, non-EOF>", stream, err)
1334	}
1335	<-ch
1336	awaitNewConnLogOutput()
1337}
1338
1339func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) {
1340	rpcStartedOnServer := make(chan struct{})
1341	rpcDoneOnClient := make(chan struct{})
1342	ss := &stubserver.StubServer{
1343		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
1344			close(rpcStartedOnServer)
1345			<-rpcDoneOnClient
1346			return status.Error(codes.Internal, "arbitrary status")
1347		},
1348	}
1349	if err := ss.Start(nil); err != nil {
1350		t.Fatalf("Error starting endpoint server: %v", err)
1351	}
1352	defer ss.Stop()
1353
1354	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
1355	defer cancel()
1356	// The precise behavior of this test is subject to raceyness around the timing of when TCP packets
1357	// are sent from client to server, and when we tell the server to stop, so we need to account for both
1358	// of these possible error messages:
1359	// 1) If the call to ss.S.Stop() causes the server's sockets to close while there's still in-fight
1360	//    data from the client on the TCP connection, then the kernel can send an RST back to the client (also
1361	//    see https://stackoverflow.com/questions/33053507/econnreset-in-send-linux-c). Note that while this
1362	//    condition is expected to be rare due to the rpcStartedOnServer synchronization, in theory it should
1363	//    be possible, e.g. if the client sends a BDP ping at the right time.
1364	// 2) If, for example, the call to ss.S.Stop() happens after the RPC headers have been received at the
1365	//    server, then the TCP connection can shutdown gracefully when the server's socket closes.
1366	const possibleConnResetMsg = "connection reset by peer"
1367	const possibleEOFMsg = "error reading from server: EOF"
1368	// Start an RPC. Then, while the RPC is still being accepted or handled at the server, abruptly
1369	// stop the server, killing the connection. The RPC error message should include details about the specific
1370	// connection error that was encountered.
1371	stream, err := ss.Client.FullDuplexCall(ctx)
1372	if err != nil {
1373		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
1374	}
1375	// Block until the RPC has been started on the server. This ensures that the ClientConn will find a healthy
1376	// connection for the RPC to go out on initially, and that the TCP connection will shut down strictly after
1377	// the RPC has been started on it.
1378	<-rpcStartedOnServer
1379	ss.S.Stop()
1380	if _, err := stream.Recv(); err == nil || (!strings.Contains(err.Error(), possibleConnResetMsg) && !strings.Contains(err.Error(), possibleEOFMsg)) {
1381		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q OR %q", stream, err, possibleConnResetMsg, possibleEOFMsg)
1382	}
1383	close(rpcDoneOnClient)
1384}
1385
1386func (s) TestDetailedGoawayErrorOnGracefulClosePropagatesToRPCError(t *testing.T) {
1387	rpcDoneOnClient := make(chan struct{})
1388	ss := &stubserver.StubServer{
1389		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
1390			<-rpcDoneOnClient
1391			return status.Error(codes.Internal, "arbitrary status")
1392		},
1393	}
1394	sopts := []grpc.ServerOption{
1395		grpc.KeepaliveParams(keepalive.ServerParameters{
1396			MaxConnectionAge:      time.Millisecond * 100,
1397			MaxConnectionAgeGrace: time.Millisecond,
1398		}),
1399	}
1400	if err := ss.Start(sopts); err != nil {
1401		t.Fatalf("Error starting endpoint server: %v", err)
1402	}
1403	defer ss.Stop()
1404
1405	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
1406	defer cancel()
1407	stream, err := ss.Client.FullDuplexCall(ctx)
1408	if err != nil {
1409		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
1410	}
1411	const expectedErrorMessageSubstring = "received prior goaway: code: NO_ERROR"
1412	_, err = stream.Recv()
1413	close(rpcDoneOnClient)
1414	if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) {
1415		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q", stream, err, expectedErrorMessageSubstring)
1416	}
1417}
1418
1419func (s) TestDetailedGoawayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) {
1420	// set the min keepalive time very low so that this test can take
1421	// a reasonable amount of time
1422	prev := internal.KeepaliveMinPingTime
1423	internal.KeepaliveMinPingTime = time.Millisecond
1424	defer func() { internal.KeepaliveMinPingTime = prev }()
1425
1426	rpcDoneOnClient := make(chan struct{})
1427	ss := &stubserver.StubServer{
1428		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
1429			<-rpcDoneOnClient
1430			return status.Error(codes.Internal, "arbitrary status")
1431		},
1432	}
1433	sopts := []grpc.ServerOption{
1434		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
1435			MinTime: time.Second * 1000, /* arbitrary, large value */
1436		}),
1437	}
1438	dopts := []grpc.DialOption{
1439		grpc.WithKeepaliveParams(keepalive.ClientParameters{
1440			Time:                time.Millisecond,   /* should trigger "too many pings" error quickly */
1441			Timeout:             time.Second * 1000, /* arbitrary, large value */
1442			PermitWithoutStream: false,
1443		}),
1444	}
1445	if err := ss.Start(sopts, dopts...); err != nil {
1446		t.Fatalf("Error starting endpoint server: %v", err)
1447	}
1448	defer ss.Stop()
1449
1450	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
1451	defer cancel()
1452	stream, err := ss.Client.FullDuplexCall(ctx)
1453	if err != nil {
1454		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
1455	}
1456	const expectedErrorMessageSubstring = `received prior goaway: code: ENHANCE_YOUR_CALM, debug data: "too_many_pings"`
1457	_, err = stream.Recv()
1458	close(rpcDoneOnClient)
1459	if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) {
1460		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: |%v|", stream, err, expectedErrorMessageSubstring)
1461	}
1462}
1463
1464func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) {
1465	for _, e := range listTestEnv() {
1466		if e.name == "handler-tls" {
1467			continue
1468		}
1469		testClientConnCloseAfterGoAwayWithActiveStream(t, e)
1470	}
1471}
1472
1473func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) {
1474	te := newTest(t, e)
1475	te.startServer(&testServer{security: e.security})
1476	defer te.tearDown()
1477	cc := te.clientConn()
1478	tc := testpb.NewTestServiceClient(cc)
1479
1480	ctx, cancel := context.WithCancel(context.Background())
1481	defer cancel()
1482	if _, err := tc.FullDuplexCall(ctx); err != nil {
1483		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
1484	}
1485	done := make(chan struct{})
1486	go func() {
1487		te.srv.GracefulStop()
1488		close(done)
1489	}()
1490	time.Sleep(50 * time.Millisecond)
1491	cc.Close()
1492	timeout := time.NewTimer(time.Second)
1493	select {
1494	case <-done:
1495	case <-timeout.C:
1496		t.Fatalf("Test timed-out.")
1497	}
1498}
1499
1500func (s) TestFailFast(t *testing.T) {
1501	for _, e := range listTestEnv() {
1502		testFailFast(t, e)
1503	}
1504}
1505
1506func testFailFast(t *testing.T, e env) {
1507	te := newTest(t, e)
1508	te.userAgent = testAppUA
1509	te.declareLogNoise(
1510		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1511		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1512		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1513	)
1514	te.startServer(&testServer{security: e.security})
1515	defer te.tearDown()
1516
1517	cc := te.clientConn()
1518	tc := testpb.NewTestServiceClient(cc)
1519	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
1520	defer cancel()
1521	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
1522		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
1523	}
1524	// Stop the server and tear down all the existing connections.
1525	te.srv.Stop()
1526	// Loop until the server teardown is propagated to the client.
1527	for {
1528		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
1529		_, err := tc.EmptyCall(ctx, &testpb.Empty{})
1530		cancel()
1531		if status.Code(err) == codes.Unavailable {
1532			break
1533		}
1534		t.Logf("%v.EmptyCall(_, _) = _, %v", tc, err)
1535		time.Sleep(10 * time.Millisecond)
1536	}
1537	// The client keeps reconnecting and ongoing fail-fast RPCs should fail with code.Unavailable.
1538	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
1539		t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable)
1540	}
1541	if _, err := tc.StreamingInputCall(ctx); status.Code(err) != codes.Unavailable {
1542		t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable)
1543	}
1544
1545	awaitNewConnLogOutput()
1546}
1547
1548func testServiceConfigSetup(t *testing.T, e env) *test {
1549	te := newTest(t, e)
1550	te.userAgent = testAppUA
1551	te.declareLogNoise(
1552		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1553		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1554		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1555		"Failed to dial : context canceled; please retry.",
1556	)
1557	return te
1558}
1559
1560func newBool(b bool) (a *bool) {
1561	return &b
1562}
1563
1564func newInt(b int) (a *int) {
1565	return &b
1566}
1567
1568func newDuration(b time.Duration) (a *time.Duration) {
1569	a = new(time.Duration)
1570	*a = b
1571	return
1572}
1573
1574func (s) TestGetMethodConfig(t *testing.T) {
1575	te := testServiceConfigSetup(t, tcpClearRREnv)
1576	defer te.tearDown()
1577	r := manual.NewBuilderWithScheme("whatever")
1578
1579	te.resolverScheme = r.Scheme()
1580	cc := te.clientConn(grpc.WithResolvers(r))
1581	addrs := []resolver.Address{{Addr: te.srvAddr}}
1582	r.UpdateState(resolver.State{
1583		Addresses: addrs,
1584		ServiceConfig: parseCfg(r, `{
1585    "methodConfig": [
1586        {
1587            "name": [
1588                {
1589                    "service": "grpc.testing.TestService",
1590                    "method": "EmptyCall"
1591                }
1592            ],
1593            "waitForReady": true,
1594            "timeout": ".001s"
1595        },
1596        {
1597            "name": [
1598                {
1599                    "service": "grpc.testing.TestService"
1600                }
1601            ],
1602            "waitForReady": false
1603        }
1604    ]
1605}`)})
1606
1607	tc := testpb.NewTestServiceClient(cc)
1608
1609	// Make sure service config has been processed by grpc.
1610	for {
1611		if cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
1612			break
1613		}
1614		time.Sleep(time.Millisecond)
1615	}
1616
1617	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
1618	defer cancel()
1619	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
1620	var err error
1621	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
1622		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
1623	}
1624
1625	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{
1626    "methodConfig": [
1627        {
1628            "name": [
1629                {
1630                    "service": "grpc.testing.TestService",
1631                    "method": "UnaryCall"
1632                }
1633            ],
1634            "waitForReady": true,
1635            "timeout": ".001s"
1636        },
1637        {
1638            "name": [
1639                {
1640                    "service": "grpc.testing.TestService"
1641                }
1642            ],
1643            "waitForReady": false
1644        }
1645    ]
1646}`)})
1647
1648	// Make sure service config has been processed by grpc.
1649	for {
1650		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && !*mc.WaitForReady {
1651			break
1652		}
1653		time.Sleep(time.Millisecond)
1654	}
1655	// The following RPCs are expected to become fail-fast.
1656	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
1657		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
1658	}
1659}
1660
1661func (s) TestServiceConfigWaitForReady(t *testing.T) {
1662	te := testServiceConfigSetup(t, tcpClearRREnv)
1663	defer te.tearDown()
1664	r := manual.NewBuilderWithScheme("whatever")
1665
1666	// Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds.
1667	te.resolverScheme = r.Scheme()
1668	cc := te.clientConn(grpc.WithResolvers(r))
1669	addrs := []resolver.Address{{Addr: te.srvAddr}}
1670	r.UpdateState(resolver.State{
1671		Addresses: addrs,
1672		ServiceConfig: parseCfg(r, `{
1673    "methodConfig": [
1674        {
1675            "name": [
1676                {
1677                    "service": "grpc.testing.TestService",
1678                    "method": "EmptyCall"
1679                },
1680                {
1681                    "service": "grpc.testing.TestService",
1682                    "method": "FullDuplexCall"
1683                }
1684            ],
1685            "waitForReady": false,
1686            "timeout": ".001s"
1687        }
1688    ]
1689}`)})
1690
1691	tc := testpb.NewTestServiceClient(cc)
1692
1693	// Make sure service config has been processed by grpc.
1694	for {
1695		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").WaitForReady != nil {
1696			break
1697		}
1698		time.Sleep(time.Millisecond)
1699	}
1700	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
1701	defer cancel()
1702	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
1703	var err error
1704	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
1705		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
1706	}
1707	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
1708		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
1709	}
1710
1711	// Generate a service config update.
1712	// Case2:Client API set failfast to be false, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds.
1713	r.UpdateState(resolver.State{
1714		Addresses: addrs,
1715		ServiceConfig: parseCfg(r, `{
1716    "methodConfig": [
1717        {
1718            "name": [
1719                {
1720                    "service": "grpc.testing.TestService",
1721                    "method": "EmptyCall"
1722                },
1723                {
1724                    "service": "grpc.testing.TestService",
1725                    "method": "FullDuplexCall"
1726                }
1727            ],
1728            "waitForReady": true,
1729            "timeout": ".001s"
1730        }
1731    ]
1732}`)})
1733
1734	// Wait for the new service config to take effect.
1735	for {
1736		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && *mc.WaitForReady {
1737			break
1738		}
1739		time.Sleep(time.Millisecond)
1740	}
1741	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
1742	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
1743		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
1744	}
1745	if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded {
1746		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
1747	}
1748}
1749
1750func (s) TestServiceConfigTimeout(t *testing.T) {
1751	te := testServiceConfigSetup(t, tcpClearRREnv)
1752	defer te.tearDown()
1753	r := manual.NewBuilderWithScheme("whatever")
1754
1755	// Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
1756	te.resolverScheme = r.Scheme()
1757	cc := te.clientConn(grpc.WithResolvers(r))
1758	addrs := []resolver.Address{{Addr: te.srvAddr}}
1759	r.UpdateState(resolver.State{
1760		Addresses: addrs,
1761		ServiceConfig: parseCfg(r, `{
1762    "methodConfig": [
1763        {
1764            "name": [
1765                {
1766                    "service": "grpc.testing.TestService",
1767                    "method": "EmptyCall"
1768                },
1769                {
1770                    "service": "grpc.testing.TestService",
1771                    "method": "FullDuplexCall"
1772                }
1773            ],
1774            "waitForReady": true,
1775            "timeout": "3600s"
1776        }
1777    ]
1778}`)})
1779
1780	tc := testpb.NewTestServiceClient(cc)
1781
1782	// Make sure service config has been processed by grpc.
1783	for {
1784		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
1785			break
1786		}
1787		time.Sleep(time.Millisecond)
1788	}
1789
1790	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
1791	var err error
1792	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
1793	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
1794		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
1795	}
1796	cancel()
1797
1798	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
1799	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
1800		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
1801	}
1802	cancel()
1803
1804	// Generate a service config update.
1805	// Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
1806	r.UpdateState(resolver.State{
1807		Addresses: addrs,
1808		ServiceConfig: parseCfg(r, `{
1809    "methodConfig": [
1810        {
1811            "name": [
1812                {
1813                    "service": "grpc.testing.TestService",
1814                    "method": "EmptyCall"
1815                },
1816                {
1817                    "service": "grpc.testing.TestService",
1818                    "method": "FullDuplexCall"
1819                }
1820            ],
1821            "waitForReady": true,
1822            "timeout": ".000000001s"
1823        }
1824    ]
1825}`)})
1826
1827	// Wait for the new service config to take effect.
1828	for {
1829		if mc := cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall"); mc.Timeout != nil && *mc.Timeout == time.Nanosecond {
1830			break
1831		}
1832		time.Sleep(time.Millisecond)
1833	}
1834
1835	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
1836	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
1837		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
1838	}
1839	cancel()
1840
1841	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
1842	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
1843		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
1844	}
1845	cancel()
1846}
1847
1848func (s) TestServiceConfigMaxMsgSize(t *testing.T) {
1849	e := tcpClearRREnv
1850	r := manual.NewBuilderWithScheme("whatever")
1851
1852	// Setting up values and objects shared across all test cases.
1853	const smallSize = 1
1854	const largeSize = 1024
1855	const extraLargeSize = 2048
1856
1857	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
1858	if err != nil {
1859		t.Fatal(err)
1860	}
1861	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
1862	if err != nil {
1863		t.Fatal(err)
1864	}
1865	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
1866	if err != nil {
1867		t.Fatal(err)
1868	}
1869
1870	// Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
1871	te1 := testServiceConfigSetup(t, e)
1872	defer te1.tearDown()
1873
1874	te1.resolverScheme = r.Scheme()
1875	te1.nonBlockingDial = true
1876	te1.startServer(&testServer{security: e.security})
1877	cc1 := te1.clientConn(grpc.WithResolvers(r))
1878
1879	addrs := []resolver.Address{{Addr: te1.srvAddr}}
1880	sc := parseCfg(r, `{
1881    "methodConfig": [
1882        {
1883            "name": [
1884                {
1885                    "service": "grpc.testing.TestService",
1886                    "method": "UnaryCall"
1887                },
1888                {
1889                    "service": "grpc.testing.TestService",
1890                    "method": "FullDuplexCall"
1891                }
1892            ],
1893            "maxRequestMessageBytes": 2048,
1894            "maxResponseMessageBytes": 2048
1895        }
1896    ]
1897}`)
1898	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc})
1899	tc := testpb.NewTestServiceClient(cc1)
1900
1901	req := &testpb.SimpleRequest{
1902		ResponseType: testpb.PayloadType_COMPRESSABLE,
1903		ResponseSize: int32(extraLargeSize),
1904		Payload:      smallPayload,
1905	}
1906
1907	for {
1908		if cc1.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
1909			break
1910		}
1911		time.Sleep(time.Millisecond)
1912	}
1913	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
1914	defer cancel()
1915	// Test for unary RPC recv.
1916	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
1917		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1918	}
1919
1920	// Test for unary RPC send.
1921	req.Payload = extraLargePayload
1922	req.ResponseSize = int32(smallSize)
1923	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
1924		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1925	}
1926
1927	// Test for streaming RPC recv.
1928	respParam := []*testpb.ResponseParameters{
1929		{
1930			Size: int32(extraLargeSize),
1931		},
1932	}
1933	sreq := &testpb.StreamingOutputCallRequest{
1934		ResponseType:       testpb.PayloadType_COMPRESSABLE,
1935		ResponseParameters: respParam,
1936		Payload:            smallPayload,
1937	}
1938	stream, err := tc.FullDuplexCall(te1.ctx)
1939	if err != nil {
1940		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1941	}
1942	if err = stream.Send(sreq); err != nil {
1943		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
1944	}
1945	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
1946		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
1947	}
1948
1949	// Test for streaming RPC send.
1950	respParam[0].Size = int32(smallSize)
1951	sreq.Payload = extraLargePayload
1952	stream, err = tc.FullDuplexCall(te1.ctx)
1953	if err != nil {
1954		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1955	}
1956	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
1957		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
1958	}
1959
1960	// Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
1961	te2 := testServiceConfigSetup(t, e)
1962	te2.resolverScheme = r.Scheme()
1963	te2.nonBlockingDial = true
1964	te2.maxClientReceiveMsgSize = newInt(1024)
1965	te2.maxClientSendMsgSize = newInt(1024)
1966
1967	te2.startServer(&testServer{security: e.security})
1968	defer te2.tearDown()
1969	cc2 := te2.clientConn(grpc.WithResolvers(r))
1970	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc})
1971	tc = testpb.NewTestServiceClient(cc2)
1972
1973	for {
1974		if cc2.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
1975			break
1976		}
1977		time.Sleep(time.Millisecond)
1978	}
1979
1980	// Test for unary RPC recv.
1981	req.Payload = smallPayload
1982	req.ResponseSize = int32(largeSize)
1983
1984	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
1985		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1986	}
1987
1988	// Test for unary RPC send.
1989	req.Payload = largePayload
1990	req.ResponseSize = int32(smallSize)
1991	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
1992		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1993	}
1994
1995	// Test for streaming RPC recv.
1996	stream, err = tc.FullDuplexCall(te2.ctx)
1997	respParam[0].Size = int32(largeSize)
1998	sreq.Payload = smallPayload
1999	if err != nil {
2000		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2001	}
2002	if err = stream.Send(sreq); err != nil {
2003		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
2004	}
2005	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
2006		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
2007	}
2008
2009	// Test for streaming RPC send.
2010	respParam[0].Size = int32(smallSize)
2011	sreq.Payload = largePayload
2012	stream, err = tc.FullDuplexCall(te2.ctx)
2013	if err != nil {
2014		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2015	}
2016	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
2017		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
2018	}
2019
2020	// Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
2021	te3 := testServiceConfigSetup(t, e)
2022	te3.resolverScheme = r.Scheme()
2023	te3.nonBlockingDial = true
2024	te3.maxClientReceiveMsgSize = newInt(4096)
2025	te3.maxClientSendMsgSize = newInt(4096)
2026
2027	te3.startServer(&testServer{security: e.security})
2028	defer te3.tearDown()
2029
2030	cc3 := te3.clientConn(grpc.WithResolvers(r))
2031	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc})
2032	tc = testpb.NewTestServiceClient(cc3)
2033
2034	for {
2035		if cc3.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
2036			break
2037		}
2038		time.Sleep(time.Millisecond)
2039	}
2040
2041	// Test for unary RPC recv.
2042	req.Payload = smallPayload
2043	req.ResponseSize = int32(largeSize)
2044
2045	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err != nil {
2046		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
2047	}
2048
2049	req.ResponseSize = int32(extraLargeSize)
2050	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
2051		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
2052	}
2053
2054	// Test for unary RPC send.
2055	req.Payload = largePayload
2056	req.ResponseSize = int32(smallSize)
2057	if _, err := tc.UnaryCall(ctx, req); err != nil {
2058		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
2059	}
2060
2061	req.Payload = extraLargePayload
2062	if _, err = tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
2063		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
2064	}
2065
2066	// Test for streaming RPC recv.
2067	stream, err = tc.FullDuplexCall(te3.ctx)
2068	if err != nil {
2069		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2070	}
2071	respParam[0].Size = int32(largeSize)
2072	sreq.Payload = smallPayload
2073
2074	if err = stream.Send(sreq); err != nil {
2075		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
2076	}
2077	if _, err = stream.Recv(); err != nil {
2078		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
2079	}
2080
2081	respParam[0].Size = int32(extraLargeSize)
2082
2083	if err = stream.Send(sreq); err != nil {
2084		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
2085	}
2086	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
2087		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
2088	}
2089
2090	// Test for streaming RPC send.
2091	respParam[0].Size = int32(smallSize)
2092	sreq.Payload = largePayload
2093	stream, err = tc.FullDuplexCall(te3.ctx)
2094	if err != nil {
2095		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2096	}
2097	if err := stream.Send(sreq); err != nil {
2098		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
2099	}
2100	sreq.Payload = extraLargePayload
2101	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
2102		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
2103	}
2104}
2105
2106// Reading from a streaming RPC may fail with context canceled if timeout was
2107// set by service config (https://github.com/grpc/grpc-go/issues/1818). This
2108// test makes sure read from streaming RPC doesn't fail in this case.
2109func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) {
2110	te := testServiceConfigSetup(t, tcpClearRREnv)
2111	te.startServer(&testServer{security: tcpClearRREnv.security})
2112	defer te.tearDown()
2113	r := manual.NewBuilderWithScheme("whatever")
2114
2115	te.resolverScheme = r.Scheme()
2116	te.nonBlockingDial = true
2117	cc := te.clientConn(grpc.WithResolvers(r))
2118	tc := testpb.NewTestServiceClient(cc)
2119
2120	r.UpdateState(resolver.State{
2121		Addresses: []resolver.Address{{Addr: te.srvAddr}},
2122		ServiceConfig: parseCfg(r, `{
2123	    "methodConfig": [
2124	        {
2125	            "name": [
2126	                {
2127	                    "service": "grpc.testing.TestService",
2128	                    "method": "FullDuplexCall"
2129	                }
2130	            ],
2131	            "waitForReady": true,
2132	            "timeout": "10s"
2133	        }
2134	    ]
2135	}`)})
2136	// Make sure service config has been processed by grpc.
2137	for {
2138		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
2139			break
2140		}
2141		time.Sleep(time.Millisecond)
2142	}
2143
2144	ctx, cancel := context.WithCancel(context.Background())
2145	defer cancel()
2146	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
2147	if err != nil {
2148		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
2149	}
2150
2151	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 0)
2152	if err != nil {
2153		t.Fatalf("failed to newPayload: %v", err)
2154	}
2155	req := &testpb.StreamingOutputCallRequest{
2156		ResponseType:       testpb.PayloadType_COMPRESSABLE,
2157		ResponseParameters: []*testpb.ResponseParameters{{Size: 0}},
2158		Payload:            payload,
2159	}
2160	if err := stream.Send(req); err != nil {
2161		t.Fatalf("stream.Send(%v) = %v, want <nil>", req, err)
2162	}
2163	stream.CloseSend()
2164	time.Sleep(time.Second)
2165	// Sleep 1 second before recv to make sure the final status is received
2166	// before the recv.
2167	if _, err := stream.Recv(); err != nil {
2168		t.Fatalf("stream.Recv = _, %v, want _, <nil>", err)
2169	}
2170	// Keep reading to drain the stream.
2171	for {
2172		if _, err := stream.Recv(); err != nil {
2173			break
2174		}
2175	}
2176}
2177
2178func (s) TestPreloaderClientSend(t *testing.T) {
2179	for _, e := range listTestEnv() {
2180		testPreloaderClientSend(t, e)
2181	}
2182}
2183
2184func testPreloaderClientSend(t *testing.T, e env) {
2185	te := newTest(t, e)
2186	te.userAgent = testAppUA
2187	te.declareLogNoise(
2188		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
2189		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
2190		"grpc: addrConn.resetTransport failed to create client transport: connection error",
2191		"Failed to dial : context canceled; please retry.",
2192	)
2193	te.startServer(&testServer{security: e.security})
2194
2195	defer te.tearDown()
2196	tc := testpb.NewTestServiceClient(te.clientConn())
2197
2198	// Test for streaming RPC recv.
2199	// Set context for send with proper RPC Information
2200	stream, err := tc.FullDuplexCall(te.ctx, grpc.UseCompressor("gzip"))
2201	if err != nil {
2202		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2203	}
2204	var index int
2205	for index < len(reqSizes) {
2206		respParam := []*testpb.ResponseParameters{
2207			{
2208				Size: int32(respSizes[index]),
2209			},
2210		}
2211
2212		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
2213		if err != nil {
2214			t.Fatal(err)
2215		}
2216
2217		req := &testpb.StreamingOutputCallRequest{
2218			ResponseType:       testpb.PayloadType_COMPRESSABLE,
2219			ResponseParameters: respParam,
2220			Payload:            payload,
2221		}
2222		preparedMsg := &grpc.PreparedMsg{}
2223		err = preparedMsg.Encode(stream, req)
2224		if err != nil {
2225			t.Fatalf("PrepareMsg failed for size %d : %v", reqSizes[index], err)
2226		}
2227		if err := stream.SendMsg(preparedMsg); err != nil {
2228			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
2229		}
2230		reply, err := stream.Recv()
2231		if err != nil {
2232			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
2233		}
2234		pt := reply.GetPayload().GetType()
2235		if pt != testpb.PayloadType_COMPRESSABLE {
2236			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
2237		}
2238		size := len(reply.GetPayload().GetBody())
2239		if size != int(respSizes[index]) {
2240			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
2241		}
2242		index++
2243	}
2244	if err := stream.CloseSend(); err != nil {
2245		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
2246	}
2247	if _, err := stream.Recv(); err != io.EOF {
2248		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
2249	}
2250}
2251
2252func (s) TestPreloaderSenderSend(t *testing.T) {
2253	ss := &stubserver.StubServer{
2254		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
2255			for i := 0; i < 10; i++ {
2256				preparedMsg := &grpc.PreparedMsg{}
2257				err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{
2258					Payload: &testpb.Payload{
2259						Body: []byte{'0' + uint8(i)},
2260					},
2261				})
2262				if err != nil {
2263					return err
2264				}
2265				stream.SendMsg(preparedMsg)
2266			}
2267			return nil
2268		},
2269	}
2270	if err := ss.Start(nil); err != nil {
2271		t.Fatalf("Error starting endpoint server: %v", err)
2272	}
2273	defer ss.Stop()
2274
2275	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
2276	defer cancel()
2277
2278	stream, err := ss.Client.FullDuplexCall(ctx)
2279	if err != nil {
2280		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
2281	}
2282
2283	var ngot int
2284	var buf bytes.Buffer
2285	for {
2286		reply, err := stream.Recv()
2287		if err == io.EOF {
2288			break
2289		}
2290		if err != nil {
2291			t.Fatal(err)
2292		}
2293		ngot++
2294		if buf.Len() > 0 {
2295			buf.WriteByte(',')
2296		}
2297		buf.Write(reply.GetPayload().GetBody())
2298	}
2299	if want := 10; ngot != want {
2300		t.Errorf("Got %d replies, want %d", ngot, want)
2301	}
2302	if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
2303		t.Errorf("Got replies %q; want %q", got, want)
2304	}
2305}
2306
2307func (s) TestMaxMsgSizeClientDefault(t *testing.T) {
2308	for _, e := range listTestEnv() {
2309		testMaxMsgSizeClientDefault(t, e)
2310	}
2311}
2312
2313func testMaxMsgSizeClientDefault(t *testing.T, e env) {
2314	te := newTest(t, e)
2315	te.userAgent = testAppUA
2316	te.declareLogNoise(
2317		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
2318		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
2319		"grpc: addrConn.resetTransport failed to create client transport: connection error",
2320		"Failed to dial : context canceled; please retry.",
2321	)
2322	te.startServer(&testServer{security: e.security})
2323
2324	defer te.tearDown()
2325	tc := testpb.NewTestServiceClient(te.clientConn())
2326
2327	const smallSize = 1
2328	const largeSize = 4 * 1024 * 1024
2329	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
2330	if err != nil {
2331		t.Fatal(err)
2332	}
2333	req := &testpb.SimpleRequest{
2334		ResponseType: testpb.PayloadType_COMPRESSABLE,
2335		ResponseSize: int32(largeSize),
2336		Payload:      smallPayload,
2337	}
2338
2339	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
2340	defer cancel()
2341	// Test for unary RPC recv.
2342	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
2343		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
2344	}
2345
2346	respParam := []*testpb.ResponseParameters{
2347		{
2348			Size: int32(largeSize),
2349		},
2350	}
2351	sreq := &testpb.StreamingOutputCallRequest{
2352		ResponseType:       testpb.PayloadType_COMPRESSABLE,
2353		ResponseParameters: respParam,
2354		Payload:            smallPayload,
2355	}
2356
2357	// Test for streaming RPC recv.
2358	stream, err := tc.FullDuplexCall(te.ctx)
2359	if err != nil {
2360		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2361	}
2362	if err := stream.Send(sreq); err != nil {
2363		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
2364	}
2365	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
2366		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
2367	}
2368}
2369
2370func (s) TestMaxMsgSizeClientAPI(t *testing.T) {
2371	for _, e := range listTestEnv() {
2372		testMaxMsgSizeClientAPI(t, e)
2373	}
2374}
2375
2376func testMaxMsgSizeClientAPI(t *testing.T, e env) {
2377	te := newTest(t, e)
2378	te.userAgent = testAppUA
2379	// To avoid error on server side.
2380	te.maxServerSendMsgSize = newInt(5 * 1024 * 1024)
2381	te.maxClientReceiveMsgSize = newInt(1024)
2382	te.maxClientSendMsgSize = newInt(1024)
2383	te.declareLogNoise(
2384		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
2385		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
2386		"grpc: addrConn.resetTransport failed to create client transport: connection error",
2387		"Failed to dial : context canceled; please retry.",
2388	)
2389	te.startServer(&testServer{security: e.security})
2390
2391	defer te.tearDown()
2392	tc := testpb.NewTestServiceClient(te.clientConn())
2393
2394	const smallSize = 1
2395	const largeSize = 1024
2396	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
2397	if err != nil {
2398		t.Fatal(err)
2399	}
2400
2401	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
2402	if err != nil {
2403		t.Fatal(err)
2404	}
2405	req := &testpb.SimpleRequest{
2406		ResponseType: testpb.PayloadType_COMPRESSABLE,
2407		ResponseSize: int32(largeSize),
2408		Payload:      smallPayload,
2409	}
2410
2411	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
2412	defer cancel()
2413	// Test for unary RPC recv.
2414	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
2415		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
2416	}
2417
2418	// Test for unary RPC send.
2419	req.Payload = largePayload
2420	req.ResponseSize = int32(smallSize)
2421	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
2422		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
2423	}
2424
2425	respParam := []*testpb.ResponseParameters{
2426		{
2427			Size: int32(largeSize),
2428		},
2429	}
2430	sreq := &testpb.StreamingOutputCallRequest{
2431		ResponseType:       testpb.PayloadType_COMPRESSABLE,
2432		ResponseParameters: respParam,
2433		Payload:            smallPayload,
2434	}
2435
2436	// Test for streaming RPC recv.
2437	stream, err := tc.FullDuplexCall(te.ctx)
2438	if err != nil {
2439		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2440	}
2441	if err := stream.Send(sreq); err != nil {
2442		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
2443	}
2444	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
2445		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
2446	}
2447
2448	// Test for streaming RPC send.
2449	respParam[0].Size = int32(smallSize)
2450	sreq.Payload = largePayload
2451	stream, err = tc.FullDuplexCall(te.ctx)
2452	if err != nil {
2453		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2454	}
2455	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
2456		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
2457	}
2458}
2459
2460func (s) TestMaxMsgSizeServerAPI(t *testing.T) {
2461	for _, e := range listTestEnv() {
2462		testMaxMsgSizeServerAPI(t, e)
2463	}
2464}
2465
2466func testMaxMsgSizeServerAPI(t *testing.T, e env) {
2467	te := newTest(t, e)
2468	te.userAgent = testAppUA
2469	te.maxServerReceiveMsgSize = newInt(1024)
2470	te.maxServerSendMsgSize = newInt(1024)
2471	te.declareLogNoise(
2472		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
2473		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
2474		"grpc: addrConn.resetTransport failed to create client transport: connection error",
2475		"Failed to dial : context canceled; please retry.",
2476	)
2477	te.startServer(&testServer{security: e.security})
2478
2479	defer te.tearDown()
2480	tc := testpb.NewTestServiceClient(te.clientConn())
2481
2482	const smallSize = 1
2483	const largeSize = 1024
2484	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
2485	if err != nil {
2486		t.Fatal(err)
2487	}
2488
2489	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
2490	if err != nil {
2491		t.Fatal(err)
2492	}
2493	req := &testpb.SimpleRequest{
2494		ResponseType: testpb.PayloadType_COMPRESSABLE,
2495		ResponseSize: int32(largeSize),
2496		Payload:      smallPayload,
2497	}
2498
2499	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
2500	defer cancel()
2501	// Test for unary RPC send.
2502	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
2503		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
2504	}
2505
2506	// Test for unary RPC recv.
2507	req.Payload = largePayload
2508	req.ResponseSize = int32(smallSize)
2509	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
2510		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
2511	}
2512
2513	respParam := []*testpb.ResponseParameters{
2514		{
2515			Size: int32(largeSize),
2516		},
2517	}
2518	sreq := &testpb.StreamingOutputCallRequest{
2519		ResponseType:       testpb.PayloadType_COMPRESSABLE,
2520		ResponseParameters: respParam,
2521		Payload:            smallPayload,
2522	}
2523
2524	// Test for streaming RPC send.
2525	stream, err := tc.FullDuplexCall(te.ctx)
2526	if err != nil {
2527		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2528	}
2529	if err := stream.Send(sreq); err != nil {
2530		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
2531	}
2532	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
2533		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
2534	}
2535
2536	// Test for streaming RPC recv.
2537	respParam[0].Size = int32(smallSize)
2538	sreq.Payload = largePayload
2539	stream, err = tc.FullDuplexCall(te.ctx)
2540	if err != nil {
2541		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2542	}
2543	if err := stream.Send(sreq); err != nil {
2544		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
2545	}
2546	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
2547		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
2548	}
2549}
2550
2551func (s) TestTap(t *testing.T) {
2552	for _, e := range listTestEnv() {
2553		if e.name == "handler-tls" {
2554			continue
2555		}
2556		testTap(t, e)
2557	}
2558}
2559
2560type myTap struct {
2561	cnt int
2562}
2563
2564func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
2565	if info != nil {
2566		switch info.FullMethodName {
2567		case "/grpc.testing.TestService/EmptyCall":
2568			t.cnt++
2569		case "/grpc.testing.TestService/UnaryCall":
2570			return nil, fmt.Errorf("tap error")
2571		case "/grpc.testing.TestService/FullDuplexCall":
2572			return nil, status.Errorf(codes.FailedPrecondition, "test custom error")
2573		}
2574	}
2575	return ctx, nil
2576}
2577
2578func testTap(t *testing.T, e env) {
2579	te := newTest(t, e)
2580	te.userAgent = testAppUA
2581	ttap := &myTap{}
2582	te.tapHandle = ttap.handle
2583	te.declareLogNoise(
2584		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
2585		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
2586		"grpc: addrConn.resetTransport failed to create client transport: connection error",
2587	)
2588	te.startServer(&testServer{security: e.security})
2589	defer te.tearDown()
2590
2591	cc := te.clientConn()
2592	tc := testpb.NewTestServiceClient(cc)
2593	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
2594	defer cancel()
2595	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
2596		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
2597	}
2598	if ttap.cnt != 1 {
2599		t.Fatalf("Get the count in ttap %d, want 1", ttap.cnt)
2600	}
2601
2602	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31)
2603	if err != nil {
2604		t.Fatal(err)
2605	}
2606
2607	req := &testpb.SimpleRequest{
2608		ResponseType: testpb.PayloadType_COMPRESSABLE,
2609		ResponseSize: 45,
2610		Payload:      payload,
2611	}
2612	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.PermissionDenied {
2613		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.PermissionDenied)
2614	}
2615	str, err := tc.FullDuplexCall(ctx)
2616	if err != nil {
2617		t.Fatalf("Unexpected error creating stream: %v", err)
2618	}
2619	if _, err := str.Recv(); status.Code(err) != codes.FailedPrecondition {
2620		t.Fatalf("FullDuplexCall Recv() = _, %v, want _, %s", err, codes.FailedPrecondition)
2621	}
2622}
2623
2624// healthCheck is a helper function to make a unary health check RPC and return
2625// the response.
2626func healthCheck(d time.Duration, cc *grpc.ClientConn, service string) (*healthpb.HealthCheckResponse, error) {
2627	ctx, cancel := context.WithTimeout(context.Background(), d)
2628	defer cancel()
2629	hc := healthgrpc.NewHealthClient(cc)
2630	return hc.Check(ctx, &healthpb.HealthCheckRequest{Service: service})
2631}
2632
2633// verifyHealthCheckStatus is a helper function to verify that the current
2634// health status of the service matches the one passed in 'wantStatus'.
2635func verifyHealthCheckStatus(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantStatus healthpb.HealthCheckResponse_ServingStatus) {
2636	t.Helper()
2637	resp, err := healthCheck(d, cc, service)
2638	if err != nil {
2639		t.Fatalf("Health/Check(_, _) = _, %v, want _, <nil>", err)
2640	}
2641	if resp.Status != wantStatus {
2642		t.Fatalf("Got the serving status %v, want %v", resp.Status, wantStatus)
2643	}
2644}
2645
2646// verifyHealthCheckErrCode is a helper function to verify that a unary health
2647// check RPC returns an error with a code set to 'wantCode'.
2648func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantCode codes.Code) {
2649	t.Helper()
2650	if _, err := healthCheck(d, cc, service); status.Code(err) != wantCode {
2651		t.Fatalf("Health/Check() got errCode %v, want %v", status.Code(err), wantCode)
2652	}
2653}
2654
2655// newHealthCheckStream is a helper function to start a health check streaming
2656// RPC, and returns the stream.
2657func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) {
2658	t.Helper()
2659	ctx, cancel := context.WithCancel(context.Background())
2660	hc := healthgrpc.NewHealthClient(cc)
2661	stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service})
2662	if err != nil {
2663		t.Fatalf("hc.Watch(_, %v) failed: %v", service, err)
2664	}
2665	return stream, cancel
2666}
2667
2668// healthWatchChecker is a helper function to verify that the next health
2669// status returned on the given stream matches the one passed in 'wantStatus'.
2670func healthWatchChecker(t *testing.T, stream healthgrpc.Health_WatchClient, wantStatus healthpb.HealthCheckResponse_ServingStatus) {
2671	t.Helper()
2672	response, err := stream.Recv()
2673	if err != nil {
2674		t.Fatalf("stream.Recv() failed: %v", err)
2675	}
2676	if response.Status != wantStatus {
2677		t.Fatalf("got servingStatus %v, want %v", response.Status, wantStatus)
2678	}
2679}
2680
2681// TestHealthCheckSuccess invokes the unary Check() RPC on the health server in
2682// a successful case.
2683func (s) TestHealthCheckSuccess(t *testing.T) {
2684	for _, e := range listTestEnv() {
2685		testHealthCheckSuccess(t, e)
2686	}
2687}
2688
2689func testHealthCheckSuccess(t *testing.T, e env) {
2690	te := newTest(t, e)
2691	te.enableHealthServer = true
2692	te.startServer(&testServer{security: e.security})
2693	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2694	defer te.tearDown()
2695
2696	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.OK)
2697}
2698
2699// TestHealthCheckFailure invokes the unary Check() RPC on the health server
2700// with an expired context and expects the RPC to fail.
2701func (s) TestHealthCheckFailure(t *testing.T) {
2702	for _, e := range listTestEnv() {
2703		testHealthCheckFailure(t, e)
2704	}
2705}
2706
2707func testHealthCheckFailure(t *testing.T, e env) {
2708	te := newTest(t, e)
2709	te.declareLogNoise(
2710		"Failed to dial ",
2711		"grpc: the client connection is closing; please retry",
2712	)
2713	te.enableHealthServer = true
2714	te.startServer(&testServer{security: e.security})
2715	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2716	defer te.tearDown()
2717
2718	verifyHealthCheckErrCode(t, 0*time.Second, te.clientConn(), defaultHealthService, codes.DeadlineExceeded)
2719	awaitNewConnLogOutput()
2720}
2721
2722// TestHealthCheckOff makes a unary Check() RPC on the health server where the
2723// health status of the defaultHealthService is not set, and therefore expects
2724// an error code 'codes.NotFound'.
2725func (s) TestHealthCheckOff(t *testing.T) {
2726	for _, e := range listTestEnv() {
2727		// TODO(bradfitz): Temporarily skip this env due to #619.
2728		if e.name == "handler-tls" {
2729			continue
2730		}
2731		testHealthCheckOff(t, e)
2732	}
2733}
2734
2735func testHealthCheckOff(t *testing.T, e env) {
2736	te := newTest(t, e)
2737	te.enableHealthServer = true
2738	te.startServer(&testServer{security: e.security})
2739	defer te.tearDown()
2740
2741	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.NotFound)
2742}
2743
2744// TestHealthWatchMultipleClients makes a streaming Watch() RPC on the health
2745// server with multiple clients and expects the same status on both streams.
2746func (s) TestHealthWatchMultipleClients(t *testing.T) {
2747	for _, e := range listTestEnv() {
2748		testHealthWatchMultipleClients(t, e)
2749	}
2750}
2751
2752func testHealthWatchMultipleClients(t *testing.T, e env) {
2753	te := newTest(t, e)
2754	te.enableHealthServer = true
2755	te.startServer(&testServer{security: e.security})
2756	defer te.tearDown()
2757
2758	cc := te.clientConn()
2759	stream1, cf1 := newHealthCheckStream(t, cc, defaultHealthService)
2760	defer cf1()
2761	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
2762
2763	stream2, cf2 := newHealthCheckStream(t, cc, defaultHealthService)
2764	defer cf2()
2765	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
2766
2767	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
2768	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING)
2769	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING)
2770}
2771
2772// TestHealthWatchSameStatusmakes a streaming Watch() RPC on the health server
2773// and makes sure that the health status of the server is as expected after
2774// multiple calls to SetServingStatus with the same status.
2775func (s) TestHealthWatchSameStatus(t *testing.T) {
2776	for _, e := range listTestEnv() {
2777		testHealthWatchSameStatus(t, e)
2778	}
2779}
2780
2781func testHealthWatchSameStatus(t *testing.T, e env) {
2782	te := newTest(t, e)
2783	te.enableHealthServer = true
2784	te.startServer(&testServer{security: e.security})
2785	defer te.tearDown()
2786
2787	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
2788	defer cf()
2789
2790	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
2791	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2792	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2793	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2794	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
2795	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING)
2796}
2797
2798// TestHealthWatchServiceStatusSetBeforeStartingServer starts a health server
2799// on which the health status for the defaultService is set before the gRPC
2800// server is started, and expects the correct health status to be returned.
2801func (s) TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) {
2802	for _, e := range listTestEnv() {
2803		testHealthWatchSetServiceStatusBeforeStartingServer(t, e)
2804	}
2805}
2806
2807func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) {
2808	hs := health.NewServer()
2809	te := newTest(t, e)
2810	te.healthServer = hs
2811	hs.SetServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2812	te.startServer(&testServer{security: e.security})
2813	defer te.tearDown()
2814
2815	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
2816	defer cf()
2817	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2818}
2819
2820// TestHealthWatchDefaultStatusChange verifies the simple case where the
2821// service starts off with a SERVICE_UNKNOWN status (because SetServingStatus
2822// hasn't been called yet) and then moves to SERVING after SetServingStatus is
2823// called.
2824func (s) TestHealthWatchDefaultStatusChange(t *testing.T) {
2825	for _, e := range listTestEnv() {
2826		testHealthWatchDefaultStatusChange(t, e)
2827	}
2828}
2829
2830func testHealthWatchDefaultStatusChange(t *testing.T, e env) {
2831	te := newTest(t, e)
2832	te.enableHealthServer = true
2833	te.startServer(&testServer{security: e.security})
2834	defer te.tearDown()
2835
2836	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
2837	defer cf()
2838	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
2839	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2840	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2841}
2842
2843// TestHealthWatchSetServiceStatusBeforeClientCallsWatch verifies the case
2844// where the health status is set to SERVING before the client calls Watch().
2845func (s) TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) {
2846	for _, e := range listTestEnv() {
2847		testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e)
2848	}
2849}
2850
2851func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) {
2852	te := newTest(t, e)
2853	te.enableHealthServer = true
2854	te.startServer(&testServer{security: e.security})
2855	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2856	defer te.tearDown()
2857
2858	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
2859	defer cf()
2860	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2861}
2862
2863// TestHealthWatchOverallServerHealthChange verifies setting the overall status
2864// of the server by using the empty service name.
2865func (s) TestHealthWatchOverallServerHealthChange(t *testing.T) {
2866	for _, e := range listTestEnv() {
2867		testHealthWatchOverallServerHealthChange(t, e)
2868	}
2869}
2870
2871func testHealthWatchOverallServerHealthChange(t *testing.T, e env) {
2872	te := newTest(t, e)
2873	te.enableHealthServer = true
2874	te.startServer(&testServer{security: e.security})
2875	defer te.tearDown()
2876
2877	stream, cf := newHealthCheckStream(t, te.clientConn(), "")
2878	defer cf()
2879	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2880	te.setHealthServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING)
2881	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING)
2882}
2883
2884// TestUnknownHandler verifies that an expected error is returned (by setting
2885// the unknownHandler on the server) for a service which is not exposed to the
2886// client.
2887func (s) TestUnknownHandler(t *testing.T) {
2888	// An example unknownHandler that returns a different code and a different
2889	// method, making sure that we do not expose what methods are implemented to
2890	// a client that is not authenticated.
2891	unknownHandler := func(srv interface{}, stream grpc.ServerStream) error {
2892		return status.Error(codes.Unauthenticated, "user unauthenticated")
2893	}
2894	for _, e := range listTestEnv() {
2895		// TODO(bradfitz): Temporarily skip this env due to #619.
2896		if e.name == "handler-tls" {
2897			continue
2898		}
2899		testUnknownHandler(t, e, unknownHandler)
2900	}
2901}
2902
2903func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) {
2904	te := newTest(t, e)
2905	te.unknownHandler = unknownHandler
2906	te.startServer(&testServer{security: e.security})
2907	defer te.tearDown()
2908	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), "", codes.Unauthenticated)
2909}
2910
2911// TestHealthCheckServingStatus makes a streaming Watch() RPC on the health
2912// server and verifies a bunch of health status transitions.
2913func (s) TestHealthCheckServingStatus(t *testing.T) {
2914	for _, e := range listTestEnv() {
2915		testHealthCheckServingStatus(t, e)
2916	}
2917}
2918
2919func testHealthCheckServingStatus(t *testing.T, e env) {
2920	te := newTest(t, e)
2921	te.enableHealthServer = true
2922	te.startServer(&testServer{security: e.security})
2923	defer te.tearDown()
2924
2925	cc := te.clientConn()
2926	verifyHealthCheckStatus(t, 1*time.Second, cc, "", healthpb.HealthCheckResponse_SERVING)
2927	verifyHealthCheckErrCode(t, 1*time.Second, cc, defaultHealthService, codes.NotFound)
2928	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2929	verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2930	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
2931	verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
2932}
2933
2934func (s) TestEmptyUnaryWithUserAgent(t *testing.T) {
2935	for _, e := range listTestEnv() {
2936		testEmptyUnaryWithUserAgent(t, e)
2937	}
2938}
2939
2940func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
2941	te := newTest(t, e)
2942	te.userAgent = testAppUA
2943	te.startServer(&testServer{security: e.security})
2944	defer te.tearDown()
2945
2946	cc := te.clientConn()
2947	tc := testpb.NewTestServiceClient(cc)
2948	var header metadata.MD
2949	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
2950	defer cancel()
2951	reply, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Header(&header))
2952	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
2953		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
2954	}
2955	if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) {
2956		t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA)
2957	}
2958
2959	te.srv.Stop()
2960}
2961
2962func (s) TestFailedEmptyUnary(t *testing.T) {
2963	for _, e := range listTestEnv() {
2964		if e.name == "handler-tls" {
2965			// This test covers status details, but
2966			// Grpc-Status-Details-Bin is not support in handler_server.
2967			continue
2968		}
2969		testFailedEmptyUnary(t, e)
2970	}
2971}
2972
2973func testFailedEmptyUnary(t *testing.T, e env) {
2974	te := newTest(t, e)
2975	te.userAgent = failAppUA
2976	te.startServer(&testServer{security: e.security})
2977	defer te.tearDown()
2978	tc := testpb.NewTestServiceClient(te.clientConn())
2979
2980	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
2981	wantErr := detailedError
2982	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) {
2983		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr)
2984	}
2985}
2986
2987func (s) TestLargeUnary(t *testing.T) {
2988	for _, e := range listTestEnv() {
2989		testLargeUnary(t, e)
2990	}
2991}
2992
2993func testLargeUnary(t *testing.T, e env) {
2994	te := newTest(t, e)
2995	te.startServer(&testServer{security: e.security})
2996	defer te.tearDown()
2997	tc := testpb.NewTestServiceClient(te.clientConn())
2998
2999	const argSize = 271828
3000	const respSize = 314159
3001
3002	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3003	if err != nil {
3004		t.Fatal(err)
3005	}
3006
3007	req := &testpb.SimpleRequest{
3008		ResponseType: testpb.PayloadType_COMPRESSABLE,
3009		ResponseSize: respSize,
3010		Payload:      payload,
3011	}
3012
3013	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
3014	defer cancel()
3015	reply, err := tc.UnaryCall(ctx, req)
3016	if err != nil {
3017		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
3018	}
3019	pt := reply.GetPayload().GetType()
3020	ps := len(reply.GetPayload().GetBody())
3021	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
3022		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
3023	}
3024}
3025
3026// Test backward-compatibility API for setting msg size limit.
3027func (s) TestExceedMsgLimit(t *testing.T) {
3028	for _, e := range listTestEnv() {
3029		testExceedMsgLimit(t, e)
3030	}
3031}
3032
3033func testExceedMsgLimit(t *testing.T, e env) {
3034	te := newTest(t, e)
3035	maxMsgSize := 1024
3036	te.maxServerMsgSize, te.maxClientMsgSize = newInt(maxMsgSize), newInt(maxMsgSize)
3037	te.startServer(&testServer{security: e.security})
3038	defer te.tearDown()
3039	tc := testpb.NewTestServiceClient(te.clientConn())
3040
3041	largeSize := int32(maxMsgSize + 1)
3042	const smallSize = 1
3043
3044	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
3045	if err != nil {
3046		t.Fatal(err)
3047	}
3048	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
3049	if err != nil {
3050		t.Fatal(err)
3051	}
3052
3053	// Make sure the server cannot receive a unary RPC of largeSize.
3054	req := &testpb.SimpleRequest{
3055		ResponseType: testpb.PayloadType_COMPRESSABLE,
3056		ResponseSize: smallSize,
3057		Payload:      largePayload,
3058	}
3059
3060	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
3061	defer cancel()
3062	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
3063		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
3064	}
3065	// Make sure the client cannot receive a unary RPC of largeSize.
3066	req.ResponseSize = largeSize
3067	req.Payload = smallPayload
3068	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
3069		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
3070	}
3071
3072	// Make sure the server cannot receive a streaming RPC of largeSize.
3073	stream, err := tc.FullDuplexCall(te.ctx)
3074	if err != nil {
3075		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3076	}
3077	respParam := []*testpb.ResponseParameters{
3078		{
3079			Size: 1,
3080		},
3081	}
3082
3083	sreq := &testpb.StreamingOutputCallRequest{
3084		ResponseType:       testpb.PayloadType_COMPRESSABLE,
3085		ResponseParameters: respParam,
3086		Payload:            largePayload,
3087	}
3088	if err := stream.Send(sreq); err != nil {
3089		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
3090	}
3091	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
3092		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
3093	}
3094
3095	// Test on client side for streaming RPC.
3096	stream, err = tc.FullDuplexCall(te.ctx)
3097	if err != nil {
3098		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3099	}
3100	respParam[0].Size = largeSize
3101	sreq.Payload = smallPayload
3102	if err := stream.Send(sreq); err != nil {
3103		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
3104	}
3105	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
3106		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
3107	}
3108}
3109
3110func (s) TestPeerClientSide(t *testing.T) {
3111	for _, e := range listTestEnv() {
3112		testPeerClientSide(t, e)
3113	}
3114}
3115
3116func testPeerClientSide(t *testing.T, e env) {
3117	te := newTest(t, e)
3118	te.userAgent = testAppUA
3119	te.startServer(&testServer{security: e.security})
3120	defer te.tearDown()
3121	tc := testpb.NewTestServiceClient(te.clientConn())
3122	peer := new(peer.Peer)
3123	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
3124	defer cancel()
3125	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil {
3126		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
3127	}
3128	pa := peer.Addr.String()
3129	if e.network == "unix" {
3130		if pa != te.srvAddr {
3131			t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
3132		}
3133		return
3134	}
3135	_, pp, err := net.SplitHostPort(pa)
3136	if err != nil {
3137		t.Fatalf("Failed to parse address from peer.")
3138	}
3139	_, sp, err := net.SplitHostPort(te.srvAddr)
3140	if err != nil {
3141		t.Fatalf("Failed to parse address of test server.")
3142	}
3143	if pp != sp {
3144		t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
3145	}
3146}
3147
3148// TestPeerNegative tests that if call fails setting peer
3149// doesn't cause a segmentation fault.
3150// issue#1141 https://github.com/grpc/grpc-go/issues/1141
3151func (s) TestPeerNegative(t *testing.T) {
3152	for _, e := range listTestEnv() {
3153		testPeerNegative(t, e)
3154	}
3155}
3156
3157func testPeerNegative(t *testing.T, e env) {
3158	te := newTest(t, e)
3159	te.startServer(&testServer{security: e.security})
3160	defer te.tearDown()
3161
3162	cc := te.clientConn()
3163	tc := testpb.NewTestServiceClient(cc)
3164	peer := new(peer.Peer)
3165	ctx, cancel := context.WithCancel(context.Background())
3166	cancel()
3167	tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer))
3168}
3169
3170func (s) TestPeerFailedRPC(t *testing.T) {
3171	for _, e := range listTestEnv() {
3172		testPeerFailedRPC(t, e)
3173	}
3174}
3175
3176func testPeerFailedRPC(t *testing.T, e env) {
3177	te := newTest(t, e)
3178	te.maxServerReceiveMsgSize = newInt(1 * 1024)
3179	te.startServer(&testServer{security: e.security})
3180
3181	defer te.tearDown()
3182	tc := testpb.NewTestServiceClient(te.clientConn())
3183
3184	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
3185	defer cancel()
3186	// first make a successful request to the server
3187	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
3188		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
3189	}
3190
3191	// make a second request that will be rejected by the server
3192	const largeSize = 5 * 1024
3193	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
3194	if err != nil {
3195		t.Fatal(err)
3196	}
3197	req := &testpb.SimpleRequest{
3198		ResponseType: testpb.PayloadType_COMPRESSABLE,
3199		Payload:      largePayload,
3200	}
3201
3202	peer := new(peer.Peer)
3203	if _, err := tc.UnaryCall(ctx, req, grpc.Peer(peer)); err == nil || status.Code(err) != codes.ResourceExhausted {
3204		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
3205	} else {
3206		pa := peer.Addr.String()
3207		if e.network == "unix" {
3208			if pa != te.srvAddr {
3209				t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
3210			}
3211			return
3212		}
3213		_, pp, err := net.SplitHostPort(pa)
3214		if err != nil {
3215			t.Fatalf("Failed to parse address from peer.")
3216		}
3217		_, sp, err := net.SplitHostPort(te.srvAddr)
3218		if err != nil {
3219			t.Fatalf("Failed to parse address of test server.")
3220		}
3221		if pp != sp {
3222			t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
3223		}
3224	}
3225}
3226
3227func (s) TestMetadataUnaryRPC(t *testing.T) {
3228	for _, e := range listTestEnv() {
3229		testMetadataUnaryRPC(t, e)
3230	}
3231}
3232
3233func testMetadataUnaryRPC(t *testing.T, e env) {
3234	te := newTest(t, e)
3235	te.startServer(&testServer{security: e.security})
3236	defer te.tearDown()
3237	tc := testpb.NewTestServiceClient(te.clientConn())
3238
3239	const argSize = 2718
3240	const respSize = 314
3241
3242	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3243	if err != nil {
3244		t.Fatal(err)
3245	}
3246
3247	req := &testpb.SimpleRequest{
3248		ResponseType: testpb.PayloadType_COMPRESSABLE,
3249		ResponseSize: respSize,
3250		Payload:      payload,
3251	}
3252	var header, trailer metadata.MD
3253	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3254	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
3255		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
3256	}
3257	// Ignore optional response headers that Servers may set:
3258	if header != nil {
3259		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
3260		delete(header, "date")    // the Date header is also optional
3261		delete(header, "user-agent")
3262		delete(header, "content-type")
3263	}
3264	if !reflect.DeepEqual(header, testMetadata) {
3265		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
3266	}
3267	if !reflect.DeepEqual(trailer, testTrailerMetadata) {
3268		t.Fatalf("Received trailer metadata %v, want %v", trailer, testTrailerMetadata)
3269	}
3270}
3271
3272func (s) TestMetadataOrderUnaryRPC(t *testing.T) {
3273	for _, e := range listTestEnv() {
3274		testMetadataOrderUnaryRPC(t, e)
3275	}
3276}
3277
3278func testMetadataOrderUnaryRPC(t *testing.T, e env) {
3279	te := newTest(t, e)
3280	te.startServer(&testServer{security: e.security})
3281	defer te.tearDown()
3282	tc := testpb.NewTestServiceClient(te.clientConn())
3283
3284	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3285	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value2")
3286	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value3")
3287
3288	// using Join to built expected metadata instead of FromOutgoingContext
3289	newMetadata := metadata.Join(testMetadata, metadata.Pairs("key1", "value2", "key1", "value3"))
3290
3291	var header metadata.MD
3292	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.Header(&header)); err != nil {
3293		t.Fatal(err)
3294	}
3295
3296	// Ignore optional response headers that Servers may set:
3297	if header != nil {
3298		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
3299		delete(header, "date")    // the Date header is also optional
3300		delete(header, "user-agent")
3301		delete(header, "content-type")
3302	}
3303
3304	if !reflect.DeepEqual(header, newMetadata) {
3305		t.Fatalf("Received header metadata %v, want %v", header, newMetadata)
3306	}
3307}
3308
3309func (s) TestMultipleSetTrailerUnaryRPC(t *testing.T) {
3310	for _, e := range listTestEnv() {
3311		testMultipleSetTrailerUnaryRPC(t, e)
3312	}
3313}
3314
3315func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) {
3316	te := newTest(t, e)
3317	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
3318	defer te.tearDown()
3319	tc := testpb.NewTestServiceClient(te.clientConn())
3320
3321	const (
3322		argSize  = 1
3323		respSize = 1
3324	)
3325	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3326	if err != nil {
3327		t.Fatal(err)
3328	}
3329
3330	req := &testpb.SimpleRequest{
3331		ResponseType: testpb.PayloadType_COMPRESSABLE,
3332		ResponseSize: respSize,
3333		Payload:      payload,
3334	}
3335	var trailer metadata.MD
3336	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3337	if _, err := tc.UnaryCall(ctx, req, grpc.Trailer(&trailer), grpc.WaitForReady(true)); err != nil {
3338		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
3339	}
3340	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
3341	if !reflect.DeepEqual(trailer, expectedTrailer) {
3342		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
3343	}
3344}
3345
3346func (s) TestMultipleSetTrailerStreamingRPC(t *testing.T) {
3347	for _, e := range listTestEnv() {
3348		testMultipleSetTrailerStreamingRPC(t, e)
3349	}
3350}
3351
3352func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) {
3353	te := newTest(t, e)
3354	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
3355	defer te.tearDown()
3356	tc := testpb.NewTestServiceClient(te.clientConn())
3357
3358	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3359	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
3360	if err != nil {
3361		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3362	}
3363	if err := stream.CloseSend(); err != nil {
3364		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3365	}
3366	if _, err := stream.Recv(); err != io.EOF {
3367		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
3368	}
3369
3370	trailer := stream.Trailer()
3371	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
3372	if !reflect.DeepEqual(trailer, expectedTrailer) {
3373		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
3374	}
3375}
3376
3377func (s) TestSetAndSendHeaderUnaryRPC(t *testing.T) {
3378	for _, e := range listTestEnv() {
3379		if e.name == "handler-tls" {
3380			continue
3381		}
3382		testSetAndSendHeaderUnaryRPC(t, e)
3383	}
3384}
3385
3386// To test header metadata is sent on SendHeader().
3387func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) {
3388	te := newTest(t, e)
3389	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
3390	defer te.tearDown()
3391	tc := testpb.NewTestServiceClient(te.clientConn())
3392
3393	const (
3394		argSize  = 1
3395		respSize = 1
3396	)
3397	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3398	if err != nil {
3399		t.Fatal(err)
3400	}
3401
3402	req := &testpb.SimpleRequest{
3403		ResponseType: testpb.PayloadType_COMPRESSABLE,
3404		ResponseSize: respSize,
3405		Payload:      payload,
3406	}
3407	var header metadata.MD
3408	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3409	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
3410		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
3411	}
3412	delete(header, "user-agent")
3413	delete(header, "content-type")
3414	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3415	if !reflect.DeepEqual(header, expectedHeader) {
3416		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3417	}
3418}
3419
3420func (s) TestMultipleSetHeaderUnaryRPC(t *testing.T) {
3421	for _, e := range listTestEnv() {
3422		if e.name == "handler-tls" {
3423			continue
3424		}
3425		testMultipleSetHeaderUnaryRPC(t, e)
3426	}
3427}
3428
3429// To test header metadata is sent when sending response.
3430func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) {
3431	te := newTest(t, e)
3432	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
3433	defer te.tearDown()
3434	tc := testpb.NewTestServiceClient(te.clientConn())
3435
3436	const (
3437		argSize  = 1
3438		respSize = 1
3439	)
3440	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3441	if err != nil {
3442		t.Fatal(err)
3443	}
3444
3445	req := &testpb.SimpleRequest{
3446		ResponseType: testpb.PayloadType_COMPRESSABLE,
3447		ResponseSize: respSize,
3448		Payload:      payload,
3449	}
3450
3451	var header metadata.MD
3452	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3453	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
3454		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
3455	}
3456	delete(header, "user-agent")
3457	delete(header, "content-type")
3458	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3459	if !reflect.DeepEqual(header, expectedHeader) {
3460		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3461	}
3462}
3463
3464func (s) TestMultipleSetHeaderUnaryRPCError(t *testing.T) {
3465	for _, e := range listTestEnv() {
3466		if e.name == "handler-tls" {
3467			continue
3468		}
3469		testMultipleSetHeaderUnaryRPCError(t, e)
3470	}
3471}
3472
3473// To test header metadata is sent when sending status.
3474func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) {
3475	te := newTest(t, e)
3476	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
3477	defer te.tearDown()
3478	tc := testpb.NewTestServiceClient(te.clientConn())
3479
3480	const (
3481		argSize  = 1
3482		respSize = -1 // Invalid respSize to make RPC fail.
3483	)
3484	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3485	if err != nil {
3486		t.Fatal(err)
3487	}
3488
3489	req := &testpb.SimpleRequest{
3490		ResponseType: testpb.PayloadType_COMPRESSABLE,
3491		ResponseSize: respSize,
3492		Payload:      payload,
3493	}
3494	var header metadata.MD
3495	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3496	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err == nil {
3497		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <non-nil>", ctx, err)
3498	}
3499	delete(header, "user-agent")
3500	delete(header, "content-type")
3501	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3502	if !reflect.DeepEqual(header, expectedHeader) {
3503		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3504	}
3505}
3506
3507func (s) TestSetAndSendHeaderStreamingRPC(t *testing.T) {
3508	for _, e := range listTestEnv() {
3509		if e.name == "handler-tls" {
3510			continue
3511		}
3512		testSetAndSendHeaderStreamingRPC(t, e)
3513	}
3514}
3515
3516// To test header metadata is sent on SendHeader().
3517func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) {
3518	te := newTest(t, e)
3519	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
3520	defer te.tearDown()
3521	tc := testpb.NewTestServiceClient(te.clientConn())
3522
3523	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3524	stream, err := tc.FullDuplexCall(ctx)
3525	if err != nil {
3526		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3527	}
3528	if err := stream.CloseSend(); err != nil {
3529		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3530	}
3531	if _, err := stream.Recv(); err != io.EOF {
3532		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
3533	}
3534
3535	header, err := stream.Header()
3536	if err != nil {
3537		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
3538	}
3539	delete(header, "user-agent")
3540	delete(header, "content-type")
3541	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3542	if !reflect.DeepEqual(header, expectedHeader) {
3543		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3544	}
3545}
3546
3547func (s) TestMultipleSetHeaderStreamingRPC(t *testing.T) {
3548	for _, e := range listTestEnv() {
3549		if e.name == "handler-tls" {
3550			continue
3551		}
3552		testMultipleSetHeaderStreamingRPC(t, e)
3553	}
3554}
3555
3556// To test header metadata is sent when sending response.
3557func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) {
3558	te := newTest(t, e)
3559	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
3560	defer te.tearDown()
3561	tc := testpb.NewTestServiceClient(te.clientConn())
3562
3563	const (
3564		argSize  = 1
3565		respSize = 1
3566	)
3567	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3568	stream, err := tc.FullDuplexCall(ctx)
3569	if err != nil {
3570		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3571	}
3572
3573	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3574	if err != nil {
3575		t.Fatal(err)
3576	}
3577
3578	req := &testpb.StreamingOutputCallRequest{
3579		ResponseType: testpb.PayloadType_COMPRESSABLE,
3580		ResponseParameters: []*testpb.ResponseParameters{
3581			{Size: respSize},
3582		},
3583		Payload: payload,
3584	}
3585	if err := stream.Send(req); err != nil {
3586		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
3587	}
3588	if _, err := stream.Recv(); err != nil {
3589		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
3590	}
3591	if err := stream.CloseSend(); err != nil {
3592		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3593	}
3594	if _, err := stream.Recv(); err != io.EOF {
3595		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
3596	}
3597
3598	header, err := stream.Header()
3599	if err != nil {
3600		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
3601	}
3602	delete(header, "user-agent")
3603	delete(header, "content-type")
3604	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3605	if !reflect.DeepEqual(header, expectedHeader) {
3606		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3607	}
3608
3609}
3610
3611func (s) TestMultipleSetHeaderStreamingRPCError(t *testing.T) {
3612	for _, e := range listTestEnv() {
3613		if e.name == "handler-tls" {
3614			continue
3615		}
3616		testMultipleSetHeaderStreamingRPCError(t, e)
3617	}
3618}
3619
3620// To test header metadata is sent when sending status.
3621func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
3622	te := newTest(t, e)
3623	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
3624	defer te.tearDown()
3625	tc := testpb.NewTestServiceClient(te.clientConn())
3626
3627	const (
3628		argSize  = 1
3629		respSize = -1
3630	)
3631	ctx, cancel := context.WithCancel(context.Background())
3632	defer cancel()
3633	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
3634	stream, err := tc.FullDuplexCall(ctx)
3635	if err != nil {
3636		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3637	}
3638
3639	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3640	if err != nil {
3641		t.Fatal(err)
3642	}
3643
3644	req := &testpb.StreamingOutputCallRequest{
3645		ResponseType: testpb.PayloadType_COMPRESSABLE,
3646		ResponseParameters: []*testpb.ResponseParameters{
3647			{Size: respSize},
3648		},
3649		Payload: payload,
3650	}
3651	if err := stream.Send(req); err != nil {
3652		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
3653	}
3654	if _, err := stream.Recv(); err == nil {
3655		t.Fatalf("%v.Recv() = %v, want <non-nil>", stream, err)
3656	}
3657
3658	header, err := stream.Header()
3659	if err != nil {
3660		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
3661	}
3662	delete(header, "user-agent")
3663	delete(header, "content-type")
3664	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3665	if !reflect.DeepEqual(header, expectedHeader) {
3666		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3667	}
3668	if err := stream.CloseSend(); err != nil {
3669		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3670	}
3671}
3672
3673// TestMalformedHTTP2Metadata verfies the returned error when the client
3674// sends an illegal metadata.
3675func (s) TestMalformedHTTP2Metadata(t *testing.T) {
3676	for _, e := range listTestEnv() {
3677		if e.name == "handler-tls" {
3678			// Failed with "server stops accepting new RPCs".
3679			// Server stops accepting new RPCs when the client sends an illegal http2 header.
3680			continue
3681		}
3682		testMalformedHTTP2Metadata(t, e)
3683	}
3684}
3685
3686func testMalformedHTTP2Metadata(t *testing.T, e env) {
3687	te := newTest(t, e)
3688	te.startServer(&testServer{security: e.security})
3689	defer te.tearDown()
3690	tc := testpb.NewTestServiceClient(te.clientConn())
3691
3692	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718)
3693	if err != nil {
3694		t.Fatal(err)
3695	}
3696
3697	req := &testpb.SimpleRequest{
3698		ResponseType: testpb.PayloadType_COMPRESSABLE,
3699		ResponseSize: 314,
3700		Payload:      payload,
3701	}
3702	ctx := metadata.NewOutgoingContext(context.Background(), malformedHTTP2Metadata)
3703	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Internal {
3704		t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %s", ctx, err, codes.Internal)
3705	}
3706}
3707
3708// Tests that the client transparently retries correctly when receiving a
3709// RST_STREAM with code REFUSED_STREAM.
3710func (s) TestTransparentRetry(t *testing.T) {
3711	testCases := []struct {
3712		failFast bool
3713		errCode  codes.Code
3714	}{{
3715		// success attempt: 1, (stream ID 1)
3716	}, {
3717		// success attempt: 2, (stream IDs 3, 5)
3718	}, {
3719		// no success attempt (stream IDs 7, 9)
3720		errCode: codes.Unavailable,
3721	}, {
3722		// success attempt: 1 (stream ID 11),
3723		failFast: true,
3724	}, {
3725		// success attempt: 2 (stream IDs 13, 15),
3726		failFast: true,
3727	}, {
3728		// no success attempt (stream IDs 17, 19)
3729		failFast: true,
3730		errCode:  codes.Unavailable,
3731	}}
3732
3733	lis, err := net.Listen("tcp", "localhost:0")
3734	if err != nil {
3735		t.Fatalf("Failed to listen. Err: %v", err)
3736	}
3737	defer lis.Close()
3738	server := &httpServer{
3739		headerFields: [][]string{{
3740			":status", "200",
3741			"content-type", "application/grpc",
3742			"grpc-status", "0",
3743		}},
3744		refuseStream: func(i uint32) bool {
3745			switch i {
3746			case 1, 5, 11, 15: // these stream IDs succeed
3747				return false
3748			}
3749			return true // these are refused
3750		},
3751	}
3752	server.start(t, lis)
3753	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
3754	if err != nil {
3755		t.Fatalf("failed to dial due to err: %v", err)
3756	}
3757	defer cc.Close()
3758
3759	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
3760	defer cancel()
3761
3762	client := testpb.NewTestServiceClient(cc)
3763
3764	for i, tc := range testCases {
3765		stream, err := client.FullDuplexCall(ctx)
3766		if err != nil {
3767			t.Fatalf("error creating stream due to err: %v", err)
3768		}
3769		code := func(err error) codes.Code {
3770			if err == io.EOF {
3771				return codes.OK
3772			}
3773			return status.Code(err)
3774		}
3775		if _, err := stream.Recv(); code(err) != tc.errCode {
3776			t.Fatalf("%v: stream.Recv() = _, %v, want error code: %v", i, err, tc.errCode)
3777		}
3778
3779	}
3780}
3781
3782func (s) TestCancel(t *testing.T) {
3783	for _, e := range listTestEnv() {
3784		testCancel(t, e)
3785	}
3786}
3787
3788func testCancel(t *testing.T, e env) {
3789	te := newTest(t, e)
3790	te.declareLogNoise("grpc: the client connection is closing; please retry")
3791	te.startServer(&testServer{security: e.security, unaryCallSleepTime: time.Second})
3792	defer te.tearDown()
3793
3794	cc := te.clientConn()
3795	tc := testpb.NewTestServiceClient(cc)
3796
3797	const argSize = 2718
3798	const respSize = 314
3799
3800	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3801	if err != nil {
3802		t.Fatal(err)
3803	}
3804
3805	req := &testpb.SimpleRequest{
3806		ResponseType: testpb.PayloadType_COMPRESSABLE,
3807		ResponseSize: respSize,
3808		Payload:      payload,
3809	}
3810	ctx, cancel := context.WithCancel(context.Background())
3811	time.AfterFunc(1*time.Millisecond, cancel)
3812	if r, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Canceled {
3813		t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %s", r, err, codes.Canceled)
3814	}
3815	awaitNewConnLogOutput()
3816}
3817
3818func (s) TestCancelNoIO(t *testing.T) {
3819	for _, e := range listTestEnv() {
3820		testCancelNoIO(t, e)
3821	}
3822}
3823
3824func testCancelNoIO(t *testing.T, e env) {
3825	te := newTest(t, e)
3826	te.declareLogNoise("http2Client.notifyError got notified that the client transport was broken")
3827	te.maxStream = 1 // Only allows 1 live stream per server transport.
3828	te.startServer(&testServer{security: e.security})
3829	defer te.tearDown()
3830
3831	cc := te.clientConn()
3832	tc := testpb.NewTestServiceClient(cc)
3833
3834	// Start one blocked RPC for which we'll never send streaming
3835	// input. This will consume the 1 maximum concurrent streams,
3836	// causing future RPCs to hang.
3837	ctx, cancelFirst := context.WithCancel(context.Background())
3838	_, err := tc.StreamingInputCall(ctx)
3839	if err != nil {
3840		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
3841	}
3842
3843	// Loop until the ClientConn receives the initial settings
3844	// frame from the server, notifying it about the maximum
3845	// concurrent streams. We know when it's received it because
3846	// an RPC will fail with codes.DeadlineExceeded instead of
3847	// succeeding.
3848	// TODO(bradfitz): add internal test hook for this (Issue 534)
3849	for {
3850		ctx, cancelSecond := context.WithTimeout(context.Background(), 50*time.Millisecond)
3851		_, err := tc.StreamingInputCall(ctx)
3852		cancelSecond()
3853		if err == nil {
3854			continue
3855		}
3856		if status.Code(err) == codes.DeadlineExceeded {
3857			break
3858		}
3859		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
3860	}
3861	// If there are any RPCs in flight before the client receives
3862	// the max streams setting, let them be expired.
3863	// TODO(bradfitz): add internal test hook for this (Issue 534)
3864	time.Sleep(50 * time.Millisecond)
3865
3866	go func() {
3867		time.Sleep(50 * time.Millisecond)
3868		cancelFirst()
3869	}()
3870
3871	// This should be blocked until the 1st is canceled, then succeed.
3872	ctx, cancelThird := context.WithTimeout(context.Background(), 500*time.Millisecond)
3873	if _, err := tc.StreamingInputCall(ctx); err != nil {
3874		t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
3875	}
3876	cancelThird()
3877}
3878
3879// The following tests the gRPC streaming RPC implementations.
3880// TODO(zhaoq): Have better coverage on error cases.
3881var (
3882	reqSizes  = []int{27182, 8, 1828, 45904}
3883	respSizes = []int{31415, 9, 2653, 58979}
3884)
3885
3886func (s) TestNoService(t *testing.T) {
3887	for _, e := range listTestEnv() {
3888		testNoService(t, e)
3889	}
3890}
3891
3892func testNoService(t *testing.T, e env) {
3893	te := newTest(t, e)
3894	te.startServer(nil)
3895	defer te.tearDown()
3896
3897	cc := te.clientConn()
3898	tc := testpb.NewTestServiceClient(cc)
3899
3900	stream, err := tc.FullDuplexCall(te.ctx, grpc.WaitForReady(true))
3901	if err != nil {
3902		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3903	}
3904	if _, err := stream.Recv(); status.Code(err) != codes.Unimplemented {
3905		t.Fatalf("stream.Recv() = _, %v, want _, error code %s", err, codes.Unimplemented)
3906	}
3907}
3908
3909func (s) TestPingPong(t *testing.T) {
3910	for _, e := range listTestEnv() {
3911		testPingPong(t, e)
3912	}
3913}
3914
3915func testPingPong(t *testing.T, e env) {
3916	te := newTest(t, e)
3917	te.startServer(&testServer{security: e.security})
3918	defer te.tearDown()
3919	tc := testpb.NewTestServiceClient(te.clientConn())
3920
3921	stream, err := tc.FullDuplexCall(te.ctx)
3922	if err != nil {
3923		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3924	}
3925	var index int
3926	for index < len(reqSizes) {
3927		respParam := []*testpb.ResponseParameters{
3928			{
3929				Size: int32(respSizes[index]),
3930			},
3931		}
3932
3933		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
3934		if err != nil {
3935			t.Fatal(err)
3936		}
3937
3938		req := &testpb.StreamingOutputCallRequest{
3939			ResponseType:       testpb.PayloadType_COMPRESSABLE,
3940			ResponseParameters: respParam,
3941			Payload:            payload,
3942		}
3943		if err := stream.Send(req); err != nil {
3944			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
3945		}
3946		reply, err := stream.Recv()
3947		if err != nil {
3948			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
3949		}
3950		pt := reply.GetPayload().GetType()
3951		if pt != testpb.PayloadType_COMPRESSABLE {
3952			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
3953		}
3954		size := len(reply.GetPayload().GetBody())
3955		if size != int(respSizes[index]) {
3956			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
3957		}
3958		index++
3959	}
3960	if err := stream.CloseSend(); err != nil {
3961		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3962	}
3963	if _, err := stream.Recv(); err != io.EOF {
3964		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
3965	}
3966}
3967
3968func (s) TestMetadataStreamingRPC(t *testing.T) {
3969	for _, e := range listTestEnv() {
3970		testMetadataStreamingRPC(t, e)
3971	}
3972}
3973
3974func testMetadataStreamingRPC(t *testing.T, e env) {
3975	te := newTest(t, e)
3976	te.startServer(&testServer{security: e.security})
3977	defer te.tearDown()
3978	tc := testpb.NewTestServiceClient(te.clientConn())
3979
3980	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
3981	stream, err := tc.FullDuplexCall(ctx)
3982	if err != nil {
3983		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3984	}
3985	go func() {
3986		headerMD, err := stream.Header()
3987		if e.security == "tls" {
3988			delete(headerMD, "transport_security_type")
3989		}
3990		delete(headerMD, "trailer") // ignore if present
3991		delete(headerMD, "user-agent")
3992		delete(headerMD, "content-type")
3993		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
3994			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
3995		}
3996		// test the cached value.
3997		headerMD, err = stream.Header()
3998		delete(headerMD, "trailer") // ignore if present
3999		delete(headerMD, "user-agent")
4000		delete(headerMD, "content-type")
4001		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
4002			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
4003		}
4004		err = func() error {
4005			for index := 0; index < len(reqSizes); index++ {
4006				respParam := []*testpb.ResponseParameters{
4007					{
4008						Size: int32(respSizes[index]),
4009					},
4010				}
4011
4012				payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
4013				if err != nil {
4014					return err
4015				}
4016
4017				req := &testpb.StreamingOutputCallRequest{
4018					ResponseType:       testpb.PayloadType_COMPRESSABLE,
4019					ResponseParameters: respParam,
4020					Payload:            payload,
4021				}
4022				if err := stream.Send(req); err != nil {
4023					return fmt.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
4024				}
4025			}
4026			return nil
4027		}()
4028		// Tell the server we're done sending args.
4029		stream.CloseSend()
4030		if err != nil {
4031			t.Error(err)
4032		}
4033	}()
4034	for {
4035		if _, err := stream.Recv(); err != nil {
4036			break
4037		}
4038	}
4039	trailerMD := stream.Trailer()
4040	if !reflect.DeepEqual(testTrailerMetadata, trailerMD) {
4041		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testTrailerMetadata)
4042	}
4043}
4044
4045func (s) TestServerStreaming(t *testing.T) {
4046	for _, e := range listTestEnv() {
4047		testServerStreaming(t, e)
4048	}
4049}
4050
4051func testServerStreaming(t *testing.T, e env) {
4052	te := newTest(t, e)
4053	te.startServer(&testServer{security: e.security})
4054	defer te.tearDown()
4055	tc := testpb.NewTestServiceClient(te.clientConn())
4056
4057	respParam := make([]*testpb.ResponseParameters, len(respSizes))
4058	for i, s := range respSizes {
4059		respParam[i] = &testpb.ResponseParameters{
4060			Size: int32(s),
4061		}
4062	}
4063	req := &testpb.StreamingOutputCallRequest{
4064		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4065		ResponseParameters: respParam,
4066	}
4067
4068	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
4069	defer cancel()
4070	stream, err := tc.StreamingOutputCall(ctx, req)
4071	if err != nil {
4072		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
4073	}
4074	var rpcStatus error
4075	var respCnt int
4076	var index int
4077	for {
4078		reply, err := stream.Recv()
4079		if err != nil {
4080			rpcStatus = err
4081			break
4082		}
4083		pt := reply.GetPayload().GetType()
4084		if pt != testpb.PayloadType_COMPRESSABLE {
4085			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
4086		}
4087		size := len(reply.GetPayload().GetBody())
4088		if size != int(respSizes[index]) {
4089			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
4090		}
4091		index++
4092		respCnt++
4093	}
4094	if rpcStatus != io.EOF {
4095		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
4096	}
4097	if respCnt != len(respSizes) {
4098		t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt)
4099	}
4100}
4101
4102func (s) TestFailedServerStreaming(t *testing.T) {
4103	for _, e := range listTestEnv() {
4104		testFailedServerStreaming(t, e)
4105	}
4106}
4107
4108func testFailedServerStreaming(t *testing.T, e env) {
4109	te := newTest(t, e)
4110	te.userAgent = failAppUA
4111	te.startServer(&testServer{security: e.security})
4112	defer te.tearDown()
4113	tc := testpb.NewTestServiceClient(te.clientConn())
4114
4115	respParam := make([]*testpb.ResponseParameters, len(respSizes))
4116	for i, s := range respSizes {
4117		respParam[i] = &testpb.ResponseParameters{
4118			Size: int32(s),
4119		}
4120	}
4121	req := &testpb.StreamingOutputCallRequest{
4122		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4123		ResponseParameters: respParam,
4124	}
4125	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
4126	stream, err := tc.StreamingOutputCall(ctx, req)
4127	if err != nil {
4128		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
4129	}
4130	wantErr := status.Error(codes.DataLoss, "error for testing: "+failAppUA)
4131	if _, err := stream.Recv(); !equalError(err, wantErr) {
4132		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, wantErr)
4133	}
4134}
4135
4136func equalError(x, y error) bool {
4137	return x == y || (x != nil && y != nil && x.Error() == y.Error())
4138}
4139
4140// concurrentSendServer is a TestServiceServer whose
4141// StreamingOutputCall makes ten serial Send calls, sending payloads
4142// "0".."9", inclusive.  TestServerStreamingConcurrent verifies they
4143// were received in the correct order, and that there were no races.
4144//
4145// All other TestServiceServer methods crash if called.
4146type concurrentSendServer struct {
4147	testpb.TestServiceServer
4148}
4149
4150func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
4151	for i := 0; i < 10; i++ {
4152		stream.Send(&testpb.StreamingOutputCallResponse{
4153			Payload: &testpb.Payload{
4154				Body: []byte{'0' + uint8(i)},
4155			},
4156		})
4157	}
4158	return nil
4159}
4160
4161// Tests doing a bunch of concurrent streaming output calls.
4162func (s) TestServerStreamingConcurrent(t *testing.T) {
4163	for _, e := range listTestEnv() {
4164		testServerStreamingConcurrent(t, e)
4165	}
4166}
4167
4168func testServerStreamingConcurrent(t *testing.T, e env) {
4169	te := newTest(t, e)
4170	te.startServer(concurrentSendServer{})
4171	defer te.tearDown()
4172
4173	cc := te.clientConn()
4174	tc := testpb.NewTestServiceClient(cc)
4175
4176	doStreamingCall := func() {
4177		req := &testpb.StreamingOutputCallRequest{}
4178		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
4179		defer cancel()
4180		stream, err := tc.StreamingOutputCall(ctx, req)
4181		if err != nil {
4182			t.Errorf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
4183			return
4184		}
4185		var ngot int
4186		var buf bytes.Buffer
4187		for {
4188			reply, err := stream.Recv()
4189			if err == io.EOF {
4190				break
4191			}
4192			if err != nil {
4193				t.Fatal(err)
4194			}
4195			ngot++
4196			if buf.Len() > 0 {
4197				buf.WriteByte(',')
4198			}
4199			buf.Write(reply.GetPayload().GetBody())
4200		}
4201		if want := 10; ngot != want {
4202			t.Errorf("Got %d replies, want %d", ngot, want)
4203		}
4204		if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
4205			t.Errorf("Got replies %q; want %q", got, want)
4206		}
4207	}
4208
4209	var wg sync.WaitGroup
4210	for i := 0; i < 20; i++ {
4211		wg.Add(1)
4212		go func() {
4213			defer wg.Done()
4214			doStreamingCall()
4215		}()
4216	}
4217	wg.Wait()
4218
4219}
4220
4221func generatePayloadSizes() [][]int {
4222	reqSizes := [][]int{
4223		{27182, 8, 1828, 45904},
4224	}
4225
4226	num8KPayloads := 1024
4227	eightKPayloads := []int{}
4228	for i := 0; i < num8KPayloads; i++ {
4229		eightKPayloads = append(eightKPayloads, (1 << 13))
4230	}
4231	reqSizes = append(reqSizes, eightKPayloads)
4232
4233	num2MPayloads := 8
4234	twoMPayloads := []int{}
4235	for i := 0; i < num2MPayloads; i++ {
4236		twoMPayloads = append(twoMPayloads, (1 << 21))
4237	}
4238	reqSizes = append(reqSizes, twoMPayloads)
4239
4240	return reqSizes
4241}
4242
4243func (s) TestClientStreaming(t *testing.T) {
4244	for _, s := range generatePayloadSizes() {
4245		for _, e := range listTestEnv() {
4246			testClientStreaming(t, e, s)
4247		}
4248	}
4249}
4250
4251func testClientStreaming(t *testing.T, e env, sizes []int) {
4252	te := newTest(t, e)
4253	te.startServer(&testServer{security: e.security})
4254	defer te.tearDown()
4255	tc := testpb.NewTestServiceClient(te.clientConn())
4256
4257	ctx, cancel := context.WithTimeout(te.ctx, time.Second*30)
4258	defer cancel()
4259	stream, err := tc.StreamingInputCall(ctx)
4260	if err != nil {
4261		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
4262	}
4263
4264	var sum int
4265	for _, s := range sizes {
4266		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
4267		if err != nil {
4268			t.Fatal(err)
4269		}
4270
4271		req := &testpb.StreamingInputCallRequest{
4272			Payload: payload,
4273		}
4274		if err := stream.Send(req); err != nil {
4275			t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
4276		}
4277		sum += s
4278	}
4279	reply, err := stream.CloseAndRecv()
4280	if err != nil {
4281		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
4282	}
4283	if reply.GetAggregatedPayloadSize() != int32(sum) {
4284		t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
4285	}
4286}
4287
4288func (s) TestClientStreamingError(t *testing.T) {
4289	for _, e := range listTestEnv() {
4290		if e.name == "handler-tls" {
4291			continue
4292		}
4293		testClientStreamingError(t, e)
4294	}
4295}
4296
4297func testClientStreamingError(t *testing.T, e env) {
4298	te := newTest(t, e)
4299	te.startServer(&testServer{security: e.security, earlyFail: true})
4300	defer te.tearDown()
4301	tc := testpb.NewTestServiceClient(te.clientConn())
4302
4303	stream, err := tc.StreamingInputCall(te.ctx)
4304	if err != nil {
4305		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
4306	}
4307	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1)
4308	if err != nil {
4309		t.Fatal(err)
4310	}
4311
4312	req := &testpb.StreamingInputCallRequest{
4313		Payload: payload,
4314	}
4315	// The 1st request should go through.
4316	if err := stream.Send(req); err != nil {
4317		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
4318	}
4319	for {
4320		if err := stream.Send(req); err != io.EOF {
4321			continue
4322		}
4323		if _, err := stream.CloseAndRecv(); status.Code(err) != codes.NotFound {
4324			t.Fatalf("%v.CloseAndRecv() = %v, want error %s", stream, err, codes.NotFound)
4325		}
4326		break
4327	}
4328}
4329
4330func (s) TestExceedMaxStreamsLimit(t *testing.T) {
4331	for _, e := range listTestEnv() {
4332		testExceedMaxStreamsLimit(t, e)
4333	}
4334}
4335
4336func testExceedMaxStreamsLimit(t *testing.T, e env) {
4337	te := newTest(t, e)
4338	te.declareLogNoise(
4339		"http2Client.notifyError got notified that the client transport was broken",
4340		"Conn.resetTransport failed to create client transport",
4341		"grpc: the connection is closing",
4342	)
4343	te.maxStream = 1 // Only allows 1 live stream per server transport.
4344	te.startServer(&testServer{security: e.security})
4345	defer te.tearDown()
4346
4347	cc := te.clientConn()
4348	tc := testpb.NewTestServiceClient(cc)
4349
4350	_, err := tc.StreamingInputCall(te.ctx)
4351	if err != nil {
4352		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
4353	}
4354	// Loop until receiving the new max stream setting from the server.
4355	for {
4356		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
4357		defer cancel()
4358		_, err := tc.StreamingInputCall(ctx)
4359		if err == nil {
4360			time.Sleep(50 * time.Millisecond)
4361			continue
4362		}
4363		if status.Code(err) == codes.DeadlineExceeded {
4364			break
4365		}
4366		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
4367	}
4368}
4369
4370func (s) TestStreamsQuotaRecovery(t *testing.T) {
4371	for _, e := range listTestEnv() {
4372		testStreamsQuotaRecovery(t, e)
4373	}
4374}
4375
4376func testStreamsQuotaRecovery(t *testing.T, e env) {
4377	te := newTest(t, e)
4378	te.declareLogNoise(
4379		"http2Client.notifyError got notified that the client transport was broken",
4380		"Conn.resetTransport failed to create client transport",
4381		"grpc: the connection is closing",
4382	)
4383	te.maxStream = 1 // Allows 1 live stream.
4384	te.startServer(&testServer{security: e.security})
4385	defer te.tearDown()
4386
4387	cc := te.clientConn()
4388	tc := testpb.NewTestServiceClient(cc)
4389	ctx, cancel := context.WithCancel(context.Background())
4390	defer cancel()
4391	if _, err := tc.StreamingInputCall(ctx); err != nil {
4392		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, <nil>", err)
4393	}
4394	// Loop until the new max stream setting is effective.
4395	for {
4396		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
4397		_, err := tc.StreamingInputCall(ctx)
4398		cancel()
4399		if err == nil {
4400			time.Sleep(5 * time.Millisecond)
4401			continue
4402		}
4403		if status.Code(err) == codes.DeadlineExceeded {
4404			break
4405		}
4406		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %s", err, codes.DeadlineExceeded)
4407	}
4408
4409	var wg sync.WaitGroup
4410	for i := 0; i < 10; i++ {
4411		wg.Add(1)
4412		go func() {
4413			defer wg.Done()
4414			payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 314)
4415			if err != nil {
4416				t.Error(err)
4417				return
4418			}
4419			req := &testpb.SimpleRequest{
4420				ResponseType: testpb.PayloadType_COMPRESSABLE,
4421				ResponseSize: 1592,
4422				Payload:      payload,
4423			}
4424			// No rpc should go through due to the max streams limit.
4425			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
4426			defer cancel()
4427			if _, err := tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
4428				t.Errorf("tc.UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
4429			}
4430		}()
4431	}
4432	wg.Wait()
4433
4434	cancel()
4435	// A new stream should be allowed after canceling the first one.
4436	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
4437	defer cancel()
4438	if _, err := tc.StreamingInputCall(ctx); err != nil {
4439		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %v", err, nil)
4440	}
4441}
4442
4443func (s) TestCompressServerHasNoSupport(t *testing.T) {
4444	for _, e := range listTestEnv() {
4445		testCompressServerHasNoSupport(t, e)
4446	}
4447}
4448
4449func testCompressServerHasNoSupport(t *testing.T, e env) {
4450	te := newTest(t, e)
4451	te.serverCompression = false
4452	te.clientCompression = false
4453	te.clientNopCompression = true
4454	te.startServer(&testServer{security: e.security})
4455	defer te.tearDown()
4456	tc := testpb.NewTestServiceClient(te.clientConn())
4457
4458	const argSize = 271828
4459	const respSize = 314159
4460	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
4461	if err != nil {
4462		t.Fatal(err)
4463	}
4464	req := &testpb.SimpleRequest{
4465		ResponseType: testpb.PayloadType_COMPRESSABLE,
4466		ResponseSize: respSize,
4467		Payload:      payload,
4468	}
4469
4470	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
4471	defer cancel()
4472	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.Unimplemented {
4473		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented)
4474	}
4475	// Streaming RPC
4476	stream, err := tc.FullDuplexCall(ctx)
4477	if err != nil {
4478		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
4479	}
4480	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Unimplemented {
4481		t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented)
4482	}
4483}
4484
4485func (s) TestCompressOK(t *testing.T) {
4486	for _, e := range listTestEnv() {
4487		testCompressOK(t, e)
4488	}
4489}
4490
4491func testCompressOK(t *testing.T, e env) {
4492	te := newTest(t, e)
4493	te.serverCompression = true
4494	te.clientCompression = true
4495	te.startServer(&testServer{security: e.security})
4496	defer te.tearDown()
4497	tc := testpb.NewTestServiceClient(te.clientConn())
4498
4499	// Unary call
4500	const argSize = 271828
4501	const respSize = 314159
4502	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
4503	if err != nil {
4504		t.Fatal(err)
4505	}
4506	req := &testpb.SimpleRequest{
4507		ResponseType: testpb.PayloadType_COMPRESSABLE,
4508		ResponseSize: respSize,
4509		Payload:      payload,
4510	}
4511	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
4512	if _, err := tc.UnaryCall(ctx, req); err != nil {
4513		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
4514	}
4515	// Streaming RPC
4516	ctx, cancel := context.WithCancel(context.Background())
4517	defer cancel()
4518	stream, err := tc.FullDuplexCall(ctx)
4519	if err != nil {
4520		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
4521	}
4522	respParam := []*testpb.ResponseParameters{
4523		{
4524			Size: 31415,
4525		},
4526	}
4527	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
4528	if err != nil {
4529		t.Fatal(err)
4530	}
4531	sreq := &testpb.StreamingOutputCallRequest{
4532		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4533		ResponseParameters: respParam,
4534		Payload:            payload,
4535	}
4536	if err := stream.Send(sreq); err != nil {
4537		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
4538	}
4539	stream.CloseSend()
4540	if _, err := stream.Recv(); err != nil {
4541		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
4542	}
4543	if _, err := stream.Recv(); err != io.EOF {
4544		t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err)
4545	}
4546}
4547
4548func (s) TestIdentityEncoding(t *testing.T) {
4549	for _, e := range listTestEnv() {
4550		testIdentityEncoding(t, e)
4551	}
4552}
4553
4554func testIdentityEncoding(t *testing.T, e env) {
4555	te := newTest(t, e)
4556	te.startServer(&testServer{security: e.security})
4557	defer te.tearDown()
4558	tc := testpb.NewTestServiceClient(te.clientConn())
4559
4560	// Unary call
4561	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 5)
4562	if err != nil {
4563		t.Fatal(err)
4564	}
4565	req := &testpb.SimpleRequest{
4566		ResponseType: testpb.PayloadType_COMPRESSABLE,
4567		ResponseSize: 10,
4568		Payload:      payload,
4569	}
4570	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
4571	if _, err := tc.UnaryCall(ctx, req); err != nil {
4572		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
4573	}
4574	// Streaming RPC
4575	ctx, cancel := context.WithCancel(context.Background())
4576	defer cancel()
4577	stream, err := tc.FullDuplexCall(ctx, grpc.UseCompressor("identity"))
4578	if err != nil {
4579		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
4580	}
4581	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
4582	if err != nil {
4583		t.Fatal(err)
4584	}
4585	sreq := &testpb.StreamingOutputCallRequest{
4586		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4587		ResponseParameters: []*testpb.ResponseParameters{{Size: 10}},
4588		Payload:            payload,
4589	}
4590	if err := stream.Send(sreq); err != nil {
4591		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
4592	}
4593	stream.CloseSend()
4594	if _, err := stream.Recv(); err != nil {
4595		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
4596	}
4597	if _, err := stream.Recv(); err != io.EOF {
4598		t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err)
4599	}
4600}
4601
4602func (s) TestUnaryClientInterceptor(t *testing.T) {
4603	for _, e := range listTestEnv() {
4604		testUnaryClientInterceptor(t, e)
4605	}
4606}
4607
4608func failOkayRPC(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
4609	err := invoker(ctx, method, req, reply, cc, opts...)
4610	if err == nil {
4611		return status.Error(codes.NotFound, "")
4612	}
4613	return err
4614}
4615
4616func testUnaryClientInterceptor(t *testing.T, e env) {
4617	te := newTest(t, e)
4618	te.userAgent = testAppUA
4619	te.unaryClientInt = failOkayRPC
4620	te.startServer(&testServer{security: e.security})
4621	defer te.tearDown()
4622
4623	tc := testpb.NewTestServiceClient(te.clientConn())
4624	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
4625	defer cancel()
4626	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.NotFound {
4627		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound)
4628	}
4629}
4630
4631func (s) TestStreamClientInterceptor(t *testing.T) {
4632	for _, e := range listTestEnv() {
4633		testStreamClientInterceptor(t, e)
4634	}
4635}
4636
4637func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
4638	s, err := streamer(ctx, desc, cc, method, opts...)
4639	if err == nil {
4640		return nil, status.Error(codes.NotFound, "")
4641	}
4642	return s, nil
4643}
4644
4645func testStreamClientInterceptor(t *testing.T, e env) {
4646	te := newTest(t, e)
4647	te.streamClientInt = failOkayStream
4648	te.startServer(&testServer{security: e.security})
4649	defer te.tearDown()
4650
4651	tc := testpb.NewTestServiceClient(te.clientConn())
4652	respParam := []*testpb.ResponseParameters{
4653		{
4654			Size: int32(1),
4655		},
4656	}
4657	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
4658	if err != nil {
4659		t.Fatal(err)
4660	}
4661	req := &testpb.StreamingOutputCallRequest{
4662		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4663		ResponseParameters: respParam,
4664		Payload:            payload,
4665	}
4666	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
4667	defer cancel()
4668	if _, err := tc.StreamingOutputCall(ctx, req); status.Code(err) != codes.NotFound {
4669		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, error code %s", tc, err, codes.NotFound)
4670	}
4671}
4672
4673func (s) TestUnaryServerInterceptor(t *testing.T) {
4674	for _, e := range listTestEnv() {
4675		testUnaryServerInterceptor(t, e)
4676	}
4677}
4678
4679func errInjector(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
4680	return nil, status.Error(codes.PermissionDenied, "")
4681}
4682
4683func testUnaryServerInterceptor(t *testing.T, e env) {
4684	te := newTest(t, e)
4685	te.unaryServerInt = errInjector
4686	te.startServer(&testServer{security: e.security})
4687	defer te.tearDown()
4688
4689	tc := testpb.NewTestServiceClient(te.clientConn())
4690	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
4691	defer cancel()
4692	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.PermissionDenied {
4693		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
4694	}
4695}
4696
4697func (s) TestStreamServerInterceptor(t *testing.T) {
4698	for _, e := range listTestEnv() {
4699		// TODO(bradfitz): Temporarily skip this env due to #619.
4700		if e.name == "handler-tls" {
4701			continue
4702		}
4703		testStreamServerInterceptor(t, e)
4704	}
4705}
4706
4707func fullDuplexOnly(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
4708	if info.FullMethod == "/grpc.testing.TestService/FullDuplexCall" {
4709		return handler(srv, ss)
4710	}
4711	// Reject the other methods.
4712	return status.Error(codes.PermissionDenied, "")
4713}
4714
4715func testStreamServerInterceptor(t *testing.T, e env) {
4716	te := newTest(t, e)
4717	te.streamServerInt = fullDuplexOnly
4718	te.startServer(&testServer{security: e.security})
4719	defer te.tearDown()
4720
4721	tc := testpb.NewTestServiceClient(te.clientConn())
4722	respParam := []*testpb.ResponseParameters{
4723		{
4724			Size: int32(1),
4725		},
4726	}
4727	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
4728	if err != nil {
4729		t.Fatal(err)
4730	}
4731	req := &testpb.StreamingOutputCallRequest{
4732		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4733		ResponseParameters: respParam,
4734		Payload:            payload,
4735	}
4736	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
4737	defer cancel()
4738	s1, err := tc.StreamingOutputCall(ctx, req)
4739	if err != nil {
4740		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, <nil>", tc, err)
4741	}
4742	if _, err := s1.Recv(); status.Code(err) != codes.PermissionDenied {
4743		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
4744	}
4745	s2, err := tc.FullDuplexCall(ctx)
4746	if err != nil {
4747		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
4748	}
4749	if err := s2.Send(req); err != nil {
4750		t.Fatalf("%v.Send(_) = %v, want <nil>", s2, err)
4751	}
4752	if _, err := s2.Recv(); err != nil {
4753		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", s2, err)
4754	}
4755}
4756
4757// funcServer implements methods of TestServiceServer using funcs,
4758// similar to an http.HandlerFunc.
4759// Any unimplemented method will crash. Tests implement the method(s)
4760// they need.
4761type funcServer struct {
4762	testpb.TestServiceServer
4763	unaryCall          func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error)
4764	streamingInputCall func(stream testpb.TestService_StreamingInputCallServer) error
4765	fullDuplexCall     func(stream testpb.TestService_FullDuplexCallServer) error
4766}
4767
4768func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
4769	return s.unaryCall(ctx, in)
4770}
4771
4772func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
4773	return s.streamingInputCall(stream)
4774}
4775
4776func (s *funcServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
4777	return s.fullDuplexCall(stream)
4778}
4779
4780func (s) TestClientRequestBodyErrorUnexpectedEOF(t *testing.T) {
4781	for _, e := range listTestEnv() {
4782		testClientRequestBodyErrorUnexpectedEOF(t, e)
4783	}
4784}
4785
4786func testClientRequestBodyErrorUnexpectedEOF(t *testing.T, e env) {
4787	te := newTest(t, e)
4788	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
4789		errUnexpectedCall := errors.New("unexpected call func server method")
4790		t.Error(errUnexpectedCall)
4791		return nil, errUnexpectedCall
4792	}}
4793	te.startServer(ts)
4794	defer te.tearDown()
4795	te.withServerTester(func(st *serverTester) {
4796		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
4797		// Say we have 5 bytes coming, but set END_STREAM flag:
4798		st.writeData(1, true, []byte{0, 0, 0, 0, 5})
4799		st.wantAnyFrame() // wait for server to crash (it used to crash)
4800	})
4801}
4802
4803func (s) TestClientRequestBodyErrorCloseAfterLength(t *testing.T) {
4804	for _, e := range listTestEnv() {
4805		testClientRequestBodyErrorCloseAfterLength(t, e)
4806	}
4807}
4808
4809func testClientRequestBodyErrorCloseAfterLength(t *testing.T, e env) {
4810	te := newTest(t, e)
4811	te.declareLogNoise("Server.processUnaryRPC failed to write status")
4812	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
4813		errUnexpectedCall := errors.New("unexpected call func server method")
4814		t.Error(errUnexpectedCall)
4815		return nil, errUnexpectedCall
4816	}}
4817	te.startServer(ts)
4818	defer te.tearDown()
4819	te.withServerTester(func(st *serverTester) {
4820		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
4821		// say we're sending 5 bytes, but then close the connection instead.
4822		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
4823		st.cc.Close()
4824	})
4825}
4826
4827func (s) TestClientRequestBodyErrorCancel(t *testing.T) {
4828	for _, e := range listTestEnv() {
4829		testClientRequestBodyErrorCancel(t, e)
4830	}
4831}
4832
4833func testClientRequestBodyErrorCancel(t *testing.T, e env) {
4834	te := newTest(t, e)
4835	gotCall := make(chan bool, 1)
4836	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
4837		gotCall <- true
4838		return new(testpb.SimpleResponse), nil
4839	}}
4840	te.startServer(ts)
4841	defer te.tearDown()
4842	te.withServerTester(func(st *serverTester) {
4843		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
4844		// Say we have 5 bytes coming, but cancel it instead.
4845		st.writeRSTStream(1, http2.ErrCodeCancel)
4846		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
4847
4848		// Verify we didn't a call yet.
4849		select {
4850		case <-gotCall:
4851			t.Fatal("unexpected call")
4852		default:
4853		}
4854
4855		// And now send an uncanceled (but still invalid), just to get a response.
4856		st.writeHeadersGRPC(3, "/grpc.testing.TestService/UnaryCall", false)
4857		st.writeData(3, true, []byte{0, 0, 0, 0, 0})
4858		<-gotCall
4859		st.wantAnyFrame()
4860	})
4861}
4862
4863func (s) TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) {
4864	for _, e := range listTestEnv() {
4865		testClientRequestBodyErrorCancelStreamingInput(t, e)
4866	}
4867}
4868
4869func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) {
4870	te := newTest(t, e)
4871	recvErr := make(chan error, 1)
4872	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
4873		_, err := stream.Recv()
4874		recvErr <- err
4875		return nil
4876	}}
4877	te.startServer(ts)
4878	defer te.tearDown()
4879	te.withServerTester(func(st *serverTester) {
4880		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
4881		// Say we have 5 bytes coming, but cancel it instead.
4882		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
4883		st.writeRSTStream(1, http2.ErrCodeCancel)
4884
4885		var got error
4886		select {
4887		case got = <-recvErr:
4888		case <-time.After(3 * time.Second):
4889			t.Fatal("timeout waiting for error")
4890		}
4891		if grpc.Code(got) != codes.Canceled {
4892			t.Errorf("error = %#v; want error code %s", got, codes.Canceled)
4893		}
4894	})
4895}
4896
4897func (s) TestClientInitialHeaderEndStream(t *testing.T) {
4898	for _, e := range listTestEnv() {
4899		if e.httpHandler {
4900			continue
4901		}
4902		testClientInitialHeaderEndStream(t, e)
4903	}
4904}
4905
4906func testClientInitialHeaderEndStream(t *testing.T, e env) {
4907	// To ensure RST_STREAM is sent for illegal data write and not normal stream
4908	// close.
4909	frameCheckingDone := make(chan struct{})
4910	// To ensure goroutine for test does not end before RPC handler performs error
4911	// checking.
4912	handlerDone := make(chan struct{})
4913	te := newTest(t, e)
4914	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
4915		defer close(handlerDone)
4916		// Block on serverTester receiving RST_STREAM. This ensures server has closed
4917		// stream before stream.Recv().
4918		<-frameCheckingDone
4919		data, err := stream.Recv()
4920		if err == nil {
4921			t.Errorf("unexpected data received in func server method: '%v'", data)
4922		} else if status.Code(err) != codes.Canceled {
4923			t.Errorf("expected canceled error, instead received '%v'", err)
4924		}
4925		return nil
4926	}}
4927	te.startServer(ts)
4928	defer te.tearDown()
4929	te.withServerTester(func(st *serverTester) {
4930		// Send a headers with END_STREAM flag, but then write data.
4931		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", true)
4932		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
4933		st.wantAnyFrame()
4934		st.wantAnyFrame()
4935		st.wantRSTStream(http2.ErrCodeStreamClosed)
4936		close(frameCheckingDone)
4937		<-handlerDone
4938	})
4939}
4940
4941func (s) TestClientSendDataAfterCloseSend(t *testing.T) {
4942	for _, e := range listTestEnv() {
4943		if e.httpHandler {
4944			continue
4945		}
4946		testClientSendDataAfterCloseSend(t, e)
4947	}
4948}
4949
4950func testClientSendDataAfterCloseSend(t *testing.T, e env) {
4951	// To ensure RST_STREAM is sent for illegal data write prior to execution of RPC
4952	// handler.
4953	frameCheckingDone := make(chan struct{})
4954	// To ensure goroutine for test does not end before RPC handler performs error
4955	// checking.
4956	handlerDone := make(chan struct{})
4957	te := newTest(t, e)
4958	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
4959		defer close(handlerDone)
4960		// Block on serverTester receiving RST_STREAM. This ensures server has closed
4961		// stream before stream.Recv().
4962		<-frameCheckingDone
4963		for {
4964			_, err := stream.Recv()
4965			if err == io.EOF {
4966				break
4967			}
4968			if err != nil {
4969				if status.Code(err) != codes.Canceled {
4970					t.Errorf("expected canceled error, instead received '%v'", err)
4971				}
4972				break
4973			}
4974		}
4975		if err := stream.SendMsg(nil); err == nil {
4976			t.Error("expected error sending message on stream after stream closed due to illegal data")
4977		} else if status.Code(err) != codes.Internal {
4978			t.Errorf("expected internal error, instead received '%v'", err)
4979		}
4980		return nil
4981	}}
4982	te.startServer(ts)
4983	defer te.tearDown()
4984	te.withServerTester(func(st *serverTester) {
4985		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
4986		// Send data with END_STREAM flag, but then write more data.
4987		st.writeData(1, true, []byte{0, 0, 0, 0, 0})
4988		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
4989		st.wantAnyFrame()
4990		st.wantAnyFrame()
4991		st.wantRSTStream(http2.ErrCodeStreamClosed)
4992		close(frameCheckingDone)
4993		<-handlerDone
4994	})
4995}
4996
4997func (s) TestClientResourceExhaustedCancelFullDuplex(t *testing.T) {
4998	for _, e := range listTestEnv() {
4999		if e.httpHandler {
5000			// httpHandler write won't be blocked on flow control window.
5001			continue
5002		}
5003		testClientResourceExhaustedCancelFullDuplex(t, e)
5004	}
5005}
5006
5007func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) {
5008	te := newTest(t, e)
5009	recvErr := make(chan error, 1)
5010	ts := &funcServer{fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
5011		defer close(recvErr)
5012		_, err := stream.Recv()
5013		if err != nil {
5014			return status.Errorf(codes.Internal, "stream.Recv() got error: %v, want <nil>", err)
5015		}
5016		// create a payload that's larger than the default flow control window.
5017		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 10)
5018		if err != nil {
5019			return err
5020		}
5021		resp := &testpb.StreamingOutputCallResponse{
5022			Payload: payload,
5023		}
5024		ce := make(chan error, 1)
5025		go func() {
5026			var err error
5027			for {
5028				if err = stream.Send(resp); err != nil {
5029					break
5030				}
5031			}
5032			ce <- err
5033		}()
5034		select {
5035		case err = <-ce:
5036		case <-time.After(10 * time.Second):
5037			err = errors.New("10s timeout reached")
5038		}
5039		recvErr <- err
5040		return err
5041	}}
5042	te.startServer(ts)
5043	defer te.tearDown()
5044	// set a low limit on receive message size to error with Resource Exhausted on
5045	// client side when server send a large message.
5046	te.maxClientReceiveMsgSize = newInt(10)
5047	cc := te.clientConn()
5048	tc := testpb.NewTestServiceClient(cc)
5049
5050	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
5051	defer cancel()
5052	stream, err := tc.FullDuplexCall(ctx)
5053	if err != nil {
5054		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
5055	}
5056	req := &testpb.StreamingOutputCallRequest{}
5057	if err := stream.Send(req); err != nil {
5058		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
5059	}
5060	if _, err := stream.Recv(); status.Code(err) != codes.ResourceExhausted {
5061		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
5062	}
5063	err = <-recvErr
5064	if status.Code(err) != codes.Canceled {
5065		t.Fatalf("server got error %v, want error code: %s", err, codes.Canceled)
5066	}
5067}
5068
5069type clientFailCreds struct{}
5070
5071func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
5072	return rawConn, nil, nil
5073}
5074func (c *clientFailCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
5075	return nil, nil, fmt.Errorf("client handshake fails with fatal error")
5076}
5077func (c *clientFailCreds) Info() credentials.ProtocolInfo {
5078	return credentials.ProtocolInfo{}
5079}
5080func (c *clientFailCreds) Clone() credentials.TransportCredentials {
5081	return c
5082}
5083func (c *clientFailCreds) OverrideServerName(s string) error {
5084	return nil
5085}
5086
5087// This test makes sure that failfast RPCs fail if client handshake fails with
5088// fatal errors.
5089func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) {
5090	lis, err := net.Listen("tcp", "localhost:0")
5091	if err != nil {
5092		t.Fatalf("Failed to listen: %v", err)
5093	}
5094	defer lis.Close()
5095
5096	cc, err := grpc.Dial("passthrough:///"+lis.Addr().String(), grpc.WithTransportCredentials(&clientFailCreds{}))
5097	if err != nil {
5098		t.Fatalf("grpc.Dial(_) = %v", err)
5099	}
5100	defer cc.Close()
5101
5102	tc := testpb.NewTestServiceClient(cc)
5103	// This unary call should fail, but not timeout.
5104	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
5105	defer cancel()
5106	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(false)); status.Code(err) != codes.Unavailable {
5107		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want <Unavailable>", err)
5108	}
5109}
5110
5111func (s) TestFlowControlLogicalRace(t *testing.T) {
5112	// Test for a regression of https://github.com/grpc/grpc-go/issues/632,
5113	// and other flow control bugs.
5114
5115	const (
5116		itemCount   = 100
5117		itemSize    = 1 << 10
5118		recvCount   = 2
5119		maxFailures = 3
5120
5121		requestTimeout = time.Second * 5
5122	)
5123
5124	requestCount := 10000
5125	if raceMode {
5126		requestCount = 1000
5127	}
5128
5129	lis, err := net.Listen("tcp", "localhost:0")
5130	if err != nil {
5131		t.Fatalf("Failed to listen: %v", err)
5132	}
5133	defer lis.Close()
5134
5135	s := grpc.NewServer()
5136	testpb.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{
5137		itemCount: itemCount,
5138		itemSize:  itemSize,
5139	})
5140	defer s.Stop()
5141
5142	go s.Serve(lis)
5143
5144	ctx := context.Background()
5145	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock())
5146	if err != nil {
5147		t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err)
5148	}
5149	defer cc.Close()
5150	cl := testpb.NewTestServiceClient(cc)
5151
5152	failures := 0
5153	for i := 0; i < requestCount; i++ {
5154		ctx, cancel := context.WithTimeout(ctx, requestTimeout)
5155		output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
5156		if err != nil {
5157			t.Fatalf("StreamingOutputCall; err = %q", err)
5158		}
5159
5160		j := 0
5161	loop:
5162		for ; j < recvCount; j++ {
5163			_, err := output.Recv()
5164			if err != nil {
5165				if err == io.EOF {
5166					break loop
5167				}
5168				switch status.Code(err) {
5169				case codes.DeadlineExceeded:
5170					break loop
5171				default:
5172					t.Fatalf("Recv; err = %q", err)
5173				}
5174			}
5175		}
5176		cancel()
5177		<-ctx.Done()
5178
5179		if j < recvCount {
5180			t.Errorf("got %d responses to request %d", j, i)
5181			failures++
5182			if failures >= maxFailures {
5183				// Continue past the first failure to see if the connection is
5184				// entirely broken, or if only a single RPC was affected
5185				break
5186			}
5187		}
5188	}
5189}
5190
5191type flowControlLogicalRaceServer struct {
5192	testpb.TestServiceServer
5193
5194	itemSize  int
5195	itemCount int
5196}
5197
5198func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testpb.TestService_StreamingOutputCallServer) error {
5199	for i := 0; i < s.itemCount; i++ {
5200		err := srv.Send(&testpb.StreamingOutputCallResponse{
5201			Payload: &testpb.Payload{
5202				// Sending a large stream of data which the client reject
5203				// helps to trigger some types of flow control bugs.
5204				//
5205				// Reallocating memory here is inefficient, but the stress it
5206				// puts on the GC leads to more frequent flow control
5207				// failures. The GC likely causes more variety in the
5208				// goroutine scheduling orders.
5209				Body: bytes.Repeat([]byte("a"), s.itemSize),
5210			},
5211		})
5212		if err != nil {
5213			return err
5214		}
5215	}
5216	return nil
5217}
5218
5219type lockingWriter struct {
5220	mu sync.Mutex
5221	w  io.Writer
5222}
5223
5224func (lw *lockingWriter) Write(p []byte) (n int, err error) {
5225	lw.mu.Lock()
5226	defer lw.mu.Unlock()
5227	return lw.w.Write(p)
5228}
5229
5230func (lw *lockingWriter) setWriter(w io.Writer) {
5231	lw.mu.Lock()
5232	defer lw.mu.Unlock()
5233	lw.w = w
5234}
5235
5236var testLogOutput = &lockingWriter{w: os.Stderr}
5237
5238// awaitNewConnLogOutput waits for any of grpc.NewConn's goroutines to
5239// terminate, if they're still running. It spams logs with this
5240// message.  We wait for it so our log filter is still
5241// active. Otherwise the "defer restore()" at the top of various test
5242// functions restores our log filter and then the goroutine spams.
5243func awaitNewConnLogOutput() {
5244	awaitLogOutput(50*time.Millisecond, "grpc: the client connection is closing; please retry")
5245}
5246
5247func awaitLogOutput(maxWait time.Duration, phrase string) {
5248	pb := []byte(phrase)
5249
5250	timer := time.NewTimer(maxWait)
5251	defer timer.Stop()
5252	wakeup := make(chan bool, 1)
5253	for {
5254		if logOutputHasContents(pb, wakeup) {
5255			return
5256		}
5257		select {
5258		case <-timer.C:
5259			// Too slow. Oh well.
5260			return
5261		case <-wakeup:
5262		}
5263	}
5264}
5265
5266func logOutputHasContents(v []byte, wakeup chan<- bool) bool {
5267	testLogOutput.mu.Lock()
5268	defer testLogOutput.mu.Unlock()
5269	fw, ok := testLogOutput.w.(*filterWriter)
5270	if !ok {
5271		return false
5272	}
5273	fw.mu.Lock()
5274	defer fw.mu.Unlock()
5275	if bytes.Contains(fw.buf.Bytes(), v) {
5276		return true
5277	}
5278	fw.wakeup = wakeup
5279	return false
5280}
5281
5282var verboseLogs = flag.Bool("verbose_logs", false, "show all log output, without filtering")
5283
5284func noop() {}
5285
5286// declareLogNoise declares that t is expected to emit the following noisy
5287// phrases, even on success. Those phrases will be filtered from log output and
5288// only be shown if *verbose_logs or t ends up failing. The returned restore
5289// function should be called with defer to be run before the test ends.
5290func declareLogNoise(t *testing.T, phrases ...string) (restore func()) {
5291	if *verboseLogs {
5292		return noop
5293	}
5294	fw := &filterWriter{dst: os.Stderr, filter: phrases}
5295	testLogOutput.setWriter(fw)
5296	return func() {
5297		if t.Failed() {
5298			fw.mu.Lock()
5299			defer fw.mu.Unlock()
5300			if fw.buf.Len() > 0 {
5301				t.Logf("Complete log output:\n%s", fw.buf.Bytes())
5302			}
5303		}
5304		testLogOutput.setWriter(os.Stderr)
5305	}
5306}
5307
5308type filterWriter struct {
5309	dst    io.Writer
5310	filter []string
5311
5312	mu     sync.Mutex
5313	buf    bytes.Buffer
5314	wakeup chan<- bool // if non-nil, gets true on write
5315}
5316
5317func (fw *filterWriter) Write(p []byte) (n int, err error) {
5318	fw.mu.Lock()
5319	fw.buf.Write(p)
5320	if fw.wakeup != nil {
5321		select {
5322		case fw.wakeup <- true:
5323		default:
5324		}
5325	}
5326	fw.mu.Unlock()
5327
5328	ps := string(p)
5329	for _, f := range fw.filter {
5330		if strings.Contains(ps, f) {
5331			return len(p), nil
5332		}
5333	}
5334	return fw.dst.Write(p)
5335}
5336
5337func (s) TestGRPCMethod(t *testing.T) {
5338	var method string
5339	var ok bool
5340
5341	ss := &stubserver.StubServer{
5342		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5343			method, ok = grpc.Method(ctx)
5344			return &testpb.Empty{}, nil
5345		},
5346	}
5347	if err := ss.Start(nil); err != nil {
5348		t.Fatalf("Error starting endpoint server: %v", err)
5349	}
5350	defer ss.Stop()
5351
5352	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
5353	defer cancel()
5354
5355	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
5356		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
5357	}
5358
5359	if want := "/grpc.testing.TestService/EmptyCall"; !ok || method != want {
5360		t.Fatalf("grpc.Method(_) = %q, %v; want %q, true", method, ok, want)
5361	}
5362}
5363
5364// renameProtoCodec is an encoding.Codec wrapper that allows customizing the
5365// Name() of another codec.
5366type renameProtoCodec struct {
5367	encoding.Codec
5368	name string
5369}
5370
5371func (r *renameProtoCodec) Name() string { return r.name }
5372
5373// TestForceCodecName confirms that the ForceCodec call option sets the subtype
5374// in the content-type header according to the Name() of the codec provided.
5375func (s) TestForceCodecName(t *testing.T) {
5376	wantContentTypeCh := make(chan []string, 1)
5377	defer close(wantContentTypeCh)
5378
5379	ss := &stubserver.StubServer{
5380		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5381			md, ok := metadata.FromIncomingContext(ctx)
5382			if !ok {
5383				return nil, status.Errorf(codes.Internal, "no metadata in context")
5384			}
5385			if got, want := md["content-type"], <-wantContentTypeCh; !reflect.DeepEqual(got, want) {
5386				return nil, status.Errorf(codes.Internal, "got content-type=%q; want [%q]", got, want)
5387			}
5388			return &testpb.Empty{}, nil
5389		},
5390	}
5391	if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(encoding.GetCodec("proto"))}); err != nil {
5392		t.Fatalf("Error starting endpoint server: %v", err)
5393	}
5394	defer ss.Stop()
5395
5396	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
5397	defer cancel()
5398
5399	codec := &renameProtoCodec{Codec: encoding.GetCodec("proto"), name: "some-test-name"}
5400	wantContentTypeCh <- []string{"application/grpc+some-test-name"}
5401	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil {
5402		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
5403	}
5404
5405	// Confirm the name is converted to lowercase before transmitting.
5406	codec.name = "aNoTHeRNaME"
5407	wantContentTypeCh <- []string{"application/grpc+anothername"}
5408	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil {
5409		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
5410	}
5411}
5412
5413func (s) TestForceServerCodec(t *testing.T) {
5414	ss := &stubserver.StubServer{
5415		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5416			return &testpb.Empty{}, nil
5417		},
5418	}
5419	codec := &countingProtoCodec{}
5420	if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(codec)}); err != nil {
5421		t.Fatalf("Error starting endpoint server: %v", err)
5422	}
5423	defer ss.Stop()
5424
5425	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
5426	defer cancel()
5427
5428	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
5429		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
5430	}
5431
5432	unmarshalCount := atomic.LoadInt32(&codec.unmarshalCount)
5433	const wantUnmarshalCount = 1
5434	if unmarshalCount != wantUnmarshalCount {
5435		t.Fatalf("protoCodec.unmarshalCount = %d; want %d", unmarshalCount, wantUnmarshalCount)
5436	}
5437	marshalCount := atomic.LoadInt32(&codec.marshalCount)
5438	const wantMarshalCount = 1
5439	if marshalCount != wantMarshalCount {
5440		t.Fatalf("protoCodec.marshalCount = %d; want %d", marshalCount, wantMarshalCount)
5441	}
5442}
5443
5444func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) {
5445	const mdkey = "somedata"
5446
5447	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
5448	endpoint := &stubserver.StubServer{
5449		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5450			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
5451				return nil, status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
5452			}
5453			return &testpb.Empty{}, nil
5454		},
5455	}
5456	if err := endpoint.Start(nil); err != nil {
5457		t.Fatalf("Error starting endpoint server: %v", err)
5458	}
5459	defer endpoint.Stop()
5460
5461	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
5462	// without explicitly copying the metadata.
5463	proxy := &stubserver.StubServer{
5464		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5465			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
5466				return nil, status.Errorf(codes.Internal, "proxy: md=%v; want contains(%q)", md, mdkey)
5467			}
5468			return endpoint.Client.EmptyCall(ctx, in)
5469		},
5470	}
5471	if err := proxy.Start(nil); err != nil {
5472		t.Fatalf("Error starting proxy server: %v", err)
5473	}
5474	defer proxy.Stop()
5475
5476	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
5477	defer cancel()
5478	md := metadata.Pairs(mdkey, "val")
5479	ctx = metadata.NewOutgoingContext(ctx, md)
5480
5481	// Sanity check that endpoint properly errors when it sees mdkey.
5482	_, err := endpoint.Client.EmptyCall(ctx, &testpb.Empty{})
5483	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
5484		t.Fatalf("endpoint.Client.EmptyCall(_, _) = _, %v; want _, <status with Code()=Internal>", err)
5485	}
5486
5487	if _, err := proxy.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
5488		t.Fatal(err.Error())
5489	}
5490}
5491
5492func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) {
5493	const mdkey = "somedata"
5494
5495	// doFDC performs a FullDuplexCall with client and returns the error from the
5496	// first stream.Recv call, or nil if that error is io.EOF.  Calls t.Fatal if
5497	// the stream cannot be established.
5498	doFDC := func(ctx context.Context, client testpb.TestServiceClient) error {
5499		stream, err := client.FullDuplexCall(ctx)
5500		if err != nil {
5501			t.Fatalf("Unwanted error: %v", err)
5502		}
5503		if _, err := stream.Recv(); err != io.EOF {
5504			return err
5505		}
5506		return nil
5507	}
5508
5509	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
5510	endpoint := &stubserver.StubServer{
5511		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
5512			ctx := stream.Context()
5513			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
5514				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
5515			}
5516			return nil
5517		},
5518	}
5519	if err := endpoint.Start(nil); err != nil {
5520		t.Fatalf("Error starting endpoint server: %v", err)
5521	}
5522	defer endpoint.Stop()
5523
5524	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
5525	// without explicitly copying the metadata.
5526	proxy := &stubserver.StubServer{
5527		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
5528			ctx := stream.Context()
5529			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
5530				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
5531			}
5532			return doFDC(ctx, endpoint.Client)
5533		},
5534	}
5535	if err := proxy.Start(nil); err != nil {
5536		t.Fatalf("Error starting proxy server: %v", err)
5537	}
5538	defer proxy.Stop()
5539
5540	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
5541	defer cancel()
5542	md := metadata.Pairs(mdkey, "val")
5543	ctx = metadata.NewOutgoingContext(ctx, md)
5544
5545	// Sanity check that endpoint properly errors when it sees mdkey in ctx.
5546	err := doFDC(ctx, endpoint.Client)
5547	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
5548		t.Fatalf("stream.Recv() = _, %v; want _, <status with Code()=Internal>", err)
5549	}
5550
5551	if err := doFDC(ctx, proxy.Client); err != nil {
5552		t.Fatalf("doFDC(_, proxy.Client) = %v; want nil", err)
5553	}
5554}
5555
5556func (s) TestStatsTagsAndTrace(t *testing.T) {
5557	// Data added to context by client (typically in a stats handler).
5558	tags := []byte{1, 5, 2, 4, 3}
5559	trace := []byte{5, 2, 1, 3, 4}
5560
5561	// endpoint ensures Tags() and Trace() in context match those that were added
5562	// by the client and returns an error if not.
5563	endpoint := &stubserver.StubServer{
5564		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5565			md, _ := metadata.FromIncomingContext(ctx)
5566			if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, tags) {
5567				return nil, status.Errorf(codes.Internal, "stats.Tags(%v)=%v; want %v", ctx, tg, tags)
5568			}
5569			if !reflect.DeepEqual(md["grpc-tags-bin"], []string{string(tags)}) {
5570				return nil, status.Errorf(codes.Internal, "md['grpc-tags-bin']=%v; want %v", md["grpc-tags-bin"], tags)
5571			}
5572			if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, trace) {
5573				return nil, status.Errorf(codes.Internal, "stats.Trace(%v)=%v; want %v", ctx, tr, trace)
5574			}
5575			if !reflect.DeepEqual(md["grpc-trace-bin"], []string{string(trace)}) {
5576				return nil, status.Errorf(codes.Internal, "md['grpc-trace-bin']=%v; want %v", md["grpc-trace-bin"], trace)
5577			}
5578			return &testpb.Empty{}, nil
5579		},
5580	}
5581	if err := endpoint.Start(nil); err != nil {
5582		t.Fatalf("Error starting endpoint server: %v", err)
5583	}
5584	defer endpoint.Stop()
5585
5586	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
5587	defer cancel()
5588
5589	testCases := []struct {
5590		ctx  context.Context
5591		want codes.Code
5592	}{
5593		{ctx: ctx, want: codes.Internal},
5594		{ctx: stats.SetTags(ctx, tags), want: codes.Internal},
5595		{ctx: stats.SetTrace(ctx, trace), want: codes.Internal},
5596		{ctx: stats.SetTags(stats.SetTrace(ctx, tags), tags), want: codes.Internal},
5597		{ctx: stats.SetTags(stats.SetTrace(ctx, trace), tags), want: codes.OK},
5598	}
5599
5600	for _, tc := range testCases {
5601		_, err := endpoint.Client.EmptyCall(tc.ctx, &testpb.Empty{})
5602		if tc.want == codes.OK && err != nil {
5603			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, nil", tc.ctx, err)
5604		}
5605		if s, ok := status.FromError(err); !ok || s.Code() != tc.want {
5606			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, <status with Code()=%v>", tc.ctx, err, tc.want)
5607		}
5608	}
5609}
5610
5611func (s) TestTapTimeout(t *testing.T) {
5612	sopts := []grpc.ServerOption{
5613		grpc.InTapHandle(func(ctx context.Context, _ *tap.Info) (context.Context, error) {
5614			c, cancel := context.WithCancel(ctx)
5615			// Call cancel instead of setting a deadline so we can detect which error
5616			// occurred -- this cancellation (desired) or the client's deadline
5617			// expired (indicating this cancellation did not affect the RPC).
5618			time.AfterFunc(10*time.Millisecond, cancel)
5619			return c, nil
5620		}),
5621	}
5622
5623	ss := &stubserver.StubServer{
5624		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5625			<-ctx.Done()
5626			return nil, status.Errorf(codes.Canceled, ctx.Err().Error())
5627		},
5628	}
5629	if err := ss.Start(sopts); err != nil {
5630		t.Fatalf("Error starting endpoint server: %v", err)
5631	}
5632	defer ss.Stop()
5633
5634	// This was known to be flaky; test several times.
5635	for i := 0; i < 10; i++ {
5636		// Set our own deadline in case the server hangs.
5637		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
5638		res, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
5639		cancel()
5640		if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled {
5641			t.Fatalf("ss.Client.EmptyCall(ctx, _) = %v, %v; want nil, <status with Code()=Canceled>", res, err)
5642		}
5643	}
5644
5645}
5646
5647func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) {
5648	ss := &stubserver.StubServer{
5649		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
5650			return status.Errorf(codes.Internal, "")
5651		},
5652	}
5653	sopts := []grpc.ServerOption{}
5654	if err := ss.Start(sopts); err != nil {
5655		t.Fatalf("Error starting endpoint server: %v", err)
5656	}
5657	defer ss.Stop()
5658	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
5659	defer cancel()
5660	stream, err := ss.Client.FullDuplexCall(ctx)
5661	if err != nil {
5662		t.Fatalf("Error while creating stream: %v", err)
5663	}
5664	for {
5665		if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err == nil {
5666			time.Sleep(5 * time.Millisecond)
5667		} else if err == io.EOF {
5668			break // Success.
5669		} else {
5670			t.Fatalf("stream.Send(_) = %v, want io.EOF", err)
5671		}
5672	}
5673}
5674
5675type windowSizeConfig struct {
5676	serverStream int32
5677	serverConn   int32
5678	clientStream int32
5679	clientConn   int32
5680}
5681
5682func max(a, b int32) int32 {
5683	if a > b {
5684		return a
5685	}
5686	return b
5687}
5688
5689func (s) TestConfigurableWindowSizeWithLargeWindow(t *testing.T) {
5690	wc := windowSizeConfig{
5691		serverStream: 8 * 1024 * 1024,
5692		serverConn:   12 * 1024 * 1024,
5693		clientStream: 6 * 1024 * 1024,
5694		clientConn:   8 * 1024 * 1024,
5695	}
5696	for _, e := range listTestEnv() {
5697		testConfigurableWindowSize(t, e, wc)
5698	}
5699}
5700
5701func (s) TestConfigurableWindowSizeWithSmallWindow(t *testing.T) {
5702	wc := windowSizeConfig{
5703		serverStream: 1,
5704		serverConn:   1,
5705		clientStream: 1,
5706		clientConn:   1,
5707	}
5708	for _, e := range listTestEnv() {
5709		testConfigurableWindowSize(t, e, wc)
5710	}
5711}
5712
5713func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) {
5714	te := newTest(t, e)
5715	te.serverInitialWindowSize = wc.serverStream
5716	te.serverInitialConnWindowSize = wc.serverConn
5717	te.clientInitialWindowSize = wc.clientStream
5718	te.clientInitialConnWindowSize = wc.clientConn
5719
5720	te.startServer(&testServer{security: e.security})
5721	defer te.tearDown()
5722
5723	cc := te.clientConn()
5724	tc := testpb.NewTestServiceClient(cc)
5725	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
5726	defer cancel()
5727	stream, err := tc.FullDuplexCall(ctx)
5728	if err != nil {
5729		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
5730	}
5731	numOfIter := 11
5732	// Set message size to exhaust largest of window sizes.
5733	messageSize := max(max(wc.serverStream, wc.serverConn), max(wc.clientStream, wc.clientConn)) / int32(numOfIter-1)
5734	messageSize = max(messageSize, 64*1024)
5735	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, messageSize)
5736	if err != nil {
5737		t.Fatal(err)
5738	}
5739	respParams := []*testpb.ResponseParameters{
5740		{
5741			Size: messageSize,
5742		},
5743	}
5744	req := &testpb.StreamingOutputCallRequest{
5745		ResponseType:       testpb.PayloadType_COMPRESSABLE,
5746		ResponseParameters: respParams,
5747		Payload:            payload,
5748	}
5749	for i := 0; i < numOfIter; i++ {
5750		if err := stream.Send(req); err != nil {
5751			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
5752		}
5753		if _, err := stream.Recv(); err != nil {
5754			t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
5755		}
5756	}
5757	if err := stream.CloseSend(); err != nil {
5758		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
5759	}
5760}
5761
5762func (s) TestWaitForReadyConnection(t *testing.T) {
5763	for _, e := range listTestEnv() {
5764		testWaitForReadyConnection(t, e)
5765	}
5766
5767}
5768
5769func testWaitForReadyConnection(t *testing.T, e env) {
5770	te := newTest(t, e)
5771	te.userAgent = testAppUA
5772	te.startServer(&testServer{security: e.security})
5773	defer te.tearDown()
5774
5775	cc := te.clientConn() // Non-blocking dial.
5776	tc := testpb.NewTestServiceClient(cc)
5777	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
5778	defer cancel()
5779	state := cc.GetState()
5780	// Wait for connection to be Ready.
5781	for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
5782	}
5783	if state != connectivity.Ready {
5784		t.Fatalf("Want connection state to be Ready, got %v", state)
5785	}
5786	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
5787	defer cancel()
5788	// Make a fail-fast RPC.
5789	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
5790		t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err)
5791	}
5792}
5793
5794type errCodec struct {
5795	noError bool
5796}
5797
5798func (c *errCodec) Marshal(v interface{}) ([]byte, error) {
5799	if c.noError {
5800		return []byte{}, nil
5801	}
5802	return nil, fmt.Errorf("3987^12 + 4365^12 = 4472^12")
5803}
5804
5805func (c *errCodec) Unmarshal(data []byte, v interface{}) error {
5806	return nil
5807}
5808
5809func (c *errCodec) Name() string {
5810	return "Fermat's near-miss."
5811}
5812
5813type countingProtoCodec struct {
5814	marshalCount   int32
5815	unmarshalCount int32
5816}
5817
5818func (p *countingProtoCodec) Marshal(v interface{}) ([]byte, error) {
5819	atomic.AddInt32(&p.marshalCount, 1)
5820	vv, ok := v.(proto.Message)
5821	if !ok {
5822		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
5823	}
5824	return proto.Marshal(vv)
5825}
5826
5827func (p *countingProtoCodec) Unmarshal(data []byte, v interface{}) error {
5828	atomic.AddInt32(&p.unmarshalCount, 1)
5829	vv, ok := v.(proto.Message)
5830	if !ok {
5831		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
5832	}
5833	return proto.Unmarshal(data, vv)
5834}
5835
5836func (*countingProtoCodec) Name() string {
5837	return "proto"
5838}
5839
5840func (s) TestEncodeDoesntPanic(t *testing.T) {
5841	for _, e := range listTestEnv() {
5842		testEncodeDoesntPanic(t, e)
5843	}
5844}
5845
5846func testEncodeDoesntPanic(t *testing.T, e env) {
5847	te := newTest(t, e)
5848	erc := &errCodec{}
5849	te.customCodec = erc
5850	te.startServer(&testServer{security: e.security})
5851	defer te.tearDown()
5852	te.customCodec = nil
5853	tc := testpb.NewTestServiceClient(te.clientConn())
5854	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
5855	defer cancel()
5856	// Failure case, should not panic.
5857	tc.EmptyCall(ctx, &testpb.Empty{})
5858	erc.noError = true
5859	// Passing case.
5860	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
5861		t.Fatalf("EmptyCall(_, _) = _, %v, want _, <nil>", err)
5862	}
5863}
5864
5865func (s) TestSvrWriteStatusEarlyWrite(t *testing.T) {
5866	for _, e := range listTestEnv() {
5867		testSvrWriteStatusEarlyWrite(t, e)
5868	}
5869}
5870
5871func testSvrWriteStatusEarlyWrite(t *testing.T, e env) {
5872	te := newTest(t, e)
5873	const smallSize = 1024
5874	const largeSize = 2048
5875	const extraLargeSize = 4096
5876	te.maxServerReceiveMsgSize = newInt(largeSize)
5877	te.maxServerSendMsgSize = newInt(largeSize)
5878	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
5879	if err != nil {
5880		t.Fatal(err)
5881	}
5882	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
5883	if err != nil {
5884		t.Fatal(err)
5885	}
5886	te.startServer(&testServer{security: e.security})
5887	defer te.tearDown()
5888	tc := testpb.NewTestServiceClient(te.clientConn())
5889	respParam := []*testpb.ResponseParameters{
5890		{
5891			Size: int32(smallSize),
5892		},
5893	}
5894	sreq := &testpb.StreamingOutputCallRequest{
5895		ResponseType:       testpb.PayloadType_COMPRESSABLE,
5896		ResponseParameters: respParam,
5897		Payload:            extraLargePayload,
5898	}
5899	// Test recv case: server receives a message larger than maxServerReceiveMsgSize.
5900	stream, err := tc.FullDuplexCall(te.ctx)
5901	if err != nil {
5902		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
5903	}
5904	if err = stream.Send(sreq); err != nil {
5905		t.Fatalf("%v.Send() = _, %v, want <nil>", stream, err)
5906	}
5907	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
5908		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
5909	}
5910	// Test send case: server sends a message larger than maxServerSendMsgSize.
5911	sreq.Payload = smallPayload
5912	respParam[0].Size = int32(extraLargeSize)
5913
5914	stream, err = tc.FullDuplexCall(te.ctx)
5915	if err != nil {
5916		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
5917	}
5918	if err = stream.Send(sreq); err != nil {
5919		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
5920	}
5921	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
5922		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
5923	}
5924}
5925
5926// The following functions with function name ending with TD indicates that they
5927// should be deleted after old service config API is deprecated and deleted.
5928func testServiceConfigSetupTD(t *testing.T, e env) (*test, chan grpc.ServiceConfig) {
5929	te := newTest(t, e)
5930	// We write before read.
5931	ch := make(chan grpc.ServiceConfig, 1)
5932	te.sc = ch
5933	te.userAgent = testAppUA
5934	te.declareLogNoise(
5935		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
5936		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
5937		"grpc: addrConn.resetTransport failed to create client transport: connection error",
5938		"Failed to dial : context canceled; please retry.",
5939	)
5940	return te, ch
5941}
5942
5943func (s) TestServiceConfigGetMethodConfigTD(t *testing.T) {
5944	for _, e := range listTestEnv() {
5945		testGetMethodConfigTD(t, e)
5946	}
5947}
5948
5949func testGetMethodConfigTD(t *testing.T, e env) {
5950	te, ch := testServiceConfigSetupTD(t, e)
5951	defer te.tearDown()
5952
5953	mc1 := grpc.MethodConfig{
5954		WaitForReady: newBool(true),
5955		Timeout:      newDuration(time.Millisecond),
5956	}
5957	mc2 := grpc.MethodConfig{WaitForReady: newBool(false)}
5958	m := make(map[string]grpc.MethodConfig)
5959	m["/grpc.testing.TestService/EmptyCall"] = mc1
5960	m["/grpc.testing.TestService/"] = mc2
5961	sc := grpc.ServiceConfig{
5962		Methods: m,
5963	}
5964	ch <- sc
5965
5966	cc := te.clientConn()
5967	tc := testpb.NewTestServiceClient(cc)
5968	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
5969	defer cancel()
5970	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
5971	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
5972		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
5973	}
5974
5975	m = make(map[string]grpc.MethodConfig)
5976	m["/grpc.testing.TestService/UnaryCall"] = mc1
5977	m["/grpc.testing.TestService/"] = mc2
5978	sc = grpc.ServiceConfig{
5979		Methods: m,
5980	}
5981	ch <- sc
5982	// Wait for the new service config to propagate.
5983	for {
5984		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
5985			break
5986		}
5987	}
5988	// The following RPCs are expected to become fail-fast.
5989	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
5990		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
5991	}
5992}
5993
5994func (s) TestServiceConfigWaitForReadyTD(t *testing.T) {
5995	for _, e := range listTestEnv() {
5996		testServiceConfigWaitForReadyTD(t, e)
5997	}
5998}
5999
6000func testServiceConfigWaitForReadyTD(t *testing.T, e env) {
6001	te, ch := testServiceConfigSetupTD(t, e)
6002	defer te.tearDown()
6003
6004	// Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds.
6005	mc := grpc.MethodConfig{
6006		WaitForReady: newBool(false),
6007		Timeout:      newDuration(time.Millisecond),
6008	}
6009	m := make(map[string]grpc.MethodConfig)
6010	m["/grpc.testing.TestService/EmptyCall"] = mc
6011	m["/grpc.testing.TestService/FullDuplexCall"] = mc
6012	sc := grpc.ServiceConfig{
6013		Methods: m,
6014	}
6015	ch <- sc
6016
6017	cc := te.clientConn()
6018	tc := testpb.NewTestServiceClient(cc)
6019	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
6020	defer cancel()
6021	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
6022	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
6023		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
6024	}
6025	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
6026		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
6027	}
6028
6029	// Generate a service config update.
6030	// Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds.
6031	mc.WaitForReady = newBool(true)
6032	m = make(map[string]grpc.MethodConfig)
6033	m["/grpc.testing.TestService/EmptyCall"] = mc
6034	m["/grpc.testing.TestService/FullDuplexCall"] = mc
6035	sc = grpc.ServiceConfig{
6036		Methods: m,
6037	}
6038	ch <- sc
6039
6040	// Wait for the new service config to take effect.
6041	mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall")
6042	for {
6043		if !*mc.WaitForReady {
6044			time.Sleep(100 * time.Millisecond)
6045			mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall")
6046			continue
6047		}
6048		break
6049	}
6050	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
6051	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
6052		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
6053	}
6054	if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded {
6055		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
6056	}
6057}
6058
6059func (s) TestServiceConfigTimeoutTD(t *testing.T) {
6060	for _, e := range listTestEnv() {
6061		testServiceConfigTimeoutTD(t, e)
6062	}
6063}
6064
6065func testServiceConfigTimeoutTD(t *testing.T, e env) {
6066	te, ch := testServiceConfigSetupTD(t, e)
6067	defer te.tearDown()
6068
6069	// Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
6070	mc := grpc.MethodConfig{
6071		Timeout: newDuration(time.Hour),
6072	}
6073	m := make(map[string]grpc.MethodConfig)
6074	m["/grpc.testing.TestService/EmptyCall"] = mc
6075	m["/grpc.testing.TestService/FullDuplexCall"] = mc
6076	sc := grpc.ServiceConfig{
6077		Methods: m,
6078	}
6079	ch <- sc
6080
6081	cc := te.clientConn()
6082	tc := testpb.NewTestServiceClient(cc)
6083	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
6084	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
6085	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
6086		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
6087	}
6088	cancel()
6089	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
6090	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
6091		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
6092	}
6093	cancel()
6094
6095	// Generate a service config update.
6096	// Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
6097	mc.Timeout = newDuration(time.Nanosecond)
6098	m = make(map[string]grpc.MethodConfig)
6099	m["/grpc.testing.TestService/EmptyCall"] = mc
6100	m["/grpc.testing.TestService/FullDuplexCall"] = mc
6101	sc = grpc.ServiceConfig{
6102		Methods: m,
6103	}
6104	ch <- sc
6105
6106	// Wait for the new service config to take effect.
6107	mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall")
6108	for {
6109		if *mc.Timeout != time.Nanosecond {
6110			time.Sleep(100 * time.Millisecond)
6111			mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall")
6112			continue
6113		}
6114		break
6115	}
6116
6117	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
6118	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
6119		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
6120	}
6121	cancel()
6122
6123	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
6124	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
6125		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
6126	}
6127	cancel()
6128}
6129
6130func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) {
6131	for _, e := range listTestEnv() {
6132		testServiceConfigMaxMsgSizeTD(t, e)
6133	}
6134}
6135
6136func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) {
6137	// Setting up values and objects shared across all test cases.
6138	const smallSize = 1
6139	const largeSize = 1024
6140	const extraLargeSize = 2048
6141
6142	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
6143	if err != nil {
6144		t.Fatal(err)
6145	}
6146	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
6147	if err != nil {
6148		t.Fatal(err)
6149	}
6150	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
6151	if err != nil {
6152		t.Fatal(err)
6153	}
6154
6155	mc := grpc.MethodConfig{
6156		MaxReqSize:  newInt(extraLargeSize),
6157		MaxRespSize: newInt(extraLargeSize),
6158	}
6159
6160	m := make(map[string]grpc.MethodConfig)
6161	m["/grpc.testing.TestService/UnaryCall"] = mc
6162	m["/grpc.testing.TestService/FullDuplexCall"] = mc
6163	sc := grpc.ServiceConfig{
6164		Methods: m,
6165	}
6166	// Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
6167	te1, ch1 := testServiceConfigSetupTD(t, e)
6168	te1.startServer(&testServer{security: e.security})
6169	defer te1.tearDown()
6170
6171	ch1 <- sc
6172	tc := testpb.NewTestServiceClient(te1.clientConn())
6173
6174	req := &testpb.SimpleRequest{
6175		ResponseType: testpb.PayloadType_COMPRESSABLE,
6176		ResponseSize: int32(extraLargeSize),
6177		Payload:      smallPayload,
6178	}
6179
6180	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
6181	defer cancel()
6182	// Test for unary RPC recv.
6183	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
6184		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
6185	}
6186
6187	// Test for unary RPC send.
6188	req.Payload = extraLargePayload
6189	req.ResponseSize = int32(smallSize)
6190	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
6191		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
6192	}
6193
6194	// Test for streaming RPC recv.
6195	respParam := []*testpb.ResponseParameters{
6196		{
6197			Size: int32(extraLargeSize),
6198		},
6199	}
6200	sreq := &testpb.StreamingOutputCallRequest{
6201		ResponseType:       testpb.PayloadType_COMPRESSABLE,
6202		ResponseParameters: respParam,
6203		Payload:            smallPayload,
6204	}
6205	stream, err := tc.FullDuplexCall(te1.ctx)
6206	if err != nil {
6207		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
6208	}
6209	if err := stream.Send(sreq); err != nil {
6210		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
6211	}
6212	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
6213		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
6214	}
6215
6216	// Test for streaming RPC send.
6217	respParam[0].Size = int32(smallSize)
6218	sreq.Payload = extraLargePayload
6219	stream, err = tc.FullDuplexCall(te1.ctx)
6220	if err != nil {
6221		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
6222	}
6223	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
6224		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
6225	}
6226
6227	// Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
6228	te2, ch2 := testServiceConfigSetupTD(t, e)
6229	te2.maxClientReceiveMsgSize = newInt(1024)
6230	te2.maxClientSendMsgSize = newInt(1024)
6231	te2.startServer(&testServer{security: e.security})
6232	defer te2.tearDown()
6233	ch2 <- sc
6234	tc = testpb.NewTestServiceClient(te2.clientConn())
6235
6236	// Test for unary RPC recv.
6237	req.Payload = smallPayload
6238	req.ResponseSize = int32(largeSize)
6239
6240	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
6241		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
6242	}
6243
6244	// Test for unary RPC send.
6245	req.Payload = largePayload
6246	req.ResponseSize = int32(smallSize)
6247	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
6248		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
6249	}
6250
6251	// Test for streaming RPC recv.
6252	stream, err = tc.FullDuplexCall(te2.ctx)
6253	respParam[0].Size = int32(largeSize)
6254	sreq.Payload = smallPayload
6255	if err != nil {
6256		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
6257	}
6258	if err := stream.Send(sreq); err != nil {
6259		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
6260	}
6261	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
6262		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
6263	}
6264
6265	// Test for streaming RPC send.
6266	respParam[0].Size = int32(smallSize)
6267	sreq.Payload = largePayload
6268	stream, err = tc.FullDuplexCall(te2.ctx)
6269	if err != nil {
6270		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
6271	}
6272	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
6273		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
6274	}
6275
6276	// Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
6277	te3, ch3 := testServiceConfigSetupTD(t, e)
6278	te3.maxClientReceiveMsgSize = newInt(4096)
6279	te3.maxClientSendMsgSize = newInt(4096)
6280	te3.startServer(&testServer{security: e.security})
6281	defer te3.tearDown()
6282	ch3 <- sc
6283	tc = testpb.NewTestServiceClient(te3.clientConn())
6284
6285	// Test for unary RPC recv.
6286	req.Payload = smallPayload
6287	req.ResponseSize = int32(largeSize)
6288
6289	if _, err := tc.UnaryCall(ctx, req); err != nil {
6290		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
6291	}
6292
6293	req.ResponseSize = int32(extraLargeSize)
6294	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
6295		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
6296	}
6297
6298	// Test for unary RPC send.
6299	req.Payload = largePayload
6300	req.ResponseSize = int32(smallSize)
6301	if _, err := tc.UnaryCall(ctx, req); err != nil {
6302		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
6303	}
6304
6305	req.Payload = extraLargePayload
6306	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
6307		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
6308	}
6309
6310	// Test for streaming RPC recv.
6311	stream, err = tc.FullDuplexCall(te3.ctx)
6312	if err != nil {
6313		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
6314	}
6315	respParam[0].Size = int32(largeSize)
6316	sreq.Payload = smallPayload
6317
6318	if err := stream.Send(sreq); err != nil {
6319		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
6320	}
6321	if _, err := stream.Recv(); err != nil {
6322		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
6323	}
6324
6325	respParam[0].Size = int32(extraLargeSize)
6326
6327	if err := stream.Send(sreq); err != nil {
6328		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
6329	}
6330	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
6331		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
6332	}
6333
6334	// Test for streaming RPC send.
6335	respParam[0].Size = int32(smallSize)
6336	sreq.Payload = largePayload
6337	stream, err = tc.FullDuplexCall(te3.ctx)
6338	if err != nil {
6339		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
6340	}
6341	if err := stream.Send(sreq); err != nil {
6342		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
6343	}
6344	sreq.Payload = extraLargePayload
6345	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
6346		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
6347	}
6348}
6349
6350// TestMalformedStreamMethod starts a test server and sends an RPC with a
6351// malformed method name. The server should respond with an UNIMPLEMENTED status
6352// code in this case.
6353func (s) TestMalformedStreamMethod(t *testing.T) {
6354	const testMethod = "a-method-name-without-any-slashes"
6355	te := newTest(t, tcpClearRREnv)
6356	te.startServer(nil)
6357	defer te.tearDown()
6358
6359	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
6360	defer cancel()
6361	err := te.clientConn().Invoke(ctx, testMethod, nil, nil)
6362	if gotCode := status.Code(err); gotCode != codes.Unimplemented {
6363		t.Fatalf("Invoke with method %q, got code %s, want %s", testMethod, gotCode, codes.Unimplemented)
6364	}
6365}
6366
6367func (s) TestMethodFromServerStream(t *testing.T) {
6368	const testMethod = "/package.service/method"
6369	e := tcpClearRREnv
6370	te := newTest(t, e)
6371	var method string
6372	var ok bool
6373	te.unknownHandler = func(srv interface{}, stream grpc.ServerStream) error {
6374		method, ok = grpc.MethodFromServerStream(stream)
6375		return nil
6376	}
6377
6378	te.startServer(nil)
6379	defer te.tearDown()
6380	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
6381	defer cancel()
6382	_ = te.clientConn().Invoke(ctx, testMethod, nil, nil)
6383	if !ok || method != testMethod {
6384		t.Fatalf("Invoke with method %q, got %q, %v, want %q, true", testMethod, method, ok, testMethod)
6385	}
6386}
6387
6388func (s) TestInterceptorCanAccessCallOptions(t *testing.T) {
6389	e := tcpClearRREnv
6390	te := newTest(t, e)
6391	te.startServer(&testServer{security: e.security})
6392	defer te.tearDown()
6393
6394	type observedOptions struct {
6395		headers     []*metadata.MD
6396		trailers    []*metadata.MD
6397		peer        []*peer.Peer
6398		creds       []credentials.PerRPCCredentials
6399		failFast    []bool
6400		maxRecvSize []int
6401		maxSendSize []int
6402		compressor  []string
6403		subtype     []string
6404	}
6405	var observedOpts observedOptions
6406	populateOpts := func(opts []grpc.CallOption) {
6407		for _, o := range opts {
6408			switch o := o.(type) {
6409			case grpc.HeaderCallOption:
6410				observedOpts.headers = append(observedOpts.headers, o.HeaderAddr)
6411			case grpc.TrailerCallOption:
6412				observedOpts.trailers = append(observedOpts.trailers, o.TrailerAddr)
6413			case grpc.PeerCallOption:
6414				observedOpts.peer = append(observedOpts.peer, o.PeerAddr)
6415			case grpc.PerRPCCredsCallOption:
6416				observedOpts.creds = append(observedOpts.creds, o.Creds)
6417			case grpc.FailFastCallOption:
6418				observedOpts.failFast = append(observedOpts.failFast, o.FailFast)
6419			case grpc.MaxRecvMsgSizeCallOption:
6420				observedOpts.maxRecvSize = append(observedOpts.maxRecvSize, o.MaxRecvMsgSize)
6421			case grpc.MaxSendMsgSizeCallOption:
6422				observedOpts.maxSendSize = append(observedOpts.maxSendSize, o.MaxSendMsgSize)
6423			case grpc.CompressorCallOption:
6424				observedOpts.compressor = append(observedOpts.compressor, o.CompressorType)
6425			case grpc.ContentSubtypeCallOption:
6426				observedOpts.subtype = append(observedOpts.subtype, o.ContentSubtype)
6427			}
6428		}
6429	}
6430
6431	te.unaryClientInt = func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
6432		populateOpts(opts)
6433		return nil
6434	}
6435	te.streamClientInt = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
6436		populateOpts(opts)
6437		return nil, nil
6438	}
6439
6440	defaults := []grpc.CallOption{
6441		grpc.WaitForReady(true),
6442		grpc.MaxCallRecvMsgSize(1010),
6443	}
6444	tc := testpb.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...)))
6445
6446	var headers metadata.MD
6447	var trailers metadata.MD
6448	var pr peer.Peer
6449	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
6450	defer cancel()
6451	tc.UnaryCall(ctx, &testpb.SimpleRequest{},
6452		grpc.MaxCallRecvMsgSize(100),
6453		grpc.MaxCallSendMsgSize(200),
6454		grpc.PerRPCCredentials(testPerRPCCredentials{}),
6455		grpc.Header(&headers),
6456		grpc.Trailer(&trailers),
6457		grpc.Peer(&pr))
6458	expected := observedOptions{
6459		failFast:    []bool{false},
6460		maxRecvSize: []int{1010, 100},
6461		maxSendSize: []int{200},
6462		creds:       []credentials.PerRPCCredentials{testPerRPCCredentials{}},
6463		headers:     []*metadata.MD{&headers},
6464		trailers:    []*metadata.MD{&trailers},
6465		peer:        []*peer.Peer{&pr},
6466	}
6467
6468	if !reflect.DeepEqual(expected, observedOpts) {
6469		t.Errorf("unary call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
6470	}
6471
6472	observedOpts = observedOptions{} // reset
6473
6474	tc.StreamingInputCall(ctx,
6475		grpc.WaitForReady(false),
6476		grpc.MaxCallSendMsgSize(2020),
6477		grpc.UseCompressor("comp-type"),
6478		grpc.CallContentSubtype("json"))
6479	expected = observedOptions{
6480		failFast:    []bool{false, true},
6481		maxRecvSize: []int{1010},
6482		maxSendSize: []int{2020},
6483		compressor:  []string{"comp-type"},
6484		subtype:     []string{"json"},
6485	}
6486
6487	if !reflect.DeepEqual(expected, observedOpts) {
6488		t.Errorf("streaming call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
6489	}
6490}
6491
6492func (s) TestCompressorRegister(t *testing.T) {
6493	for _, e := range listTestEnv() {
6494		testCompressorRegister(t, e)
6495	}
6496}
6497
6498func testCompressorRegister(t *testing.T, e env) {
6499	te := newTest(t, e)
6500	te.clientCompression = false
6501	te.serverCompression = false
6502	te.clientUseCompression = true
6503
6504	te.startServer(&testServer{security: e.security})
6505	defer te.tearDown()
6506	tc := testpb.NewTestServiceClient(te.clientConn())
6507
6508	// Unary call
6509	const argSize = 271828
6510	const respSize = 314159
6511	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
6512	if err != nil {
6513		t.Fatal(err)
6514	}
6515	req := &testpb.SimpleRequest{
6516		ResponseType: testpb.PayloadType_COMPRESSABLE,
6517		ResponseSize: respSize,
6518		Payload:      payload,
6519	}
6520	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
6521	if _, err := tc.UnaryCall(ctx, req); err != nil {
6522		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
6523	}
6524	// Streaming RPC
6525	ctx, cancel := context.WithCancel(context.Background())
6526	defer cancel()
6527	stream, err := tc.FullDuplexCall(ctx)
6528	if err != nil {
6529		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
6530	}
6531	respParam := []*testpb.ResponseParameters{
6532		{
6533			Size: 31415,
6534		},
6535	}
6536	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
6537	if err != nil {
6538		t.Fatal(err)
6539	}
6540	sreq := &testpb.StreamingOutputCallRequest{
6541		ResponseType:       testpb.PayloadType_COMPRESSABLE,
6542		ResponseParameters: respParam,
6543		Payload:            payload,
6544	}
6545	if err := stream.Send(sreq); err != nil {
6546		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
6547	}
6548	if _, err := stream.Recv(); err != nil {
6549		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
6550	}
6551}
6552
6553func (s) TestServeExitsWhenListenerClosed(t *testing.T) {
6554	ss := &stubserver.StubServer{
6555		EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
6556			return &testpb.Empty{}, nil
6557		},
6558	}
6559
6560	s := grpc.NewServer()
6561	defer s.Stop()
6562	testpb.RegisterTestServiceServer(s, ss)
6563
6564	lis, err := net.Listen("tcp", "localhost:0")
6565	if err != nil {
6566		t.Fatalf("Failed to create listener: %v", err)
6567	}
6568
6569	done := make(chan struct{})
6570	go func() {
6571		s.Serve(lis)
6572		close(done)
6573	}()
6574
6575	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock())
6576	if err != nil {
6577		t.Fatalf("Failed to dial server: %v", err)
6578	}
6579	defer cc.Close()
6580	c := testpb.NewTestServiceClient(cc)
6581	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
6582	defer cancel()
6583	if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
6584		t.Fatalf("Failed to send test RPC to server: %v", err)
6585	}
6586
6587	if err := lis.Close(); err != nil {
6588		t.Fatalf("Failed to close listener: %v", err)
6589	}
6590	const timeout = 5 * time.Second
6591	timer := time.NewTimer(timeout)
6592	select {
6593	case <-done:
6594		return
6595	case <-timer.C:
6596		t.Fatalf("Serve did not return after %v", timeout)
6597	}
6598}
6599
6600// Service handler returns status with invalid utf8 message.
6601func (s) TestStatusInvalidUTF8Message(t *testing.T) {
6602	var (
6603		origMsg = string([]byte{0xff, 0xfe, 0xfd})
6604		wantMsg = "���"
6605	)
6606
6607	ss := &stubserver.StubServer{
6608		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
6609			return nil, status.Errorf(codes.Internal, origMsg)
6610		},
6611	}
6612	if err := ss.Start(nil); err != nil {
6613		t.Fatalf("Error starting endpoint server: %v", err)
6614	}
6615	defer ss.Stop()
6616
6617	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
6618	defer cancel()
6619
6620	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMsg {
6621		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, status.Convert(err).Message(), wantMsg)
6622	}
6623}
6624
6625// Service handler returns status with details and invalid utf8 message. Proto
6626// will fail to marshal the status because of the invalid utf8 message. Details
6627// will be dropped when sending.
6628func (s) TestStatusInvalidUTF8Details(t *testing.T) {
6629	grpctest.TLogger.ExpectError("transport: failed to marshal rpc status")
6630
6631	var (
6632		origMsg = string([]byte{0xff, 0xfe, 0xfd})
6633		wantMsg = "���"
6634	)
6635
6636	ss := &stubserver.StubServer{
6637		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
6638			st := status.New(codes.Internal, origMsg)
6639			st, err := st.WithDetails(&testpb.Empty{})
6640			if err != nil {
6641				return nil, err
6642			}
6643			return nil, st.Err()
6644		},
6645	}
6646	if err := ss.Start(nil); err != nil {
6647		t.Fatalf("Error starting endpoint server: %v", err)
6648	}
6649	defer ss.Stop()
6650
6651	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
6652	defer cancel()
6653
6654	_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
6655	st := status.Convert(err)
6656	if st.Message() != wantMsg {
6657		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, st.Message(), wantMsg)
6658	}
6659	if len(st.Details()) != 0 {
6660		// Details should be dropped on the server side.
6661		t.Fatalf("RPC status contain details: %v, want no details", st.Details())
6662	}
6663}
6664
6665func (s) TestClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T) {
6666	for _, e := range listTestEnv() {
6667		if e.httpHandler {
6668			continue
6669		}
6670		testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t, e)
6671	}
6672}
6673
6674func testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T, e env) {
6675	te := newTest(t, e)
6676	te.userAgent = testAppUA
6677	smallSize := 1024
6678	te.maxServerReceiveMsgSize = &smallSize
6679	te.startServer(&testServer{security: e.security})
6680	defer te.tearDown()
6681	tc := testpb.NewTestServiceClient(te.clientConn())
6682	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1048576)
6683	if err != nil {
6684		t.Fatal(err)
6685	}
6686	req := &testpb.SimpleRequest{
6687		ResponseType: testpb.PayloadType_COMPRESSABLE,
6688		Payload:      payload,
6689	}
6690	var wg sync.WaitGroup
6691	for i := 0; i < 10; i++ {
6692		wg.Add(1)
6693		go func() {
6694			defer wg.Done()
6695			for j := 0; j < 100; j++ {
6696				ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
6697				defer cancel()
6698				if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.ResourceExhausted {
6699					t.Errorf("TestService/UnaryCall(_,_) = _. %v, want code: %s", err, codes.ResourceExhausted)
6700					return
6701				}
6702			}
6703		}()
6704	}
6705	wg.Wait()
6706}
6707
6708func (s) TestRPCTimeout(t *testing.T) {
6709	for _, e := range listTestEnv() {
6710		testRPCTimeout(t, e)
6711	}
6712}
6713
6714func testRPCTimeout(t *testing.T, e env) {
6715	te := newTest(t, e)
6716	te.startServer(&testServer{security: e.security, unaryCallSleepTime: 500 * time.Millisecond})
6717	defer te.tearDown()
6718
6719	cc := te.clientConn()
6720	tc := testpb.NewTestServiceClient(cc)
6721
6722	const argSize = 2718
6723	const respSize = 314
6724
6725	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
6726	if err != nil {
6727		t.Fatal(err)
6728	}
6729
6730	req := &testpb.SimpleRequest{
6731		ResponseType: testpb.PayloadType_COMPRESSABLE,
6732		ResponseSize: respSize,
6733		Payload:      payload,
6734	}
6735	for i := -1; i <= 10; i++ {
6736		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
6737		if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.DeadlineExceeded {
6738			t.Fatalf("TestService/UnaryCallv(_, _) = _, %v; want <nil>, error code: %s", err, codes.DeadlineExceeded)
6739		}
6740		cancel()
6741	}
6742}
6743
6744func (s) TestDisabledIOBuffers(t *testing.T) {
6745	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(60000))
6746	if err != nil {
6747		t.Fatalf("Failed to create payload: %v", err)
6748	}
6749	req := &testpb.StreamingOutputCallRequest{
6750		Payload: payload,
6751	}
6752	resp := &testpb.StreamingOutputCallResponse{
6753		Payload: payload,
6754	}
6755
6756	ss := &stubserver.StubServer{
6757		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
6758			for {
6759				in, err := stream.Recv()
6760				if err == io.EOF {
6761					return nil
6762				}
6763				if err != nil {
6764					t.Errorf("stream.Recv() = _, %v, want _, <nil>", err)
6765					return err
6766				}
6767				if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
6768					t.Errorf("Received message(len: %v) on server not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
6769					return err
6770				}
6771				if err := stream.Send(resp); err != nil {
6772					t.Errorf("stream.Send(_)= %v, want <nil>", err)
6773					return err
6774				}
6775
6776			}
6777		},
6778	}
6779
6780	s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0))
6781	testpb.RegisterTestServiceServer(s, ss)
6782
6783	lis, err := net.Listen("tcp", "localhost:0")
6784	if err != nil {
6785		t.Fatalf("Failed to create listener: %v", err)
6786	}
6787
6788	go func() {
6789		s.Serve(lis)
6790	}()
6791	defer s.Stop()
6792	dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second)
6793	defer dcancel()
6794	cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0))
6795	if err != nil {
6796		t.Fatalf("Failed to dial server")
6797	}
6798	defer cc.Close()
6799	c := testpb.NewTestServiceClient(cc)
6800	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
6801	defer cancel()
6802	stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true))
6803	if err != nil {
6804		t.Fatalf("Failed to send test RPC to server")
6805	}
6806	for i := 0; i < 10; i++ {
6807		if err := stream.Send(req); err != nil {
6808			t.Fatalf("stream.Send(_) = %v, want <nil>", err)
6809		}
6810		in, err := stream.Recv()
6811		if err != nil {
6812			t.Fatalf("stream.Recv() = _, %v, want _, <nil>", err)
6813		}
6814		if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
6815			t.Fatalf("Received message(len: %v) on client not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
6816		}
6817	}
6818	stream.CloseSend()
6819	if _, err := stream.Recv(); err != io.EOF {
6820		t.Fatalf("stream.Recv() = _, %v, want _, io.EOF", err)
6821	}
6822}
6823
6824func (s) TestServerMaxHeaderListSizeClientUserViolation(t *testing.T) {
6825	for _, e := range listTestEnv() {
6826		if e.httpHandler {
6827			continue
6828		}
6829		testServerMaxHeaderListSizeClientUserViolation(t, e)
6830	}
6831}
6832
6833func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) {
6834	te := newTest(t, e)
6835	te.maxServerHeaderListSize = new(uint32)
6836	*te.maxServerHeaderListSize = 216
6837	te.startServer(&testServer{security: e.security})
6838	defer te.tearDown()
6839
6840	cc := te.clientConn()
6841	tc := testpb.NewTestServiceClient(cc)
6842	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
6843	defer cancel()
6844	metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216)))
6845	var err error
6846	if err = verifyResultWithDelay(func() (bool, error) {
6847		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
6848			return true, nil
6849		}
6850		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
6851	}); err != nil {
6852		t.Fatal(err)
6853	}
6854}
6855
6856func (s) TestClientMaxHeaderListSizeServerUserViolation(t *testing.T) {
6857	for _, e := range listTestEnv() {
6858		if e.httpHandler {
6859			continue
6860		}
6861		testClientMaxHeaderListSizeServerUserViolation(t, e)
6862	}
6863}
6864
6865func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) {
6866	te := newTest(t, e)
6867	te.maxClientHeaderListSize = new(uint32)
6868	*te.maxClientHeaderListSize = 1 // any header server sends will violate
6869	te.startServer(&testServer{security: e.security})
6870	defer te.tearDown()
6871
6872	cc := te.clientConn()
6873	tc := testpb.NewTestServiceClient(cc)
6874	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
6875	defer cancel()
6876	var err error
6877	if err = verifyResultWithDelay(func() (bool, error) {
6878		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
6879			return true, nil
6880		}
6881		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
6882	}); err != nil {
6883		t.Fatal(err)
6884	}
6885}
6886
6887func (s) TestServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T) {
6888	for _, e := range listTestEnv() {
6889		if e.httpHandler || e.security == "tls" {
6890			continue
6891		}
6892		testServerMaxHeaderListSizeClientIntentionalViolation(t, e)
6893	}
6894}
6895
6896func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) {
6897	te := newTest(t, e)
6898	te.maxServerHeaderListSize = new(uint32)
6899	*te.maxServerHeaderListSize = 512
6900	te.startServer(&testServer{security: e.security})
6901	defer te.tearDown()
6902
6903	cc, dw := te.clientConnWithConnControl()
6904	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
6905	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
6906	defer cancel()
6907	stream, err := tc.FullDuplexCall(ctx)
6908	if err != nil {
6909		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
6910	}
6911	rcw := dw.getRawConnWrapper()
6912	val := make([]string, 512)
6913	for i := range val {
6914		val[i] = "a"
6915	}
6916	// allow for client to send the initial header
6917	time.Sleep(100 * time.Millisecond)
6918	rcw.writeHeaders(http2.HeadersFrameParam{
6919		StreamID:      tc.getCurrentStreamID(),
6920		BlockFragment: rcw.encodeHeader("oversize", strings.Join(val, "")),
6921		EndStream:     false,
6922		EndHeaders:    true,
6923	})
6924	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
6925		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
6926	}
6927}
6928
6929func (s) TestClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T) {
6930	for _, e := range listTestEnv() {
6931		if e.httpHandler || e.security == "tls" {
6932			continue
6933		}
6934		testClientMaxHeaderListSizeServerIntentionalViolation(t, e)
6935	}
6936}
6937
6938func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) {
6939	te := newTest(t, e)
6940	te.maxClientHeaderListSize = new(uint32)
6941	*te.maxClientHeaderListSize = 200
6942	lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: true})
6943	defer te.tearDown()
6944	cc, _ := te.clientConnWithConnControl()
6945	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
6946	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
6947	defer cancel()
6948	stream, err := tc.FullDuplexCall(ctx)
6949	if err != nil {
6950		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
6951	}
6952	var i int
6953	var rcw *rawConnWrapper
6954	for i = 0; i < 100; i++ {
6955		rcw = lw.getLastConn()
6956		if rcw != nil {
6957			break
6958		}
6959		time.Sleep(10 * time.Millisecond)
6960		continue
6961	}
6962	if i == 100 {
6963		t.Fatalf("failed to create server transport after 1s")
6964	}
6965
6966	val := make([]string, 200)
6967	for i := range val {
6968		val[i] = "a"
6969	}
6970	// allow for client to send the initial header.
6971	time.Sleep(100 * time.Millisecond)
6972	rcw.writeHeaders(http2.HeadersFrameParam{
6973		StreamID:      tc.getCurrentStreamID(),
6974		BlockFragment: rcw.encodeRawHeader("oversize", strings.Join(val, "")),
6975		EndStream:     false,
6976		EndHeaders:    true,
6977	})
6978	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
6979		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
6980	}
6981}
6982
6983func (s) TestNetPipeConn(t *testing.T) {
6984	// This test will block indefinitely if grpc writes both client and server
6985	// prefaces without either reading from the Conn.
6986	pl := testutils.NewPipeListener()
6987	s := grpc.NewServer()
6988	defer s.Stop()
6989	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
6990		return &testpb.SimpleResponse{}, nil
6991	}}
6992	testpb.RegisterTestServiceServer(s, ts)
6993	go s.Serve(pl)
6994	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
6995	defer cancel()
6996	cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithDialer(pl.Dialer()))
6997	if err != nil {
6998		t.Fatalf("Error creating client: %v", err)
6999	}
7000	defer cc.Close()
7001	client := testpb.NewTestServiceClient(cc)
7002	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
7003		t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
7004	}
7005}
7006
7007func (s) TestLargeTimeout(t *testing.T) {
7008	for _, e := range listTestEnv() {
7009		testLargeTimeout(t, e)
7010	}
7011}
7012
7013func testLargeTimeout(t *testing.T, e env) {
7014	te := newTest(t, e)
7015	te.declareLogNoise("Server.processUnaryRPC failed to write status")
7016
7017	ts := &funcServer{}
7018	te.startServer(ts)
7019	defer te.tearDown()
7020	tc := testpb.NewTestServiceClient(te.clientConn())
7021
7022	timeouts := []time.Duration{
7023		time.Duration(math.MaxInt64), // will be (correctly) converted to
7024		// 2562048 hours, which overflows upon converting back to an int64
7025		2562047 * time.Hour, // the largest timeout that does not overflow
7026	}
7027
7028	for i, maxTimeout := range timeouts {
7029		ts.unaryCall = func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
7030			deadline, ok := ctx.Deadline()
7031			timeout := time.Until(deadline)
7032			minTimeout := maxTimeout - 5*time.Second
7033			if !ok || timeout < minTimeout || timeout > maxTimeout {
7034				t.Errorf("ctx.Deadline() = (now+%v), %v; want [%v, %v], true", timeout, ok, minTimeout, maxTimeout)
7035				return nil, status.Error(codes.OutOfRange, "deadline error")
7036			}
7037			return &testpb.SimpleResponse{}, nil
7038		}
7039
7040		ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
7041		defer cancel()
7042
7043		if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
7044			t.Errorf("case %v: UnaryCall(_) = _, %v; want _, nil", i, err)
7045		}
7046	}
7047}
7048
7049// Proxies typically send GO_AWAY followed by connection closure a minute or so later. This
7050// test ensures that the connection is re-created after GO_AWAY and not affected by the
7051// subsequent (old) connection closure.
7052func (s) TestGoAwayThenClose(t *testing.T) {
7053	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
7054	defer cancel()
7055
7056	lis1, err := net.Listen("tcp", "localhost:0")
7057	if err != nil {
7058		t.Fatalf("Error while listening. Err: %v", err)
7059	}
7060	s1 := grpc.NewServer()
7061	defer s1.Stop()
7062	ts := &funcServer{
7063		unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
7064			return &testpb.SimpleResponse{}, nil
7065		},
7066		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
7067			if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil {
7068				t.Errorf("unexpected error from send: %v", err)
7069				return err
7070			}
7071			// Wait forever.
7072			_, err := stream.Recv()
7073			if err == nil {
7074				t.Error("expected to never receive any message")
7075			}
7076			return err
7077		},
7078	}
7079	testpb.RegisterTestServiceServer(s1, ts)
7080	go s1.Serve(lis1)
7081
7082	conn2Established := grpcsync.NewEvent()
7083	lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established)
7084	if err != nil {
7085		t.Fatalf("Error while listening. Err: %v", err)
7086	}
7087	s2 := grpc.NewServer()
7088	defer s2.Stop()
7089	testpb.RegisterTestServiceServer(s2, ts)
7090	go s2.Serve(lis2)
7091
7092	r := manual.NewBuilderWithScheme("whatever")
7093	r.InitialState(resolver.State{Addresses: []resolver.Address{
7094		{Addr: lis1.Addr().String()},
7095	}})
7096	cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithInsecure())
7097	if err != nil {
7098		t.Fatalf("Error creating client: %v", err)
7099	}
7100	defer cc.Close()
7101
7102	client := testpb.NewTestServiceClient(cc)
7103
7104	// We make a streaming RPC and do an one-message-round-trip to make sure
7105	// it's created on connection 1.
7106	//
7107	// We use a long-lived RPC because it will cause GracefulStop to send
7108	// GO_AWAY, but the connection doesn't get closed until the server stops and
7109	// the client receives the error.
7110	stream, err := client.FullDuplexCall(ctx)
7111	if err != nil {
7112		t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err)
7113	}
7114	if _, err = stream.Recv(); err != nil {
7115		t.Fatalf("unexpected error from first recv: %v", err)
7116	}
7117
7118	r.UpdateState(resolver.State{Addresses: []resolver.Address{
7119		{Addr: lis1.Addr().String()},
7120		{Addr: lis2.Addr().String()},
7121	}})
7122
7123	// Send GO_AWAY to connection 1.
7124	go s1.GracefulStop()
7125
7126	// Wait for connection 2 to be established.
7127	<-conn2Established.Done()
7128
7129	// Close connection 1.
7130	s1.Stop()
7131
7132	// Wait for client to close.
7133	if _, err = stream.Recv(); err == nil {
7134		t.Fatal("expected the stream to die, but got a successful Recv")
7135	}
7136
7137	// Do a bunch of RPCs, make sure it stays stable. These should go to connection 2.
7138	for i := 0; i < 10; i++ {
7139		if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
7140			t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
7141		}
7142	}
7143}
7144
7145func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) {
7146	lis, err := net.Listen(network, address)
7147	if err != nil {
7148		return nil, err
7149	}
7150	return notifyingListener{connEstablished: event, Listener: lis}, nil
7151}
7152
7153type notifyingListener struct {
7154	connEstablished *grpcsync.Event
7155	net.Listener
7156}
7157
7158func (lis notifyingListener) Accept() (net.Conn, error) {
7159	defer lis.connEstablished.Fire()
7160	return lis.Listener.Accept()
7161}
7162
7163func (s) TestRPCWaitsForResolver(t *testing.T) {
7164	te := testServiceConfigSetup(t, tcpClearRREnv)
7165	te.startServer(&testServer{security: tcpClearRREnv.security})
7166	defer te.tearDown()
7167	r := manual.NewBuilderWithScheme("whatever")
7168
7169	te.resolverScheme = r.Scheme()
7170	te.nonBlockingDial = true
7171	cc := te.clientConn(grpc.WithResolvers(r))
7172	tc := testpb.NewTestServiceClient(cc)
7173
7174	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
7175	defer cancel()
7176	// With no resolved addresses yet, this will timeout.
7177	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
7178		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
7179	}
7180
7181	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
7182	defer cancel()
7183	go func() {
7184		time.Sleep(time.Second)
7185		r.UpdateState(resolver.State{
7186			Addresses: []resolver.Address{{Addr: te.srvAddr}},
7187			ServiceConfig: parseCfg(r, `{
7188		    "methodConfig": [
7189		        {
7190		            "name": [
7191		                {
7192		                    "service": "grpc.testing.TestService",
7193		                    "method": "UnaryCall"
7194		                }
7195		            ],
7196                    "maxRequestMessageBytes": 0
7197		        }
7198		    ]
7199		}`)})
7200	}()
7201	// We wait a second before providing a service config and resolving
7202	// addresses.  So this will wait for that and then honor the
7203	// maxRequestMessageBytes it contains.
7204	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{ResponseType: testpb.PayloadType_UNCOMPRESSABLE}); status.Code(err) != codes.ResourceExhausted {
7205		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
7206	}
7207	if got := ctx.Err(); got != nil {
7208		t.Fatalf("ctx.Err() = %v; want nil (deadline should be set short by service config)", got)
7209	}
7210	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
7211		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
7212	}
7213}
7214
7215func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) {
7216	// Non-gRPC content-type fallback path.
7217	for httpCode := range transport.HTTPStatusConvTab {
7218		doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{
7219			":status", fmt.Sprintf("%d", httpCode),
7220			"content-type", "text/html", // non-gRPC content type to switch to HTTP mode.
7221			"grpc-status", "1", // Make up a gRPC status error
7222			"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
7223		})
7224	}
7225
7226	// Missing content-type fallback path.
7227	for httpCode := range transport.HTTPStatusConvTab {
7228		doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{
7229			":status", fmt.Sprintf("%d", httpCode),
7230			// Omitting content type to switch to HTTP mode.
7231			"grpc-status", "1", // Make up a gRPC status error
7232			"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
7233		})
7234	}
7235
7236	// Malformed HTTP status when fallback.
7237	doHTTPHeaderTest(t, codes.Internal, []string{
7238		":status", "abc",
7239		// Omitting content type to switch to HTTP mode.
7240		"grpc-status", "1", // Make up a gRPC status error
7241		"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
7242	})
7243}
7244
7245// Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame).
7246func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) {
7247	for _, test := range []struct {
7248		header  []string
7249		errCode codes.Code
7250	}{
7251		{
7252			// missing gRPC status.
7253			header: []string{
7254				":status", "403",
7255				"content-type", "application/grpc",
7256			},
7257			errCode: codes.PermissionDenied,
7258		},
7259		{
7260			// malformed grpc-status.
7261			header: []string{
7262				":status", "502",
7263				"content-type", "application/grpc",
7264				"grpc-status", "abc",
7265			},
7266			errCode: codes.Internal,
7267		},
7268		{
7269			// Malformed grpc-tags-bin field.
7270			header: []string{
7271				":status", "502",
7272				"content-type", "application/grpc",
7273				"grpc-status", "0",
7274				"grpc-tags-bin", "???",
7275			},
7276			errCode: codes.Unavailable,
7277		},
7278		{
7279			// gRPC status error.
7280			header: []string{
7281				":status", "502",
7282				"content-type", "application/grpc",
7283				"grpc-status", "3",
7284			},
7285			errCode: codes.Unavailable,
7286		},
7287	} {
7288		doHTTPHeaderTest(t, test.errCode, test.header)
7289	}
7290}
7291
7292// Testing non-Trailers-only Trailers (delivered in second HEADERS frame)
7293func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) {
7294	for _, test := range []struct {
7295		responseHeader []string
7296		trailer        []string
7297		errCode        codes.Code
7298	}{
7299		{
7300			responseHeader: []string{
7301				":status", "200",
7302				"content-type", "application/grpc",
7303			},
7304			trailer: []string{
7305				// trailer missing grpc-status
7306				":status", "502",
7307			},
7308			errCode: codes.Unavailable,
7309		},
7310		{
7311			responseHeader: []string{
7312				":status", "404",
7313				"content-type", "application/grpc",
7314			},
7315			trailer: []string{
7316				// malformed grpc-status-details-bin field
7317				"grpc-status", "0",
7318				"grpc-status-details-bin", "????",
7319			},
7320			errCode: codes.Unimplemented,
7321		},
7322		{
7323			responseHeader: []string{
7324				":status", "200",
7325				"content-type", "application/grpc",
7326			},
7327			trailer: []string{
7328				// malformed grpc-status-details-bin field
7329				"grpc-status", "0",
7330				"grpc-status-details-bin", "????",
7331			},
7332			errCode: codes.Internal,
7333		},
7334	} {
7335		doHTTPHeaderTest(t, test.errCode, test.responseHeader, test.trailer)
7336	}
7337}
7338
7339func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) {
7340	header := []string{
7341		":status", "200",
7342		"content-type", "application/grpc",
7343	}
7344	doHTTPHeaderTest(t, codes.Internal, header, header, header)
7345}
7346
7347type httpServer struct {
7348	headerFields [][]string
7349	refuseStream func(uint32) bool
7350}
7351
7352func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error {
7353	if len(headerFields)%2 == 1 {
7354		panic("odd number of kv args")
7355	}
7356
7357	var buf bytes.Buffer
7358	henc := hpack.NewEncoder(&buf)
7359	for len(headerFields) > 0 {
7360		k, v := headerFields[0], headerFields[1]
7361		headerFields = headerFields[2:]
7362		henc.WriteField(hpack.HeaderField{Name: k, Value: v})
7363	}
7364
7365	return framer.WriteHeaders(http2.HeadersFrameParam{
7366		StreamID:      sid,
7367		BlockFragment: buf.Bytes(),
7368		EndStream:     endStream,
7369		EndHeaders:    true,
7370	})
7371}
7372
7373func (s *httpServer) start(t *testing.T, lis net.Listener) {
7374	// Launch an HTTP server to send back header.
7375	go func() {
7376		conn, err := lis.Accept()
7377		if err != nil {
7378			t.Errorf("Error accepting connection: %v", err)
7379			return
7380		}
7381		defer conn.Close()
7382		// Read preface sent by client.
7383		if _, err = io.ReadFull(conn, make([]byte, len(http2.ClientPreface))); err != nil {
7384			t.Errorf("Error at server-side while reading preface from client. Err: %v", err)
7385			return
7386		}
7387		reader := bufio.NewReader(conn)
7388		writer := bufio.NewWriter(conn)
7389		framer := http2.NewFramer(writer, reader)
7390		if err = framer.WriteSettingsAck(); err != nil {
7391			t.Errorf("Error at server-side while sending Settings ack. Err: %v", err)
7392			return
7393		}
7394		writer.Flush() // necessary since client is expecting preface before declaring connection fully setup.
7395
7396		var sid uint32
7397		// Loop until conn is closed and framer returns io.EOF
7398		for {
7399			// Read frames until a header is received.
7400			for {
7401				frame, err := framer.ReadFrame()
7402				if err != nil {
7403					if err != io.EOF {
7404						t.Errorf("Error at server-side while reading frame. Err: %v", err)
7405					}
7406					return
7407				}
7408				if hframe, ok := frame.(*http2.HeadersFrame); ok {
7409					sid = hframe.Header().StreamID
7410					if s.refuseStream == nil || !s.refuseStream(sid) {
7411						break
7412					}
7413					framer.WriteRSTStream(sid, http2.ErrCodeRefusedStream)
7414					writer.Flush()
7415				}
7416			}
7417			for i, headers := range s.headerFields {
7418				if err = s.writeHeader(framer, sid, headers, i == len(s.headerFields)-1); err != nil {
7419					t.Errorf("Error at server-side while writing headers. Err: %v", err)
7420					return
7421				}
7422				writer.Flush()
7423			}
7424		}
7425	}()
7426}
7427
7428func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string) {
7429	t.Helper()
7430	lis, err := net.Listen("tcp", "localhost:0")
7431	if err != nil {
7432		t.Fatalf("Failed to listen. Err: %v", err)
7433	}
7434	defer lis.Close()
7435	server := &httpServer{
7436		headerFields: headerFields,
7437	}
7438	server.start(t, lis)
7439	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
7440	if err != nil {
7441		t.Fatalf("failed to dial due to err: %v", err)
7442	}
7443	defer cc.Close()
7444	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7445	defer cancel()
7446	client := testpb.NewTestServiceClient(cc)
7447	stream, err := client.FullDuplexCall(ctx)
7448	if err != nil {
7449		t.Fatalf("error creating stream due to err: %v", err)
7450	}
7451	if _, err := stream.Recv(); err == nil || status.Code(err) != errCode {
7452		t.Fatalf("stream.Recv() = _, %v, want error code: %v", err, errCode)
7453	}
7454}
7455
7456func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult {
7457	g := r.CC.ParseServiceConfig(s)
7458	if g.Err != nil {
7459		panic(fmt.Sprintf("Error parsing config %q: %v", s, g.Err))
7460	}
7461	return g
7462}
7463
7464func (s) TestClientCancellationPropagatesUnary(t *testing.T) {
7465	wg := &sync.WaitGroup{}
7466	called, done := make(chan struct{}), make(chan struct{})
7467	ss := &stubserver.StubServer{
7468		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
7469			close(called)
7470			<-ctx.Done()
7471			err := ctx.Err()
7472			if err != context.Canceled {
7473				t.Errorf("ctx.Err() = %v; want context.Canceled", err)
7474			}
7475			close(done)
7476			return nil, err
7477		},
7478	}
7479	if err := ss.Start(nil); err != nil {
7480		t.Fatalf("Error starting endpoint server: %v", err)
7481	}
7482	defer ss.Stop()
7483
7484	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7485
7486	wg.Add(1)
7487	go func() {
7488		if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Canceled {
7489			t.Errorf("ss.Client.EmptyCall() = _, %v; want _, Code()=codes.Canceled", err)
7490		}
7491		wg.Done()
7492	}()
7493
7494	select {
7495	case <-called:
7496	case <-time.After(5 * time.Second):
7497		t.Fatalf("failed to perform EmptyCall after 10s")
7498	}
7499	cancel()
7500	select {
7501	case <-done:
7502	case <-time.After(5 * time.Second):
7503		t.Fatalf("server failed to close done chan due to cancellation propagation")
7504	}
7505	wg.Wait()
7506}
7507
7508type badGzipCompressor struct{}
7509
7510func (badGzipCompressor) Do(w io.Writer, p []byte) error {
7511	buf := &bytes.Buffer{}
7512	gzw := gzip.NewWriter(buf)
7513	if _, err := gzw.Write(p); err != nil {
7514		return err
7515	}
7516	err := gzw.Close()
7517	bs := buf.Bytes()
7518	if len(bs) >= 6 {
7519		bs[len(bs)-6] ^= 1 // modify checksum at end by 1 byte
7520	}
7521	w.Write(bs)
7522	return err
7523}
7524
7525func (badGzipCompressor) Type() string {
7526	return "gzip"
7527}
7528
7529func (s) TestGzipBadChecksum(t *testing.T) {
7530	ss := &stubserver.StubServer{
7531		UnaryCallF: func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
7532			return &testpb.SimpleResponse{}, nil
7533		},
7534	}
7535	if err := ss.Start(nil, grpc.WithCompressor(badGzipCompressor{})); err != nil {
7536		t.Fatalf("Error starting endpoint server: %v", err)
7537	}
7538	defer ss.Stop()
7539
7540	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7541	defer cancel()
7542
7543	p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024))
7544	if err != nil {
7545		t.Fatalf("Unexpected error from newPayload: %v", err)
7546	}
7547	if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: p}); err == nil ||
7548		status.Code(err) != codes.Internal ||
7549		!strings.Contains(status.Convert(err).Message(), gzip.ErrChecksum.Error()) {
7550		t.Errorf("ss.Client.UnaryCall(_) = _, %v\n\twant: _, status(codes.Internal, contains %q)", err, gzip.ErrChecksum)
7551	}
7552}
7553
7554// When an RPC is canceled, it's possible that the last Recv() returns before
7555// all call options' after are executed.
7556func (s) TestCanceledRPCCallOptionRace(t *testing.T) {
7557	ss := &stubserver.StubServer{
7558		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
7559			err := stream.Send(&testpb.StreamingOutputCallResponse{})
7560			if err != nil {
7561				return err
7562			}
7563			<-stream.Context().Done()
7564			return nil
7565		},
7566	}
7567	if err := ss.Start(nil); err != nil {
7568		t.Fatalf("Error starting endpoint server: %v", err)
7569	}
7570	defer ss.Stop()
7571
7572	const count = 1000
7573	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7574	defer cancel()
7575
7576	var wg sync.WaitGroup
7577	wg.Add(count)
7578	for i := 0; i < count; i++ {
7579		go func() {
7580			defer wg.Done()
7581			var p peer.Peer
7582			ctx, cancel := context.WithCancel(ctx)
7583			defer cancel()
7584			stream, err := ss.Client.FullDuplexCall(ctx, grpc.Peer(&p))
7585			if err != nil {
7586				t.Errorf("_.FullDuplexCall(_) = _, %v", err)
7587				return
7588			}
7589			if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil {
7590				t.Errorf("_ has error %v while sending", err)
7591				return
7592			}
7593			if _, err := stream.Recv(); err != nil {
7594				t.Errorf("%v.Recv() = %v", stream, err)
7595				return
7596			}
7597			cancel()
7598			if _, err := stream.Recv(); status.Code(err) != codes.Canceled {
7599				t.Errorf("%v compleled with error %v, want %s", stream, err, codes.Canceled)
7600				return
7601			}
7602			// If recv returns before call options are executed, peer.Addr is not set,
7603			// fail the test.
7604			if p.Addr == nil {
7605				t.Errorf("peer.Addr is nil, want non-nil")
7606				return
7607			}
7608		}()
7609	}
7610	wg.Wait()
7611}
7612
7613func (s) TestClientSettingsFloodCloseConn(t *testing.T) {
7614	// Tests that the server properly closes its transport if the client floods
7615	// settings frames and then closes the connection.
7616
7617	// Minimize buffer sizes to stimulate failure condition more quickly.
7618	s := grpc.NewServer(grpc.WriteBufferSize(20))
7619	l := bufconn.Listen(20)
7620	go s.Serve(l)
7621
7622	// Dial our server and handshake.
7623	conn, err := l.Dial()
7624	if err != nil {
7625		t.Fatalf("Error dialing bufconn: %v", err)
7626	}
7627
7628	n, err := conn.Write([]byte(http2.ClientPreface))
7629	if err != nil || n != len(http2.ClientPreface) {
7630		t.Fatalf("Error writing client preface: %v, %v", n, err)
7631	}
7632
7633	fr := http2.NewFramer(conn, conn)
7634	f, err := fr.ReadFrame()
7635	if err != nil {
7636		t.Fatalf("Error reading initial settings frame: %v", err)
7637	}
7638	if _, ok := f.(*http2.SettingsFrame); ok {
7639		if err := fr.WriteSettingsAck(); err != nil {
7640			t.Fatalf("Error writing settings ack: %v", err)
7641		}
7642	} else {
7643		t.Fatalf("Error reading initial settings frame: type=%T", f)
7644	}
7645
7646	// Confirm settings can be written, and that an ack is read.
7647	if err = fr.WriteSettings(); err != nil {
7648		t.Fatalf("Error writing settings frame: %v", err)
7649	}
7650	if f, err = fr.ReadFrame(); err != nil {
7651		t.Fatalf("Error reading frame: %v", err)
7652	}
7653	if sf, ok := f.(*http2.SettingsFrame); !ok || !sf.IsAck() {
7654		t.Fatalf("Unexpected frame: %v", f)
7655	}
7656
7657	// Flood settings frames until a timeout occurs, indiciating the server has
7658	// stopped reading from the connection, then close the conn.
7659	for {
7660		conn.SetWriteDeadline(time.Now().Add(50 * time.Millisecond))
7661		if err := fr.WriteSettings(); err != nil {
7662			if to, ok := err.(interface{ Timeout() bool }); !ok || !to.Timeout() {
7663				t.Fatalf("Received unexpected write error: %v", err)
7664			}
7665			break
7666		}
7667	}
7668	conn.Close()
7669
7670	// If the server does not handle this situation correctly, it will never
7671	// close the transport.  This is because its loopyWriter.run() will have
7672	// exited, and thus not handle the goAway the draining process initiates.
7673	// Also, we would see a goroutine leak in this case, as the reader would be
7674	// blocked on the controlBuf's throttle() method indefinitely.
7675
7676	timer := time.AfterFunc(5*time.Second, func() {
7677		t.Errorf("Timeout waiting for GracefulStop to return")
7678		s.Stop()
7679	})
7680	s.GracefulStop()
7681	timer.Stop()
7682}
7683
7684// TestDeadlineSetOnConnectionOnClientCredentialHandshake tests that there is a deadline
7685// set on the net.Conn when a credential handshake happens in http2_client.
7686func (s) TestDeadlineSetOnConnectionOnClientCredentialHandshake(t *testing.T) {
7687	lis, err := net.Listen("tcp", "localhost:0")
7688	if err != nil {
7689		t.Fatalf("Failed to listen: %v", err)
7690	}
7691	connCh := make(chan net.Conn, 1)
7692	go func() {
7693		defer close(connCh)
7694		conn, err := lis.Accept()
7695		if err != nil {
7696			t.Errorf("Error accepting connection: %v", err)
7697			return
7698		}
7699		connCh <- conn
7700	}()
7701	defer func() {
7702		conn := <-connCh
7703		if conn != nil {
7704			conn.Close()
7705		}
7706	}()
7707	deadlineCh := testutils.NewChannel()
7708	cvd := &credentialsVerifyDeadline{
7709		deadlineCh: deadlineCh,
7710	}
7711	dOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
7712		conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", addr)
7713		if err != nil {
7714			return nil, err
7715		}
7716		return &infoConn{Conn: conn}, nil
7717	})
7718	cc, err := grpc.Dial(lis.Addr().String(), dOpt, grpc.WithTransportCredentials(cvd))
7719	if err != nil {
7720		t.Fatalf("Failed to dial: %v", err)
7721	}
7722	defer cc.Close()
7723
7724	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
7725	defer cancel()
7726	deadline, err := deadlineCh.Receive(ctx)
7727	if err != nil {
7728		t.Fatalf("Error receiving from credsInvoked: %v", err)
7729	}
7730	// Default connection timeout is 20 seconds, so if the deadline exceeds now
7731	// + 18 seconds it should be valid.
7732	if !deadline.(time.Time).After(time.Now().Add(time.Second * 18)) {
7733		t.Fatalf("Connection did not have deadline set.")
7734	}
7735}
7736
7737type infoConn struct {
7738	net.Conn
7739	deadline time.Time
7740}
7741
7742func (c *infoConn) SetDeadline(t time.Time) error {
7743	c.deadline = t
7744	return c.Conn.SetDeadline(t)
7745}
7746
7747type credentialsVerifyDeadline struct {
7748	deadlineCh *testutils.Channel
7749}
7750
7751func (cvd *credentialsVerifyDeadline) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
7752	return rawConn, nil, nil
7753}
7754
7755func (cvd *credentialsVerifyDeadline) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
7756	cvd.deadlineCh.Send(rawConn.(*infoConn).deadline)
7757	return rawConn, nil, nil
7758}
7759
7760func (cvd *credentialsVerifyDeadline) Info() credentials.ProtocolInfo {
7761	return credentials.ProtocolInfo{}
7762}
7763func (cvd *credentialsVerifyDeadline) Clone() credentials.TransportCredentials {
7764	return cvd
7765}
7766func (cvd *credentialsVerifyDeadline) OverrideServerName(s string) error {
7767	return nil
7768}
7769