1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 *     http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19//go:generate protoc --go_out=plugins=grpc:. codec_perf/perf.proto
20//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
21
22package test
23
24import (
25	"bufio"
26	"bytes"
27	"compress/gzip"
28	"context"
29	"crypto/tls"
30	"errors"
31	"flag"
32	"fmt"
33	"io"
34	"math"
35	"net"
36	"net/http"
37	"os"
38	"reflect"
39	"runtime"
40	"strings"
41	"sync"
42	"sync/atomic"
43	"syscall"
44	"testing"
45	"time"
46
47	"github.com/golang/protobuf/proto"
48	anypb "github.com/golang/protobuf/ptypes/any"
49	"golang.org/x/net/http2"
50	"golang.org/x/net/http2/hpack"
51	spb "google.golang.org/genproto/googleapis/rpc/status"
52	"google.golang.org/grpc"
53	"google.golang.org/grpc/balancer/roundrobin"
54	"google.golang.org/grpc/codes"
55	"google.golang.org/grpc/connectivity"
56	"google.golang.org/grpc/credentials"
57	"google.golang.org/grpc/encoding"
58	_ "google.golang.org/grpc/encoding/gzip"
59	"google.golang.org/grpc/health"
60	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
61	healthpb "google.golang.org/grpc/health/grpc_health_v1"
62	"google.golang.org/grpc/internal/channelz"
63	"google.golang.org/grpc/internal/grpcsync"
64	"google.golang.org/grpc/internal/grpctest"
65	"google.golang.org/grpc/internal/grpctest/tlogger"
66	"google.golang.org/grpc/internal/leakcheck"
67	"google.golang.org/grpc/internal/testutils"
68	"google.golang.org/grpc/internal/transport"
69	"google.golang.org/grpc/metadata"
70	"google.golang.org/grpc/peer"
71	"google.golang.org/grpc/resolver"
72	"google.golang.org/grpc/resolver/manual"
73	"google.golang.org/grpc/serviceconfig"
74	"google.golang.org/grpc/stats"
75	"google.golang.org/grpc/status"
76	"google.golang.org/grpc/tap"
77	testpb "google.golang.org/grpc/test/grpc_testing"
78	"google.golang.org/grpc/testdata"
79)
80
// defaultHealthService is the fully-qualified name of the standard gRPC
// health checking service, used when registering/querying server health.
const defaultHealthService = "grpc.health.v1.Health"
82
// init enables channelz data collection for all tests in this package.
func init() {
	channelz.TurnOn()
}
86
// s implements grpctest's test-suite interface; all (s) methods named
// Test* are discovered and run by grpctest.RunSubTests.
type s struct{}

// lcFailed is set (atomically) to 1 once a leak check has failed, so that
// subsequent tests skip leak checking (a single leak would otherwise
// cascade failures into every later test).
var lcFailed uint32

// errorer adapts *testing.T for leakcheck: it records that a failure
// occurred (via lcFailed) in addition to reporting it on t.
type errorer struct {
	t *testing.T
}

// Errorf marks the leak-check-failed flag and forwards the error to the
// underlying *testing.T.
func (e errorer) Errorf(format string, args ...interface{}) {
	atomic.StoreUint32(&lcFailed, 1)
	e.t.Errorf(format, args...)
}
99
// Setup is called by grpctest before each test; it points the test logger
// at the current *testing.T.
func (s) Setup(t *testing.T) {
	tlogger.Update(t)
}
103
// Teardown is called by grpctest after each test. It runs a goroutine leak
// check unless a previous leak check already failed (in which case leak
// checking is disabled for the rest of the test run to avoid cascading
// failures from one leaked goroutine).
func (s) Teardown(t *testing.T) {
	if atomic.LoadUint32(&lcFailed) == 1 {
		return
	}
	leakcheck.Check(errorer{t: t})
	// If this very check tripped the flag, note that future tests will skip it.
	if atomic.LoadUint32(&lcFailed) == 1 {
		t.Log("Leak check disabled for future tests")
	}
}
113
// Test is the single top-level test entry point; it dispatches to every
// (s).Test* method via the grpctest framework.
func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}
117
var (
	// For headers:
	testMetadata = metadata.MD{
		"key1":     []string{"value1"},
		"key2":     []string{"value2"},
		"key3-bin": []string{"binvalue1", string([]byte{1, 2, 3})},
	}
	// testMetadata2 is a second header set, used when tests need to verify
	// that multiple SetHeader/SendHeader calls are merged.
	testMetadata2 = metadata.MD{
		"key1": []string{"value12"},
		"key2": []string{"value22"},
	}
	// For trailers:
	testTrailerMetadata = metadata.MD{
		"tkey1":     []string{"trailerValue1"},
		"tkey2":     []string{"trailerValue2"},
		"tkey3-bin": []string{"trailerbinvalue1", string([]byte{3, 2, 1})},
	}
	// testTrailerMetadata2 is a second trailer set for multipleSetTrailer tests.
	testTrailerMetadata2 = metadata.MD{
		"tkey1": []string{"trailerValue12"},
		"tkey2": []string{"trailerValue22"},
	}
	// capital "Key" is illegal in HTTP/2.
	malformedHTTP2Metadata = metadata.MD{
		"Key": []string{"foo"},
	}
	// testAppUA is a benign user-agent prefix; failAppUA triggers a
	// deliberate server-side error (see testServer.EmptyCall).
	testAppUA     = "myApp1/1.0 myApp2/0.9"
	failAppUA     = "fail-this-RPC"
	detailedError = status.ErrorProto(&spb.Status{
		Code:    int32(codes.DataLoss),
		Message: "error for testing: " + failAppUA,
		Details: []*anypb.Any{{
			TypeUrl: "url",
			Value:   []byte{6, 0, 0, 6, 1, 3},
		}},
	})
)

var raceMode bool // set by race.go in race mode
156
// testServer is the TestService implementation used throughout these
// end-to-end tests. Its knobs alter header/trailer behavior and failure
// modes on a per-test basis.
type testServer struct {
	testpb.UnimplementedTestServiceServer

	security           string // indicate the authentication protocol used by this server.
	earlyFail          bool   // whether to error out the execution of a service handler prematurely.
	setAndSendHeader   bool   // whether to call setHeader and sendHeader.
	setHeaderOnly      bool   // whether to only call setHeader, not sendHeader.
	multipleSetTrailer bool   // whether to call setTrailer multiple times.
	unaryCallSleepTime time.Duration // artificial per-UnaryCall delay.
}
167
// EmptyCall echoes each incoming user-agent entry back to the client as
// header metadata (as alternating "ua", <value> pairs). If the user-agent
// is missing or begins with failAppUA it returns detailedError so clients
// can verify rich status propagation.
func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
	if md, ok := metadata.FromIncomingContext(ctx); ok {
		// For testing purpose, returns an error if user-agent is failAppUA.
		// To test that client gets the correct error.
		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
			return nil, detailedError
		}
		var str []string
		for _, entry := range md["user-agent"] {
			str = append(str, "ua", entry)
		}
		// Best-effort header send; the error is intentionally ignored here.
		grpc.SendHeader(ctx, metadata.Pairs(str...))
	}
	return new(testpb.Empty), nil
}
183
184func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) {
185	if size < 0 {
186		return nil, fmt.Errorf("requested a response with invalid length %d", size)
187	}
188	body := make([]byte, size)
189	switch t {
190	case testpb.PayloadType_COMPRESSABLE:
191	case testpb.PayloadType_UNCOMPRESSABLE:
192		return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported")
193	default:
194		return nil, fmt.Errorf("unsupported payload type: %d", t)
195	}
196	return &testpb.Payload{
197		Type: t,
198		Body: body,
199	}, nil
200}
201
// UnaryCall exercises the server-side metadata APIs according to the
// testServer knobs, validates peer/auth information when security is
// enabled, optionally sleeps, and finally returns a payload of the
// requested type/size.
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if ok {
		// :authority must always be present in incoming metadata.
		if _, exists := md[":authority"]; !exists {
			return nil, status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
		}
		// Exercise exactly one of the three header-emission modes.
		if s.setAndSendHeader {
			if err := grpc.SetHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
			}
			if err := grpc.SendHeader(ctx, testMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", testMetadata2, err)
			}
		} else if s.setHeaderOnly {
			if err := grpc.SetHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
			}
			if err := grpc.SetHeader(ctx, testMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", testMetadata2, err)
			}
		} else {
			if err := grpc.SendHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", md, err)
			}
		}
		if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil {
			return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata, err)
		}
		if s.multipleSetTrailer {
			// A second SetTrailer call should merge, not replace.
			if err := grpc.SetTrailer(ctx, testTrailerMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata2, err)
			}
		}
	}
	pr, ok := peer.FromContext(ctx)
	if !ok {
		return nil, status.Error(codes.DataLoss, "failed to get peer from ctx")
	}
	if pr.Addr == net.Addr(nil) {
		return nil, status.Error(codes.DataLoss, "failed to get peer address")
	}
	if s.security != "" {
		// Check Auth info
		var authType, serverName string
		switch info := pr.AuthInfo.(type) {
		case credentials.TLSInfo:
			authType = info.AuthType()
			serverName = info.State.ServerName
		default:
			return nil, status.Error(codes.Unauthenticated, "Unknown AuthInfo type")
		}
		if authType != s.security {
			return nil, status.Errorf(codes.Unauthenticated, "Wrong auth type: got %q, want %q", authType, s.security)
		}
		// The test TLS certs are issued for x.test.youtube.com.
		if serverName != "x.test.youtube.com" {
			return nil, status.Errorf(codes.Unauthenticated, "Unknown server name %q", serverName)
		}
	}
	// Simulate some service delay.
	time.Sleep(s.unaryCallSleepTime)

	payload, err := newPayload(in.GetResponseType(), in.GetResponseSize())
	if err != nil {
		return nil, err
	}

	return &testpb.SimpleResponse{
		Payload: payload,
	}, nil
}
272
// StreamingOutputCall sends one response per entry in the request's
// ResponseParameters, optionally sleeping IntervalUs between sends. It
// validates :authority and user-agent metadata first.
func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
	if md, ok := metadata.FromIncomingContext(stream.Context()); ok {
		if _, exists := md[":authority"]; !exists {
			return status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
		}
		// For testing purpose, returns an error if user-agent is failAppUA.
		// To test that client gets the correct error.
		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
			return status.Error(codes.DataLoss, "error for testing: "+failAppUA)
		}
	}
	cs := args.GetResponseParameters()
	for _, c := range cs {
		// Optional pacing delay before each response.
		if us := c.GetIntervalUs(); us > 0 {
			time.Sleep(time.Duration(us) * time.Microsecond)
		}

		payload, err := newPayload(args.GetResponseType(), c.GetSize())
		if err != nil {
			return err
		}

		if err := stream.Send(&testpb.StreamingOutputCallResponse{
			Payload: payload,
		}); err != nil {
			return err
		}
	}
	return nil
}
303
// StreamingInputCall sums the payload sizes of all received messages and
// reports the total once the client closes the stream. If earlyFail is
// set, it aborts with NotFound after the first message instead.
func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
	var sum int
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// Client finished sending: reply with the aggregate size.
			return stream.SendAndClose(&testpb.StreamingInputCallResponse{
				AggregatedPayloadSize: int32(sum),
			})
		}
		if err != nil {
			return err
		}
		p := in.GetPayload().GetBody()
		sum += len(p)
		if s.earlyFail {
			// Deliberate premature failure for error-propagation tests.
			return status.Error(codes.NotFound, "not found")
		}
	}
}
323
// FullDuplexCall first emits headers/trailers according to the testServer
// knobs, then echoes: for each received request it sends one response per
// ResponseParameters entry. ResourceExhausted errors from Recv/Send are
// rewritten to Internal to support testSvrWriteStatusEarlyWrite.
func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		// Exercise exactly one of the three header-emission modes.
		if s.setAndSendHeader {
			if err := stream.SetHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
			}
			if err := stream.SendHeader(testMetadata2); err != nil {
				return status.Errorf(status.Code(err), "%v.SendHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
			}
		} else if s.setHeaderOnly {
			if err := stream.SetHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
			}
			if err := stream.SetHeader(testMetadata2); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
			}
		} else {
			if err := stream.SendHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil)
			}
		}
		stream.SetTrailer(testTrailerMetadata)
		if s.multipleSetTrailer {
			// A second SetTrailer call should merge, not replace.
			stream.SetTrailer(testTrailerMetadata2)
		}
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return nil
		}
		if err != nil {
			// to facilitate testSvrWriteStatusEarlyWrite
			if status.Code(err) == codes.ResourceExhausted {
				return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
			}
			return err
		}
		cs := in.GetResponseParameters()
		for _, c := range cs {
			// Optional pacing delay before each response.
			if us := c.GetIntervalUs(); us > 0 {
				time.Sleep(time.Duration(us) * time.Microsecond)
			}

			payload, err := newPayload(in.GetResponseType(), c.GetSize())
			if err != nil {
				return err
			}

			if err := stream.Send(&testpb.StreamingOutputCallResponse{
				Payload: payload,
			}); err != nil {
				// to facilitate testSvrWriteStatusEarlyWrite
				if status.Code(err) == codes.ResourceExhausted {
					return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
				}
				return err
			}
		}
	}
}
387
388func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error {
389	var msgBuf []*testpb.StreamingOutputCallRequest
390	for {
391		in, err := stream.Recv()
392		if err == io.EOF {
393			// read done.
394			break
395		}
396		if err != nil {
397			return err
398		}
399		msgBuf = append(msgBuf, in)
400	}
401	for _, m := range msgBuf {
402		cs := m.GetResponseParameters()
403		for _, c := range cs {
404			if us := c.GetIntervalUs(); us > 0 {
405				time.Sleep(time.Duration(us) * time.Microsecond)
406			}
407
408			payload, err := newPayload(m.GetResponseType(), c.GetSize())
409			if err != nil {
410				return err
411			}
412
413			if err := stream.Send(&testpb.StreamingOutputCallResponse{
414				Payload: payload,
415			}); err != nil {
416				return err
417			}
418		}
419	}
420	return nil
421}
422
// env describes one test environment (transport, security, balancer)
// in which every end-to-end test can be run.
type env struct {
	name         string
	network      string // The type of network such as tcp, unix, etc.
	security     string // The security protocol such as TLS, SSH, etc.
	httpHandler  bool   // whether to use the http.Handler ServerTransport; requires TLS
	balancer     string // One of "round_robin", "pick_first", "v1", or "".
	customDialer func(string, string, time.Duration) (net.Conn, error)
}
431
432func (e env) runnable() bool {
433	if runtime.GOOS == "windows" && e.network == "unix" {
434		return false
435	}
436	return true
437}
438
439func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) {
440	if e.customDialer != nil {
441		return e.customDialer(e.network, addr, timeout)
442	}
443	return net.DialTimeout(e.network, addr, timeout)
444}
445
// The set of environments the tests can run under; allEnv is the default
// matrix iterated by listTestEnv.
var (
	tcpClearEnv   = env{name: "tcp-clear-v1-balancer", network: "tcp", balancer: "v1"}
	tcpTLSEnv     = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls", balancer: "v1"}
	tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "round_robin"}
	tcpTLSRREnv   = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "round_robin"}
	handlerEnv    = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "round_robin"}
	noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls"}
	allEnv        = []env{tcpClearEnv, tcpTLSEnv, tcpClearRREnv, tcpTLSRREnv, handlerEnv, noBalancerEnv}
)

// onlyEnv optionally restricts the test matrix to a single named environment.
var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear', 'tcp-tls', 'unix-clear', 'unix-tls', or 'handler-tls' to only run the tests for that environment. Empty means all.")
457
458func listTestEnv() (envs []env) {
459	if *onlyEnv != "" {
460		for _, e := range allEnv {
461			if e.name == *onlyEnv {
462				if !e.runnable() {
463					panic(fmt.Sprintf("--only_env environment %q does not run on %s", *onlyEnv, runtime.GOOS))
464				}
465				return []env{e}
466			}
467		}
468		panic(fmt.Sprintf("invalid --only_env value %q", *onlyEnv))
469	}
470	for _, e := range allEnv {
471		if e.runnable() {
472			envs = append(envs, e)
473		}
474	}
475	return envs
476}
477
// test is an end-to-end test. It should be created with the newTest
// func, modified as needed, and then started with its startServer method.
// It should be cleaned up with the tearDown method.
type test struct {
	// The following are setup in newTest().
	t      *testing.T
	e      env
	ctx    context.Context // valid for life of test, before tearDown
	cancel context.CancelFunc

	// The following knobs are for the server-side, and should be set after
	// calling newTest() and before calling startServer().

	// whether or not to expose the server's health via the default health
	// service implementation.
	enableHealthServer bool
	// In almost all cases, one should set the 'enableHealthServer' flag above to
	// expose the server's health using the default health service
	// implementation. This should only be used when a non-default health service
	// implementation is required.
	healthServer            healthpb.HealthServer
	maxStream               uint32
	tapHandle               tap.ServerInHandle
	maxServerMsgSize        *int
	maxServerReceiveMsgSize *int
	maxServerSendMsgSize    *int
	maxServerHeaderListSize *uint32
	// Used to test the deprecated API WithCompressor and WithDecompressor.
	serverCompression           bool
	unknownHandler              grpc.StreamHandler
	unaryServerInt              grpc.UnaryServerInterceptor
	streamServerInt             grpc.StreamServerInterceptor
	serverInitialWindowSize     int32
	serverInitialConnWindowSize int32
	customServerOptions         []grpc.ServerOption

	// The following knobs are for the client-side, and should be set after
	// calling newTest() and before calling clientConn().
	maxClientMsgSize        *int
	maxClientReceiveMsgSize *int
	maxClientSendMsgSize    *int
	maxClientHeaderListSize *uint32
	userAgent               string
	// Used to test the deprecated API WithCompressor and WithDecompressor.
	clientCompression bool
	// Used to test the new compressor registration API UseCompressor.
	clientUseCompression bool
	// clientNopCompression is set to create a compressor whose type is not supported.
	clientNopCompression        bool
	unaryClientInt              grpc.UnaryClientInterceptor
	streamClientInt             grpc.StreamClientInterceptor
	sc                          <-chan grpc.ServiceConfig
	customCodec                 encoding.Codec
	clientInitialWindowSize     int32
	clientInitialConnWindowSize int32
	perRPCCreds                 credentials.PerRPCCredentials
	customDialOptions           []grpc.DialOption
	resolverScheme              string

	// All test dialing is blocking by default. Set this to true if dial
	// should be non-blocking.
	nonBlockingDial bool

	// These are set once startServer is called. The common case is to have
	// only one testServer.
	srv     stopper
	hSrv    healthpb.HealthServer
	srvAddr string

	// These are set once startServers is called.
	srvs     []stopper
	hSrvs    []healthpb.HealthServer
	srvAddrs []string

	cc          *grpc.ClientConn // nil until requested via clientConn
	restoreLogs func()           // nil unless declareLogNoise is used
}
555
// stopper abstracts over *grpc.Server and the wrapped http.Server (wrapHS)
// so tearDown can stop either kind of server uniformly.
type stopper interface {
	Stop()
	GracefulStop()
}
560
// tearDown releases everything the test created, in dependency order:
// cancel the test context, close the client connection, restore any log
// filtering, and finally stop the server(s). Fields are nil'd so tearDown
// is safe to call more than once.
func (te *test) tearDown() {
	if te.cancel != nil {
		te.cancel()
		te.cancel = nil
	}

	if te.cc != nil {
		te.cc.Close()
		te.cc = nil
	}

	if te.restoreLogs != nil {
		te.restoreLogs()
		te.restoreLogs = nil
	}

	if te.srv != nil {
		te.srv.Stop()
	}
	for _, s := range te.srvs {
		s.Stop()
	}
}
584
585// newTest returns a new test using the provided testing.T and
586// environment.  It is returned with default values. Tests should
587// modify it before calling its startServer and clientConn methods.
588func newTest(t *testing.T, e env) *test {
589	te := &test{
590		t:         t,
591		e:         e,
592		maxStream: math.MaxUint32,
593	}
594	te.ctx, te.cancel = context.WithCancel(context.Background())
595	return te
596}
597
// listenAndServe assembles server options from the test's knobs, creates a
// listener via the supplied listen func, registers the TestService and
// (optionally) a health server, records srv/hSrv/srvAddr on te, and starts
// serving in a goroutine. When the environment uses the http.Handler
// transport, the gRPC server is wrapped in an http.Server over TLS instead.
// It returns the listener so callers can exert connection-level control.
func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener {
	te.t.Logf("Running test in %s environment...", te.e.name)
	sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)}
	if te.maxServerMsgSize != nil {
		sopts = append(sopts, grpc.MaxMsgSize(*te.maxServerMsgSize))
	}
	if te.maxServerReceiveMsgSize != nil {
		sopts = append(sopts, grpc.MaxRecvMsgSize(*te.maxServerReceiveMsgSize))
	}
	if te.maxServerSendMsgSize != nil {
		sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize))
	}
	if te.maxServerHeaderListSize != nil {
		sopts = append(sopts, grpc.MaxHeaderListSize(*te.maxServerHeaderListSize))
	}
	if te.tapHandle != nil {
		sopts = append(sopts, grpc.InTapHandle(te.tapHandle))
	}
	if te.serverCompression {
		sopts = append(sopts,
			grpc.RPCCompressor(grpc.NewGZIPCompressor()),
			grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
		)
	}
	if te.unaryServerInt != nil {
		sopts = append(sopts, grpc.UnaryInterceptor(te.unaryServerInt))
	}
	if te.streamServerInt != nil {
		sopts = append(sopts, grpc.StreamInterceptor(te.streamServerInt))
	}
	if te.unknownHandler != nil {
		sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler))
	}
	if te.serverInitialWindowSize > 0 {
		sopts = append(sopts, grpc.InitialWindowSize(te.serverInitialWindowSize))
	}
	if te.serverInitialConnWindowSize > 0 {
		sopts = append(sopts, grpc.InitialConnWindowSize(te.serverInitialConnWindowSize))
	}
	la := "localhost:0"
	switch te.e.network {
	case "unix":
		// Unique socket path per run; remove any stale socket first.
		la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano())
		syscall.Unlink(la)
	}
	lis, err := listen(te.e.network, la)
	if err != nil {
		te.t.Fatalf("Failed to listen: %v", err)
	}
	switch te.e.security {
	case "tls":
		creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key"))
		if err != nil {
			te.t.Fatalf("Failed to generate credentials %v", err)
		}
		sopts = append(sopts, grpc.Creds(creds))
	case "clientTimeoutCreds":
		sopts = append(sopts, grpc.Creds(&clientTimeoutCreds{}))
	}
	sopts = append(sopts, te.customServerOptions...)
	s := grpc.NewServer(sopts...)
	if ts != nil {
		testpb.RegisterTestServiceServer(s, ts)
	}

	// Create a new default health server if enableHealthServer is set, or use
	// the provided one.
	hs := te.healthServer
	if te.enableHealthServer {
		hs = health.NewServer()
	}
	if hs != nil {
		healthgrpc.RegisterHealthServer(s, hs)
	}

	// Derive the address clients should dial: the socket path for unix, or
	// localhost plus the OS-assigned port otherwise.
	addr := la
	switch te.e.network {
	case "unix":
	default:
		_, port, err := net.SplitHostPort(lis.Addr().String())
		if err != nil {
			te.t.Fatalf("Failed to parse listener address: %v", err)
		}
		addr = "localhost:" + port
	}

	te.srv = s
	te.hSrv = hs
	te.srvAddr = addr

	if te.e.httpHandler {
		// Serve gRPC through net/http's handler-based transport over TLS.
		if te.e.security != "tls" {
			te.t.Fatalf("unsupported environment settings")
		}
		cert, err := tls.LoadX509KeyPair(testdata.Path("server1.pem"), testdata.Path("server1.key"))
		if err != nil {
			te.t.Fatal("tls.LoadX509KeyPair(server1.pem, server1.key) failed: ", err)
		}
		hs := &http.Server{
			Handler:   s,
			TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		}
		if err := http2.ConfigureServer(hs, &http2.Server{MaxConcurrentStreams: te.maxStream}); err != nil {
			te.t.Fatal("http2.ConfigureServer(_, _) failed: ", err)
		}
		te.srv = wrapHS{hs}
		tlsListener := tls.NewListener(lis, hs.TLSConfig)
		go hs.Serve(tlsListener)
		return lis
	}

	go s.Serve(lis)
	return lis
}
712
// wrapHS adapts an *http.Server to the stopper interface so the
// http.Handler-based environment can be torn down like a *grpc.Server.
type wrapHS struct {
	s *http.Server
}

// GracefulStop drains in-flight requests via http.Server.Shutdown.
func (w wrapHS) GracefulStop() {
	w.s.Shutdown(context.Background())
}

// Stop closes the server immediately via http.Server.Close.
func (w wrapHS) Stop() {
	w.s.Close()
}
724
// startServerWithConnControl starts the server on a listener that exposes
// per-connection control (see listenWithConnControl) and returns that
// wrapped listener for the test to manipulate.
func (te *test) startServerWithConnControl(ts testpb.TestServiceServer) *listenerWrapper {
	l := te.listenAndServe(ts, listenWithConnControl)
	return l.(*listenerWrapper)
}
729
// startServer starts a gRPC server exposing the provided TestService
// implementation. Callers should defer a call to te.tearDown to clean up
func (te *test) startServer(ts testpb.TestServiceServer) {
	te.listenAndServe(ts, net.Listen)
}
735
// startServers starts 'num' gRPC servers exposing the provided TestService.
// Each server's handle, health server, and address are appended to the
// plural fields (srvs/hSrvs/srvAddrs); the singular fields are cleared so
// they only ever describe a lone startServer call.
func (te *test) startServers(ts testpb.TestServiceServer, num int) {
	for i := 0; i < num; i++ {
		te.startServer(ts)
		// startServer always builds a *grpc.Server in this (non-httpHandler) path.
		te.srvs = append(te.srvs, te.srv.(*grpc.Server))
		te.hSrvs = append(te.hSrvs, te.hSrv)
		te.srvAddrs = append(te.srvAddrs, te.srvAddr)
		te.srv = nil
		te.hSrv = nil
		te.srvAddr = ""
	}
}
748
// setHealthServingStatus is a helper function to set the health status.
// It panics if the test's health server is not the default *health.Server
// (i.e. a custom healthServer was installed).
func (te *test) setHealthServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
	hs, ok := te.hSrv.(*health.Server)
	if !ok {
		panic(fmt.Sprintf("SetServingStatus(%v, %v) called for health server of type %T", service, status, hs))
	}
	hs.SetServingStatus(service, status)
}
757
// nopCompressor is a gzip compressor that lies about its Type, letting
// tests exercise the "unsupported compressor type" path.
type nopCompressor struct {
	grpc.Compressor
}

// NewNopCompressor creates a compressor to test the case that type is not supported.
func NewNopCompressor() grpc.Compressor {
	return &nopCompressor{grpc.NewGZIPCompressor()}
}

// Type reports the unregistered name "nop" instead of "gzip".
func (c *nopCompressor) Type() string {
	return "nop"
}
770
// nopDecompressor is a gzip decompressor that lies about its Type, letting
// tests exercise the "unsupported decompressor type" path.
type nopDecompressor struct {
	grpc.Decompressor
}

// NewNopDecompressor creates a decompressor to test the case that type is not supported.
func NewNopDecompressor() grpc.Decompressor {
	return &nopDecompressor{grpc.NewGZIPDecompressor()}
}

// Type reports the unregistered name "nop" instead of "gzip".
func (d *nopDecompressor) Type() string {
	return "nop"
}
783
// configDial translates the test's client-side knobs into grpc.DialOptions
// and returns them along with the resolver scheme prefix ("passthrough:///"
// by default) to prepend to the target address. It may mutate te.srvAddr
// (setting a placeholder for client-side-only tests).
func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) {
	opts = append(opts, grpc.WithDialer(te.e.dialer), grpc.WithUserAgent(te.userAgent))

	if te.sc != nil {
		opts = append(opts, grpc.WithServiceConfig(te.sc))
	}

	if te.clientCompression {
		opts = append(opts,
			grpc.WithCompressor(grpc.NewGZIPCompressor()),
			grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
		)
	}
	if te.clientUseCompression {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip")))
	}
	if te.clientNopCompression {
		opts = append(opts,
			grpc.WithCompressor(NewNopCompressor()),
			grpc.WithDecompressor(NewNopDecompressor()),
		)
	}
	if te.unaryClientInt != nil {
		opts = append(opts, grpc.WithUnaryInterceptor(te.unaryClientInt))
	}
	if te.streamClientInt != nil {
		opts = append(opts, grpc.WithStreamInterceptor(te.streamClientInt))
	}
	if te.maxClientMsgSize != nil {
		opts = append(opts, grpc.WithMaxMsgSize(*te.maxClientMsgSize))
	}
	if te.maxClientReceiveMsgSize != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientReceiveMsgSize)))
	}
	if te.maxClientSendMsgSize != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize)))
	}
	if te.maxClientHeaderListSize != nil {
		opts = append(opts, grpc.WithMaxHeaderListSize(*te.maxClientHeaderListSize))
	}
	switch te.e.security {
	case "tls":
		creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com")
		if err != nil {
			te.t.Fatalf("Failed to load credentials: %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	case "clientTimeoutCreds":
		opts = append(opts, grpc.WithTransportCredentials(&clientTimeoutCreds{}))
	case "empty":
		// Don't add any transport creds option.
	default:
		opts = append(opts, grpc.WithInsecure())
	}
	// TODO(bar) switch balancer case "pick_first".
	var scheme string
	if te.resolverScheme == "" {
		scheme = "passthrough:///"
	} else {
		scheme = te.resolverScheme + ":///"
	}
	switch te.e.balancer {
	case "v1":
		opts = append(opts, grpc.WithBalancer(grpc.RoundRobin(nil)))
	case "round_robin":
		opts = append(opts, grpc.WithBalancerName(roundrobin.Name))
	}
	if te.clientInitialWindowSize > 0 {
		opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize))
	}
	if te.clientInitialConnWindowSize > 0 {
		opts = append(opts, grpc.WithInitialConnWindowSize(te.clientInitialConnWindowSize))
	}
	if te.perRPCCreds != nil {
		opts = append(opts, grpc.WithPerRPCCredentials(te.perRPCCreds))
	}
	if te.customCodec != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.ForceCodec(te.customCodec)))
	}
	if !te.nonBlockingDial && te.srvAddr != "" {
		// Only do a blocking dial if server is up.
		opts = append(opts, grpc.WithBlock())
	}
	if te.srvAddr == "" {
		// Placeholder target for client-side-only tests with no server.
		te.srvAddr = "client.side.only.test"
	}
	opts = append(opts, te.customDialOptions...)
	return opts, scheme
}
873
// clientConnWithConnControl returns the test's client connection, dialing
// through a dialerWrapper so the test can manipulate the raw connection.
// If a connection already exists it is returned with a nil wrapper.
func (te *test) clientConnWithConnControl() (*grpc.ClientConn, *dialerWrapper) {
	if te.cc != nil {
		return te.cc, nil
	}
	opts, scheme := te.configDial()
	dw := &dialerWrapper{}
	// overwrite the dialer before
	opts = append(opts, grpc.WithDialer(dw.dialer))
	var err error
	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
	if err != nil {
		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
	}
	return te.cc, dw
}
889
// clientConn returns (creating and caching on first use) the test's client
// connection to te.srvAddr, configured from the test's knobs plus any
// extra dial options supplied by the caller.
func (te *test) clientConn(opts ...grpc.DialOption) *grpc.ClientConn {
	if te.cc != nil {
		return te.cc
	}
	var scheme string
	opts, scheme = te.configDial(opts...)
	var err error
	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
	if err != nil {
		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
	}
	return te.cc
}
903
// declareLogNoise suppresses log lines containing the given phrases for
// the duration of the test; tearDown restores normal logging.
func (te *test) declareLogNoise(phrases ...string) {
	te.restoreLogs = declareLogNoise(te.t, phrases...)
}
907
// withServerTester dials the test server with a raw connection (TLS-wrapped
// when the environment requires it), performs the HTTP/2 greeting, and then
// hands a low-level serverTester to fn for frame-level assertions.
func (te *test) withServerTester(fn func(st *serverTester)) {
	c, err := te.e.dialer(te.srvAddr, 10*time.Second)
	if err != nil {
		te.t.Fatal(err)
	}
	defer c.Close()
	if te.e.security == "tls" {
		c = tls.Client(c, &tls.Config{
			InsecureSkipVerify: true,
			NextProtos:         []string{http2.NextProtoTLS},
		})
	}
	st := newServerTesterFromConn(te.t, c)
	st.greet()
	fn(st)
}
924
// lazyConn wraps a net.Conn and, once beLazy is set to 1, delays every
// Write by one second — used to simulate a slow network path.
type lazyConn struct {
	net.Conn
	beLazy int32 // accessed atomically; 1 enables the artificial delay
}

// Write sleeps for a second when lazy mode is enabled, then delegates to
// the wrapped connection.
func (l *lazyConn) Write(b []byte) (int, error) {
	if atomic.LoadInt32(&(l.beLazy)) == 1 {
		time.Sleep(time.Second)
	}
	return l.Conn.Write(b)
}
936
// TestContextDeadlineNotIgnored verifies that a context deadline fires even
// when the transport's writes are artificially slowed: after one successful
// RPC, writes are delayed via lazyConn and a 50ms-deadline RPC must fail
// with DeadlineExceeded well before the write delay would complete.
func (s) TestContextDeadlineNotIgnored(t *testing.T) {
	e := noBalancerEnv
	var lc *lazyConn
	e.customDialer = func(network, addr string, timeout time.Duration) (net.Conn, error) {
		conn, err := net.DialTimeout(network, addr, timeout)
		if err != nil {
			return nil, err
		}
		lc = &lazyConn{Conn: conn}
		return lc, nil
	}

	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// First RPC succeeds normally (and ensures lc is populated by the dialer).
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Now slow down all writes and issue an RPC with a short deadline.
	atomic.StoreInt32(&(lc.beLazy), 1)
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	t1 := time.Now()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, context.DeadlineExceeded", err)
	}
	// The call must have been cut off by the deadline, not the write delay.
	if time.Since(t1) > 2*time.Second {
		t.Fatalf("TestService/EmptyCall(_, _) ran over the deadline")
	}
}
969
970func (s) TestTimeoutOnDeadServer(t *testing.T) {
971	for _, e := range listTestEnv() {
972		testTimeoutOnDeadServer(t, e)
973	}
974}
975
// testTimeoutOnDeadServer checks client behavior after the server is hard
// stopped: the channel must leave the READY state, and a subsequent
// wait-for-ready RPC with a 1ms deadline must fail with DeadlineExceeded
// (when a balancer is configured).
func testTimeoutOnDeadServer(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	te.srv.Stop()

	// Wait for the client to notice the connection is gone.
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	state := cc.GetState()
	for ; state == connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
	}
	cancel()
	if state == connectivity.Ready {
		t.Fatalf("Timed out waiting for non-ready state")
	}
	// A wait-for-ready RPC with a 1ms deadline cannot complete while the
	// client is still reconnecting, so it should time out.
	ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond)
	_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
	cancel()
	if e.balancer != "" && status.Code(err) != codes.DeadlineExceeded {
		// If e.balancer == nil, the ac will stop reconnecting because the dialer returns non-temp error,
		// the error will be an internal error.
		t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded)
	}
	awaitNewConnLogOutput()
}
1013
1014func (s) TestServerGracefulStopIdempotent(t *testing.T) {
1015	for _, e := range listTestEnv() {
1016		if e.name == "handler-tls" {
1017			continue
1018		}
1019		testServerGracefulStopIdempotent(t, e)
1020	}
1021}
1022
1023func testServerGracefulStopIdempotent(t *testing.T, e env) {
1024	te := newTest(t, e)
1025	te.userAgent = testAppUA
1026	te.startServer(&testServer{security: e.security})
1027	defer te.tearDown()
1028
1029	for i := 0; i < 3; i++ {
1030		te.srv.GracefulStop()
1031	}
1032}
1033
1034func (s) TestServerGoAway(t *testing.T) {
1035	for _, e := range listTestEnv() {
1036		if e.name == "handler-tls" {
1037			continue
1038		}
1039		testServerGoAway(t, e)
1040	}
1041}
1042
// testServerGoAway verifies that a graceful server stop sends a GoAway that
// eventually reaches the client, after which new RPCs fail with Unavailable
// or Internal.
func testServerGoAway(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// Finish an RPC to make sure the connection is good.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Stop the server gracefully in the background; ch closes when done.
	ch := make(chan struct{})
	go func() {
		te.srv.GracefulStop()
		close(ch)
	}()
	// Loop until the server side GoAway signal is propagated to the client.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		// Any non-deadline error means the client has observed the GoAway.
		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded {
			cancel()
			break
		}
		cancel()
	}
	// A new RPC should fail.
	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal)
	}
	<-ch
	awaitNewConnLogOutput()
}
1080
1081func (s) TestServerGoAwayPendingRPC(t *testing.T) {
1082	for _, e := range listTestEnv() {
1083		if e.name == "handler-tls" {
1084			continue
1085		}
1086		testServerGoAwayPendingRPC(t, e)
1087	}
1088}
1089
1090func testServerGoAwayPendingRPC(t *testing.T, e env) {
1091	te := newTest(t, e)
1092	te.userAgent = testAppUA
1093	te.declareLogNoise(
1094		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1095		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1096		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1097	)
1098	te.startServer(&testServer{security: e.security})
1099	defer te.tearDown()
1100
1101	cc := te.clientConn()
1102	tc := testpb.NewTestServiceClient(cc)
1103	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
1104	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
1105	if err != nil {
1106		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1107	}
1108	// Finish an RPC to make sure the connection is good.
1109	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1110		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
1111	}
1112	ch := make(chan struct{})
1113	go func() {
1114		te.srv.GracefulStop()
1115		close(ch)
1116	}()
1117	// Loop until the server side GoAway signal is propagated to the client.
1118	start := time.Now()
1119	errored := false
1120	for time.Since(start) < time.Second {
1121		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
1122		_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
1123		cancel()
1124		if err != nil {
1125			errored = true
1126			break
1127		}
1128	}
1129	if !errored {
1130		t.Fatalf("GoAway never received by client")
1131	}
1132	respParam := []*testpb.ResponseParameters{{Size: 1}}
1133	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
1134	if err != nil {
1135		t.Fatal(err)
1136	}
1137	req := &testpb.StreamingOutputCallRequest{
1138		ResponseType:       testpb.PayloadType_COMPRESSABLE,
1139		ResponseParameters: respParam,
1140		Payload:            payload,
1141	}
1142	// The existing RPC should be still good to proceed.
1143	if err := stream.Send(req); err != nil {
1144		t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
1145	}
1146	if _, err := stream.Recv(); err != nil {
1147		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
1148	}
1149	// The RPC will run until canceled.
1150	cancel()
1151	<-ch
1152	awaitNewConnLogOutput()
1153}
1154
1155func (s) TestServerMultipleGoAwayPendingRPC(t *testing.T) {
1156	for _, e := range listTestEnv() {
1157		if e.name == "handler-tls" {
1158			continue
1159		}
1160		testServerMultipleGoAwayPendingRPC(t, e)
1161	}
1162}
1163
1164func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) {
1165	te := newTest(t, e)
1166	te.userAgent = testAppUA
1167	te.declareLogNoise(
1168		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1169		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1170		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1171	)
1172	te.startServer(&testServer{security: e.security})
1173	defer te.tearDown()
1174
1175	cc := te.clientConn()
1176	tc := testpb.NewTestServiceClient(cc)
1177	ctx, cancel := context.WithCancel(context.Background())
1178	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
1179	if err != nil {
1180		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1181	}
1182	// Finish an RPC to make sure the connection is good.
1183	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1184		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
1185	}
1186	ch1 := make(chan struct{})
1187	go func() {
1188		te.srv.GracefulStop()
1189		close(ch1)
1190	}()
1191	ch2 := make(chan struct{})
1192	go func() {
1193		te.srv.GracefulStop()
1194		close(ch2)
1195	}()
1196	// Loop until the server side GoAway signal is propagated to the client.
1197	for {
1198		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
1199		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1200			cancel()
1201			break
1202		}
1203		cancel()
1204	}
1205	select {
1206	case <-ch1:
1207		t.Fatal("GracefulStop() terminated early")
1208	case <-ch2:
1209		t.Fatal("GracefulStop() terminated early")
1210	default:
1211	}
1212	respParam := []*testpb.ResponseParameters{
1213		{
1214			Size: 1,
1215		},
1216	}
1217	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
1218	if err != nil {
1219		t.Fatal(err)
1220	}
1221	req := &testpb.StreamingOutputCallRequest{
1222		ResponseType:       testpb.PayloadType_COMPRESSABLE,
1223		ResponseParameters: respParam,
1224		Payload:            payload,
1225	}
1226	// The existing RPC should be still good to proceed.
1227	if err := stream.Send(req); err != nil {
1228		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
1229	}
1230	if _, err := stream.Recv(); err != nil {
1231		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
1232	}
1233	if err := stream.CloseSend(); err != nil {
1234		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
1235	}
1236	<-ch1
1237	<-ch2
1238	cancel()
1239	awaitNewConnLogOutput()
1240}
1241
1242func (s) TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) {
1243	for _, e := range listTestEnv() {
1244		if e.name == "handler-tls" {
1245			continue
1246		}
1247		testConcurrentClientConnCloseAndServerGoAway(t, e)
1248	}
1249}
1250
1251func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) {
1252	te := newTest(t, e)
1253	te.userAgent = testAppUA
1254	te.declareLogNoise(
1255		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1256		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1257		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1258	)
1259	te.startServer(&testServer{security: e.security})
1260	defer te.tearDown()
1261
1262	cc := te.clientConn()
1263	tc := testpb.NewTestServiceClient(cc)
1264	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
1265		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
1266	}
1267	ch := make(chan struct{})
1268	// Close ClientConn and Server concurrently.
1269	go func() {
1270		te.srv.GracefulStop()
1271		close(ch)
1272	}()
1273	go func() {
1274		cc.Close()
1275	}()
1276	<-ch
1277}
1278
1279func (s) TestConcurrentServerStopAndGoAway(t *testing.T) {
1280	for _, e := range listTestEnv() {
1281		if e.name == "handler-tls" {
1282			continue
1283		}
1284		testConcurrentServerStopAndGoAway(t, e)
1285	}
1286}
1287
// testConcurrentServerStopAndGoAway verifies that a hard Stop issued while a
// GracefulStop is pending tears down active streams: the client's Send
// eventually returns io.EOF and Recv returns a non-nil, non-EOF error.
func testConcurrentServerStopAndGoAway(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	stream, err := tc.FullDuplexCall(context.Background(), grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	// Finish an RPC to make sure the connection is good.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
	}
	// Begin a graceful stop; it blocks on the open stream until Stop below.
	ch := make(chan struct{})
	go func() {
		te.srv.GracefulStop()
		close(ch)
	}()
	// Loop until the server side GoAway signal is propagated to the client.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
			cancel()
			break
		}
		cancel()
	}
	// Stop the server and close all the connections.
	te.srv.Stop()
	respParam := []*testpb.ResponseParameters{
		{
			Size: 1,
		},
	}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            payload,
	}
	// Keep sending until the torn-down stream surfaces io.EOF; any other
	// Send error is a bug, and 2s without EOF is a failure.
	sendStart := time.Now()
	for {
		if err := stream.Send(req); err == io.EOF {
			// stream.Send should eventually send io.EOF
			break
		} else if err != nil {
			// Send should never return a transport-level error.
			t.Fatalf("stream.Send(%v) = %v; want <nil or io.EOF>", req, err)
		}
		if time.Since(sendStart) > 2*time.Second {
			t.Fatalf("stream.Send(_) did not return io.EOF after 2s")
		}
		time.Sleep(time.Millisecond)
	}
	if _, err := stream.Recv(); err == nil || err == io.EOF {
		t.Fatalf("%v.Recv() = _, %v, want _, <non-nil, non-EOF>", stream, err)
	}
	<-ch
	awaitNewConnLogOutput()
}
1359
1360func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) {
1361	for _, e := range listTestEnv() {
1362		if e.name == "handler-tls" {
1363			continue
1364		}
1365		testClientConnCloseAfterGoAwayWithActiveStream(t, e)
1366	}
1367}
1368
1369func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) {
1370	te := newTest(t, e)
1371	te.startServer(&testServer{security: e.security})
1372	defer te.tearDown()
1373	cc := te.clientConn()
1374	tc := testpb.NewTestServiceClient(cc)
1375
1376	ctx, cancel := context.WithCancel(context.Background())
1377	defer cancel()
1378	if _, err := tc.FullDuplexCall(ctx); err != nil {
1379		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
1380	}
1381	done := make(chan struct{})
1382	go func() {
1383		te.srv.GracefulStop()
1384		close(done)
1385	}()
1386	time.Sleep(50 * time.Millisecond)
1387	cc.Close()
1388	timeout := time.NewTimer(time.Second)
1389	select {
1390	case <-done:
1391	case <-timeout.C:
1392		t.Fatalf("Test timed-out.")
1393	}
1394}
1395
1396func (s) TestFailFast(t *testing.T) {
1397	for _, e := range listTestEnv() {
1398		testFailFast(t, e)
1399	}
1400}
1401
// testFailFast verifies that after the server is stopped, fail-fast (the
// default) RPCs fail immediately with Unavailable while the client keeps
// trying to reconnect in the background.
func testFailFast(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Stop the server and tear down all the existing connections.
	te.srv.Stop()
	// Loop until the server teardown is propagated to the client.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err := tc.EmptyCall(ctx, &testpb.Empty{})
		cancel()
		if status.Code(err) == codes.Unavailable {
			break
		}
		t.Logf("%v.EmptyCall(_, _) = _, %v", tc, err)
		time.Sleep(10 * time.Millisecond)
	}
	// The client keeps reconnecting and ongoing fail-fast RPCs should fail with code.Unavailable.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable)
	}
	if _, err := tc.StreamingInputCall(context.Background()); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable)
	}

	awaitNewConnLogOutput()
}
1443
1444func testServiceConfigSetup(t *testing.T, e env) *test {
1445	te := newTest(t, e)
1446	te.userAgent = testAppUA
1447	te.declareLogNoise(
1448		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
1449		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
1450		"grpc: addrConn.resetTransport failed to create client transport: connection error",
1451		"Failed to dial : context canceled; please retry.",
1452	)
1453	return te
1454}
1455
// newBool returns a pointer to a freshly allocated bool holding b.
func newBool(b bool) *bool {
	// The named result in the original signature was never used; a plain
	// return of the parameter's address is clearer and equivalent.
	return &b
}
1459
// newInt returns a pointer to a freshly allocated int holding b.
func newInt(b int) *int {
	// The named result in the original signature was never used; a plain
	// return of the parameter's address is clearer and equivalent.
	return &b
}
1463
1464func newDuration(b time.Duration) (a *time.Duration) {
1465	a = new(time.Duration)
1466	*a = b
1467	return
1468}
1469
// TestGetMethodConfig verifies that per-method service config (waitForReady,
// timeout) pushed through a manual resolver is applied to RPCs, and that a
// later config update removing a method-specific entry falls back to the
// service-level defaults.
func (s) TestGetMethodConfig(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r, rcleanup := manual.GenerateAndRegisterManualResolver()
	defer rcleanup()

	te.resolverScheme = r.Scheme()
	cc := te.clientConn()
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	// EmptyCall gets waitForReady=true with a 1ms timeout; all other
	// TestService methods get waitForReady=false.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        },
        {
            "name": [
                {
                    "service": "grpc.testing.TestService"
                }
            ],
            "waitForReady": false
        }
    ]
}`)})

	tc := testpb.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	var err error
	if _, err = tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	// Push a new config where the method-specific entry names UnaryCall
	// instead, so EmptyCall should fall back to the service default
	// (waitForReady=false, no timeout).
	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "UnaryCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        },
        {
            "name": [
                {
                    "service": "grpc.testing.TestService"
                }
            ],
            "waitForReady": false
        }
    ]
}`)})

	// Make sure service config has been processed by grpc.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && !*mc.WaitForReady {
			break
		}
		time.Sleep(time.Millisecond)
	}
	// The following RPCs are expected to become fail-fast.
	if _, err = tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
	}
}
1555
// TestServiceConfigWaitForReady checks the interaction between the
// per-call WaitForReady option and the service config's waitForReady field:
// an explicit call option wins over the config, and a config update to
// waitForReady=true applies to calls that do not set the option.
func (s) TestServiceConfigWaitForReady(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r, rcleanup := manual.GenerateAndRegisterManualResolver()
	defer rcleanup()

	// Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds.
	te.resolverScheme = r.Scheme()
	cc := te.clientConn()
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": false,
            "timeout": ".001s"
        }
    ]
}`)})

	tc := testpb.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	var err error
	if _, err = tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(context.Background(), grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}

	// Generate a service config update.
	// Case2:Client API set failfast to be false, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        }
    ]
}`)})

	// Wait for the new service config to take effect.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && *mc.WaitForReady {
			break
		}
		time.Sleep(time.Millisecond)
	}
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(context.Background()); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
}
1644
// TestServiceConfigTimeout checks that the effective RPC deadline is the
// minimum of the caller-supplied context timeout and the service-config
// timeout, in both directions.
func (s) TestServiceConfigTimeout(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r, rcleanup := manual.GenerateAndRegisterManualResolver()
	defer rcleanup()

	// Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	te.resolverScheme = r.Scheme()
	cc := te.clientConn()
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": "3600s"
        }
    ]
}`)})

	tc := testpb.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
	var err error
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()

	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()

	// Generate a service config update.
	// Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".000000001s"
        }
    ]
}`)})

	// Wait for the new service config to take effect.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall"); mc.Timeout != nil && *mc.Timeout == time.Nanosecond {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()

	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()
}
1743
1744func (s) TestServiceConfigMaxMsgSize(t *testing.T) {
1745	e := tcpClearRREnv
1746	r, rcleanup := manual.GenerateAndRegisterManualResolver()
1747	defer rcleanup()
1748
1749	// Setting up values and objects shared across all test cases.
1750	const smallSize = 1
1751	const largeSize = 1024
1752	const extraLargeSize = 2048
1753
1754	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
1755	if err != nil {
1756		t.Fatal(err)
1757	}
1758	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
1759	if err != nil {
1760		t.Fatal(err)
1761	}
1762	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
1763	if err != nil {
1764		t.Fatal(err)
1765	}
1766
1767	// Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
1768	te1 := testServiceConfigSetup(t, e)
1769	defer te1.tearDown()
1770
1771	te1.resolverScheme = r.Scheme()
1772	te1.nonBlockingDial = true
1773	te1.startServer(&testServer{security: e.security})
1774	cc1 := te1.clientConn()
1775
1776	addrs := []resolver.Address{{Addr: te1.srvAddr}}
1777	sc := parseCfg(r, `{
1778    "methodConfig": [
1779        {
1780            "name": [
1781                {
1782                    "service": "grpc.testing.TestService",
1783                    "method": "UnaryCall"
1784                },
1785                {
1786                    "service": "grpc.testing.TestService",
1787                    "method": "FullDuplexCall"
1788                }
1789            ],
1790            "maxRequestMessageBytes": 2048,
1791            "maxResponseMessageBytes": 2048
1792        }
1793    ]
1794}`)
1795	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc})
1796	tc := testpb.NewTestServiceClient(cc1)
1797
1798	req := &testpb.SimpleRequest{
1799		ResponseType: testpb.PayloadType_COMPRESSABLE,
1800		ResponseSize: int32(extraLargeSize),
1801		Payload:      smallPayload,
1802	}
1803
1804	for {
1805		if cc1.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
1806			break
1807		}
1808		time.Sleep(time.Millisecond)
1809	}
1810
1811	// Test for unary RPC recv.
1812	if _, err = tc.UnaryCall(context.Background(), req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
1813		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1814	}
1815
1816	// Test for unary RPC send.
1817	req.Payload = extraLargePayload
1818	req.ResponseSize = int32(smallSize)
1819	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
1820		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1821	}
1822
1823	// Test for streaming RPC recv.
1824	respParam := []*testpb.ResponseParameters{
1825		{
1826			Size: int32(extraLargeSize),
1827		},
1828	}
1829	sreq := &testpb.StreamingOutputCallRequest{
1830		ResponseType:       testpb.PayloadType_COMPRESSABLE,
1831		ResponseParameters: respParam,
1832		Payload:            smallPayload,
1833	}
1834	stream, err := tc.FullDuplexCall(te1.ctx)
1835	if err != nil {
1836		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1837	}
1838	if err = stream.Send(sreq); err != nil {
1839		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
1840	}
1841	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
1842		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
1843	}
1844
1845	// Test for streaming RPC send.
1846	respParam[0].Size = int32(smallSize)
1847	sreq.Payload = extraLargePayload
1848	stream, err = tc.FullDuplexCall(te1.ctx)
1849	if err != nil {
1850		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1851	}
1852	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
1853		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
1854	}
1855
1856	// Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
1857	te2 := testServiceConfigSetup(t, e)
1858	te2.resolverScheme = r.Scheme()
1859	te2.nonBlockingDial = true
1860	te2.maxClientReceiveMsgSize = newInt(1024)
1861	te2.maxClientSendMsgSize = newInt(1024)
1862
1863	te2.startServer(&testServer{security: e.security})
1864	defer te2.tearDown()
1865	cc2 := te2.clientConn()
1866	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc})
1867	tc = testpb.NewTestServiceClient(cc2)
1868
1869	for {
1870		if cc2.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
1871			break
1872		}
1873		time.Sleep(time.Millisecond)
1874	}
1875
1876	// Test for unary RPC recv.
1877	req.Payload = smallPayload
1878	req.ResponseSize = int32(largeSize)
1879
1880	if _, err = tc.UnaryCall(context.Background(), req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
1881		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1882	}
1883
1884	// Test for unary RPC send.
1885	req.Payload = largePayload
1886	req.ResponseSize = int32(smallSize)
1887	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
1888		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1889	}
1890
1891	// Test for streaming RPC recv.
1892	stream, err = tc.FullDuplexCall(te2.ctx)
1893	respParam[0].Size = int32(largeSize)
1894	sreq.Payload = smallPayload
1895	if err != nil {
1896		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1897	}
1898	if err = stream.Send(sreq); err != nil {
1899		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
1900	}
1901	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
1902		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
1903	}
1904
1905	// Test for streaming RPC send.
1906	respParam[0].Size = int32(smallSize)
1907	sreq.Payload = largePayload
1908	stream, err = tc.FullDuplexCall(te2.ctx)
1909	if err != nil {
1910		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1911	}
1912	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
1913		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
1914	}
1915
1916	// Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
1917	te3 := testServiceConfigSetup(t, e)
1918	te3.resolverScheme = r.Scheme()
1919	te3.nonBlockingDial = true
1920	te3.maxClientReceiveMsgSize = newInt(4096)
1921	te3.maxClientSendMsgSize = newInt(4096)
1922
1923	te3.startServer(&testServer{security: e.security})
1924	defer te3.tearDown()
1925
1926	cc3 := te3.clientConn()
1927	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc})
1928	tc = testpb.NewTestServiceClient(cc3)
1929
1930	for {
1931		if cc3.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
1932			break
1933		}
1934		time.Sleep(time.Millisecond)
1935	}
1936
1937	// Test for unary RPC recv.
1938	req.Payload = smallPayload
1939	req.ResponseSize = int32(largeSize)
1940
1941	if _, err = tc.UnaryCall(context.Background(), req, grpc.WaitForReady(true)); err != nil {
1942		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
1943	}
1944
1945	req.ResponseSize = int32(extraLargeSize)
1946	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
1947		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1948	}
1949
1950	// Test for unary RPC send.
1951	req.Payload = largePayload
1952	req.ResponseSize = int32(smallSize)
1953	if _, err := tc.UnaryCall(context.Background(), req); err != nil {
1954		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
1955	}
1956
1957	req.Payload = extraLargePayload
1958	if _, err = tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
1959		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
1960	}
1961
1962	// Test for streaming RPC recv.
1963	stream, err = tc.FullDuplexCall(te3.ctx)
1964	if err != nil {
1965		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1966	}
1967	respParam[0].Size = int32(largeSize)
1968	sreq.Payload = smallPayload
1969
1970	if err = stream.Send(sreq); err != nil {
1971		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
1972	}
1973	if _, err = stream.Recv(); err != nil {
1974		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
1975	}
1976
1977	respParam[0].Size = int32(extraLargeSize)
1978
1979	if err = stream.Send(sreq); err != nil {
1980		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
1981	}
1982	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
1983		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
1984	}
1985
1986	// Test for streaming RPC send.
1987	respParam[0].Size = int32(smallSize)
1988	sreq.Payload = largePayload
1989	stream, err = tc.FullDuplexCall(te3.ctx)
1990	if err != nil {
1991		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
1992	}
1993	if err := stream.Send(sreq); err != nil {
1994		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
1995	}
1996	sreq.Payload = extraLargePayload
1997	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
1998		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
1999	}
2000}
2001
// Reading from a streaming RPC may fail with context canceled if timeout was
// set by service config (https://github.com/grpc/grpc-go/issues/1818). This
// test makes sure read from streaming RPC doesn't fail in this case.
func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	te.startServer(&testServer{security: tcpClearRREnv.security})
	defer te.tearDown()
	r, rcleanup := manual.GenerateAndRegisterManualResolver()
	defer rcleanup()

	te.resolverScheme = r.Scheme()
	te.nonBlockingDial = true
	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)

	// Push a service config that sets waitForReady and a 10s timeout on
	// FullDuplexCall; the deadline derived from this timeout is what used to
	// surface as a spurious "context canceled" on Recv (issue #1818).
	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: te.srvAddr}},
		ServiceConfig: parseCfg(r, `{
	    "methodConfig": [
	        {
	            "name": [
	                {
	                    "service": "grpc.testing.TestService",
	                    "method": "FullDuplexCall"
	                }
	            ],
	            "waitForReady": true,
	            "timeout": "10s"
	        }
	    ]
	}`)})
	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
	}

	// An empty (size-0) payload keeps the exchange minimal; the content is
	// irrelevant to the timeout behavior under test.
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 0)
	if err != nil {
		t.Fatalf("failed to newPayload: %v", err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: []*testpb.ResponseParameters{{Size: 0}},
		Payload:            payload,
	}
	if err := stream.Send(req); err != nil {
		t.Fatalf("stream.Send(%v) = %v, want <nil>", req, err)
	}
	stream.CloseSend()
	// Sleep 1 second before recv to make sure the final status is received
	// before the recv.
	time.Sleep(time.Second)
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("stream.Recv = _, %v, want _, <nil>", err)
	}
	// Keep reading to drain the stream.
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
}
2074
2075func (s) TestPreloaderClientSend(t *testing.T) {
2076	for _, e := range listTestEnv() {
2077		testPreloaderClientSend(t, e)
2078	}
2079}
2080
2081func testPreloaderClientSend(t *testing.T, e env) {
2082	te := newTest(t, e)
2083	te.userAgent = testAppUA
2084	te.declareLogNoise(
2085		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
2086		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
2087		"grpc: addrConn.resetTransport failed to create client transport: connection error",
2088		"Failed to dial : context canceled; please retry.",
2089	)
2090	te.startServer(&testServer{security: e.security})
2091
2092	defer te.tearDown()
2093	tc := testpb.NewTestServiceClient(te.clientConn())
2094
2095	// Test for streaming RPC recv.
2096	// Set context for send with proper RPC Information
2097	stream, err := tc.FullDuplexCall(te.ctx, grpc.UseCompressor("gzip"))
2098	if err != nil {
2099		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2100	}
2101	var index int
2102	for index < len(reqSizes) {
2103		respParam := []*testpb.ResponseParameters{
2104			{
2105				Size: int32(respSizes[index]),
2106			},
2107		}
2108
2109		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
2110		if err != nil {
2111			t.Fatal(err)
2112		}
2113
2114		req := &testpb.StreamingOutputCallRequest{
2115			ResponseType:       testpb.PayloadType_COMPRESSABLE,
2116			ResponseParameters: respParam,
2117			Payload:            payload,
2118		}
2119		preparedMsg := &grpc.PreparedMsg{}
2120		err = preparedMsg.Encode(stream, req)
2121		if err != nil {
2122			t.Fatalf("PrepareMsg failed for size %d : %v", reqSizes[index], err)
2123		}
2124		if err := stream.SendMsg(preparedMsg); err != nil {
2125			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
2126		}
2127		reply, err := stream.Recv()
2128		if err != nil {
2129			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
2130		}
2131		pt := reply.GetPayload().GetType()
2132		if pt != testpb.PayloadType_COMPRESSABLE {
2133			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
2134		}
2135		size := len(reply.GetPayload().GetBody())
2136		if size != int(respSizes[index]) {
2137			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
2138		}
2139		index++
2140	}
2141	if err := stream.CloseSend(); err != nil {
2142		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
2143	}
2144	if _, err := stream.Recv(); err != io.EOF {
2145		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
2146	}
2147}
2148
2149func (s) TestMaxMsgSizeClientDefault(t *testing.T) {
2150	for _, e := range listTestEnv() {
2151		testMaxMsgSizeClientDefault(t, e)
2152	}
2153}
2154
2155func testMaxMsgSizeClientDefault(t *testing.T, e env) {
2156	te := newTest(t, e)
2157	te.userAgent = testAppUA
2158	te.declareLogNoise(
2159		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
2160		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
2161		"grpc: addrConn.resetTransport failed to create client transport: connection error",
2162		"Failed to dial : context canceled; please retry.",
2163	)
2164	te.startServer(&testServer{security: e.security})
2165
2166	defer te.tearDown()
2167	tc := testpb.NewTestServiceClient(te.clientConn())
2168
2169	const smallSize = 1
2170	const largeSize = 4 * 1024 * 1024
2171	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
2172	if err != nil {
2173		t.Fatal(err)
2174	}
2175	req := &testpb.SimpleRequest{
2176		ResponseType: testpb.PayloadType_COMPRESSABLE,
2177		ResponseSize: int32(largeSize),
2178		Payload:      smallPayload,
2179	}
2180	// Test for unary RPC recv.
2181	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
2182		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
2183	}
2184
2185	respParam := []*testpb.ResponseParameters{
2186		{
2187			Size: int32(largeSize),
2188		},
2189	}
2190	sreq := &testpb.StreamingOutputCallRequest{
2191		ResponseType:       testpb.PayloadType_COMPRESSABLE,
2192		ResponseParameters: respParam,
2193		Payload:            smallPayload,
2194	}
2195
2196	// Test for streaming RPC recv.
2197	stream, err := tc.FullDuplexCall(te.ctx)
2198	if err != nil {
2199		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
2200	}
2201	if err := stream.Send(sreq); err != nil {
2202		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
2203	}
2204	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
2205		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
2206	}
2207}
2208
2209func (s) TestMaxMsgSizeClientAPI(t *testing.T) {
2210	for _, e := range listTestEnv() {
2211		testMaxMsgSizeClientAPI(t, e)
2212	}
2213}
2214
// testMaxMsgSizeClientAPI verifies client-configured message size limits:
// with both the client receive and send caps set to 1024 bytes, a message
// carrying a 1024-byte payload (which exceeds the cap once encoded) must
// fail with codes.ResourceExhausted on unary and streaming sends and
// receives.
func testMaxMsgSizeClientAPI(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// To avoid error on server side.
	te.maxServerSendMsgSize = newInt(5 * 1024 * 1024)
	te.maxClientReceiveMsgSize = newInt(1024)
	te.maxClientSendMsgSize = newInt(1024)
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})

	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	// largeSize equals the configured limit; the payload alone fills it, so
	// the encoded message goes over.
	const smallSize = 1
	const largeSize = 1024
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}

	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}
	// Test for unary RPC recv: large response rejected by the client's
	// receive limit.
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send: large request rejected by the client's send
	// limit. Note req is mutated in place for the remaining cases.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(largeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}

	// Test for streaming RPC recv.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send. respParam aliases sreq.ResponseParameters,
	// so this shrinks the requested response while the payload grows.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}
}
2295
2296func (s) TestMaxMsgSizeServerAPI(t *testing.T) {
2297	for _, e := range listTestEnv() {
2298		testMaxMsgSizeServerAPI(t, e)
2299	}
2300}
2301
// testMaxMsgSizeServerAPI verifies server-configured message size limits:
// with both the server receive and send caps set to 1024 bytes, messages
// carrying a 1024-byte payload must fail with codes.ResourceExhausted.
// The send/recv labels in the comments below are from the SERVER's
// perspective (the client always observes the failure on its own call).
func testMaxMsgSizeServerAPI(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.maxServerReceiveMsgSize = newInt(1024)
	te.maxServerSendMsgSize = newInt(1024)
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})

	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	// largeSize equals the configured limit; the payload alone fills it, so
	// the encoded message goes over.
	const smallSize = 1
	const largeSize = 1024
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}

	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}
	// Test for unary RPC send (server cannot send the large response).
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC recv (server rejects the large request).
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(largeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}

	// Test for streaming RPC send (server cannot send the large response).
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv (server rejects the large request).
	// respParam aliases sreq.ResponseParameters, so this mutates sreq too.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
2383
2384func (s) TestTap(t *testing.T) {
2385	for _, e := range listTestEnv() {
2386		if e.name == "handler-tls" {
2387			continue
2388		}
2389		testTap(t, e)
2390	}
2391}
2392
// myTap is a test tap handle that counts how many times it sees EmptyCall
// and rejects UnaryCall (see its handle method).
type myTap struct {
	cnt int // number of EmptyCall RPCs observed by handle
}
2396
2397func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
2398	if info != nil {
2399		if info.FullMethodName == "/grpc.testing.TestService/EmptyCall" {
2400			t.cnt++
2401		} else if info.FullMethodName == "/grpc.testing.TestService/UnaryCall" {
2402			return nil, fmt.Errorf("tap error")
2403		}
2404	}
2405	return ctx, nil
2406}
2407
// testTap installs a myTap handle on the server and verifies that
// an EmptyCall passes through the tap (incrementing its counter) while a
// UnaryCall is rejected by the tap and surfaces to the client as
// codes.Unavailable.
func testTap(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	ttap := &myTap{}
	te.tapHandle = ttap.handle
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// EmptyCall is allowed through; the tap should have counted exactly one.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	if ttap.cnt != 1 {
		t.Fatalf("Get the count in ttap %d, want 1", ttap.cnt)
	}

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31)
	if err != nil {
		t.Fatal(err)
	}

	// UnaryCall is rejected by the tap; the client sees Unavailable.
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: 45,
		Payload:      payload,
	}
	if _, err := tc.UnaryCall(context.Background(), req); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
	}
}
2444
2445// healthCheck is a helper function to make a unary health check RPC and return
2446// the response.
2447func healthCheck(d time.Duration, cc *grpc.ClientConn, service string) (*healthpb.HealthCheckResponse, error) {
2448	ctx, cancel := context.WithTimeout(context.Background(), d)
2449	defer cancel()
2450	hc := healthgrpc.NewHealthClient(cc)
2451	return hc.Check(ctx, &healthpb.HealthCheckRequest{Service: service})
2452}
2453
2454// verifyHealthCheckStatus is a helper function to verify that the current
2455// health status of the service matches the one passed in 'wantStatus'.
2456func verifyHealthCheckStatus(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantStatus healthpb.HealthCheckResponse_ServingStatus) {
2457	t.Helper()
2458	resp, err := healthCheck(d, cc, service)
2459	if err != nil {
2460		t.Fatalf("Health/Check(_, _) = _, %v, want _, <nil>", err)
2461	}
2462	if resp.Status != wantStatus {
2463		t.Fatalf("Got the serving status %v, want %v", resp.Status, wantStatus)
2464	}
2465}
2466
2467// verifyHealthCheckErrCode is a helper function to verify that a unary health
2468// check RPC returns an error with a code set to 'wantCode'.
2469func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantCode codes.Code) {
2470	t.Helper()
2471	if _, err := healthCheck(d, cc, service); status.Code(err) != wantCode {
2472		t.Fatalf("Health/Check() got errCode %v, want %v", status.Code(err), wantCode)
2473	}
2474}
2475
2476// newHealthCheckStream is a helper function to start a health check streaming
2477// RPC, and returns the stream.
2478func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) {
2479	t.Helper()
2480	ctx, cancel := context.WithCancel(context.Background())
2481	hc := healthgrpc.NewHealthClient(cc)
2482	stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service})
2483	if err != nil {
2484		t.Fatalf("hc.Watch(_, %v) failed: %v", service, err)
2485	}
2486	return stream, cancel
2487}
2488
2489// healthWatchChecker is a helper function to verify that the next health
2490// status returned on the given stream matches the one passed in 'wantStatus'.
2491func healthWatchChecker(t *testing.T, stream healthgrpc.Health_WatchClient, wantStatus healthpb.HealthCheckResponse_ServingStatus) {
2492	t.Helper()
2493	response, err := stream.Recv()
2494	if err != nil {
2495		t.Fatalf("stream.Recv() failed: %v", err)
2496	}
2497	if response.Status != wantStatus {
2498		t.Fatalf("got servingStatus %v, want %v", response.Status, wantStatus)
2499	}
2500}
2501
2502// TestHealthCheckSuccess invokes the unary Check() RPC on the health server in
2503// a successful case.
2504func (s) TestHealthCheckSuccess(t *testing.T) {
2505	for _, e := range listTestEnv() {
2506		testHealthCheckSuccess(t, e)
2507	}
2508}
2509
2510func testHealthCheckSuccess(t *testing.T, e env) {
2511	te := newTest(t, e)
2512	te.enableHealthServer = true
2513	te.startServer(&testServer{security: e.security})
2514	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2515	defer te.tearDown()
2516
2517	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.OK)
2518}
2519
2520// TestHealthCheckFailure invokes the unary Check() RPC on the health server
2521// with an expired context and expects the RPC to fail.
2522func (s) TestHealthCheckFailure(t *testing.T) {
2523	for _, e := range listTestEnv() {
2524		testHealthCheckFailure(t, e)
2525	}
2526}
2527
2528func testHealthCheckFailure(t *testing.T, e env) {
2529	te := newTest(t, e)
2530	te.declareLogNoise(
2531		"Failed to dial ",
2532		"grpc: the client connection is closing; please retry",
2533	)
2534	te.enableHealthServer = true
2535	te.startServer(&testServer{security: e.security})
2536	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2537	defer te.tearDown()
2538
2539	verifyHealthCheckErrCode(t, 0*time.Second, te.clientConn(), defaultHealthService, codes.DeadlineExceeded)
2540	awaitNewConnLogOutput()
2541}
2542
2543// TestHealthCheckOff makes a unary Check() RPC on the health server where the
2544// health status of the defaultHealthService is not set, and therefore expects
2545// an error code 'codes.NotFound'.
2546func (s) TestHealthCheckOff(t *testing.T) {
2547	for _, e := range listTestEnv() {
2548		// TODO(bradfitz): Temporarily skip this env due to #619.
2549		if e.name == "handler-tls" {
2550			continue
2551		}
2552		testHealthCheckOff(t, e)
2553	}
2554}
2555
2556func testHealthCheckOff(t *testing.T, e env) {
2557	te := newTest(t, e)
2558	te.enableHealthServer = true
2559	te.startServer(&testServer{security: e.security})
2560	defer te.tearDown()
2561
2562	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.NotFound)
2563}
2564
2565// TestHealthWatchMultipleClients makes a streaming Watch() RPC on the health
2566// server with multiple clients and expects the same status on both streams.
2567func (s) TestHealthWatchMultipleClients(t *testing.T) {
2568	for _, e := range listTestEnv() {
2569		testHealthWatchMultipleClients(t, e)
2570	}
2571}
2572
2573func testHealthWatchMultipleClients(t *testing.T, e env) {
2574	te := newTest(t, e)
2575	te.enableHealthServer = true
2576	te.startServer(&testServer{security: e.security})
2577	defer te.tearDown()
2578
2579	cc := te.clientConn()
2580	stream1, cf1 := newHealthCheckStream(t, cc, defaultHealthService)
2581	defer cf1()
2582	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
2583
2584	stream2, cf2 := newHealthCheckStream(t, cc, defaultHealthService)
2585	defer cf2()
2586	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
2587
2588	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
2589	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING)
2590	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING)
2591}
2592
2593// TestHealthWatchSameStatusmakes a streaming Watch() RPC on the health server
2594// and makes sure that the health status of the server is as expected after
2595// multiple calls to SetServingStatus with the same status.
2596func (s) TestHealthWatchSameStatus(t *testing.T) {
2597	for _, e := range listTestEnv() {
2598		testHealthWatchSameStatus(t, e)
2599	}
2600}
2601
2602func testHealthWatchSameStatus(t *testing.T, e env) {
2603	te := newTest(t, e)
2604	te.enableHealthServer = true
2605	te.startServer(&testServer{security: e.security})
2606	defer te.tearDown()
2607
2608	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
2609	defer cf()
2610
2611	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
2612	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2613	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2614	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2615	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
2616	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING)
2617}
2618
2619// TestHealthWatchServiceStatusSetBeforeStartingServer starts a health server
2620// on which the health status for the defaultService is set before the gRPC
2621// server is started, and expects the correct health status to be returned.
2622func (s) TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) {
2623	for _, e := range listTestEnv() {
2624		testHealthWatchSetServiceStatusBeforeStartingServer(t, e)
2625	}
2626}
2627
2628func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) {
2629	hs := health.NewServer()
2630	te := newTest(t, e)
2631	te.healthServer = hs
2632	hs.SetServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2633	te.startServer(&testServer{security: e.security})
2634	defer te.tearDown()
2635
2636	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
2637	defer cf()
2638	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2639}
2640
2641// TestHealthWatchDefaultStatusChange verifies the simple case where the
2642// service starts off with a SERVICE_UNKNOWN status (because SetServingStatus
2643// hasn't been called yet) and then moves to SERVING after SetServingStatus is
2644// called.
2645func (s) TestHealthWatchDefaultStatusChange(t *testing.T) {
2646	for _, e := range listTestEnv() {
2647		testHealthWatchDefaultStatusChange(t, e)
2648	}
2649}
2650
2651func testHealthWatchDefaultStatusChange(t *testing.T, e env) {
2652	te := newTest(t, e)
2653	te.enableHealthServer = true
2654	te.startServer(&testServer{security: e.security})
2655	defer te.tearDown()
2656
2657	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
2658	defer cf()
2659	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
2660	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2661	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2662}
2663
2664// TestHealthWatchSetServiceStatusBeforeClientCallsWatch verifies the case
2665// where the health status is set to SERVING before the client calls Watch().
2666func (s) TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) {
2667	for _, e := range listTestEnv() {
2668		testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e)
2669	}
2670}
2671
2672func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) {
2673	te := newTest(t, e)
2674	te.enableHealthServer = true
2675	te.startServer(&testServer{security: e.security})
2676	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2677	defer te.tearDown()
2678
2679	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
2680	defer cf()
2681	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2682}
2683
2684// TestHealthWatchOverallServerHealthChange verifies setting the overall status
2685// of the server by using the empty service name.
func (s) TestHealthWatchOverallServerHealthChange(t *testing.T) {
	// Run the scenario once per supported transport/security environment.
	for _, e := range listTestEnv() {
		testHealthWatchOverallServerHealthChange(t, e)
	}
}
2691
2692func testHealthWatchOverallServerHealthChange(t *testing.T, e env) {
2693	te := newTest(t, e)
2694	te.enableHealthServer = true
2695	te.startServer(&testServer{security: e.security})
2696	defer te.tearDown()
2697
2698	stream, cf := newHealthCheckStream(t, te.clientConn(), "")
2699	defer cf()
2700	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
2701	te.setHealthServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING)
2702	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING)
2703}
2704
2705// TestUnknownHandler verifies that an expected error is returned (by setting
2706// the unknownHandler on the server) for a service which is not exposed to the
2707// client.
2708func (s) TestUnknownHandler(t *testing.T) {
2709	// An example unknownHandler that returns a different code and a different
2710	// method, making sure that we do not expose what methods are implemented to
2711	// a client that is not authenticated.
2712	unknownHandler := func(srv interface{}, stream grpc.ServerStream) error {
2713		return status.Error(codes.Unauthenticated, "user unauthenticated")
2714	}
2715	for _, e := range listTestEnv() {
2716		// TODO(bradfitz): Temporarily skip this env due to #619.
2717		if e.name == "handler-tls" {
2718			continue
2719		}
2720		testUnknownHandler(t, e, unknownHandler)
2721	}
2722}
2723
2724func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) {
2725	te := newTest(t, e)
2726	te.unknownHandler = unknownHandler
2727	te.startServer(&testServer{security: e.security})
2728	defer te.tearDown()
2729	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), "", codes.Unauthenticated)
2730}
2731
2732// TestHealthCheckServingStatus makes a streaming Watch() RPC on the health
2733// server and verifies a bunch of health status transitions.
func (s) TestHealthCheckServingStatus(t *testing.T) {
	// Run the scenario once per supported transport/security environment.
	for _, e := range listTestEnv() {
		testHealthCheckServingStatus(t, e)
	}
}
2739
2740func testHealthCheckServingStatus(t *testing.T, e env) {
2741	te := newTest(t, e)
2742	te.enableHealthServer = true
2743	te.startServer(&testServer{security: e.security})
2744	defer te.tearDown()
2745
2746	cc := te.clientConn()
2747	verifyHealthCheckStatus(t, 1*time.Second, cc, "", healthpb.HealthCheckResponse_SERVING)
2748	verifyHealthCheckErrCode(t, 1*time.Second, cc, defaultHealthService, codes.NotFound)
2749	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2750	verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_SERVING)
2751	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
2752	verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
2753}
2754
// TestEmptyUnaryWithUserAgent verifies that the configured user agent is
// propagated and echoed back by the server, for every test environment.
func (s) TestEmptyUnaryWithUserAgent(t *testing.T) {
	for _, e := range listTestEnv() {
		testEmptyUnaryWithUserAgent(t, e)
	}
}
2760
2761func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
2762	te := newTest(t, e)
2763	te.userAgent = testAppUA
2764	te.startServer(&testServer{security: e.security})
2765	defer te.tearDown()
2766
2767	cc := te.clientConn()
2768	tc := testpb.NewTestServiceClient(cc)
2769	var header metadata.MD
2770	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header))
2771	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
2772		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
2773	}
2774	if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) {
2775		t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA)
2776	}
2777
2778	te.srv.Stop()
2779}
2780
// TestFailedEmptyUnary verifies that rich status details are carried back to
// the client on a failed unary RPC, for every compatible test environment.
func (s) TestFailedEmptyUnary(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			// This test covers status details, but
			// Grpc-Status-Details-Bin is not support in handler_server.
			continue
		}
		testFailedEmptyUnary(t, e)
	}
}
2791
2792func testFailedEmptyUnary(t *testing.T, e env) {
2793	te := newTest(t, e)
2794	te.userAgent = failAppUA
2795	te.startServer(&testServer{security: e.security})
2796	defer te.tearDown()
2797	tc := testpb.NewTestServiceClient(te.clientConn())
2798
2799	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
2800	wantErr := detailedError
2801	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) {
2802		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr)
2803	}
2804}
2805
// TestLargeUnary verifies a unary RPC with large request and response
// payloads, for every test environment.
func (s) TestLargeUnary(t *testing.T) {
	for _, e := range listTestEnv() {
		testLargeUnary(t, e)
	}
}
2811
2812func testLargeUnary(t *testing.T, e env) {
2813	te := newTest(t, e)
2814	te.startServer(&testServer{security: e.security})
2815	defer te.tearDown()
2816	tc := testpb.NewTestServiceClient(te.clientConn())
2817
2818	const argSize = 271828
2819	const respSize = 314159
2820
2821	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
2822	if err != nil {
2823		t.Fatal(err)
2824	}
2825
2826	req := &testpb.SimpleRequest{
2827		ResponseType: testpb.PayloadType_COMPRESSABLE,
2828		ResponseSize: respSize,
2829		Payload:      payload,
2830	}
2831	reply, err := tc.UnaryCall(context.Background(), req)
2832	if err != nil {
2833		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
2834	}
2835	pt := reply.GetPayload().GetType()
2836	ps := len(reply.GetPayload().GetBody())
2837	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
2838		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
2839	}
2840}
2841
2842// Test backward-compatibility API for setting msg size limit.
func (s) TestExceedMsgLimit(t *testing.T) {
	// Run the scenario once per supported transport/security environment.
	for _, e := range listTestEnv() {
		testExceedMsgLimit(t, e)
	}
}
2848
// testExceedMsgLimit configures a 1KB message-size limit on both client and
// server and verifies that oversized messages are rejected with
// ResourceExhausted in all four combinations: unary/streaming crossed with
// server-receive/client-receive.
//
// NOTE: the same request objects (req, sreq, respParam) are mutated between
// RPCs, so statement order here is significant.
func testExceedMsgLimit(t *testing.T, e env) {
	te := newTest(t, e)
	maxMsgSize := 1024
	te.maxServerMsgSize, te.maxClientMsgSize = newInt(maxMsgSize), newInt(maxMsgSize)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	// largeSize is one byte over the limit; smallSize is well under it.
	largeSize := int32(maxMsgSize + 1)
	const smallSize = 1

	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}

	// Make sure the server cannot receive a unary RPC of largeSize.
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: smallSize,
		Payload:      largePayload,
	}
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}
	// Make sure the client cannot receive a unary RPC of largeSize.
	// (req is reused: small request payload, oversized response requested.)
	req.ResponseSize = largeSize
	req.Payload = smallPayload
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Make sure the server cannot receive a streaming RPC of largeSize.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: 1,
		},
	}

	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            largePayload,
	}
	// Send succeeds locally; the size violation surfaces on Recv as a
	// ResourceExhausted status from the server.
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test on client side for streaming RPC.
	// (respParam/sreq are reused: small request, oversized response requested.)
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam[0].Size = largeSize
	sreq.Payload = smallPayload
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
2922
// TestPeerClientSide verifies that grpc.Peer reports the server's address on
// a successful RPC, for every test environment.
func (s) TestPeerClientSide(t *testing.T) {
	for _, e := range listTestEnv() {
		testPeerClientSide(t, e)
	}
}
2928
2929func testPeerClientSide(t *testing.T, e env) {
2930	te := newTest(t, e)
2931	te.userAgent = testAppUA
2932	te.startServer(&testServer{security: e.security})
2933	defer te.tearDown()
2934	tc := testpb.NewTestServiceClient(te.clientConn())
2935	peer := new(peer.Peer)
2936	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil {
2937		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
2938	}
2939	pa := peer.Addr.String()
2940	if e.network == "unix" {
2941		if pa != te.srvAddr {
2942			t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
2943		}
2944		return
2945	}
2946	_, pp, err := net.SplitHostPort(pa)
2947	if err != nil {
2948		t.Fatalf("Failed to parse address from peer.")
2949	}
2950	_, sp, err := net.SplitHostPort(te.srvAddr)
2951	if err != nil {
2952		t.Fatalf("Failed to parse address of test server.")
2953	}
2954	if pp != sp {
2955		t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
2956	}
2957}
2958
2959// TestPeerNegative tests that if call fails setting peer
2960// doesn't cause a segmentation fault.
2961// issue#1141 https://github.com/grpc/grpc-go/issues/1141
func (s) TestPeerNegative(t *testing.T) {
	// Run the scenario once per supported transport/security environment.
	for _, e := range listTestEnv() {
		testPeerNegative(t, e)
	}
}
2967
2968func testPeerNegative(t *testing.T, e env) {
2969	te := newTest(t, e)
2970	te.startServer(&testServer{security: e.security})
2971	defer te.tearDown()
2972
2973	cc := te.clientConn()
2974	tc := testpb.NewTestServiceClient(cc)
2975	peer := new(peer.Peer)
2976	ctx, cancel := context.WithCancel(context.Background())
2977	cancel()
2978	tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer))
2979}
2980
// TestPeerFailedRPC verifies that grpc.Peer is populated even when the RPC
// itself fails, for every test environment.
func (s) TestPeerFailedRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		testPeerFailedRPC(t, e)
	}
}
2986
2987func testPeerFailedRPC(t *testing.T, e env) {
2988	te := newTest(t, e)
2989	te.maxServerReceiveMsgSize = newInt(1 * 1024)
2990	te.startServer(&testServer{security: e.security})
2991
2992	defer te.tearDown()
2993	tc := testpb.NewTestServiceClient(te.clientConn())
2994
2995	// first make a successful request to the server
2996	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
2997		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
2998	}
2999
3000	// make a second request that will be rejected by the server
3001	const largeSize = 5 * 1024
3002	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
3003	if err != nil {
3004		t.Fatal(err)
3005	}
3006	req := &testpb.SimpleRequest{
3007		ResponseType: testpb.PayloadType_COMPRESSABLE,
3008		Payload:      largePayload,
3009	}
3010
3011	peer := new(peer.Peer)
3012	if _, err := tc.UnaryCall(context.Background(), req, grpc.Peer(peer)); err == nil || status.Code(err) != codes.ResourceExhausted {
3013		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
3014	} else {
3015		pa := peer.Addr.String()
3016		if e.network == "unix" {
3017			if pa != te.srvAddr {
3018				t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
3019			}
3020			return
3021		}
3022		_, pp, err := net.SplitHostPort(pa)
3023		if err != nil {
3024			t.Fatalf("Failed to parse address from peer.")
3025		}
3026		_, sp, err := net.SplitHostPort(te.srvAddr)
3027		if err != nil {
3028			t.Fatalf("Failed to parse address of test server.")
3029		}
3030		if pp != sp {
3031			t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
3032		}
3033	}
3034}
3035
// TestMetadataUnaryRPC verifies header and trailer metadata round-tripping on
// a unary RPC, for every test environment.
func (s) TestMetadataUnaryRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		testMetadataUnaryRPC(t, e)
	}
}
3041
3042func testMetadataUnaryRPC(t *testing.T, e env) {
3043	te := newTest(t, e)
3044	te.startServer(&testServer{security: e.security})
3045	defer te.tearDown()
3046	tc := testpb.NewTestServiceClient(te.clientConn())
3047
3048	const argSize = 2718
3049	const respSize = 314
3050
3051	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3052	if err != nil {
3053		t.Fatal(err)
3054	}
3055
3056	req := &testpb.SimpleRequest{
3057		ResponseType: testpb.PayloadType_COMPRESSABLE,
3058		ResponseSize: respSize,
3059		Payload:      payload,
3060	}
3061	var header, trailer metadata.MD
3062	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3063	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
3064		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
3065	}
3066	// Ignore optional response headers that Servers may set:
3067	if header != nil {
3068		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
3069		delete(header, "date")    // the Date header is also optional
3070		delete(header, "user-agent")
3071		delete(header, "content-type")
3072	}
3073	if !reflect.DeepEqual(header, testMetadata) {
3074		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
3075	}
3076	if !reflect.DeepEqual(trailer, testTrailerMetadata) {
3077		t.Fatalf("Received trailer metadata %v, want %v", trailer, testTrailerMetadata)
3078	}
3079}
3080
// TestMetadataOrderUnaryRPC verifies that metadata values appended to the
// outgoing context preserve their order, for every test environment.
func (s) TestMetadataOrderUnaryRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		testMetadataOrderUnaryRPC(t, e)
	}
}
3086
3087func testMetadataOrderUnaryRPC(t *testing.T, e env) {
3088	te := newTest(t, e)
3089	te.startServer(&testServer{security: e.security})
3090	defer te.tearDown()
3091	tc := testpb.NewTestServiceClient(te.clientConn())
3092
3093	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3094	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value2")
3095	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value3")
3096
3097	// using Join to built expected metadata instead of FromOutgoingContext
3098	newMetadata := metadata.Join(testMetadata, metadata.Pairs("key1", "value2", "key1", "value3"))
3099
3100	var header metadata.MD
3101	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.Header(&header)); err != nil {
3102		t.Fatal(err)
3103	}
3104
3105	// Ignore optional response headers that Servers may set:
3106	if header != nil {
3107		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
3108		delete(header, "date")    // the Date header is also optional
3109		delete(header, "user-agent")
3110		delete(header, "content-type")
3111	}
3112
3113	if !reflect.DeepEqual(header, newMetadata) {
3114		t.Fatalf("Received header metadata %v, want %v", header, newMetadata)
3115	}
3116}
3117
// TestMultipleSetTrailerUnaryRPC verifies that trailer metadata set multiple
// times on the server is joined, for every test environment.
func (s) TestMultipleSetTrailerUnaryRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		testMultipleSetTrailerUnaryRPC(t, e)
	}
}
3123
3124func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) {
3125	te := newTest(t, e)
3126	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
3127	defer te.tearDown()
3128	tc := testpb.NewTestServiceClient(te.clientConn())
3129
3130	const (
3131		argSize  = 1
3132		respSize = 1
3133	)
3134	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3135	if err != nil {
3136		t.Fatal(err)
3137	}
3138
3139	req := &testpb.SimpleRequest{
3140		ResponseType: testpb.PayloadType_COMPRESSABLE,
3141		ResponseSize: respSize,
3142		Payload:      payload,
3143	}
3144	var trailer metadata.MD
3145	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3146	if _, err := tc.UnaryCall(ctx, req, grpc.Trailer(&trailer), grpc.WaitForReady(true)); err != nil {
3147		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
3148	}
3149	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
3150	if !reflect.DeepEqual(trailer, expectedTrailer) {
3151		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
3152	}
3153}
3154
// TestMultipleSetTrailerStreamingRPC verifies joined trailer metadata on a
// streaming RPC, for every test environment.
func (s) TestMultipleSetTrailerStreamingRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		testMultipleSetTrailerStreamingRPC(t, e)
	}
}
3160
3161func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) {
3162	te := newTest(t, e)
3163	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
3164	defer te.tearDown()
3165	tc := testpb.NewTestServiceClient(te.clientConn())
3166
3167	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3168	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
3169	if err != nil {
3170		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3171	}
3172	if err := stream.CloseSend(); err != nil {
3173		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3174	}
3175	if _, err := stream.Recv(); err != io.EOF {
3176		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
3177	}
3178
3179	trailer := stream.Trailer()
3180	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
3181	if !reflect.DeepEqual(trailer, expectedTrailer) {
3182		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
3183	}
3184}
3185
// TestSetAndSendHeaderUnaryRPC verifies header metadata sent via SendHeader
// on a unary RPC, for every compatible test environment.
func (s) TestSetAndSendHeaderUnaryRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			continue
		}
		testSetAndSendHeaderUnaryRPC(t, e)
	}
}
3194
3195// To test header metadata is sent on SendHeader().
3196func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) {
3197	te := newTest(t, e)
3198	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
3199	defer te.tearDown()
3200	tc := testpb.NewTestServiceClient(te.clientConn())
3201
3202	const (
3203		argSize  = 1
3204		respSize = 1
3205	)
3206	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3207	if err != nil {
3208		t.Fatal(err)
3209	}
3210
3211	req := &testpb.SimpleRequest{
3212		ResponseType: testpb.PayloadType_COMPRESSABLE,
3213		ResponseSize: respSize,
3214		Payload:      payload,
3215	}
3216	var header metadata.MD
3217	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3218	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
3219		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
3220	}
3221	delete(header, "user-agent")
3222	delete(header, "content-type")
3223	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3224	if !reflect.DeepEqual(header, expectedHeader) {
3225		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3226	}
3227}
3228
// TestMultipleSetHeaderUnaryRPC verifies header metadata set multiple times
// before sending a unary response, for every compatible test environment.
func (s) TestMultipleSetHeaderUnaryRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			continue
		}
		testMultipleSetHeaderUnaryRPC(t, e)
	}
}
3237
3238// To test header metadata is sent when sending response.
3239func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) {
3240	te := newTest(t, e)
3241	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
3242	defer te.tearDown()
3243	tc := testpb.NewTestServiceClient(te.clientConn())
3244
3245	const (
3246		argSize  = 1
3247		respSize = 1
3248	)
3249	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3250	if err != nil {
3251		t.Fatal(err)
3252	}
3253
3254	req := &testpb.SimpleRequest{
3255		ResponseType: testpb.PayloadType_COMPRESSABLE,
3256		ResponseSize: respSize,
3257		Payload:      payload,
3258	}
3259
3260	var header metadata.MD
3261	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3262	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
3263		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
3264	}
3265	delete(header, "user-agent")
3266	delete(header, "content-type")
3267	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3268	if !reflect.DeepEqual(header, expectedHeader) {
3269		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3270	}
3271}
3272
// TestMultipleSetHeaderUnaryRPCError verifies header metadata delivery when
// the unary RPC fails, for every compatible test environment.
func (s) TestMultipleSetHeaderUnaryRPCError(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			continue
		}
		testMultipleSetHeaderUnaryRPCError(t, e)
	}
}
3281
3282// To test header metadata is sent when sending status.
3283func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) {
3284	te := newTest(t, e)
3285	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
3286	defer te.tearDown()
3287	tc := testpb.NewTestServiceClient(te.clientConn())
3288
3289	const (
3290		argSize  = 1
3291		respSize = -1 // Invalid respSize to make RPC fail.
3292	)
3293	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3294	if err != nil {
3295		t.Fatal(err)
3296	}
3297
3298	req := &testpb.SimpleRequest{
3299		ResponseType: testpb.PayloadType_COMPRESSABLE,
3300		ResponseSize: respSize,
3301		Payload:      payload,
3302	}
3303	var header metadata.MD
3304	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3305	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err == nil {
3306		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <non-nil>", ctx, err)
3307	}
3308	delete(header, "user-agent")
3309	delete(header, "content-type")
3310	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3311	if !reflect.DeepEqual(header, expectedHeader) {
3312		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3313	}
3314}
3315
// TestSetAndSendHeaderStreamingRPC verifies header metadata sent via
// SendHeader on a streaming RPC, for every compatible test environment.
func (s) TestSetAndSendHeaderStreamingRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			continue
		}
		testSetAndSendHeaderStreamingRPC(t, e)
	}
}
3324
3325// To test header metadata is sent on SendHeader().
3326func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) {
3327	te := newTest(t, e)
3328	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
3329	defer te.tearDown()
3330	tc := testpb.NewTestServiceClient(te.clientConn())
3331
3332	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3333	stream, err := tc.FullDuplexCall(ctx)
3334	if err != nil {
3335		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3336	}
3337	if err := stream.CloseSend(); err != nil {
3338		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3339	}
3340	if _, err := stream.Recv(); err != io.EOF {
3341		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
3342	}
3343
3344	header, err := stream.Header()
3345	if err != nil {
3346		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
3347	}
3348	delete(header, "user-agent")
3349	delete(header, "content-type")
3350	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3351	if !reflect.DeepEqual(header, expectedHeader) {
3352		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3353	}
3354}
3355
// TestMultipleSetHeaderStreamingRPC verifies header metadata flushed with the
// first streaming response, for every compatible test environment.
func (s) TestMultipleSetHeaderStreamingRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			continue
		}
		testMultipleSetHeaderStreamingRPC(t, e)
	}
}
3364
3365// To test header metadata is sent when sending response.
3366func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) {
3367	te := newTest(t, e)
3368	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
3369	defer te.tearDown()
3370	tc := testpb.NewTestServiceClient(te.clientConn())
3371
3372	const (
3373		argSize  = 1
3374		respSize = 1
3375	)
3376	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
3377	stream, err := tc.FullDuplexCall(ctx)
3378	if err != nil {
3379		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3380	}
3381
3382	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3383	if err != nil {
3384		t.Fatal(err)
3385	}
3386
3387	req := &testpb.StreamingOutputCallRequest{
3388		ResponseType: testpb.PayloadType_COMPRESSABLE,
3389		ResponseParameters: []*testpb.ResponseParameters{
3390			{Size: respSize},
3391		},
3392		Payload: payload,
3393	}
3394	if err := stream.Send(req); err != nil {
3395		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
3396	}
3397	if _, err := stream.Recv(); err != nil {
3398		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
3399	}
3400	if err := stream.CloseSend(); err != nil {
3401		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3402	}
3403	if _, err := stream.Recv(); err != io.EOF {
3404		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
3405	}
3406
3407	header, err := stream.Header()
3408	if err != nil {
3409		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
3410	}
3411	delete(header, "user-agent")
3412	delete(header, "content-type")
3413	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3414	if !reflect.DeepEqual(header, expectedHeader) {
3415		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3416	}
3417
3418}
3419
// TestMultipleSetHeaderStreamingRPCError verifies header metadata delivery
// when the streaming RPC fails, for every compatible test environment.
func (s) TestMultipleSetHeaderStreamingRPCError(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			continue
		}
		testMultipleSetHeaderStreamingRPCError(t, e)
	}
}
3428
3429// To test header metadata is sent when sending status.
3430func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
3431	te := newTest(t, e)
3432	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
3433	defer te.tearDown()
3434	tc := testpb.NewTestServiceClient(te.clientConn())
3435
3436	const (
3437		argSize  = 1
3438		respSize = -1
3439	)
3440	ctx, cancel := context.WithCancel(context.Background())
3441	defer cancel()
3442	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
3443	stream, err := tc.FullDuplexCall(ctx)
3444	if err != nil {
3445		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3446	}
3447
3448	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
3449	if err != nil {
3450		t.Fatal(err)
3451	}
3452
3453	req := &testpb.StreamingOutputCallRequest{
3454		ResponseType: testpb.PayloadType_COMPRESSABLE,
3455		ResponseParameters: []*testpb.ResponseParameters{
3456			{Size: respSize},
3457		},
3458		Payload: payload,
3459	}
3460	if err := stream.Send(req); err != nil {
3461		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
3462	}
3463	if _, err := stream.Recv(); err == nil {
3464		t.Fatalf("%v.Recv() = %v, want <non-nil>", stream, err)
3465	}
3466
3467	header, err := stream.Header()
3468	if err != nil {
3469		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
3470	}
3471	delete(header, "user-agent")
3472	delete(header, "content-type")
3473	expectedHeader := metadata.Join(testMetadata, testMetadata2)
3474	if !reflect.DeepEqual(header, expectedHeader) {
3475		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
3476	}
3477	if err := stream.CloseSend(); err != nil {
3478		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3479	}
3480}
3481
3482// TestMalformedHTTP2Metadata verfies the returned error when the client
3483// sends an illegal metadata.
func (s) TestMalformedHTTP2Metadata(t *testing.T) {
	// Run the scenario once per compatible transport/security environment.
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			// Failed with "server stops accepting new RPCs".
			// Server stops accepting new RPCs when the client sends an illegal http2 header.
			continue
		}
		testMalformedHTTP2Metadata(t, e)
	}
}
3494
3495func testMalformedHTTP2Metadata(t *testing.T, e env) {
3496	te := newTest(t, e)
3497	te.startServer(&testServer{security: e.security})
3498	defer te.tearDown()
3499	tc := testpb.NewTestServiceClient(te.clientConn())
3500
3501	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718)
3502	if err != nil {
3503		t.Fatal(err)
3504	}
3505
3506	req := &testpb.SimpleRequest{
3507		ResponseType: testpb.PayloadType_COMPRESSABLE,
3508		ResponseSize: 314,
3509		Payload:      payload,
3510	}
3511	ctx := metadata.NewOutgoingContext(context.Background(), malformedHTTP2Metadata)
3512	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Internal {
3513		t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %s", ctx, err, codes.Internal)
3514	}
3515}
3516
// TestTransparentRetry verifies transparent client retries on REFUSED_STREAM,
// for every compatible test environment.
func (s) TestTransparentRetry(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			// Fails with RST_STREAM / FLOW_CONTROL_ERROR
			continue
		}
		testTransparentRetry(t, e)
	}
}
3526
3527// This test makes sure RPCs are retried times when they receive a RST_STREAM
3528// with the REFUSED_STREAM error code, which the InTapHandle provokes.
3529func testTransparentRetry(t *testing.T, e env) {
3530	te := newTest(t, e)
3531	attempts := 0
3532	successAttempt := 2
3533	te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) {
3534		attempts++
3535		if attempts < successAttempt {
3536			return nil, errors.New("not now")
3537		}
3538		return ctx, nil
3539	}
3540	te.startServer(&testServer{security: e.security})
3541	defer te.tearDown()
3542
3543	cc := te.clientConn()
3544	tsc := testpb.NewTestServiceClient(cc)
3545	testCases := []struct {
3546		successAttempt int
3547		failFast       bool
3548		errCode        codes.Code
3549	}{{
3550		successAttempt: 1,
3551	}, {
3552		successAttempt: 2,
3553	}, {
3554		successAttempt: 3,
3555		errCode:        codes.Unavailable,
3556	}, {
3557		successAttempt: 1,
3558		failFast:       true,
3559	}, {
3560		successAttempt: 2,
3561		failFast:       true,
3562	}, {
3563		successAttempt: 3,
3564		failFast:       true,
3565		errCode:        codes.Unavailable,
3566	}}
3567	for _, tc := range testCases {
3568		attempts = 0
3569		successAttempt = tc.successAttempt
3570
3571		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
3572		_, err := tsc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(!tc.failFast))
3573		cancel()
3574		if status.Code(err) != tc.errCode {
3575			t.Errorf("%+v: tsc.EmptyCall(_, _) = _, %v, want _, Code=%v", tc, err, tc.errCode)
3576		}
3577	}
3578}
3579
// TestCancel verifies that canceling the context of an in-flight unary RPC
// fails the RPC with codes.Canceled, for every test environment.
func (s) TestCancel(t *testing.T) {
	for _, e := range listTestEnv() {
		testCancel(t, e)
	}
}
3585
// testCancel starts a server whose unary handler sleeps for one
// second, cancels the RPC's context 1ms after issuing the call, and
// expects the call to fail with codes.Canceled.
func testCancel(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise("grpc: the client connection is closing; please retry")
	te.startServer(&testServer{security: e.security, unaryCallSleepTime: time.Second})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)

	const argSize = 2718
	const respSize = 314

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: respSize,
		Payload:      payload,
	}
	ctx, cancel := context.WithCancel(context.Background())
	// Cancel well before the server's one-second handler sleep ends so
	// the RPC is still in flight when the cancellation lands.
	time.AfterFunc(1*time.Millisecond, cancel)
	if r, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Canceled {
		t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %s", r, err, codes.Canceled)
	}
	awaitNewConnLogOutput()
}
3615
3616func (s) TestCancelNoIO(t *testing.T) {
3617	for _, e := range listTestEnv() {
3618		testCancelNoIO(t, e)
3619	}
3620}
3621
// testCancelNoIO fills the server's single-stream quota with a
// blocked streaming RPC, then verifies that a later stream, queued
// behind the quota, proceeds once the first stream is canceled.
func testCancelNoIO(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise("http2Client.notifyError got notified that the client transport was broken")
	te.maxStream = 1 // Only allows 1 live stream per server transport.
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)

	// Start one blocked RPC for which we'll never send streaming
	// input. This will consume the 1 maximum concurrent streams,
	// causing future RPCs to hang.
	ctx, cancelFirst := context.WithCancel(context.Background())
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}

	// Loop until the ClientConn receives the initial settings
	// frame from the server, notifying it about the maximum
	// concurrent streams. We know when it's received it because
	// an RPC will fail with codes.DeadlineExceeded instead of
	// succeeding.
	// TODO(bradfitz): add internal test hook for this (Issue 534)
	for {
		ctx, cancelSecond := context.WithTimeout(context.Background(), 50*time.Millisecond)
		_, err := tc.StreamingInputCall(ctx)
		cancelSecond()
		if err == nil {
			continue
		}
		if status.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
	}
	// If there are any RPCs in flight before the client receives
	// the max streams setting, let them be expired.
	// TODO(bradfitz): add internal test hook for this (Issue 534)
	time.Sleep(50 * time.Millisecond)

	// Free the quota shortly after the final RPC below starts waiting.
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancelFirst()
	}()

	// This should be blocked until the 1st is canceled, then succeed.
	ctx, cancelThird := context.WithTimeout(context.Background(), 500*time.Millisecond)
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	cancelThird()
}
3676
3677// The following tests the gRPC streaming RPC implementations.
3678// TODO(zhaoq): Have better coverage on error cases.
var (
	reqSizes  = []int{27182, 8, 1828, 45904} // per-message request payload sizes
	respSizes = []int{31415, 9, 2653, 58979} // response sizes, paired with reqSizes by index
)
3683
3684func (s) TestNoService(t *testing.T) {
3685	for _, e := range listTestEnv() {
3686		testNoService(t, e)
3687	}
3688}
3689
3690func testNoService(t *testing.T, e env) {
3691	te := newTest(t, e)
3692	te.startServer(nil)
3693	defer te.tearDown()
3694
3695	cc := te.clientConn()
3696	tc := testpb.NewTestServiceClient(cc)
3697
3698	stream, err := tc.FullDuplexCall(te.ctx, grpc.WaitForReady(true))
3699	if err != nil {
3700		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3701	}
3702	if _, err := stream.Recv(); status.Code(err) != codes.Unimplemented {
3703		t.Fatalf("stream.Recv() = _, %v, want _, error code %s", err, codes.Unimplemented)
3704	}
3705}
3706
3707func (s) TestPingPong(t *testing.T) {
3708	for _, e := range listTestEnv() {
3709		testPingPong(t, e)
3710	}
3711}
3712
3713func testPingPong(t *testing.T, e env) {
3714	te := newTest(t, e)
3715	te.startServer(&testServer{security: e.security})
3716	defer te.tearDown()
3717	tc := testpb.NewTestServiceClient(te.clientConn())
3718
3719	stream, err := tc.FullDuplexCall(te.ctx)
3720	if err != nil {
3721		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
3722	}
3723	var index int
3724	for index < len(reqSizes) {
3725		respParam := []*testpb.ResponseParameters{
3726			{
3727				Size: int32(respSizes[index]),
3728			},
3729		}
3730
3731		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
3732		if err != nil {
3733			t.Fatal(err)
3734		}
3735
3736		req := &testpb.StreamingOutputCallRequest{
3737			ResponseType:       testpb.PayloadType_COMPRESSABLE,
3738			ResponseParameters: respParam,
3739			Payload:            payload,
3740		}
3741		if err := stream.Send(req); err != nil {
3742			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
3743		}
3744		reply, err := stream.Recv()
3745		if err != nil {
3746			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
3747		}
3748		pt := reply.GetPayload().GetType()
3749		if pt != testpb.PayloadType_COMPRESSABLE {
3750			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
3751		}
3752		size := len(reply.GetPayload().GetBody())
3753		if size != int(respSizes[index]) {
3754			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
3755		}
3756		index++
3757	}
3758	if err := stream.CloseSend(); err != nil {
3759		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
3760	}
3761	if _, err := stream.Recv(); err != io.EOF {
3762		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
3763	}
3764}
3765
3766func (s) TestMetadataStreamingRPC(t *testing.T) {
3767	for _, e := range listTestEnv() {
3768		testMetadataStreamingRPC(t, e)
3769	}
3770}
3771
// testMetadataStreamingRPC sends custom metadata on a full-duplex call
// and verifies it is reflected back in the response header (checked
// twice to also exercise Header's cached value), then checks that the
// expected trailer metadata is available after the stream drains.
func testMetadataStreamingRPC(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	// The sender side runs in its own goroutine while the main
	// goroutine drains responses below.
	go func() {
		headerMD, err := stream.Header()
		if e.security == "tls" {
			delete(headerMD, "transport_security_type")
		}
		// Strip transport-added keys so only testMetadata remains.
		delete(headerMD, "trailer") // ignore if present
		delete(headerMD, "user-agent")
		delete(headerMD, "content-type")
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// test the cached value.
		headerMD, err = stream.Header()
		delete(headerMD, "trailer") // ignore if present
		delete(headerMD, "user-agent")
		delete(headerMD, "content-type")
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		err = func() error {
			for index := 0; index < len(reqSizes); index++ {
				respParam := []*testpb.ResponseParameters{
					{
						Size: int32(respSizes[index]),
					},
				}

				payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
				if err != nil {
					return err
				}

				req := &testpb.StreamingOutputCallRequest{
					ResponseType:       testpb.PayloadType_COMPRESSABLE,
					ResponseParameters: respParam,
					Payload:            payload,
				}
				if err := stream.Send(req); err != nil {
					return fmt.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				}
			}
			return nil
		}()
		// Tell the server we're done sending args.
		stream.CloseSend()
		if err != nil {
			t.Error(err)
		}
	}()
	// Drain all responses; Trailer is only valid once Recv has
	// returned a non-nil error (stream terminated).
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testTrailerMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testTrailerMetadata)
	}
}
3842
3843func (s) TestServerStreaming(t *testing.T) {
3844	for _, e := range listTestEnv() {
3845		testServerStreaming(t, e)
3846	}
3847}
3848
3849func testServerStreaming(t *testing.T, e env) {
3850	te := newTest(t, e)
3851	te.startServer(&testServer{security: e.security})
3852	defer te.tearDown()
3853	tc := testpb.NewTestServiceClient(te.clientConn())
3854
3855	respParam := make([]*testpb.ResponseParameters, len(respSizes))
3856	for i, s := range respSizes {
3857		respParam[i] = &testpb.ResponseParameters{
3858			Size: int32(s),
3859		}
3860	}
3861	req := &testpb.StreamingOutputCallRequest{
3862		ResponseType:       testpb.PayloadType_COMPRESSABLE,
3863		ResponseParameters: respParam,
3864	}
3865	stream, err := tc.StreamingOutputCall(context.Background(), req)
3866	if err != nil {
3867		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
3868	}
3869	var rpcStatus error
3870	var respCnt int
3871	var index int
3872	for {
3873		reply, err := stream.Recv()
3874		if err != nil {
3875			rpcStatus = err
3876			break
3877		}
3878		pt := reply.GetPayload().GetType()
3879		if pt != testpb.PayloadType_COMPRESSABLE {
3880			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
3881		}
3882		size := len(reply.GetPayload().GetBody())
3883		if size != int(respSizes[index]) {
3884			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
3885		}
3886		index++
3887		respCnt++
3888	}
3889	if rpcStatus != io.EOF {
3890		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
3891	}
3892	if respCnt != len(respSizes) {
3893		t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt)
3894	}
3895}
3896
3897func (s) TestFailedServerStreaming(t *testing.T) {
3898	for _, e := range listTestEnv() {
3899		testFailedServerStreaming(t, e)
3900	}
3901}
3902
3903func testFailedServerStreaming(t *testing.T, e env) {
3904	te := newTest(t, e)
3905	te.userAgent = failAppUA
3906	te.startServer(&testServer{security: e.security})
3907	defer te.tearDown()
3908	tc := testpb.NewTestServiceClient(te.clientConn())
3909
3910	respParam := make([]*testpb.ResponseParameters, len(respSizes))
3911	for i, s := range respSizes {
3912		respParam[i] = &testpb.ResponseParameters{
3913			Size: int32(s),
3914		}
3915	}
3916	req := &testpb.StreamingOutputCallRequest{
3917		ResponseType:       testpb.PayloadType_COMPRESSABLE,
3918		ResponseParameters: respParam,
3919	}
3920	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
3921	stream, err := tc.StreamingOutputCall(ctx, req)
3922	if err != nil {
3923		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
3924	}
3925	wantErr := status.Error(codes.DataLoss, "error for testing: "+failAppUA)
3926	if _, err := stream.Recv(); !equalError(err, wantErr) {
3927		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, wantErr)
3928	}
3929}
3930
// equalError reports whether x and y are the same error value or
// carry identical error messages (both non-nil).
func equalError(x, y error) bool {
	if x == y {
		return true
	}
	if x == nil || y == nil {
		return false
	}
	return x.Error() == y.Error()
}
3934
3935// concurrentSendServer is a TestServiceServer whose
3936// StreamingOutputCall makes ten serial Send calls, sending payloads
3937// "0".."9", inclusive.  TestServerStreamingConcurrent verifies they
3938// were received in the correct order, and that there were no races.
3939//
3940// All other TestServiceServer methods crash if called.
type concurrentSendServer struct {
	// Embedded to satisfy the TestServiceServer interface; methods not
	// overridden below crash if called (see comment above).
	testpb.TestServiceServer
}
3944
3945func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
3946	for i := 0; i < 10; i++ {
3947		stream.Send(&testpb.StreamingOutputCallResponse{
3948			Payload: &testpb.Payload{
3949				Body: []byte{'0' + uint8(i)},
3950			},
3951		})
3952	}
3953	return nil
3954}
3955
3956// Tests doing a bunch of concurrent streaming output calls.
3957func (s) TestServerStreamingConcurrent(t *testing.T) {
3958	for _, e := range listTestEnv() {
3959		testServerStreamingConcurrent(t, e)
3960	}
3961}
3962
3963func testServerStreamingConcurrent(t *testing.T, e env) {
3964	te := newTest(t, e)
3965	te.startServer(concurrentSendServer{})
3966	defer te.tearDown()
3967
3968	cc := te.clientConn()
3969	tc := testpb.NewTestServiceClient(cc)
3970
3971	doStreamingCall := func() {
3972		req := &testpb.StreamingOutputCallRequest{}
3973		stream, err := tc.StreamingOutputCall(context.Background(), req)
3974		if err != nil {
3975			t.Errorf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
3976			return
3977		}
3978		var ngot int
3979		var buf bytes.Buffer
3980		for {
3981			reply, err := stream.Recv()
3982			if err == io.EOF {
3983				break
3984			}
3985			if err != nil {
3986				t.Fatal(err)
3987			}
3988			ngot++
3989			if buf.Len() > 0 {
3990				buf.WriteByte(',')
3991			}
3992			buf.Write(reply.GetPayload().GetBody())
3993		}
3994		if want := 10; ngot != want {
3995			t.Errorf("Got %d replies, want %d", ngot, want)
3996		}
3997		if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
3998			t.Errorf("Got replies %q; want %q", got, want)
3999		}
4000	}
4001
4002	var wg sync.WaitGroup
4003	for i := 0; i < 20; i++ {
4004		wg.Add(1)
4005		go func() {
4006			defer wg.Done()
4007			doStreamingCall()
4008		}()
4009	}
4010	wg.Wait()
4011
4012}
4013
// generatePayloadSizes returns the request-size series used by the
// client-streaming tests: one mixed series, 1024 payloads of 8KiB,
// and eight payloads of 2MiB.
func generatePayloadSizes() [][]int {
	// repeated builds a slice of count copies of size, pre-sized to
	// avoid the repeated append growth of the original implementation.
	repeated := func(size, count int) []int {
		s := make([]int, count)
		for i := range s {
			s[i] = size
		}
		return s
	}
	return [][]int{
		{27182, 8, 1828, 45904},
		repeated(1<<13, 1024), // 8 KiB payloads
		repeated(1<<21, 8),    // 2 MiB payloads
	}
}
4035
4036func (s) TestClientStreaming(t *testing.T) {
4037	for _, s := range generatePayloadSizes() {
4038		for _, e := range listTestEnv() {
4039			testClientStreaming(t, e, s)
4040		}
4041	}
4042}
4043
4044func testClientStreaming(t *testing.T, e env, sizes []int) {
4045	te := newTest(t, e)
4046	te.startServer(&testServer{security: e.security})
4047	defer te.tearDown()
4048	tc := testpb.NewTestServiceClient(te.clientConn())
4049
4050	ctx, cancel := context.WithTimeout(te.ctx, time.Second*30)
4051	defer cancel()
4052	stream, err := tc.StreamingInputCall(ctx)
4053	if err != nil {
4054		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
4055	}
4056
4057	var sum int
4058	for _, s := range sizes {
4059		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
4060		if err != nil {
4061			t.Fatal(err)
4062		}
4063
4064		req := &testpb.StreamingInputCallRequest{
4065			Payload: payload,
4066		}
4067		if err := stream.Send(req); err != nil {
4068			t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
4069		}
4070		sum += s
4071	}
4072	reply, err := stream.CloseAndRecv()
4073	if err != nil {
4074		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
4075	}
4076	if reply.GetAggregatedPayloadSize() != int32(sum) {
4077		t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
4078	}
4079}
4080
4081func (s) TestClientStreamingError(t *testing.T) {
4082	for _, e := range listTestEnv() {
4083		if e.name == "handler-tls" {
4084			continue
4085		}
4086		testClientStreamingError(t, e)
4087	}
4088}
4089
// testClientStreamingError streams to a server configured to fail
// early and verifies the client eventually observes io.EOF on Send
// with the real status (codes.NotFound) surfaced by CloseAndRecv.
func testClientStreamingError(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security, earlyFail: true})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	stream, err := tc.StreamingInputCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1)
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.StreamingInputCallRequest{
		Payload: payload,
	}
	// The 1st request should go through.
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
	}
	// Keep sending until the server's early failure propagates back to
	// the client as io.EOF on Send.
	for {
		if err := stream.Send(req); err != io.EOF {
			continue
		}
		if _, err := stream.CloseAndRecv(); status.Code(err) != codes.NotFound {
			t.Fatalf("%v.CloseAndRecv() = %v, want error %s", stream, err, codes.NotFound)
		}
		break
	}
}
4122
4123func (s) TestExceedMaxStreamsLimit(t *testing.T) {
4124	for _, e := range listTestEnv() {
4125		testExceedMaxStreamsLimit(t, e)
4126	}
4127}
4128
4129func testExceedMaxStreamsLimit(t *testing.T, e env) {
4130	te := newTest(t, e)
4131	te.declareLogNoise(
4132		"http2Client.notifyError got notified that the client transport was broken",
4133		"Conn.resetTransport failed to create client transport",
4134		"grpc: the connection is closing",
4135	)
4136	te.maxStream = 1 // Only allows 1 live stream per server transport.
4137	te.startServer(&testServer{security: e.security})
4138	defer te.tearDown()
4139
4140	cc := te.clientConn()
4141	tc := testpb.NewTestServiceClient(cc)
4142
4143	_, err := tc.StreamingInputCall(te.ctx)
4144	if err != nil {
4145		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
4146	}
4147	// Loop until receiving the new max stream setting from the server.
4148	for {
4149		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
4150		defer cancel()
4151		_, err := tc.StreamingInputCall(ctx)
4152		if err == nil {
4153			time.Sleep(50 * time.Millisecond)
4154			continue
4155		}
4156		if status.Code(err) == codes.DeadlineExceeded {
4157			break
4158		}
4159		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
4160	}
4161}
4162
4163func (s) TestStreamsQuotaRecovery(t *testing.T) {
4164	for _, e := range listTestEnv() {
4165		testStreamsQuotaRecovery(t, e)
4166	}
4167}
4168
// testStreamsQuotaRecovery exhausts a one-stream quota with a held
// stream, verifies additional RPCs block until their deadlines, and
// then confirms the quota is returned once the holding stream is
// canceled.
func testStreamsQuotaRecovery(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise(
		"http2Client.notifyError got notified that the client transport was broken",
		"Conn.resetTransport failed to create client transport",
		"grpc: the connection is closing",
	)
	te.maxStream = 1 // Allows 1 live stream.
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// This stream holds the single quota slot until canceled below.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, <nil>", err)
	}
	// Loop until the new max stream setting is effective.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
		_, err := tc.StreamingInputCall(ctx)
		cancel()
		if err == nil {
			time.Sleep(5 * time.Millisecond)
			continue
		}
		if status.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	// Ten concurrent unary RPCs should all time out while the quota is
	// held (note t.Error, not t.Fatal, inside the goroutines).
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 314)
			if err != nil {
				t.Error(err)
				return
			}
			req := &testpb.SimpleRequest{
				ResponseType: testpb.PayloadType_COMPRESSABLE,
				ResponseSize: 1592,
				Payload:      payload,
			}
			// No rpc should go through due to the max streams limit.
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
			defer cancel()
			if _, err := tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
				t.Errorf("tc.UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
			}
		}()
	}
	wg.Wait()

	cancel()
	// A new stream should be allowed after canceling the first one.
	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %v", err, nil)
	}
}
4235
4236func (s) TestCompressServerHasNoSupport(t *testing.T) {
4237	for _, e := range listTestEnv() {
4238		testCompressServerHasNoSupport(t, e)
4239	}
4240}
4241
4242func testCompressServerHasNoSupport(t *testing.T, e env) {
4243	te := newTest(t, e)
4244	te.serverCompression = false
4245	te.clientCompression = false
4246	te.clientNopCompression = true
4247	te.startServer(&testServer{security: e.security})
4248	defer te.tearDown()
4249	tc := testpb.NewTestServiceClient(te.clientConn())
4250
4251	const argSize = 271828
4252	const respSize = 314159
4253	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
4254	if err != nil {
4255		t.Fatal(err)
4256	}
4257	req := &testpb.SimpleRequest{
4258		ResponseType: testpb.PayloadType_COMPRESSABLE,
4259		ResponseSize: respSize,
4260		Payload:      payload,
4261	}
4262	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.Unimplemented {
4263		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented)
4264	}
4265	// Streaming RPC
4266	stream, err := tc.FullDuplexCall(context.Background())
4267	if err != nil {
4268		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
4269	}
4270	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Unimplemented {
4271		t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented)
4272	}
4273}
4274
4275func (s) TestCompressOK(t *testing.T) {
4276	for _, e := range listTestEnv() {
4277		testCompressOK(t, e)
4278	}
4279}
4280
4281func testCompressOK(t *testing.T, e env) {
4282	te := newTest(t, e)
4283	te.serverCompression = true
4284	te.clientCompression = true
4285	te.startServer(&testServer{security: e.security})
4286	defer te.tearDown()
4287	tc := testpb.NewTestServiceClient(te.clientConn())
4288
4289	// Unary call
4290	const argSize = 271828
4291	const respSize = 314159
4292	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
4293	if err != nil {
4294		t.Fatal(err)
4295	}
4296	req := &testpb.SimpleRequest{
4297		ResponseType: testpb.PayloadType_COMPRESSABLE,
4298		ResponseSize: respSize,
4299		Payload:      payload,
4300	}
4301	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
4302	if _, err := tc.UnaryCall(ctx, req); err != nil {
4303		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
4304	}
4305	// Streaming RPC
4306	ctx, cancel := context.WithCancel(context.Background())
4307	defer cancel()
4308	stream, err := tc.FullDuplexCall(ctx)
4309	if err != nil {
4310		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
4311	}
4312	respParam := []*testpb.ResponseParameters{
4313		{
4314			Size: 31415,
4315		},
4316	}
4317	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
4318	if err != nil {
4319		t.Fatal(err)
4320	}
4321	sreq := &testpb.StreamingOutputCallRequest{
4322		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4323		ResponseParameters: respParam,
4324		Payload:            payload,
4325	}
4326	if err := stream.Send(sreq); err != nil {
4327		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
4328	}
4329	stream.CloseSend()
4330	if _, err := stream.Recv(); err != nil {
4331		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
4332	}
4333	if _, err := stream.Recv(); err != io.EOF {
4334		t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err)
4335	}
4336}
4337
4338func (s) TestIdentityEncoding(t *testing.T) {
4339	for _, e := range listTestEnv() {
4340		testIdentityEncoding(t, e)
4341	}
4342}
4343
4344func testIdentityEncoding(t *testing.T, e env) {
4345	te := newTest(t, e)
4346	te.startServer(&testServer{security: e.security})
4347	defer te.tearDown()
4348	tc := testpb.NewTestServiceClient(te.clientConn())
4349
4350	// Unary call
4351	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 5)
4352	if err != nil {
4353		t.Fatal(err)
4354	}
4355	req := &testpb.SimpleRequest{
4356		ResponseType: testpb.PayloadType_COMPRESSABLE,
4357		ResponseSize: 10,
4358		Payload:      payload,
4359	}
4360	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
4361	if _, err := tc.UnaryCall(ctx, req); err != nil {
4362		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
4363	}
4364	// Streaming RPC
4365	ctx, cancel := context.WithCancel(context.Background())
4366	defer cancel()
4367	stream, err := tc.FullDuplexCall(ctx, grpc.UseCompressor("identity"))
4368	if err != nil {
4369		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
4370	}
4371	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
4372	if err != nil {
4373		t.Fatal(err)
4374	}
4375	sreq := &testpb.StreamingOutputCallRequest{
4376		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4377		ResponseParameters: []*testpb.ResponseParameters{{Size: 10}},
4378		Payload:            payload,
4379	}
4380	if err := stream.Send(sreq); err != nil {
4381		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
4382	}
4383	stream.CloseSend()
4384	if _, err := stream.Recv(); err != nil {
4385		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
4386	}
4387	if _, err := stream.Recv(); err != io.EOF {
4388		t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err)
4389	}
4390}
4391
4392func (s) TestUnaryClientInterceptor(t *testing.T) {
4393	for _, e := range listTestEnv() {
4394		testUnaryClientInterceptor(t, e)
4395	}
4396}
4397
4398func failOkayRPC(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
4399	err := invoker(ctx, method, req, reply, cc, opts...)
4400	if err == nil {
4401		return status.Error(codes.NotFound, "")
4402	}
4403	return err
4404}
4405
4406func testUnaryClientInterceptor(t *testing.T, e env) {
4407	te := newTest(t, e)
4408	te.userAgent = testAppUA
4409	te.unaryClientInt = failOkayRPC
4410	te.startServer(&testServer{security: e.security})
4411	defer te.tearDown()
4412
4413	tc := testpb.NewTestServiceClient(te.clientConn())
4414	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.NotFound {
4415		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound)
4416	}
4417}
4418
4419func (s) TestStreamClientInterceptor(t *testing.T) {
4420	for _, e := range listTestEnv() {
4421		testStreamClientInterceptor(t, e)
4422	}
4423}
4424
4425func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
4426	s, err := streamer(ctx, desc, cc, method, opts...)
4427	if err == nil {
4428		return nil, status.Error(codes.NotFound, "")
4429	}
4430	return s, nil
4431}
4432
4433func testStreamClientInterceptor(t *testing.T, e env) {
4434	te := newTest(t, e)
4435	te.streamClientInt = failOkayStream
4436	te.startServer(&testServer{security: e.security})
4437	defer te.tearDown()
4438
4439	tc := testpb.NewTestServiceClient(te.clientConn())
4440	respParam := []*testpb.ResponseParameters{
4441		{
4442			Size: int32(1),
4443		},
4444	}
4445	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
4446	if err != nil {
4447		t.Fatal(err)
4448	}
4449	req := &testpb.StreamingOutputCallRequest{
4450		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4451		ResponseParameters: respParam,
4452		Payload:            payload,
4453	}
4454	if _, err := tc.StreamingOutputCall(context.Background(), req); status.Code(err) != codes.NotFound {
4455		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, error code %s", tc, err, codes.NotFound)
4456	}
4457}
4458
4459func (s) TestUnaryServerInterceptor(t *testing.T) {
4460	for _, e := range listTestEnv() {
4461		testUnaryServerInterceptor(t, e)
4462	}
4463}
4464
// errInjector is a unary server interceptor that rejects every RPC
// with codes.PermissionDenied without invoking the handler.
func errInjector(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	return nil, status.Error(codes.PermissionDenied, "")
}
4468
4469func testUnaryServerInterceptor(t *testing.T, e env) {
4470	te := newTest(t, e)
4471	te.unaryServerInt = errInjector
4472	te.startServer(&testServer{security: e.security})
4473	defer te.tearDown()
4474
4475	tc := testpb.NewTestServiceClient(te.clientConn())
4476	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.PermissionDenied {
4477		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
4478	}
4479}
4480
4481func (s) TestStreamServerInterceptor(t *testing.T) {
4482	for _, e := range listTestEnv() {
4483		// TODO(bradfitz): Temporarily skip this env due to #619.
4484		if e.name == "handler-tls" {
4485			continue
4486		}
4487		testStreamServerInterceptor(t, e)
4488	}
4489}
4490
4491func fullDuplexOnly(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
4492	if info.FullMethod == "/grpc.testing.TestService/FullDuplexCall" {
4493		return handler(srv, ss)
4494	}
4495	// Reject the other methods.
4496	return status.Error(codes.PermissionDenied, "")
4497}
4498
4499func testStreamServerInterceptor(t *testing.T, e env) {
4500	te := newTest(t, e)
4501	te.streamServerInt = fullDuplexOnly
4502	te.startServer(&testServer{security: e.security})
4503	defer te.tearDown()
4504
4505	tc := testpb.NewTestServiceClient(te.clientConn())
4506	respParam := []*testpb.ResponseParameters{
4507		{
4508			Size: int32(1),
4509		},
4510	}
4511	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
4512	if err != nil {
4513		t.Fatal(err)
4514	}
4515	req := &testpb.StreamingOutputCallRequest{
4516		ResponseType:       testpb.PayloadType_COMPRESSABLE,
4517		ResponseParameters: respParam,
4518		Payload:            payload,
4519	}
4520	s1, err := tc.StreamingOutputCall(context.Background(), req)
4521	if err != nil {
4522		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, <nil>", tc, err)
4523	}
4524	if _, err := s1.Recv(); status.Code(err) != codes.PermissionDenied {
4525		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
4526	}
4527	s2, err := tc.FullDuplexCall(context.Background())
4528	if err != nil {
4529		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
4530	}
4531	if err := s2.Send(req); err != nil {
4532		t.Fatalf("%v.Send(_) = %v, want <nil>", s2, err)
4533	}
4534	if _, err := s2.Recv(); err != nil {
4535		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", s2, err)
4536	}
4537}
4538
4539// funcServer implements methods of TestServiceServer using funcs,
4540// similar to an http.HandlerFunc.
4541// Any unimplemented method will crash. Tests implement the method(s)
4542// they need.
type funcServer struct {
	testpb.TestServiceServer
	// Per-method hooks supplied by individual tests; calling a method
	// whose hook is nil panics (nil function call), per the comment
	// above.
	unaryCall          func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error)
	streamingInputCall func(stream testpb.TestService_StreamingInputCallServer) error
	fullDuplexCall     func(stream testpb.TestService_FullDuplexCallServer) error
}
4549
// UnaryCall delegates to the test-provided unaryCall hook.
func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	return s.unaryCall(ctx, in)
}
4553
// StreamingInputCall delegates to the test-provided streamingInputCall hook.
func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
	return s.streamingInputCall(stream)
}
4557
// FullDuplexCall delegates to the test-provided fullDuplexCall hook.
func (s *funcServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	return s.fullDuplexCall(stream)
}
4561
4562func (s) TestClientRequestBodyErrorUnexpectedEOF(t *testing.T) {
4563	for _, e := range listTestEnv() {
4564		testClientRequestBodyErrorUnexpectedEOF(t, e)
4565	}
4566}
4567
// testClientRequestBodyErrorUnexpectedEOF writes a gRPC data frame
// whose length prefix promises 5 bytes but sets END_STREAM, and
// verifies the server still responds with a frame (it used to crash)
// without ever invoking the handler.
func testClientRequestBodyErrorUnexpectedEOF(t *testing.T, e env) {
	te := newTest(t, e)
	// The handler must never run; the request body is malformed.
	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		errUnexpectedCall := errors.New("unexpected call func server method")
		t.Error(errUnexpectedCall)
		return nil, errUnexpectedCall
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall")
		// Say we have 5 bytes coming, but set END_STREAM flag:
		st.writeData(1, true, []byte{0, 0, 0, 0, 5})
		st.wantAnyFrame() // wait for server to crash (it used to crash)
	})
}
4584
4585func (s) TestClientRequestBodyErrorCloseAfterLength(t *testing.T) {
4586	for _, e := range listTestEnv() {
4587		testClientRequestBodyErrorCloseAfterLength(t, e)
4588	}
4589}
4590
4591func testClientRequestBodyErrorCloseAfterLength(t *testing.T, e env) {
4592	te := newTest(t, e)
4593	te.declareLogNoise("Server.processUnaryRPC failed to write status")
4594	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
4595		errUnexpectedCall := errors.New("unexpected call func server method")
4596		t.Error(errUnexpectedCall)
4597		return nil, errUnexpectedCall
4598	}}
4599	te.startServer(ts)
4600	defer te.tearDown()
4601	te.withServerTester(func(st *serverTester) {
4602		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall")
4603		// say we're sending 5 bytes, but then close the connection instead.
4604		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
4605		st.cc.Close()
4606	})
4607}
4608
4609func (s) TestClientRequestBodyErrorCancel(t *testing.T) {
4610	for _, e := range listTestEnv() {
4611		testClientRequestBodyErrorCancel(t, e)
4612	}
4613}
4614
4615func testClientRequestBodyErrorCancel(t *testing.T, e env) {
4616	te := newTest(t, e)
4617	gotCall := make(chan bool, 1)
4618	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
4619		gotCall <- true
4620		return new(testpb.SimpleResponse), nil
4621	}}
4622	te.startServer(ts)
4623	defer te.tearDown()
4624	te.withServerTester(func(st *serverTester) {
4625		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall")
4626		// Say we have 5 bytes coming, but cancel it instead.
4627		st.writeRSTStream(1, http2.ErrCodeCancel)
4628		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
4629
4630		// Verify we didn't a call yet.
4631		select {
4632		case <-gotCall:
4633			t.Fatal("unexpected call")
4634		default:
4635		}
4636
4637		// And now send an uncanceled (but still invalid), just to get a response.
4638		st.writeHeadersGRPC(3, "/grpc.testing.TestService/UnaryCall")
4639		st.writeData(3, true, []byte{0, 0, 0, 0, 0})
4640		<-gotCall
4641		st.wantAnyFrame()
4642	})
4643}
4644
4645func (s) TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) {
4646	for _, e := range listTestEnv() {
4647		testClientRequestBodyErrorCancelStreamingInput(t, e)
4648	}
4649}
4650
4651func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) {
4652	te := newTest(t, e)
4653	recvErr := make(chan error, 1)
4654	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
4655		_, err := stream.Recv()
4656		recvErr <- err
4657		return nil
4658	}}
4659	te.startServer(ts)
4660	defer te.tearDown()
4661	te.withServerTester(func(st *serverTester) {
4662		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall")
4663		// Say we have 5 bytes coming, but cancel it instead.
4664		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
4665		st.writeRSTStream(1, http2.ErrCodeCancel)
4666
4667		var got error
4668		select {
4669		case got = <-recvErr:
4670		case <-time.After(3 * time.Second):
4671			t.Fatal("timeout waiting for error")
4672		}
4673		if grpc.Code(got) != codes.Canceled {
4674			t.Errorf("error = %#v; want error code %s", got, codes.Canceled)
4675		}
4676	})
4677}
4678
4679func (s) TestClientResourceExhaustedCancelFullDuplex(t *testing.T) {
4680	for _, e := range listTestEnv() {
4681		if e.httpHandler {
4682			// httpHandler write won't be blocked on flow control window.
4683			continue
4684		}
4685		testClientResourceExhaustedCancelFullDuplex(t, e)
4686	}
4687}
4688
// testClientResourceExhaustedCancelFullDuplex verifies that when the client
// fails a full-duplex stream with ResourceExhausted (a received message
// exceeds its configured limit), the server side of the stream observes
// codes.Canceled.
func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) {
	te := newTest(t, e)
	recvErr := make(chan error, 1)
	ts := &funcServer{fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
		defer close(recvErr)
		_, err := stream.Recv()
		if err != nil {
			return status.Errorf(codes.Internal, "stream.Recv() got error: %v, want <nil>", err)
		}
		// Create a 10-byte payload; once wrapped in a response proto it
		// exceeds the client's 10-byte receive limit set below.
		// NOTE(review): an earlier comment here claimed this was larger than
		// the default flow control window, which does not match the size.
		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 10)
		if err != nil {
			return err
		}
		resp := &testpb.StreamingOutputCallResponse{
			Payload: payload,
		}
		ce := make(chan error, 1)
		// Send responses in a tight loop until Send fails (expected once the
		// client cancels the stream after its ResourceExhausted error).
		go func() {
			var err error
			for {
				if err = stream.Send(resp); err != nil {
					break
				}
			}
			ce <- err
		}()
		select {
		case err = <-ce:
		case <-time.After(10 * time.Second):
			err = errors.New("10s timeout reached")
		}
		recvErr <- err
		return err
	}}
	te.startServer(ts)
	defer te.tearDown()
	// set a low limit on receive message size to error with Resource Exhausted on
	// client side when server send a large message.
	te.maxClientReceiveMsgSize = newInt(10)
	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	req := &testpb.StreamingOutputCallRequest{}
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
	}
	if _, err := stream.Recv(); status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
	// The server's send loop should have terminated with Canceled.
	err = <-recvErr
	if status.Code(err) != codes.Canceled {
		t.Fatalf("server got error %v, want error code: %s", err, codes.Canceled)
	}
}
4747
// clientTimeoutCreds is a TransportCredentials stub whose first
// ClientHandshake fails with context.DeadlineExceeded and whose subsequent
// handshakes succeed, passing the connection through unchanged.
type clientTimeoutCreds struct {
	timeoutReturned bool
}

// ClientHandshake fails with DeadlineExceeded exactly once, then succeeds.
func (c *clientTimeoutCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	if !c.timeoutReturned {
		c.timeoutReturned = true
		return nil, nil, context.DeadlineExceeded
	}
	return rawConn, nil, nil
}

// ServerHandshake is a no-op passthrough.
func (c *clientTimeoutCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return rawConn, nil, nil
}

// Info returns empty protocol info.
func (c *clientTimeoutCreds) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{}
}

// Clone returns nil; this stub is not cloned by the tests that use it.
func (c *clientTimeoutCreds) Clone() credentials.TransportCredentials {
	return nil
}

// OverrideServerName is a no-op.
func (c *clientTimeoutCreds) OverrideServerName(s string) error {
	return nil
}
4771
4772func (s) TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) {
4773	te := newTest(t, env{name: "timeout-cred", network: "tcp", security: "clientTimeoutCreds", balancer: "v1"})
4774	te.userAgent = testAppUA
4775	te.startServer(&testServer{security: te.e.security})
4776	defer te.tearDown()
4777
4778	cc := te.clientConn()
4779	tc := testpb.NewTestServiceClient(cc)
4780	// This unary call should succeed, because ClientHandshake will succeed for the second time.
4781	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
4782		te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want <nil>", err)
4783	}
4784}
4785
// serverDispatchCred is a TransportCredentials stub whose server handshake
// always reports ErrConnDispatched, handing the raw connection to the test
// via rawConnCh instead of letting gRPC use it.
type serverDispatchCred struct {
	rawConnCh chan net.Conn
}

// newServerDispatchCred returns a serverDispatchCred with a buffered channel
// so the first dispatched connection never blocks the handshake.
func newServerDispatchCred() *serverDispatchCred {
	return &serverDispatchCred{
		rawConnCh: make(chan net.Conn, 1),
	}
}

// ClientHandshake is a no-op passthrough.
func (c *serverDispatchCred) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return rawConn, nil, nil
}

// ServerHandshake publishes rawConn (non-blocking; extras are dropped) and
// tells gRPC the connection was dispatched elsewhere.
func (c *serverDispatchCred) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	select {
	case c.rawConnCh <- rawConn:
	default:
	}
	return nil, nil, credentials.ErrConnDispatched
}

// Info returns empty protocol info.
func (c *serverDispatchCred) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{}
}

// Clone returns nil; this stub is not cloned by the tests that use it.
func (c *serverDispatchCred) Clone() credentials.TransportCredentials {
	return nil
}

// OverrideServerName is a no-op.
func (c *serverDispatchCred) OverrideServerName(s string) error {
	return nil
}

// getRawConn blocks until a connection has been dispatched and returns it.
func (c *serverDispatchCred) getRawConn() net.Conn {
	return <-c.rawConnCh
}
4817
4818func (s) TestServerCredsDispatch(t *testing.T) {
4819	lis, err := net.Listen("tcp", "localhost:0")
4820	if err != nil {
4821		t.Fatalf("Failed to listen: %v", err)
4822	}
4823	cred := newServerDispatchCred()
4824	s := grpc.NewServer(grpc.Creds(cred))
4825	go s.Serve(lis)
4826	defer s.Stop()
4827
4828	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(cred))
4829	if err != nil {
4830		t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err)
4831	}
4832	defer cc.Close()
4833
4834	rawConn := cred.getRawConn()
4835	// Give grpc a chance to see the error and potentially close the connection.
4836	// And check that connection is not closed after that.
4837	time.Sleep(100 * time.Millisecond)
4838	// Check rawConn is not closed.
4839	if n, err := rawConn.Write([]byte{0}); n <= 0 || err != nil {
4840		t.Errorf("Read() = %v, %v; want n>0, <nil>", n, err)
4841	}
4842}
4843
// authorityCheckCreds is a TransportCredentials stub that records the
// authority string passed to ClientHandshake so tests can assert on it.
type authorityCheckCreds struct {
	got string // last authority seen by ClientHandshake
}

// ServerHandshake is a no-op passthrough.
func (c *authorityCheckCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return rawConn, nil, nil
}

// ClientHandshake records the authority and passes the connection through.
func (c *authorityCheckCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	c.got = authority
	return rawConn, nil, nil
}

// Info returns empty protocol info.
func (c *authorityCheckCreds) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{}
}

// Clone returns the receiver itself so the recorded authority is observable
// by the test that created it.
func (c *authorityCheckCreds) Clone() credentials.TransportCredentials {
	return c
}

// OverrideServerName is a no-op.
func (c *authorityCheckCreds) OverrideServerName(s string) error {
	return nil
}
4864
4865// This test makes sure that the authority client handshake gets is the endpoint
4866// in dial target, not the resolved ip address.
4867func (s) TestCredsHandshakeAuthority(t *testing.T) {
4868	const testAuthority = "test.auth.ori.ty"
4869
4870	lis, err := net.Listen("tcp", "localhost:0")
4871	if err != nil {
4872		t.Fatalf("Failed to listen: %v", err)
4873	}
4874	cred := &authorityCheckCreds{}
4875	s := grpc.NewServer()
4876	go s.Serve(lis)
4877	defer s.Stop()
4878
4879	r, rcleanup := manual.GenerateAndRegisterManualResolver()
4880	defer rcleanup()
4881
4882	cc, err := grpc.Dial(r.Scheme()+":///"+testAuthority, grpc.WithTransportCredentials(cred))
4883	if err != nil {
4884		t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err)
4885	}
4886	defer cc.Close()
4887	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}})
4888
4889	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
4890	defer cancel()
4891	for {
4892		s := cc.GetState()
4893		if s == connectivity.Ready {
4894			break
4895		}
4896		if !cc.WaitForStateChange(ctx, s) {
4897			// ctx got timeout or canceled.
4898			t.Fatalf("ClientConn is not ready after 100 ms")
4899		}
4900	}
4901
4902	if cred.got != testAuthority {
4903		t.Fatalf("client creds got authority: %q, want: %q", cred.got, testAuthority)
4904	}
4905}
4906
4907// This test makes sure that the authority client handshake gets is the endpoint
4908// of the ServerName of the address when it is set.
4909func (s) TestCredsHandshakeServerNameAuthority(t *testing.T) {
4910	const testAuthority = "test.auth.ori.ty"
4911	const testServerName = "test.server.name"
4912
4913	lis, err := net.Listen("tcp", "localhost:0")
4914	if err != nil {
4915		t.Fatalf("Failed to listen: %v", err)
4916	}
4917	cred := &authorityCheckCreds{}
4918	s := grpc.NewServer()
4919	go s.Serve(lis)
4920	defer s.Stop()
4921
4922	r, rcleanup := manual.GenerateAndRegisterManualResolver()
4923	defer rcleanup()
4924
4925	cc, err := grpc.Dial(r.Scheme()+":///"+testAuthority, grpc.WithTransportCredentials(cred))
4926	if err != nil {
4927		t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err)
4928	}
4929	defer cc.Close()
4930	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String(), ServerName: testServerName}}})
4931
4932	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
4933	defer cancel()
4934	for {
4935		s := cc.GetState()
4936		if s == connectivity.Ready {
4937			break
4938		}
4939		if !cc.WaitForStateChange(ctx, s) {
4940			// ctx got timeout or canceled.
4941			t.Fatalf("ClientConn is not ready after 100 ms")
4942		}
4943	}
4944
4945	if cred.got != testServerName {
4946		t.Fatalf("client creds got authority: %q, want: %q", cred.got, testAuthority)
4947	}
4948}
4949
// clientFailCreds is a TransportCredentials stub whose client handshake
// always fails with a fatal (non-temporary) error.
type clientFailCreds struct{}

// ServerHandshake is a no-op passthrough.
func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return rawConn, nil, nil
}

// ClientHandshake always fails.
func (c *clientFailCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return nil, nil, fmt.Errorf("client handshake fails with fatal error")
}

// Info returns empty protocol info.
func (c *clientFailCreds) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{}
}

// Clone returns the receiver; the stub is stateless.
func (c *clientFailCreds) Clone() credentials.TransportCredentials {
	return c
}

// OverrideServerName is a no-op.
func (c *clientFailCreds) OverrideServerName(s string) error {
	return nil
}
4967
4968// This test makes sure that failfast RPCs fail if client handshake fails with
4969// fatal errors.
4970func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) {
4971	lis, err := net.Listen("tcp", "localhost:0")
4972	if err != nil {
4973		t.Fatalf("Failed to listen: %v", err)
4974	}
4975	defer lis.Close()
4976
4977	cc, err := grpc.Dial("passthrough:///"+lis.Addr().String(), grpc.WithTransportCredentials(&clientFailCreds{}))
4978	if err != nil {
4979		t.Fatalf("grpc.Dial(_) = %v", err)
4980	}
4981	defer cc.Close()
4982
4983	tc := testpb.NewTestServiceClient(cc)
4984	// This unary call should fail, but not timeout.
4985	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
4986	defer cancel()
4987	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(false)); status.Code(err) != codes.Unavailable {
4988		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want <Unavailable>", err)
4989	}
4990}
4991
// TestFlowControlLogicalRace stresses flow control by issuing many
// short-deadline streaming calls against a server that floods each stream.
func (s) TestFlowControlLogicalRace(t *testing.T) {
	// Test for a regression of https://github.com/grpc/grpc-go/issues/632,
	// and other flow control bugs.

	const (
		itemCount   = 100
		itemSize    = 1 << 10
		recvCount   = 2
		maxFailures = 3

		requestTimeout = time.Second * 5
	)

	requestCount := 10000
	if raceMode {
		// Race-enabled builds are slower; reduce iterations.
		requestCount = 1000
	}

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	defer lis.Close()

	s := grpc.NewServer()
	testpb.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{
		itemCount: itemCount,
		itemSize:  itemSize,
	})
	defer s.Stop()

	go s.Serve(lis)

	ctx := context.Background()

	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err)
	}
	defer cc.Close()
	cl := testpb.NewTestServiceClient(cc)

	failures := 0
	for i := 0; i < requestCount; i++ {
		// Per-request context (shadows the outer ctx) so each call has its
		// own short deadline.
		ctx, cancel := context.WithTimeout(ctx, requestTimeout)
		output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
		if err != nil {
			t.Fatalf("StreamingOutputCall; err = %q", err)
		}

		// Receive only recvCount of the itemCount responses, then cancel;
		// the abandoned data is what exercises flow-control accounting.
		j := 0
	loop:
		for ; j < recvCount; j++ {
			_, err := output.Recv()
			if err != nil {
				if err == io.EOF {
					break loop
				}
				switch status.Code(err) {
				case codes.DeadlineExceeded:
					break loop
				default:
					t.Fatalf("Recv; err = %q", err)
				}
			}
		}
		cancel()
		<-ctx.Done()

		if j < recvCount {
			t.Errorf("got %d responses to request %d", j, i)
			failures++
			if failures >= maxFailures {
				// Continue past the first failure to see if the connection is
				// entirely broken, or if only a single RPC was affected
				break
			}
		}
	}
}
5072
// flowControlLogicalRaceServer streams itemCount responses of itemSize bytes
// each, used by TestFlowControlLogicalRace to flood the client.
type flowControlLogicalRaceServer struct {
	testpb.TestServiceServer

	itemSize  int
	itemCount int
}

// StreamingOutputCall sends itemCount payloads of itemSize bytes, stopping
// early if the client cancels (Send returns an error).
func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testpb.TestService_StreamingOutputCallServer) error {
	for i := 0; i < s.itemCount; i++ {
		err := srv.Send(&testpb.StreamingOutputCallResponse{
			Payload: &testpb.Payload{
				// Sending a large stream of data which the client rejects
				// helps to trigger some types of flow control bugs.
				//
				// Reallocating memory here is inefficient, but the stress it
				// puts on the GC leads to more frequent flow control
				// failures. The GC likely causes more variety in the
				// goroutine scheduling orders.
				Body: bytes.Repeat([]byte("a"), s.itemSize),
			},
		})
		if err != nil {
			return err
		}
	}
	return nil
}
5100
5101type lockingWriter struct {
5102	mu sync.Mutex
5103	w  io.Writer
5104}
5105
5106func (lw *lockingWriter) Write(p []byte) (n int, err error) {
5107	lw.mu.Lock()
5108	defer lw.mu.Unlock()
5109	return lw.w.Write(p)
5110}
5111
5112func (lw *lockingWriter) setWriter(w io.Writer) {
5113	lw.mu.Lock()
5114	defer lw.mu.Unlock()
5115	lw.w = w
5116}
5117
// testLogOutput is the writer grpclog output is routed through in tests; it
// defaults to stderr and is swapped to a filterWriter by declareLogNoise.
var testLogOutput = &lockingWriter{w: os.Stderr}
5119
// awaitNewConnLogOutput waits for any of grpc.NewConn's goroutines to
// terminate, if they're still running. It spams logs with this
// message.  We wait for it so our log filter is still
// active. Otherwise the "defer restore()" at the top of various test
// functions restores our log filter and then the goroutine spams.
func awaitNewConnLogOutput() {
	// Bounded wait: give up after 50ms if the phrase never appears.
	awaitLogOutput(50*time.Millisecond, "grpc: the client connection is closing; please retry")
}
5128
5129func awaitLogOutput(maxWait time.Duration, phrase string) {
5130	pb := []byte(phrase)
5131
5132	timer := time.NewTimer(maxWait)
5133	defer timer.Stop()
5134	wakeup := make(chan bool, 1)
5135	for {
5136		if logOutputHasContents(pb, wakeup) {
5137			return
5138		}
5139		select {
5140		case <-timer.C:
5141			// Too slow. Oh well.
5142			return
5143		case <-wakeup:
5144		}
5145	}
5146}
5147
5148func logOutputHasContents(v []byte, wakeup chan<- bool) bool {
5149	testLogOutput.mu.Lock()
5150	defer testLogOutput.mu.Unlock()
5151	fw, ok := testLogOutput.w.(*filterWriter)
5152	if !ok {
5153		return false
5154	}
5155	fw.mu.Lock()
5156	defer fw.mu.Unlock()
5157	if bytes.Contains(fw.buf.Bytes(), v) {
5158		return true
5159	}
5160	fw.wakeup = wakeup
5161	return false
5162}
5163
// verboseLogs disables log-noise filtering when set, showing all grpclog output.
var verboseLogs = flag.Bool("verbose_logs", false, "show all grpclog output, without filtering")
5165
// noop is the no-op restore func returned by declareLogNoise when filtering is off.
func noop() {}
5167
5168// declareLogNoise declares that t is expected to emit the following noisy phrases,
5169// even on success. Those phrases will be filtered from grpclog output
5170// and only be shown if *verbose_logs or t ends up failing.
5171// The returned restore function should be called with defer to be run
5172// before the test ends.
5173func declareLogNoise(t *testing.T, phrases ...string) (restore func()) {
5174	if *verboseLogs {
5175		return noop
5176	}
5177	fw := &filterWriter{dst: os.Stderr, filter: phrases}
5178	testLogOutput.setWriter(fw)
5179	return func() {
5180		if t.Failed() {
5181			fw.mu.Lock()
5182			defer fw.mu.Unlock()
5183			if fw.buf.Len() > 0 {
5184				t.Logf("Complete log output:\n%s", fw.buf.Bytes())
5185			}
5186		}
5187		testLogOutput.setWriter(os.Stderr)
5188	}
5189}
5190
5191type filterWriter struct {
5192	dst    io.Writer
5193	filter []string
5194
5195	mu     sync.Mutex
5196	buf    bytes.Buffer
5197	wakeup chan<- bool // if non-nil, gets true on write
5198}
5199
5200func (fw *filterWriter) Write(p []byte) (n int, err error) {
5201	fw.mu.Lock()
5202	fw.buf.Write(p)
5203	if fw.wakeup != nil {
5204		select {
5205		case fw.wakeup <- true:
5206		default:
5207		}
5208	}
5209	fw.mu.Unlock()
5210
5211	ps := string(p)
5212	for _, f := range fw.filter {
5213		if strings.Contains(ps, f) {
5214			return len(p), nil
5215		}
5216	}
5217	return fw.dst.Write(p)
5218}
5219
// stubServer is a server that is easy to customize within individual test
// cases.
type stubServer struct {
	// Guarantees we satisfy this interface; panics if unimplemented methods are called.
	testpb.TestServiceServer

	// Customizable implementations of server handlers.
	emptyCall      func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error)
	unaryCall      func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error)
	fullDuplexCall func(stream testpb.TestService_FullDuplexCallServer) error

	// A client connected to this service the test may use.  Created in Start().
	client testpb.TestServiceClient
	cc     *grpc.ClientConn
	s      *grpc.Server

	addr string // address of listener

	cleanups []func() // Lambdas executed in Stop(); populated by Start().

	r *manual.Resolver
}

// EmptyCall delegates to the configured emptyCall func.
func (ss *stubServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
	return ss.emptyCall(ctx, in)
}

// UnaryCall delegates to the configured unaryCall func.
func (ss *stubServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	return ss.unaryCall(ctx, in)
}

// FullDuplexCall delegates to the configured fullDuplexCall func.
func (ss *stubServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	return ss.fullDuplexCall(stream)
}
5254
5255// Start starts the server and creates a client connected to it.
5256func (ss *stubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) error {
5257	r, cleanup := manual.GenerateAndRegisterManualResolver()
5258	ss.r = r
5259	ss.cleanups = append(ss.cleanups, cleanup)
5260
5261	lis, err := net.Listen("tcp", "localhost:0")
5262	if err != nil {
5263		return fmt.Errorf(`net.Listen("tcp", "localhost:0") = %v`, err)
5264	}
5265	ss.addr = lis.Addr().String()
5266	ss.cleanups = append(ss.cleanups, func() { lis.Close() })
5267
5268	s := grpc.NewServer(sopts...)
5269	testpb.RegisterTestServiceServer(s, ss)
5270	go s.Serve(lis)
5271	ss.cleanups = append(ss.cleanups, s.Stop)
5272	ss.s = s
5273
5274	target := ss.r.Scheme() + ":///" + ss.addr
5275
5276	opts := append([]grpc.DialOption{grpc.WithInsecure()}, dopts...)
5277	cc, err := grpc.Dial(target, opts...)
5278	if err != nil {
5279		return fmt.Errorf("grpc.Dial(%q) = %v", target, err)
5280	}
5281	ss.cc = cc
5282	ss.r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ss.addr}}})
5283	if err := ss.waitForReady(cc); err != nil {
5284		return err
5285	}
5286
5287	ss.cleanups = append(ss.cleanups, func() { cc.Close() })
5288
5289	ss.client = testpb.NewTestServiceClient(cc)
5290	return nil
5291}
5292
5293func (ss *stubServer) newServiceConfig(sc string) {
5294	ss.r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ss.addr}}, ServiceConfig: parseCfg(ss.r, sc)})
5295}
5296
5297func (ss *stubServer) waitForReady(cc *grpc.ClientConn) error {
5298	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
5299	defer cancel()
5300	for {
5301		s := cc.GetState()
5302		if s == connectivity.Ready {
5303			return nil
5304		}
5305		if !cc.WaitForStateChange(ctx, s) {
5306			// ctx got timeout or canceled.
5307			return ctx.Err()
5308		}
5309	}
5310}
5311
5312func (ss *stubServer) Stop() {
5313	for i := len(ss.cleanups) - 1; i >= 0; i-- {
5314		ss.cleanups[i]()
5315	}
5316}
5317
5318func (s) TestGRPCMethod(t *testing.T) {
5319	var method string
5320	var ok bool
5321
5322	ss := &stubServer{
5323		emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5324			method, ok = grpc.Method(ctx)
5325			return &testpb.Empty{}, nil
5326		},
5327	}
5328	if err := ss.Start(nil); err != nil {
5329		t.Fatalf("Error starting endpoint server: %v", err)
5330	}
5331	defer ss.Stop()
5332
5333	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
5334	defer cancel()
5335
5336	if _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
5337		t.Fatalf("ss.client.EmptyCall(_, _) = _, %v; want _, nil", err)
5338	}
5339
5340	if want := "/grpc.testing.TestService/EmptyCall"; !ok || method != want {
5341		t.Fatalf("grpc.Method(_) = %q, %v; want %q, true", method, ok, want)
5342	}
5343}
5344
5345func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) {
5346	const mdkey = "somedata"
5347
5348	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
5349	endpoint := &stubServer{
5350		emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5351			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
5352				return nil, status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
5353			}
5354			return &testpb.Empty{}, nil
5355		},
5356	}
5357	if err := endpoint.Start(nil); err != nil {
5358		t.Fatalf("Error starting endpoint server: %v", err)
5359	}
5360	defer endpoint.Stop()
5361
5362	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
5363	// without explicitly copying the metadata.
5364	proxy := &stubServer{
5365		emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5366			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
5367				return nil, status.Errorf(codes.Internal, "proxy: md=%v; want contains(%q)", md, mdkey)
5368			}
5369			return endpoint.client.EmptyCall(ctx, in)
5370		},
5371	}
5372	if err := proxy.Start(nil); err != nil {
5373		t.Fatalf("Error starting proxy server: %v", err)
5374	}
5375	defer proxy.Stop()
5376
5377	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
5378	defer cancel()
5379	md := metadata.Pairs(mdkey, "val")
5380	ctx = metadata.NewOutgoingContext(ctx, md)
5381
5382	// Sanity check that endpoint properly errors when it sees mdkey.
5383	_, err := endpoint.client.EmptyCall(ctx, &testpb.Empty{})
5384	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
5385		t.Fatalf("endpoint.client.EmptyCall(_, _) = _, %v; want _, <status with Code()=Internal>", err)
5386	}
5387
5388	if _, err := proxy.client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
5389		t.Fatal(err.Error())
5390	}
5391}
5392
5393func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) {
5394	const mdkey = "somedata"
5395
5396	// doFDC performs a FullDuplexCall with client and returns the error from the
5397	// first stream.Recv call, or nil if that error is io.EOF.  Calls t.Fatal if
5398	// the stream cannot be established.
5399	doFDC := func(ctx context.Context, client testpb.TestServiceClient) error {
5400		stream, err := client.FullDuplexCall(ctx)
5401		if err != nil {
5402			t.Fatalf("Unwanted error: %v", err)
5403		}
5404		if _, err := stream.Recv(); err != io.EOF {
5405			return err
5406		}
5407		return nil
5408	}
5409
5410	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
5411	endpoint := &stubServer{
5412		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
5413			ctx := stream.Context()
5414			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
5415				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
5416			}
5417			return nil
5418		},
5419	}
5420	if err := endpoint.Start(nil); err != nil {
5421		t.Fatalf("Error starting endpoint server: %v", err)
5422	}
5423	defer endpoint.Stop()
5424
5425	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
5426	// without explicitly copying the metadata.
5427	proxy := &stubServer{
5428		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
5429			ctx := stream.Context()
5430			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
5431				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
5432			}
5433			return doFDC(ctx, endpoint.client)
5434		},
5435	}
5436	if err := proxy.Start(nil); err != nil {
5437		t.Fatalf("Error starting proxy server: %v", err)
5438	}
5439	defer proxy.Stop()
5440
5441	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
5442	defer cancel()
5443	md := metadata.Pairs(mdkey, "val")
5444	ctx = metadata.NewOutgoingContext(ctx, md)
5445
5446	// Sanity check that endpoint properly errors when it sees mdkey in ctx.
5447	err := doFDC(ctx, endpoint.client)
5448	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
5449		t.Fatalf("stream.Recv() = _, %v; want _, <status with Code()=Internal>", err)
5450	}
5451
5452	if err := doFDC(ctx, proxy.client); err != nil {
5453		t.Fatalf("doFDC(_, proxy.client) = %v; want nil", err)
5454	}
5455}
5456
// TestStatsTagsAndTrace verifies that stats tags and trace data set on the
// client context are transmitted in the grpc-tags-bin / grpc-trace-bin
// metadata and surfaced to the server via stats.Tags / stats.Trace.
func (s) TestStatsTagsAndTrace(t *testing.T) {
	// Data added to context by client (typically in a stats handler).
	tags := []byte{1, 5, 2, 4, 3}
	trace := []byte{5, 2, 1, 3, 4}

	// endpoint ensures Tags() and Trace() in context match those that were added
	// by the client and returns an error if not.
	endpoint := &stubServer{
		emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			md, _ := metadata.FromIncomingContext(ctx)
			if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, tags) {
				return nil, status.Errorf(codes.Internal, "stats.Tags(%v)=%v; want %v", ctx, tg, tags)
			}
			if !reflect.DeepEqual(md["grpc-tags-bin"], []string{string(tags)}) {
				return nil, status.Errorf(codes.Internal, "md['grpc-tags-bin']=%v; want %v", md["grpc-tags-bin"], tags)
			}
			if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, trace) {
				return nil, status.Errorf(codes.Internal, "stats.Trace(%v)=%v; want %v", ctx, tr, trace)
			}
			if !reflect.DeepEqual(md["grpc-trace-bin"], []string{string(trace)}) {
				return nil, status.Errorf(codes.Internal, "md['grpc-trace-bin']=%v; want %v", md["grpc-trace-bin"], trace)
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := endpoint.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer endpoint.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Only the final case carries both the expected tags and trace; all
	// earlier cases are missing or mismatching one of them and must fail
	// with Internal.
	testCases := []struct {
		ctx  context.Context
		want codes.Code
	}{
		{ctx: ctx, want: codes.Internal},
		{ctx: stats.SetTags(ctx, tags), want: codes.Internal},
		{ctx: stats.SetTrace(ctx, trace), want: codes.Internal},
		{ctx: stats.SetTags(stats.SetTrace(ctx, tags), tags), want: codes.Internal},
		{ctx: stats.SetTags(stats.SetTrace(ctx, trace), tags), want: codes.OK},
	}

	for _, tc := range testCases {
		_, err := endpoint.client.EmptyCall(tc.ctx, &testpb.Empty{})
		if tc.want == codes.OK && err != nil {
			t.Fatalf("endpoint.client.EmptyCall(%v, _) = _, %v; want _, nil", tc.ctx, err)
		}
		if s, ok := status.FromError(err); !ok || s.Code() != tc.want {
			t.Fatalf("endpoint.client.EmptyCall(%v, _) = _, %v; want _, <status with Code()=%v>", tc.ctx, err, tc.want)
		}
	}
}
5511
5512func (s) TestTapTimeout(t *testing.T) {
5513	sopts := []grpc.ServerOption{
5514		grpc.InTapHandle(func(ctx context.Context, _ *tap.Info) (context.Context, error) {
5515			c, cancel := context.WithCancel(ctx)
5516			// Call cancel instead of setting a deadline so we can detect which error
5517			// occurred -- this cancellation (desired) or the client's deadline
5518			// expired (indicating this cancellation did not affect the RPC).
5519			time.AfterFunc(10*time.Millisecond, cancel)
5520			return c, nil
5521		}),
5522	}
5523
5524	ss := &stubServer{
5525		emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
5526			<-ctx.Done()
5527			return nil, status.Errorf(codes.Canceled, ctx.Err().Error())
5528		},
5529	}
5530	if err := ss.Start(sopts); err != nil {
5531		t.Fatalf("Error starting endpoint server: %v", err)
5532	}
5533	defer ss.Stop()
5534
5535	// This was known to be flaky; test several times.
5536	for i := 0; i < 10; i++ {
5537		// Set our own deadline in case the server hangs.
5538		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
5539		res, err := ss.client.EmptyCall(ctx, &testpb.Empty{})
5540		cancel()
5541		if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled {
5542			t.Fatalf("ss.client.EmptyCall(context.Background(), _) = %v, %v; want nil, <status with Code()=Canceled>", res, err)
5543		}
5544	}
5545
5546}
5547
5548func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) {
5549	ss := &stubServer{
5550		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
5551			return status.Errorf(codes.Internal, "")
5552		},
5553	}
5554	sopts := []grpc.ServerOption{}
5555	if err := ss.Start(sopts); err != nil {
5556		t.Fatalf("Error starting endpoint server: %v", err)
5557	}
5558	defer ss.Stop()
5559	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
5560	defer cancel()
5561	stream, err := ss.client.FullDuplexCall(ctx)
5562	if err != nil {
5563		t.Fatalf("Error while creating stream: %v", err)
5564	}
5565	for {
5566		if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err == nil {
5567			time.Sleep(5 * time.Millisecond)
5568		} else if err == io.EOF {
5569			break // Success.
5570		} else {
5571			t.Fatalf("stream.Send(_) = %v, want io.EOF", err)
5572		}
5573	}
5574}
5575
// windowSizeConfig holds the HTTP/2 flow-control window sizes (in bytes)
// applied to each side of a test connection.
type windowSizeConfig struct {
	serverStream int32 // server-side initial per-stream window
	serverConn   int32 // server-side initial connection window
	clientStream int32 // client-side initial per-stream window
	clientConn   int32 // client-side initial connection window
}
5582
// max returns the larger of two int32 values.
func max(a, b int32) int32 {
	if b > a {
		return b
	}
	return a
}
5589
5590func (s) TestConfigurableWindowSizeWithLargeWindow(t *testing.T) {
5591	wc := windowSizeConfig{
5592		serverStream: 8 * 1024 * 1024,
5593		serverConn:   12 * 1024 * 1024,
5594		clientStream: 6 * 1024 * 1024,
5595		clientConn:   8 * 1024 * 1024,
5596	}
5597	for _, e := range listTestEnv() {
5598		testConfigurableWindowSize(t, e, wc)
5599	}
5600}
5601
5602func (s) TestConfigurableWindowSizeWithSmallWindow(t *testing.T) {
5603	wc := windowSizeConfig{
5604		serverStream: 1,
5605		serverConn:   1,
5606		clientStream: 1,
5607		clientConn:   1,
5608	}
5609	for _, e := range listTestEnv() {
5610		testConfigurableWindowSize(t, e, wc)
5611	}
5612}
5613
5614func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) {
5615	te := newTest(t, e)
5616	te.serverInitialWindowSize = wc.serverStream
5617	te.serverInitialConnWindowSize = wc.serverConn
5618	te.clientInitialWindowSize = wc.clientStream
5619	te.clientInitialConnWindowSize = wc.clientConn
5620
5621	te.startServer(&testServer{security: e.security})
5622	defer te.tearDown()
5623
5624	cc := te.clientConn()
5625	tc := testpb.NewTestServiceClient(cc)
5626	stream, err := tc.FullDuplexCall(context.Background())
5627	if err != nil {
5628		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
5629	}
5630	numOfIter := 11
5631	// Set message size to exhaust largest of window sizes.
5632	messageSize := max(max(wc.serverStream, wc.serverConn), max(wc.clientStream, wc.clientConn)) / int32(numOfIter-1)
5633	messageSize = max(messageSize, 64*1024)
5634	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, messageSize)
5635	if err != nil {
5636		t.Fatal(err)
5637	}
5638	respParams := []*testpb.ResponseParameters{
5639		{
5640			Size: messageSize,
5641		},
5642	}
5643	req := &testpb.StreamingOutputCallRequest{
5644		ResponseType:       testpb.PayloadType_COMPRESSABLE,
5645		ResponseParameters: respParams,
5646		Payload:            payload,
5647	}
5648	for i := 0; i < numOfIter; i++ {
5649		if err := stream.Send(req); err != nil {
5650			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
5651		}
5652		if _, err := stream.Recv(); err != nil {
5653			t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
5654		}
5655	}
5656	if err := stream.CloseSend(); err != nil {
5657		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
5658	}
5659}
5660
var (
	// authdata is the per-RPC credential metadata attached by
	// testPerRPCCredentials and verified by the server-side tap handlers.
	// The "-bin" suffixed key carries binary data, per gRPC metadata naming.
	authdata = map[string]string{
		"test-key":      "test-value",
		"test-key2-bin": string([]byte{1, 2, 3}),
	}
)
5668
// testPerRPCCredentials is a PerRPCCredentials implementation that attaches
// the fixed authdata map to every RPC (see grpc.PerRPCCredentials usage below).
type testPerRPCCredentials struct{}
5670
// GetRequestMetadata returns the static authdata map as request metadata;
// it never fails and ignores its arguments.
func (cr testPerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return authdata, nil
}
5674
// RequireTransportSecurity reports false so these credentials can be used on
// insecure test connections.
func (cr testPerRPCCredentials) RequireTransportSecurity() bool {
	return false
}
5678
5679func authHandle(ctx context.Context, info *tap.Info) (context.Context, error) {
5680	md, ok := metadata.FromIncomingContext(ctx)
5681	if !ok {
5682		return ctx, fmt.Errorf("didn't find metadata in context")
5683	}
5684	for k, vwant := range authdata {
5685		vgot, ok := md[k]
5686		if !ok {
5687			return ctx, fmt.Errorf("didn't find authdata key %v in context", k)
5688		}
5689		if vgot[0] != vwant {
5690			return ctx, fmt.Errorf("for key %v, got value %v, want %v", k, vgot, vwant)
5691		}
5692	}
5693	return ctx, nil
5694}
5695
5696func (s) TestPerRPCCredentialsViaDialOptions(t *testing.T) {
5697	for _, e := range listTestEnv() {
5698		testPerRPCCredentialsViaDialOptions(t, e)
5699	}
5700}
5701
5702func testPerRPCCredentialsViaDialOptions(t *testing.T, e env) {
5703	te := newTest(t, e)
5704	te.tapHandle = authHandle
5705	te.perRPCCreds = testPerRPCCredentials{}
5706	te.startServer(&testServer{security: e.security})
5707	defer te.tearDown()
5708
5709	cc := te.clientConn()
5710	tc := testpb.NewTestServiceClient(cc)
5711	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
5712		t.Fatalf("Test failed. Reason: %v", err)
5713	}
5714}
5715
5716func (s) TestPerRPCCredentialsViaCallOptions(t *testing.T) {
5717	for _, e := range listTestEnv() {
5718		testPerRPCCredentialsViaCallOptions(t, e)
5719	}
5720}
5721
5722func testPerRPCCredentialsViaCallOptions(t *testing.T, e env) {
5723	te := newTest(t, e)
5724	te.tapHandle = authHandle
5725	te.startServer(&testServer{security: e.security})
5726	defer te.tearDown()
5727
5728	cc := te.clientConn()
5729	tc := testpb.NewTestServiceClient(cc)
5730	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil {
5731		t.Fatalf("Test failed. Reason: %v", err)
5732	}
5733}
5734
5735func (s) TestPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T) {
5736	for _, e := range listTestEnv() {
5737		testPerRPCCredentialsViaDialOptionsAndCallOptions(t, e)
5738	}
5739}
5740
5741func testPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T, e env) {
5742	te := newTest(t, e)
5743	te.perRPCCreds = testPerRPCCredentials{}
5744	// When credentials are provided via both dial options and call options,
5745	// we apply both sets.
5746	te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) {
5747		md, ok := metadata.FromIncomingContext(ctx)
5748		if !ok {
5749			return ctx, fmt.Errorf("couldn't find metadata in context")
5750		}
5751		for k, vwant := range authdata {
5752			vgot, ok := md[k]
5753			if !ok {
5754				return ctx, fmt.Errorf("couldn't find metadata for key %v", k)
5755			}
5756			if len(vgot) != 2 {
5757				return ctx, fmt.Errorf("len of value for key %v was %v, want 2", k, len(vgot))
5758			}
5759			if vgot[0] != vwant || vgot[1] != vwant {
5760				return ctx, fmt.Errorf("value for %v was %v, want [%v, %v]", k, vgot, vwant, vwant)
5761			}
5762		}
5763		return ctx, nil
5764	}
5765	te.startServer(&testServer{security: e.security})
5766	defer te.tearDown()
5767
5768	cc := te.clientConn()
5769	tc := testpb.NewTestServiceClient(cc)
5770	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil {
5771		t.Fatalf("Test failed. Reason: %v", err)
5772	}
5773}
5774
5775func (s) TestWaitForReadyConnection(t *testing.T) {
5776	for _, e := range listTestEnv() {
5777		testWaitForReadyConnection(t, e)
5778	}
5779
5780}
5781
5782func testWaitForReadyConnection(t *testing.T, e env) {
5783	te := newTest(t, e)
5784	te.userAgent = testAppUA
5785	te.startServer(&testServer{security: e.security})
5786	defer te.tearDown()
5787
5788	cc := te.clientConn() // Non-blocking dial.
5789	tc := testpb.NewTestServiceClient(cc)
5790	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
5791	defer cancel()
5792	state := cc.GetState()
5793	// Wait for connection to be Ready.
5794	for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
5795	}
5796	if state != connectivity.Ready {
5797		t.Fatalf("Want connection state to be Ready, got %v", state)
5798	}
5799	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
5800	defer cancel()
5801	// Make a fail-fast RPC.
5802	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
5803		t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err)
5804	}
5805}
5806
// errCodec is a test codec whose Marshal fails unless noError is set, used
// to verify that encoding errors are surfaced without panicking.
type errCodec struct {
	noError bool // when true, Marshal succeeds with an empty payload
}
5810
5811func (c *errCodec) Marshal(v interface{}) ([]byte, error) {
5812	if c.noError {
5813		return []byte{}, nil
5814	}
5815	return nil, fmt.Errorf("3987^12 + 4365^12 = 4472^12")
5816}
5817
// Unmarshal is a no-op; it ignores its input and always reports success.
func (c *errCodec) Unmarshal(data []byte, v interface{}) error {
	return nil
}
5821
// Name returns this codec's name. The literal is part of the codec's
// identity at registration time and must not be changed.
func (c *errCodec) Name() string {
	return "Fermat's near-miss."
}
5825
5826func (s) TestEncodeDoesntPanic(t *testing.T) {
5827	for _, e := range listTestEnv() {
5828		testEncodeDoesntPanic(t, e)
5829	}
5830}
5831
// testEncodeDoesntPanic installs a codec whose Marshal fails on the server,
// checks that the resulting encode error does not panic, then flips the
// codec to succeed and confirms a normal RPC completes.
func testEncodeDoesntPanic(t *testing.T, e env) {
	te := newTest(t, e)
	erc := &errCodec{}
	te.customCodec = erc
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	// Clear the custom codec after the server has started so only the
	// server side keeps the failing codec; the client dials with defaults.
	te.customCodec = nil
	tc := testpb.NewTestServiceClient(te.clientConn())
	// Failure case, should not panic.
	tc.EmptyCall(context.Background(), &testpb.Empty{})
	erc.noError = true
	// Passing case.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
}
5848
5849func (s) TestSvrWriteStatusEarlyWrite(t *testing.T) {
5850	for _, e := range listTestEnv() {
5851		testSvrWriteStatusEarlyWrite(t, e)
5852	}
5853}
5854
// testSvrWriteStatusEarlyWrite verifies the server returns ResourceExhausted
// when it either receives a message above maxServerReceiveMsgSize or would
// send one above maxServerSendMsgSize on a full-duplex stream.
func testSvrWriteStatusEarlyWrite(t *testing.T, e env) {
	te := newTest(t, e)
	const smallSize = 1024
	const largeSize = 2048
	const extraLargeSize = 4096
	// Server limits sit between the small and extra-large payload sizes.
	te.maxServerReceiveMsgSize = newInt(largeSize)
	te.maxServerSendMsgSize = newInt(largeSize)
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
	if err != nil {
		t.Fatal(err)
	}
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())
	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(smallSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            extraLargePayload,
	}
	// Test recv case: server receives a message larger than maxServerReceiveMsgSize.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send() = _, %v, want <nil>", stream, err)
	}
	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
	// Test send case: server sends a message larger than maxServerSendMsgSize.
	// Reuse the request with a small payload but an oversized response size.
	sreq.Payload = smallPayload
	respParam[0].Size = int32(extraLargeSize)

	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
5909
// The following functions with function name ending with TD indicates that they
// should be deleted after old service config API is deprecated and deleted.

// testServiceConfigSetupTD builds a test fixture whose service config is fed
// through the returned channel. The channel is buffered (capacity 1) so the
// config can be written before anything reads it.
func testServiceConfigSetupTD(t *testing.T, e env) (*test, chan grpc.ServiceConfig) {
	te := newTest(t, e)
	// We write before read.
	ch := make(chan grpc.ServiceConfig, 1)
	te.sc = ch
	te.userAgent = testAppUA
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
		"Failed to dial : context canceled; please retry.",
	)
	return te, ch
}
5926
5927func (s) TestServiceConfigGetMethodConfigTD(t *testing.T) {
5928	for _, e := range listTestEnv() {
5929		testGetMethodConfigTD(t, e)
5930	}
5931}
5932
// testGetMethodConfigTD verifies that an exact-method config entry takes
// precedence over the service-wide default, and that a service config update
// delivered through the channel takes effect for subsequent RPCs. No server
// is started, so wait-for-ready RPCs end in DeadlineExceeded and fail-fast
// RPCs end in Unavailable.
func testGetMethodConfigTD(t *testing.T, e env) {
	te, ch := testServiceConfigSetupTD(t, e)
	defer te.tearDown()

	// mc1: wait-for-ready with a 1ms timeout; mc2: fail-fast.
	mc1 := grpc.MethodConfig{
		WaitForReady: newBool(true),
		Timeout:      newDuration(time.Millisecond),
	}
	mc2 := grpc.MethodConfig{WaitForReady: newBool(false)}
	m := make(map[string]grpc.MethodConfig)
	// The exact-method entry should win over the service default
	// ("/grpc.testing.TestService/").
	m["/grpc.testing.TestService/EmptyCall"] = mc1
	m["/grpc.testing.TestService/"] = mc2
	sc := grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	// Update the config so EmptyCall now falls through to the fail-fast
	// service default (mc1 moves to UnaryCall).
	m = make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/UnaryCall"] = mc1
	m["/grpc.testing.TestService/"] = mc2
	sc = grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc
	// Wait for the new service config to propagate.
	// While the old config is still active, EmptyCall keeps timing out.
	for {
		if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) == codes.DeadlineExceeded {
			continue
		}
		break
	}
	// The following RPCs are expected to become fail-fast.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
	}
}
5976
5977func (s) TestServiceConfigWaitForReadyTD(t *testing.T) {
5978	for _, e := range listTestEnv() {
5979		testServiceConfigWaitForReadyTD(t, e)
5980	}
5981}
5982
// testServiceConfigWaitForReadyTD checks the precedence between the per-call
// WaitForReady option and the service config's wait_for_ready setting. No
// server is started, so a wait-for-ready RPC can only end when its 1ms
// timeout expires (DeadlineExceeded).
func testServiceConfigWaitForReadyTD(t *testing.T, e env) {
	te, ch := testServiceConfigSetupTD(t, e)
	defer te.tearDown()

	// Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds.
	mc := grpc.MethodConfig{
		WaitForReady: newBool(false),
		Timeout:      newDuration(time.Millisecond),
	}
	m := make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/EmptyCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc := grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(context.Background(), grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}

	// Generate a service config update.
	// Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds.
	mc.WaitForReady = newBool(true)
	m = make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/EmptyCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc = grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	// Wait for the new service config to take effect.
	// Poll GetMethodConfig until the updated WaitForReady value is visible.
	mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall")
	for {
		if !*mc.WaitForReady {
			time.Sleep(100 * time.Millisecond)
			mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall")
			continue
		}
		break
	}
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(context.Background()); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
}
6039
6040func (s) TestServiceConfigTimeoutTD(t *testing.T) {
6041	for _, e := range listTestEnv() {
6042		testServiceConfigTimeoutTD(t, e)
6043	}
6044}
6045
// testServiceConfigTimeoutTD checks that the effective RPC timeout is the
// minimum of the caller-supplied deadline and the service-config timeout, in
// both directions (caller shorter, then service config shorter).
func testServiceConfigTimeoutTD(t *testing.T, e env) {
	te, ch := testServiceConfigSetupTD(t, e)
	defer te.tearDown()

	// Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	mc := grpc.MethodConfig{
		Timeout: newDuration(time.Hour),
	}
	m := make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/EmptyCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc := grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()
	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()

	// Generate a service config update.
	// Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	mc.Timeout = newDuration(time.Nanosecond)
	m = make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/EmptyCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc = grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	// Wait for the new service config to take effect.
	// Poll GetMethodConfig until the updated Timeout value is visible.
	mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall")
	for {
		if *mc.Timeout != time.Nanosecond {
			time.Sleep(100 * time.Millisecond)
			mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall")
			continue
		}
		break
	}

	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()

	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()
}
6110
6111func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) {
6112	for _, e := range listTestEnv() {
6113		testServiceConfigMaxMsgSizeTD(t, e)
6114	}
6115}
6116
// testServiceConfigMaxMsgSizeTD checks message-size enforcement when the
// limits come from the service config alone (case 1), from tighter client
// API settings that override it (case 2), and from looser client API
// settings where the service config limit still applies (case 3). The same
// req/sreq/respParam objects are mutated between sub-cases, so statement
// order matters throughout.
func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) {
	// Setting up values and objects shared across all test cases.
	const smallSize = 1
	const largeSize = 1024
	const extraLargeSize = 2048

	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
	if err != nil {
		t.Fatal(err)
	}

	mc := grpc.MethodConfig{
		MaxReqSize:  newInt(extraLargeSize),
		MaxRespSize: newInt(extraLargeSize),
	}

	m := make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/UnaryCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc := grpc.ServiceConfig{
		Methods: m,
	}
	// Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
	te1, ch1 := testServiceConfigSetupTD(t, e)
	te1.startServer(&testServer{security: e.security})
	defer te1.tearDown()

	ch1 <- sc
	tc := testpb.NewTestServiceClient(te1.clientConn())

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(extraLargeSize),
		Payload:      smallPayload,
	}
	// Test for unary RPC recv.
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = extraLargePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv.
	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(extraLargeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}
	stream, err := tc.FullDuplexCall(te1.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = extraLargePayload
	stream, err = tc.FullDuplexCall(te1.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}

	// Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
	te2, ch2 := testServiceConfigSetupTD(t, e)
	te2.maxClientReceiveMsgSize = newInt(1024)
	te2.maxClientSendMsgSize = newInt(1024)
	te2.startServer(&testServer{security: e.security})
	defer te2.tearDown()
	ch2 <- sc
	tc = testpb.NewTestServiceClient(te2.clientConn())

	// Test for unary RPC recv.
	req.Payload = smallPayload
	req.ResponseSize = int32(largeSize)

	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv.
	stream, err = tc.FullDuplexCall(te2.ctx)
	respParam[0].Size = int32(largeSize)
	sreq.Payload = smallPayload
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te2.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}

	// Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
	te3, ch3 := testServiceConfigSetupTD(t, e)
	te3.maxClientReceiveMsgSize = newInt(4096)
	te3.maxClientSendMsgSize = newInt(4096)
	te3.startServer(&testServer{security: e.security})
	defer te3.tearDown()
	ch3 <- sc
	tc = testpb.NewTestServiceClient(te3.clientConn())

	// Test for unary RPC recv.
	req.Payload = smallPayload
	req.ResponseSize = int32(largeSize)

	if _, err := tc.UnaryCall(context.Background(), req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
	}

	req.ResponseSize = int32(extraLargeSize)
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(context.Background(), req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
	}

	req.Payload = extraLargePayload
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv.
	stream, err = tc.FullDuplexCall(te3.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam[0].Size = int32(largeSize)
	sreq.Payload = smallPayload

	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
	}

	respParam[0].Size = int32(extraLargeSize)

	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te3.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	sreq.Payload = extraLargePayload
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}
}
6327
6328func (s) TestMethodFromServerStream(t *testing.T) {
6329	const testMethod = "/package.service/method"
6330	e := tcpClearRREnv
6331	te := newTest(t, e)
6332	var method string
6333	var ok bool
6334	te.unknownHandler = func(srv interface{}, stream grpc.ServerStream) error {
6335		method, ok = grpc.MethodFromServerStream(stream)
6336		return nil
6337	}
6338
6339	te.startServer(nil)
6340	defer te.tearDown()
6341	_ = te.clientConn().Invoke(context.Background(), testMethod, nil, nil)
6342	if !ok || method != testMethod {
6343		t.Fatalf("Invoke with method %q, got %q, %v, want %q, true", testMethod, method, ok, testMethod)
6344	}
6345}
6346
// TestInterceptorCanAccessCallOptions verifies that client-side unary and
// stream interceptors see every CallOption applied to an RPC — dial-time
// defaults first, then per-call options, in order.
func (s) TestInterceptorCanAccessCallOptions(t *testing.T) {
	e := tcpClearRREnv
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	// observedOptions collects, per CallOption kind, the values the
	// interceptors saw, in the order they appeared.
	type observedOptions struct {
		headers     []*metadata.MD
		trailers    []*metadata.MD
		peer        []*peer.Peer
		creds       []credentials.PerRPCCredentials
		failFast    []bool
		maxRecvSize []int
		maxSendSize []int
		compressor  []string
		subtype     []string
	}
	var observedOpts observedOptions
	// populateOpts sorts each CallOption into the matching observedOpts field.
	populateOpts := func(opts []grpc.CallOption) {
		for _, o := range opts {
			switch o := o.(type) {
			case grpc.HeaderCallOption:
				observedOpts.headers = append(observedOpts.headers, o.HeaderAddr)
			case grpc.TrailerCallOption:
				observedOpts.trailers = append(observedOpts.trailers, o.TrailerAddr)
			case grpc.PeerCallOption:
				observedOpts.peer = append(observedOpts.peer, o.PeerAddr)
			case grpc.PerRPCCredsCallOption:
				observedOpts.creds = append(observedOpts.creds, o.Creds)
			case grpc.FailFastCallOption:
				observedOpts.failFast = append(observedOpts.failFast, o.FailFast)
			case grpc.MaxRecvMsgSizeCallOption:
				observedOpts.maxRecvSize = append(observedOpts.maxRecvSize, o.MaxRecvMsgSize)
			case grpc.MaxSendMsgSizeCallOption:
				observedOpts.maxSendSize = append(observedOpts.maxSendSize, o.MaxSendMsgSize)
			case grpc.CompressorCallOption:
				observedOpts.compressor = append(observedOpts.compressor, o.CompressorType)
			case grpc.ContentSubtypeCallOption:
				observedOpts.subtype = append(observedOpts.subtype, o.ContentSubtype)
			}
		}
	}

	// Both interceptors record the options and return without forwarding the
	// call, so the RPCs below never actually reach the server.
	te.unaryClientInt = func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		populateOpts(opts)
		return nil
	}
	te.streamClientInt = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		populateOpts(opts)
		return nil, nil
	}

	// Dial-time defaults: per the expectations below, they are observed
	// before any per-call options.
	defaults := []grpc.CallOption{
		grpc.WaitForReady(true),
		grpc.MaxCallRecvMsgSize(1010),
	}
	tc := testpb.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...)))

	var headers metadata.MD
	var trailers metadata.MD
	var pr peer.Peer
	tc.UnaryCall(context.Background(), &testpb.SimpleRequest{},
		grpc.MaxCallRecvMsgSize(100),
		grpc.MaxCallSendMsgSize(200),
		grpc.PerRPCCredentials(testPerRPCCredentials{}),
		grpc.Header(&headers),
		grpc.Trailer(&trailers),
		grpc.Peer(&pr))
	expected := observedOptions{
		// WaitForReady(true) surfaces as FailFast=false.
		failFast: []bool{false},
		// Default (1010) precedes the per-call override (100).
		maxRecvSize: []int{1010, 100},
		maxSendSize: []int{200},
		creds:       []credentials.PerRPCCredentials{testPerRPCCredentials{}},
		headers:     []*metadata.MD{&headers},
		trailers:    []*metadata.MD{&trailers},
		peer:        []*peer.Peer{&pr},
	}

	if !reflect.DeepEqual(expected, observedOpts) {
		t.Errorf("unary call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
	}

	observedOpts = observedOptions{} // reset

	tc.StreamingInputCall(context.Background(),
		grpc.WaitForReady(false),
		grpc.MaxCallSendMsgSize(2020),
		grpc.UseCompressor("comp-type"),
		grpc.CallContentSubtype("json"))
	expected = observedOptions{
		// Default WaitForReady(true) first, then per-call WaitForReady(false).
		failFast:    []bool{false, true},
		maxRecvSize: []int{1010},
		maxSendSize: []int{2020},
		compressor:  []string{"comp-type"},
		subtype:     []string{"json"},
	}

	if !reflect.DeepEqual(expected, observedOpts) {
		t.Errorf("streaming call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
	}
}
6448
// TestCompressorRegister runs the registered-compressor scenario in every
// supported test environment.
func (s) TestCompressorRegister(t *testing.T) {
	for _, testEnv := range listTestEnv() {
		testCompressorRegister(t, testEnv)
	}
}
6454
// testCompressorRegister runs a unary and a full-duplex streaming RPC with
// te.clientUseCompression set while the legacy per-message compression knobs
// are explicitly disabled. Presumably this routes compression through a
// registered encoding.Compressor (e.g. gzip) — confirm against the test
// harness.
func testCompressorRegister(t *testing.T, e env) {
	te := newTest(t, e)
	te.clientCompression = false
	te.serverCompression = false
	te.clientUseCompression = true

	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	// Unary call
	const argSize = 271828
	const respSize = 314159
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: respSize,
		Payload:      payload,
	}
	// Issue the unary RPC with some unrelated outgoing metadata attached.
	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
	if _, err := tc.UnaryCall(ctx, req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Streaming RPC
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: 31415,
		},
	}
	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
	if err != nil {
		t.Fatal(err)
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            payload,
	}
	// One send/receive round trip on the stream.
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
	}
}
6509
// TestServeExitsWhenListenerClosed verifies that Server.Serve returns once
// its listener is closed, even though the server itself keeps running.
func (s) TestServeExitsWhenListenerClosed(t *testing.T) {
	ss := &stubServer{
		emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
			return &testpb.Empty{}, nil
		},
	}

	s := grpc.NewServer()
	defer s.Stop()
	testpb.RegisterTestServiceServer(s, ss)

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to create listener: %v", err)
	}

	// done is closed when Serve returns.
	done := make(chan struct{})
	go func() {
		s.Serve(lis)
		close(done)
	}()

	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		t.Fatalf("Failed to dial server: %v", err)
	}
	defer cc.Close()
	c := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Prove the connection works before closing the listener.
	if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("Failed to send test RPC to server: %v", err)
	}

	if err := lis.Close(); err != nil {
		t.Fatalf("Failed to close listener: %v", err)
	}
	const timeout = 5 * time.Second
	timer := time.NewTimer(timeout)
	// Stop the timer on the success path so it doesn't linger until it fires.
	defer timer.Stop()
	select {
	case <-done:
	case <-timer.C:
		t.Fatalf("Serve did not return after %v", timeout)
	}
}
6556
// Service handler returns status with invalid utf8 message. The client must
// observe the invalid bytes replaced by the Unicode replacement character
// rather than a failed RPC.
func (s) TestStatusInvalidUTF8Message(t *testing.T) {

	var (
		origMsg = string([]byte{0xff, 0xfe, 0xfd}) // not valid UTF-8
		wantMsg = "���"
	)

	ss := &stubServer{
		emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			// Use status.Error, not status.Errorf: origMsg is a plain message,
			// not a format string. Passing it as a non-constant format trips
			// `go vet` and would corrupt the message if it ever contained '%'.
			return nil, status.Error(codes.Internal, origMsg)
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	if _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMsg {
		t.Fatalf("ss.client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, status.Convert(err).Message(), wantMsg)
	}
}
6582
// Service handler returns status with details and invalid utf8 message. Proto
// will fail to marshal the status because of the invalid utf8 message. Details
// will be dropped when sending.
func (s) TestStatusInvalidUTF8Details(t *testing.T) {

	var (
		origMsg = string([]byte{0xff, 0xfe, 0xfd})
		wantMsg = "���"
	)

	ss := &stubServer{
		emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			// Attach a detail message to a status whose message is invalid
			// UTF-8; per the comment above, marshaling fails and the status
			// is sent without details.
			st := status.New(codes.Internal, origMsg)
			st, err := st.WithDetails(&testpb.Empty{})
			if err != nil {
				return nil, err
			}
			return nil, st.Err()
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	_, err := ss.client.EmptyCall(ctx, &testpb.Empty{})
	st := status.Convert(err)
	// The invalid bytes should arrive replaced with U+FFFD.
	if st.Message() != wantMsg {
		t.Fatalf("ss.client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, st.Message(), wantMsg)
	}
	if len(st.Details()) != 0 {
		// Details should be dropped on the server side.
		t.Fatalf("RPC status contain details: %v, want no details", st.Details())
	}
}
6621
// TestClientDoesntDeadlockWhileWritingErrornousLargeMessages runs its
// scenario in every environment except the HTTP handler transport.
func (s) TestClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T) {
	for _, testEnv := range listTestEnv() {
		if testEnv.httpHandler {
			continue // skipped for the HTTP handler transport
		}
		testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t, testEnv)
	}
}
6630
// testClientDoesntDeadlockWhileWritingErrornousLargeMessages hammers a server
// with a 1KB receive limit using 1MB requests from 10 concurrent goroutines,
// 100 calls each. Every call must fail with ResourceExhausted, and none may
// deadlock while writing the oversized payload.
func testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	smallSize := 1024
	te.maxServerReceiveMsgSize = &smallSize
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1048576)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		Payload:      payload,
	}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 100; j++ {
				ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
				_, err := tc.UnaryCall(ctx, req)
				// Cancel immediately instead of deferring: a defer here would
				// accumulate 100 live contexts per goroutine until it exits.
				cancel()
				if status.Code(err) != codes.ResourceExhausted {
					t.Errorf("TestService/UnaryCall(_,_) = _. %v, want code: %s", err, codes.ResourceExhausted)
					return
				}
			}
		}()
	}
	wg.Wait()
}
6664
// clientAlwaysFailCredErrorMsg is the message every client handshake attempt
// by clientAlwaysFailCred reports; tests match on this substring.
const clientAlwaysFailCredErrorMsg = "clientAlwaysFailCred always fails"

var errClientAlwaysFailCred = errors.New(clientAlwaysFailCredErrorMsg)

// clientAlwaysFailCred is a TransportCredentials implementation whose client
// handshake always fails with errClientAlwaysFailCred; the server handshake
// is a pass-through.
type clientAlwaysFailCred struct{}

// ClientHandshake unconditionally fails.
func (c clientAlwaysFailCred) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return nil, nil, errClientAlwaysFailCred
}

// ServerHandshake accepts the raw connection unchanged, with no auth info.
func (c clientAlwaysFailCred) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return rawConn, nil, nil
}

// Info returns an empty ProtocolInfo.
func (c clientAlwaysFailCred) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{}
}

// Clone returns nil; cloning is not needed by these tests.
func (c clientAlwaysFailCred) Clone() credentials.TransportCredentials {
	return nil
}

// OverrideServerName is a no-op.
func (c clientAlwaysFailCred) OverrideServerName(s string) error {
	return nil
}
6686
// TestFailFastRPCErrorOnBadCertificates verifies that once the client
// transport's handshake fails (clientAlwaysFailCred always errors), fail-fast
// RPCs surface an error containing the handshake failure message.
func (s) TestFailFastRPCErrorOnBadCertificates(t *testing.T) {
	te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: "round_robin"})
	te.startServer(&testServer{security: te.e.security})
	defer te.tearDown()

	opts := []grpc.DialOption{grpc.WithTransportCredentials(clientAlwaysFailCred{})}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Dial is non-blocking here, so it succeeds even though every handshake
	// will fail later.
	cc, err := grpc.DialContext(ctx, te.srvAddr, opts...)
	if err != nil {
		t.Fatalf("Dial(_) = %v, want %v", err, nil)
	}
	defer cc.Close()

	tc := testpb.NewTestServiceClient(cc)
	for i := 0; i < 1000; i++ {
		// This loop runs for at most 1 second. The first several RPCs will fail
		// with Unavailable because the connection hasn't started. When the
		// first connection failed with creds error, the next RPC should also
		// fail with the expected error.
		if _, err = tc.EmptyCall(context.Background(), &testpb.Empty{}); strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) {
			return
		}
		time.Sleep(time.Millisecond)
	}
	te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want err.Error() contains %q", err, clientAlwaysFailCredErrorMsg)
}
6714
// TestWaitForReadyRPCErrorOnBadCertificates verifies that a wait-for-ready
// RPC fails with the credentials handshake error (within its 1s deadline)
// rather than masking the cause.
func (s) TestWaitForReadyRPCErrorOnBadCertificates(t *testing.T) {
	te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: "round_robin"})
	te.startServer(&testServer{security: te.e.security})
	defer te.tearDown()

	opts := []grpc.DialOption{grpc.WithTransportCredentials(clientAlwaysFailCred{})}
	dctx, dcancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer dcancel()
	cc, err := grpc.DialContext(dctx, te.srvAddr, opts...)
	if err != nil {
		t.Fatalf("Dial(_) = %v, want %v", err, nil)
	}
	defer cc.Close()

	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	// The wait-for-ready RPC blocks for a usable connection; the handshake
	// failure should surface as the RPC error before the 1s deadline.
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) {
		return
	}
	te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want err.Error() contains %q", err, clientAlwaysFailCredErrorMsg)
}
6737
// TestRPCTimeout exercises RPC deadline handling in every test environment.
func (s) TestRPCTimeout(t *testing.T) {
	for _, testEnv := range listTestEnv() {
		testRPCTimeout(t, testEnv)
	}
}
6743
// testRPCTimeout verifies that unary RPCs whose deadlines (including zero and
// negative ones) elapse before the server's 500ms handler sleep finishes fail
// with DeadlineExceeded.
func testRPCTimeout(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security, unaryCallSleepTime: 500 * time.Millisecond})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)

	const argSize = 2718
	const respSize = 314

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: respSize,
		Payload:      payload,
	}
	// Start at -1 to also cover non-positive timeouts.
	for i := -1; i <= 10; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		// Note: the message previously read "UnaryCallv"; typo fixed.
		if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.DeadlineExceeded {
			t.Fatalf("TestService/UnaryCall(_, _) = _, %v; want <nil>, error code: %s", err, codes.DeadlineExceeded)
		}
		cancel()
	}
}
6773
// TestDisabledIOBuffers exercises a full-duplex stream with both client and
// server read/write buffering disabled (buffer sizes set to 0), echoing a
// ~60KB payload back and forth ten times.
func (s) TestDisabledIOBuffers(t *testing.T) {

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(60000))
	if err != nil {
		t.Fatalf("Failed to create payload: %v", err)
	}
	req := &testpb.StreamingOutputCallRequest{
		Payload: payload,
	}
	resp := &testpb.StreamingOutputCallResponse{
		Payload: payload,
	}

	ss := &stubServer{
		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
			// Echo resp for every received message until the client closes.
			for {
				in, err := stream.Recv()
				if err == io.EOF {
					return nil
				}
				if err != nil {
					t.Errorf("stream.Recv() = _, %v, want _, <nil>", err)
					return err
				}
				if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
					t.Errorf("Received message(len: %v) on server not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
					// err is nil at this point; return a real error so the
					// RPC fails instead of silently succeeding on a
					// corrupted payload.
					return errors.New("server received unexpected payload")
				}
				if err := stream.Send(resp); err != nil {
					t.Errorf("stream.Send(_)= %v, want <nil>", err)
					return err
				}

			}
		},
	}

	s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0))
	testpb.RegisterTestServiceServer(s, ss)

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to create listener: %v", err)
	}

	done := make(chan struct{})
	go func() {
		s.Serve(lis)
		close(done)
	}()
	defer s.Stop()
	dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer dcancel()
	cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0))
	if err != nil {
		t.Fatalf("Failed to dial server")
	}
	defer cc.Close()
	c := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("Failed to send test RPC to server")
	}
	// Ten request/response round trips, each verifying the echoed payload.
	for i := 0; i < 10; i++ {
		if err := stream.Send(req); err != nil {
			t.Fatalf("stream.Send(_) = %v, want <nil>", err)
		}
		in, err := stream.Recv()
		if err != nil {
			t.Fatalf("stream.Recv() = _, %v, want _, <nil>", err)
		}
		if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
			t.Fatalf("Received message(len: %v) on client not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
		}
	}
	stream.CloseSend()
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("stream.Recv() = _, %v, want _, io.EOF", err)
	}
}
6856
// TestServerMaxHeaderListSizeClientUserViolation runs its scenario in every
// environment except the HTTP handler transport.
func (s) TestServerMaxHeaderListSizeClientUserViolation(t *testing.T) {
	for _, testEnv := range listTestEnv() {
		if testEnv.httpHandler {
			continue // skipped for the HTTP handler transport
		}
		testServerMaxHeaderListSizeClientUserViolation(t, testEnv)
	}
}
6865
// testServerMaxHeaderListSizeClientUserViolation caps the server's
// max-header-list-size at 216 bytes and sends an oversized client header,
// expecting the RPC to fail with codes.Internal.
func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxServerHeaderListSize = new(uint32)
	*te.maxServerHeaderListSize = 216
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// AppendToOutgoingContext returns a derived context; it was previously
	// discarded, so the oversized header was never actually sent.
	ctx = metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216)))
	var err error
	if err = verifyResultWithDelay(func() (bool, error) {
		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
			return true, nil
		}
		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
	}); err != nil {
		t.Fatal(err)
	}
}
6888
// TestClientMaxHeaderListSizeServerUserViolation runs its scenario in every
// environment except the HTTP handler transport.
func (s) TestClientMaxHeaderListSizeServerUserViolation(t *testing.T) {
	for _, testEnv := range listTestEnv() {
		if testEnv.httpHandler {
			continue // skipped for the HTTP handler transport
		}
		testClientMaxHeaderListSizeServerUserViolation(t, testEnv)
	}
}
6897
// testClientMaxHeaderListSizeServerUserViolation caps the client's
// max-header-list-size at 1 byte so that any header the server sends back
// violates the limit; the RPC is expected to fail with codes.Internal.
func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxClientHeaderListSize = new(uint32)
	*te.maxClientHeaderListSize = 1 // any header server sends will violate
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	var err error
	// verifyResultWithDelay presumably retries the closure until it reports
	// success or times out — confirm against its definition.
	if err = verifyResultWithDelay(func() (bool, error) {
		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
			return true, nil
		}
		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
	}); err != nil {
		t.Fatal(err)
	}
}
6919
// TestServerMaxHeaderListSizeClientIntentionalViolation runs its scenario in
// every environment except the HTTP handler transport and TLS (the test
// writes raw frames on the connection).
func (s) TestServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T) {
	for _, testEnv := range listTestEnv() {
		if testEnv.httpHandler || testEnv.security == "tls" {
			continue
		}
		testServerMaxHeaderListSizeClientIntentionalViolation(t, testEnv)
	}
}
6928
// testServerMaxHeaderListSizeClientIntentionalViolation opens a stream, then
// bypasses the gRPC client and writes an oversized HEADERS frame for that
// stream directly on the raw connection; the client should observe
// codes.Internal from Recv.
func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxServerHeaderListSize = new(uint32)
	*te.maxServerHeaderListSize = 512
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc, dw := te.clientConnWithConnControl()
	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
	}
	rcw := dw.getRawConnWrapper()
	// Build a 512-byte header value ("aaa…").
	val := make([]string, 512)
	for i := range val {
		val[i] = "a"
	}
	// allow for client to send the initial header
	time.Sleep(100 * time.Millisecond)
	rcw.writeHeaders(http2.HeadersFrameParam{
		StreamID:      tc.getCurrentStreamID(),
		BlockFragment: rcw.encodeHeader("oversize", strings.Join(val, "")),
		EndStream:     false,
		EndHeaders:    true,
	})
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
	}
}
6961
// TestClientMaxHeaderListSizeServerIntentionalViolation runs its scenario in
// every environment except the HTTP handler transport and TLS (the test
// writes raw frames on the connection).
func (s) TestClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T) {
	for _, testEnv := range listTestEnv() {
		if testEnv.httpHandler || testEnv.security == "tls" {
			continue
		}
		testClientMaxHeaderListSizeServerIntentionalViolation(t, testEnv)
	}
}
6970
// testClientMaxHeaderListSizeServerIntentionalViolation caps the client's
// max-header-list-size at 200 bytes, then writes an oversized HEADERS frame
// for the open stream directly on the server's raw connection; the client
// must fail the stream with codes.Internal.
func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxClientHeaderListSize = new(uint32)
	*te.maxClientHeaderListSize = 200
	lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: true})
	defer te.tearDown()
	cc, _ := te.clientConnWithConnControl()
	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Poll for up to ~1s until the server transport is established so we can
	// grab its raw connection.
	var rcw *rawConnWrapper
	for i := 0; i < 100; i++ {
		if rcw = lw.getLastConn(); rcw != nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if rcw == nil {
		t.Fatalf("failed to create server transport after 1s")
	}

	// Build a 200-byte header value ("aaa…").
	val := make([]string, 200)
	for i := range val {
		val[i] = "a"
	}
	// allow for client to send the initial header.
	time.Sleep(100 * time.Millisecond)
	rcw.writeHeaders(http2.HeadersFrameParam{
		StreamID:      tc.getCurrentStreamID(),
		BlockFragment: rcw.encodeRawHeader("oversize", strings.Join(val, "")),
		EndStream:     false,
		EndHeaders:    true,
	})
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
	}
}
7015
// TestNetPipeConn exercises dialing over an in-memory pipe listener.
// This test will block indefinitely if grpc writes both client and server
// prefaces without either reading from the Conn.
func (s) TestNetPipeConn(t *testing.T) {
	pl := testutils.NewPipeListener()
	s := grpc.NewServer()
	defer s.Stop()
	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		return &testpb.SimpleResponse{}, nil
	}}
	testpb.RegisterTestServiceServer(s, ts)
	go s.Serve(pl)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Dial through the pipe listener's dialer (note the empty target).
	cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithDialer(pl.Dialer()))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer cc.Close()
	client := testpb.NewTestServiceClient(cc)
	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
		t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
	}
}
7039
// TestLargeTimeout runs the large-deadline scenario in every test environment.
func (s) TestLargeTimeout(t *testing.T) {
	for _, testEnv := range listTestEnv() {
		testLargeTimeout(t, testEnv)
	}
}
7045
// testLargeTimeout issues RPCs with very large deadlines — including
// math.MaxInt64, whose hour-conversion overflows on the way back to int64 —
// and checks the server still observes an approximately correct deadline.
func testLargeTimeout(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise("Server.processUnaryRPC failed to write status")

	ts := &funcServer{}
	te.startServer(ts)
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	timeouts := []time.Duration{
		time.Duration(math.MaxInt64), // will be (correctly) converted to
		// 2562048 hours, which overflows upon converting back to an int64
		2562047 * time.Hour, // the largest timeout that does not overflow
	}

	for i, maxTimeout := range timeouts {
		// The handler checks that its context deadline is within 5 seconds
		// of the client-requested timeout.
		ts.unaryCall = func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
			deadline, ok := ctx.Deadline()
			timeout := time.Until(deadline)
			minTimeout := maxTimeout - 5*time.Second
			if !ok || timeout < minTimeout || timeout > maxTimeout {
				t.Errorf("ctx.Deadline() = (now+%v), %v; want [%v, %v], true", timeout, ok, minTimeout, maxTimeout)
				return nil, status.Error(codes.OutOfRange, "deadline error")
			}
			return &testpb.SimpleResponse{}, nil
		}

		ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
		defer cancel() // deferred in a loop, but there are only two iterations

		if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
			t.Errorf("case %v: UnaryCall(_) = _, %v; want _, nil", i, err)
		}
	}
}
7081
// Proxies typically send GO_AWAY followed by connection closure a minute or so later. This
// test ensures that the connection is re-created after GO_AWAY and not affected by the
// subsequent (old) connection closure.
func (s) TestGoAwayThenClose(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	// Server 1: the connection that will receive GO_AWAY and then be closed.
	lis1, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	s1 := grpc.NewServer()
	defer s1.Stop()
	ts := &funcServer{
		unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
			return &testpb.SimpleResponse{}, nil
		},
		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
			// Wait forever.
			_, err := stream.Recv()
			if err == nil {
				t.Error("expected to never receive any message")
			}
			return err
		},
	}
	testpb.RegisterTestServiceServer(s1, ts)
	go s1.Serve(lis1)

	// Server 2: the connection the client should move to; its listener fires
	// conn2Established when a connection is accepted.
	conn2Established := grpcsync.NewEvent()
	lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established)
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	s2 := grpc.NewServer()
	defer s2.Stop()
	testpb.RegisterTestServiceServer(s2, ts)
	go s2.Serve(lis2)

	// The manual resolver initially reports only server 1's address.
	r, rcleanup := manual.GenerateAndRegisterManualResolver()
	defer rcleanup()
	r.InitialState(resolver.State{Addresses: []resolver.Address{
		{Addr: lis1.Addr().String()},
	}})
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure())
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer cc.Close()

	client := testpb.NewTestServiceClient(cc)

	// Should go on connection 1. We use a long-lived RPC because it will cause GracefulStop to send GO_AWAY, but the
	// connection doesn't get closed until the server stops and the client receives.
	stream, err := client.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err)
	}

	// Make server 2's address available before sending GO_AWAY.
	r.UpdateState(resolver.State{Addresses: []resolver.Address{
		{Addr: lis1.Addr().String()},
		{Addr: lis2.Addr().String()},
	}})

	// Send GO_AWAY to connection 1.
	go s1.GracefulStop()

	// Wait for connection 2 to be established.
	<-conn2Established.Done()

	// Close connection 1.
	s1.Stop()

	// Wait for client to close.
	_, err = stream.Recv()
	if err == nil {
		t.Fatal("expected the stream to die, but got a successful Recv")
	}

	// Do a bunch of RPCs, make sure it stays stable. These should go to connection 2.
	for i := 0; i < 10; i++ {
		if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
			t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
		}
	}
}
7168
// listenWithNotifyingListener listens on the given network/address and wraps
// the listener so that event fires whenever Accept is invoked.
func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) {
	inner, err := net.Listen(network, address)
	if err != nil {
		return nil, err
	}
	return notifyingListener{connEstablished: event, Listener: inner}, nil
}
7176
// notifyingListener wraps a net.Listener and fires connEstablished each time
// an Accept call completes.
type notifyingListener struct {
	connEstablished *grpcsync.Event
	net.Listener
}

// Accept delegates to the embedded listener and fires connEstablished once
// the accept returns (via defer).
func (lis notifyingListener) Accept() (net.Conn, error) {
	defer lis.connEstablished.Fire()
	return lis.Listener.Accept()
}
7186
// TestRPCWaitsForResolver verifies that RPCs block until the resolver
// produces addresses and a service config, and that the config (here a
// zero-byte maxRequestMessageBytes limit) is applied to the waiting RPC.
func (s) TestRPCWaitsForResolver(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	te.startServer(&testServer{security: tcpClearRREnv.security})
	defer te.tearDown()
	r, rcleanup := manual.GenerateAndRegisterManualResolver()
	defer rcleanup()

	te.resolverScheme = r.Scheme()
	te.nonBlockingDial = true
	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)

	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	// With no resolved addresses yet, this will timeout.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	go func() {
		time.Sleep(time.Second)
		r.UpdateState(resolver.State{
			Addresses: []resolver.Address{{Addr: te.srvAddr}},
			ServiceConfig: parseCfg(r, `{
		    "methodConfig": [
		        {
		            "name": [
		                {
		                    "service": "grpc.testing.TestService",
		                    "method": "UnaryCall"
		                }
		            ],
                    "maxRequestMessageBytes": 0
		        }
		    ]
		}`)})
	}()
	// We wait a second before providing a service config and resolving
	// addresses.  So this will wait for that and then honor the
	// maxRequestMessageBytes it contains. (The failure message previously
	// said "want _, nil", contradicting the assertion.)
	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{ResponseType: testpb.PayloadType_UNCOMPRESSABLE}); status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}
	if got := ctx.Err(); got != nil {
		t.Fatalf("ctx.Err() = %v; want nil (deadline should be set short by service config)", got)
	}
	// An empty request stays within the zero-byte limit and should succeed.
	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
	}
}
7239
// TestHTTPHeaderFrameErrorHandlingHTTPMode exercises the HTTP fallback
// paths: when the response is not recognized as gRPC (wrong or missing
// content-type, or a malformed HTTP status), the resulting code comes from
// the HTTP status conversion table, not the fabricated gRPC headers.
func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) {
	// Non-gRPC content-type fallback path.
	for httpCode, wantCode := range transport.HTTPStatusConvTab {
		doHTTPHeaderTest(t, wantCode, []string{
			":status", fmt.Sprintf("%d", httpCode),
			"content-type", "text/html", // non-gRPC content type to switch to HTTP mode.
			"grpc-status", "1", // Make up a gRPC status error
			"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
		})
	}

	// Missing content-type fallback path.
	for httpCode, wantCode := range transport.HTTPStatusConvTab {
		doHTTPHeaderTest(t, wantCode, []string{
			":status", fmt.Sprintf("%d", httpCode),
			// Omitting content type to switch to HTTP mode.
			"grpc-status", "1", // Make up a gRPC status error
			"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
		})
	}

	// Malformed HTTP status when fallback.
	doHTTPHeaderTest(t, codes.Internal, []string{
		":status", "abc",
		// Omitting content type to switch to HTTP mode.
		"grpc-status", "1", // Make up a gRPC status error
		"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
	})
}
7269
7270// Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame).
7271func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) {
7272	for _, test := range []struct {
7273		header  []string
7274		errCode codes.Code
7275	}{
7276		{
7277			// missing gRPC status.
7278			header: []string{
7279				":status", "403",
7280				"content-type", "application/grpc",
7281			},
7282			errCode: codes.Unknown,
7283		},
7284		{
7285			// malformed grpc-status.
7286			header: []string{
7287				":status", "502",
7288				"content-type", "application/grpc",
7289				"grpc-status", "abc",
7290			},
7291			errCode: codes.Internal,
7292		},
7293		{
7294			// Malformed grpc-tags-bin field.
7295			header: []string{
7296				":status", "502",
7297				"content-type", "application/grpc",
7298				"grpc-status", "0",
7299				"grpc-tags-bin", "???",
7300			},
7301			errCode: codes.Internal,
7302		},
7303		{
7304			// gRPC status error.
7305			header: []string{
7306				":status", "502",
7307				"content-type", "application/grpc",
7308				"grpc-status", "3",
7309			},
7310			errCode: codes.InvalidArgument,
7311		},
7312	} {
7313		doHTTPHeaderTest(t, test.errCode, test.header)
7314	}
7315}
7316
7317// Testing non-Trailers-only Trailers (delievered in second HEADERS frame)
7318func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) {
7319	for _, test := range []struct {
7320		responseHeader []string
7321		trailer        []string
7322		errCode        codes.Code
7323	}{
7324		{
7325			responseHeader: []string{
7326				":status", "200",
7327				"content-type", "application/grpc",
7328			},
7329			trailer: []string{
7330				// trailer missing grpc-status
7331				":status", "502",
7332			},
7333			errCode: codes.Unknown,
7334		},
7335		{
7336			responseHeader: []string{
7337				":status", "404",
7338				"content-type", "application/grpc",
7339			},
7340			trailer: []string{
7341				// malformed grpc-status-details-bin field
7342				"grpc-status", "0",
7343				"grpc-status-details-bin", "????",
7344			},
7345			errCode: codes.Internal,
7346		},
7347	} {
7348		doHTTPHeaderTest(t, test.errCode, test.responseHeader, test.trailer)
7349	}
7350}
7351
7352func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) {
7353	header := []string{
7354		":status", "200",
7355		"content-type", "application/grpc",
7356	}
7357	doHTTPHeaderTest(t, codes.Internal, header, header, header)
7358}
7359
// httpServer is a minimal hand-rolled HTTP/2 server used to send arbitrary
// (possibly malformed) HEADERS frames to a gRPC client under test.
type httpServer struct {
	// headerFields holds one flattened key/value list per HEADERS frame to
	// send; the final list is sent with END_STREAM set (see start).
	headerFields [][]string
}
7363
7364func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error {
7365	if len(headerFields)%2 == 1 {
7366		panic("odd number of kv args")
7367	}
7368
7369	var buf bytes.Buffer
7370	henc := hpack.NewEncoder(&buf)
7371	for len(headerFields) > 0 {
7372		k, v := headerFields[0], headerFields[1]
7373		headerFields = headerFields[2:]
7374		henc.WriteField(hpack.HeaderField{Name: k, Value: v})
7375	}
7376
7377	return framer.WriteHeaders(http2.HeadersFrameParam{
7378		StreamID:      sid,
7379		BlockFragment: buf.Bytes(),
7380		EndStream:     endStream,
7381		EndHeaders:    true,
7382	})
7383}
7384
// start accepts exactly one connection on lis and serves it in a background
// goroutine: it reads the client's HTTP/2 preface, acks settings, waits for
// the client's first HEADERS frame, then writes s.headerFields back (one
// HEADERS frame per entry, END_STREAM on the last). Failures are reported
// via t.Errorf.
//
// NOTE(review): the caller never waits for this goroutine; it may still be
// running when the test returns — confirm leak checking tolerates this.
func (s *httpServer) start(t *testing.T, lis net.Listener) {
	// Launch an HTTP server to send back header.
	go func() {
		conn, err := lis.Accept()
		if err != nil {
			t.Errorf("Error accepting connection: %v", err)
			return
		}
		defer conn.Close()
		// Read preface sent by client.
		if _, err = io.ReadFull(conn, make([]byte, len(http2.ClientPreface))); err != nil {
			t.Errorf("Error at server-side while reading preface from client. Err: %v", err)
			return
		}
		reader := bufio.NewReader(conn)
		writer := bufio.NewWriter(conn)
		framer := http2.NewFramer(writer, reader)
		if err = framer.WriteSettingsAck(); err != nil {
			t.Errorf("Error at server-side while sending Settings ack. Err: %v", err)
			return
		}
		writer.Flush() // necessary since client is expecting preface before declaring connection fully setup.

		var sid uint32
		// Read frames until a header is received.
		for {
			frame, err := framer.ReadFrame()
			if err != nil {
				t.Errorf("Error at server-side while reading frame. Err: %v", err)
				return
			}
			if hframe, ok := frame.(*http2.HeadersFrame); ok {
				sid = hframe.Header().StreamID
				break
			}
		}
		// Replay the canned header lists on the client's stream; only the
		// final HEADERS frame carries END_STREAM.
		for i, headers := range s.headerFields {
			if err = s.writeHeader(framer, sid, headers, i == len(s.headerFields)-1); err != nil {
				t.Errorf("Error at server-side while writing headers. Err: %v", err)
				return
			}
			writer.Flush()
		}
	}()
}
7430
7431func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string) {
7432	t.Helper()
7433	lis, err := net.Listen("tcp", "localhost:0")
7434	if err != nil {
7435		t.Fatalf("Failed to listen. Err: %v", err)
7436	}
7437	defer lis.Close()
7438	server := &httpServer{
7439		headerFields: headerFields,
7440	}
7441	server.start(t, lis)
7442	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
7443	if err != nil {
7444		t.Fatalf("failed to dial due to err: %v", err)
7445	}
7446	defer cc.Close()
7447	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7448	defer cancel()
7449	client := testpb.NewTestServiceClient(cc)
7450	stream, err := client.FullDuplexCall(ctx)
7451	if err != nil {
7452		t.Fatalf("error creating stream due to err: %v", err)
7453	}
7454	if _, err := stream.Recv(); err == nil || status.Code(err) != errCode {
7455		t.Fatalf("stream.Recv() = _, %v, want error code: %v", err, errCode)
7456	}
7457}
7458
7459func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult {
7460	g := r.CC.ParseServiceConfig(s)
7461	if g.Err != nil {
7462		panic(fmt.Sprintf("Error parsing config %q: %v", s, g.Err))
7463	}
7464	return g
7465}
7466
7467type methodTestCreds struct{}
7468
7469func (m methodTestCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
7470	ri, _ := credentials.RequestInfoFromContext(ctx)
7471	return nil, status.Errorf(codes.Unknown, ri.Method)
7472}
7473
7474func (m methodTestCreds) RequireTransportSecurity() bool {
7475	return false
7476}
7477
7478func (s) TestGRPCMethodAccessibleToCredsViaContextRequestInfo(t *testing.T) {
7479	const wantMethod = "/grpc.testing.TestService/EmptyCall"
7480	ss := &stubServer{}
7481	if err := ss.Start(nil, grpc.WithPerRPCCredentials(methodTestCreds{})); err != nil {
7482		t.Fatalf("Error starting endpoint server: %v", err)
7483	}
7484	defer ss.Stop()
7485
7486	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
7487	defer cancel()
7488
7489	if _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMethod {
7490		t.Fatalf("ss.client.EmptyCall(_, _) = _, %v; want _, _.Message()=%q", err, wantMethod)
7491	}
7492}
7493
7494func (s) TestClientCancellationPropagatesUnary(t *testing.T) {
7495	wg := &sync.WaitGroup{}
7496	called, done := make(chan struct{}), make(chan struct{})
7497	ss := &stubServer{
7498		emptyCall: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
7499			close(called)
7500			<-ctx.Done()
7501			err := ctx.Err()
7502			if err != context.Canceled {
7503				t.Errorf("ctx.Err() = %v; want context.Canceled", err)
7504			}
7505			close(done)
7506			return nil, err
7507		},
7508	}
7509	if err := ss.Start(nil); err != nil {
7510		t.Fatalf("Error starting endpoint server: %v", err)
7511	}
7512	defer ss.Stop()
7513
7514	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7515
7516	wg.Add(1)
7517	go func() {
7518		if _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Canceled {
7519			t.Errorf("ss.client.EmptyCall() = _, %v; want _, Code()=codes.Canceled", err)
7520		}
7521		wg.Done()
7522	}()
7523
7524	select {
7525	case <-called:
7526	case <-time.After(5 * time.Second):
7527		t.Fatalf("failed to perform EmptyCall after 10s")
7528	}
7529	cancel()
7530	select {
7531	case <-done:
7532	case <-time.After(5 * time.Second):
7533		t.Fatalf("server failed to close done chan due to cancellation propagation")
7534	}
7535	wg.Wait()
7536}
7537
7538type badGzipCompressor struct{}
7539
7540func (badGzipCompressor) Do(w io.Writer, p []byte) error {
7541	buf := &bytes.Buffer{}
7542	gzw := gzip.NewWriter(buf)
7543	if _, err := gzw.Write(p); err != nil {
7544		return err
7545	}
7546	err := gzw.Close()
7547	bs := buf.Bytes()
7548	if len(bs) >= 6 {
7549		bs[len(bs)-6] ^= 1 // modify checksum at end by 1 byte
7550	}
7551	w.Write(bs)
7552	return err
7553}
7554
7555func (badGzipCompressor) Type() string {
7556	return "gzip"
7557}
7558
7559func (s) TestGzipBadChecksum(t *testing.T) {
7560	ss := &stubServer{
7561		unaryCall: func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
7562			return &testpb.SimpleResponse{}, nil
7563		},
7564	}
7565	if err := ss.Start(nil, grpc.WithCompressor(badGzipCompressor{})); err != nil {
7566		t.Fatalf("Error starting endpoint server: %v", err)
7567	}
7568	defer ss.Stop()
7569
7570	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7571	defer cancel()
7572
7573	p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024))
7574	if err != nil {
7575		t.Fatalf("Unexpected error from newPayload: %v", err)
7576	}
7577	if _, err := ss.client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: p}); err == nil ||
7578		status.Code(err) != codes.Internal ||
7579		!strings.Contains(status.Convert(err).Message(), gzip.ErrChecksum.Error()) {
7580		t.Errorf("ss.client.UnaryCall(_) = _, %v\n\twant: _, status(codes.Internal, contains %q)", err, gzip.ErrChecksum)
7581	}
7582}
7583