1package agent
2
3import (
4	"bytes"
5	"context"
6	"crypto/tls"
7	"crypto/x509"
8	"encoding/json"
9	"fmt"
10	"io"
11	"io/ioutil"
12	"net/http"
13	"net/http/httptest"
14	"net/url"
15	"os"
16	"strconv"
17	"strings"
18	"testing"
19	"time"
20
21	"github.com/hashicorp/go-uuid"
22	"github.com/hashicorp/serf/serf"
23	"github.com/mitchellh/hashstructure"
24	"github.com/stretchr/testify/assert"
25	"github.com/stretchr/testify/require"
26	"golang.org/x/time/rate"
27
28	"github.com/hashicorp/consul/acl"
29	"github.com/hashicorp/consul/agent/config"
30	"github.com/hashicorp/consul/agent/connect"
31	"github.com/hashicorp/consul/agent/connect/ca"
32	"github.com/hashicorp/consul/agent/consul"
33	"github.com/hashicorp/consul/agent/debug"
34	"github.com/hashicorp/consul/agent/local"
35	"github.com/hashicorp/consul/agent/structs"
36	"github.com/hashicorp/consul/agent/token"
37	tokenStore "github.com/hashicorp/consul/agent/token"
38	"github.com/hashicorp/consul/agent/xds/proxysupport"
39	"github.com/hashicorp/consul/api"
40	"github.com/hashicorp/consul/sdk/testutil"
41	"github.com/hashicorp/consul/sdk/testutil/retry"
42	"github.com/hashicorp/consul/testrpc"
43	"github.com/hashicorp/consul/types"
44)
45
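// makeReadOnlyAgentACL creates a legacy ACL token whose rules grant only
// agent:read, using the /v1/acl/create endpoint with the root token, and
// returns the new token's ID for use in read-only test requests.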
46func makeReadOnlyAgentACL(t *testing.T, srv *HTTPHandlers) string {
47	args := map[string]interface{}{
48		"Name":  "User Token",
49		"Type":  "client",
50		"Rules": `agent "" { policy = "read" }`,
51	}
52	req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
53	resp := httptest.NewRecorder()
54	obj, err := srv.ACLCreate(resp, req)
55	if err != nil {
56		t.Fatalf("err: %v", err)
57	}
58	aclResp := obj.(aclCreateResponse)
59	return aclResp.ID
60}
61
62func TestAgent_Services(t *testing.T) {
63	if testing.Short() {
64		t.Skip("too slow for testing.Short")
65	}
66
67	t.Parallel()
68	a := NewTestAgent(t, "")
69	defer a.Shutdown()
70
71	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
72	srv1 := &structs.NodeService{
73		ID:      "mysql",
74		Service: "mysql",
75		Tags:    []string{"master"},
76		Meta: map[string]string{
77			"foo": "bar",
78		},
79		Port: 5000,
80	}
81	require.NoError(t, a.State.AddService(srv1, ""))
82
83	req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
84	obj, err := a.srv.AgentServices(nil, req)
85	if err != nil {
86		t.Fatalf("Err: %v", err)
87	}
88	val := obj.(map[string]*api.AgentService)
89	assert.Lenf(t, val, 1, "bad services: %v", obj)
90	assert.Equal(t, 5000, val["mysql"].Port)
91	assert.Equal(t, srv1.Meta, val["mysql"].Meta)
92}
93
94func TestAgent_ServicesFiltered(t *testing.T) {
95	if testing.Short() {
96		t.Skip("too slow for testing.Short")
97	}
98
99	t.Parallel()
100	a := NewTestAgent(t, "")
101	defer a.Shutdown()
102
103	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
104	srv1 := &structs.NodeService{
105		ID:      "mysql",
106		Service: "mysql",
107		Tags:    []string{"master"},
108		Meta: map[string]string{
109			"foo": "bar",
110		},
111		Port: 5000,
112	}
113	require.NoError(t, a.State.AddService(srv1, ""))
114
115	// Add another service
116	srv2 := &structs.NodeService{
117		ID:      "redis",
118		Service: "redis",
119		Tags:    []string{"kv"},
120		Meta: map[string]string{
121			"foo": "bar",
122		},
123		Port: 1234,
124	}
125	require.NoError(t, a.State.AddService(srv2, ""))
126
127	req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape("foo in Meta"), nil)
128	obj, err := a.srv.AgentServices(nil, req)
129	require.NoError(t, err)
130	val := obj.(map[string]*api.AgentService)
131	require.Len(t, val, 2)
132
133	req, _ = http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape("kv in Tags"), nil)
134	obj, err = a.srv.AgentServices(nil, req)
135	require.NoError(t, err)
136	val = obj.(map[string]*api.AgentService)
137	require.Len(t, val, 1)
138}
139
140// This tests that the agent services endpoint (/v1/agent/services) returns
141// Connect proxies.
142func TestAgent_Services_ExternalConnectProxy(t *testing.T) {
143	if testing.Short() {
144		t.Skip("too slow for testing.Short")
145	}
146
147	t.Parallel()
148
149	assert := assert.New(t)
150	a := NewTestAgent(t, "")
151	defer a.Shutdown()
152
153	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
154	srv1 := &structs.NodeService{
155		Kind:    structs.ServiceKindConnectProxy,
156		ID:      "db-proxy",
157		Service: "db-proxy",
158		Port:    5000,
159		Proxy: structs.ConnectProxyConfig{
160			DestinationServiceName: "db",
161			Upstreams:              structs.TestUpstreams(t),
162		},
163	}
164	a.State.AddService(srv1, "")
165
166	req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
167	obj, err := a.srv.AgentServices(nil, req)
168	assert.Nil(err)
169	val := obj.(map[string]*api.AgentService)
170	assert.Len(val, 1)
171	actual := val["db-proxy"]
172	assert.Equal(api.ServiceKindConnectProxy, actual.Kind)
173	assert.Equal(srv1.Proxy.ToAPI(), actual.Proxy)
174}
175
// This tests that a sidecar-registered service is returned as expected.
177func TestAgent_Services_Sidecar(t *testing.T) {
178	if testing.Short() {
179		t.Skip("too slow for testing.Short")
180	}
181
182	t.Parallel()
183
184	require := require.New(t)
185	assert := assert.New(t)
186	a := NewTestAgent(t, "")
187	defer a.Shutdown()
188
189	testrpc.WaitForLeader(t, a.RPC, "dc1")
190	srv1 := &structs.NodeService{
191		Kind:    structs.ServiceKindConnectProxy,
192		ID:      "db-sidecar-proxy",
193		Service: "db-sidecar-proxy",
194		Port:    5000,
195		// Set this internal state that we expect sidecar registrations to have.
196		LocallyRegisteredAsSidecar: true,
197		Proxy: structs.ConnectProxyConfig{
198			DestinationServiceName: "db",
199			Upstreams:              structs.TestUpstreams(t),
200			Mode:                   structs.ProxyModeTransparent,
201			TransparentProxy: structs.TransparentProxyConfig{
202				OutboundListenerPort: 10101,
203			},
204		},
205	}
206	a.State.AddService(srv1, "")
207
208	req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
209	obj, err := a.srv.AgentServices(nil, req)
210	require.NoError(err)
211	val := obj.(map[string]*api.AgentService)
212	assert.Len(val, 1)
213	actual := val["db-sidecar-proxy"]
214	require.NotNil(actual)
215	assert.Equal(api.ServiceKindConnectProxy, actual.Kind)
216	assert.Equal(srv1.Proxy.ToAPI(), actual.Proxy)
217
	// Sanity check that LocallyRegisteredAsSidecar is not in the output
	// (assuming JSON encoding). Right now it can't be, because the services
	// endpoint happens to use the api struct, which doesn't include that field,
	// but this serves as a regression test in case we later change the endpoint
	// to return the internal struct and accidentally expose some "internal"
	// state.
224	output, err := json.Marshal(obj)
225	require.NoError(err)
226	assert.NotContains(string(output), "LocallyRegisteredAsSidecar")
227	assert.NotContains(string(output), "locally_registered_as_sidecar")
228}
229
230// This tests that a mesh gateway service is returned as expected.
231func TestAgent_Services_MeshGateway(t *testing.T) {
232	if testing.Short() {
233		t.Skip("too slow for testing.Short")
234	}
235
236	t.Parallel()
237
238	a := NewTestAgent(t, "")
239	defer a.Shutdown()
240
241	testrpc.WaitForLeader(t, a.RPC, "dc1")
242	srv1 := &structs.NodeService{
243		Kind:    structs.ServiceKindMeshGateway,
244		ID:      "mg-dc1-01",
245		Service: "mg-dc1",
246		Port:    8443,
247		Proxy: structs.ConnectProxyConfig{
248			Config: map[string]interface{}{
249				"foo": "bar",
250			},
251		},
252	}
253	a.State.AddService(srv1, "")
254
255	req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
256	obj, err := a.srv.AgentServices(nil, req)
257	require.NoError(t, err)
258	val := obj.(map[string]*api.AgentService)
259	require.Len(t, val, 1)
260	actual := val["mg-dc1-01"]
261	require.NotNil(t, actual)
262	require.Equal(t, api.ServiceKindMeshGateway, actual.Kind)
263	require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy)
264}
265
266// This tests that a terminating gateway service is returned as expected.
267func TestAgent_Services_TerminatingGateway(t *testing.T) {
268	if testing.Short() {
269		t.Skip("too slow for testing.Short")
270	}
271
272	t.Parallel()
273
274	a := NewTestAgent(t, "")
275	defer a.Shutdown()
276
277	testrpc.WaitForLeader(t, a.RPC, "dc1")
278	srv1 := &structs.NodeService{
279		Kind:    structs.ServiceKindTerminatingGateway,
280		ID:      "tg-dc1-01",
281		Service: "tg-dc1",
282		Port:    8443,
283		Proxy: structs.ConnectProxyConfig{
284			Config: map[string]interface{}{
285				"foo": "bar",
286			},
287		},
288	}
289	require.NoError(t, a.State.AddService(srv1, ""))
290
291	req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
292	obj, err := a.srv.AgentServices(nil, req)
293	require.NoError(t, err)
294	val := obj.(map[string]*api.AgentService)
295	require.Len(t, val, 1)
296	actual := val["tg-dc1-01"]
297	require.NotNil(t, actual)
298	require.Equal(t, api.ServiceKindTerminatingGateway, actual.Kind)
299	require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy)
300}
301
302func TestAgent_Services_ACLFilter(t *testing.T) {
303	if testing.Short() {
304		t.Skip("too slow for testing.Short")
305	}
306
307	t.Parallel()
308	a := NewTestAgent(t, TestACLConfig())
309	defer a.Shutdown()
310
311	testrpc.WaitForLeader(t, a.RPC, "dc1")
312	srv1 := &structs.NodeService{
313		ID:      "mysql",
314		Service: "mysql",
315		Tags:    []string{"master"},
316		Port:    5000,
317	}
318	a.State.AddService(srv1, "")
319
320	t.Run("no token", func(t *testing.T) {
321		req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
322		obj, err := a.srv.AgentServices(nil, req)
323		if err != nil {
324			t.Fatalf("Err: %v", err)
325		}
326		val := obj.(map[string]*api.AgentService)
327		if len(val) != 0 {
328			t.Fatalf("bad: %v", obj)
329		}
330	})
331
332	t.Run("root token", func(t *testing.T) {
333		req, _ := http.NewRequest("GET", "/v1/agent/services?token=root", nil)
334		obj, err := a.srv.AgentServices(nil, req)
335		if err != nil {
336			t.Fatalf("Err: %v", err)
337		}
338		val := obj.(map[string]*api.AgentService)
339		if len(val) != 1 {
340			t.Fatalf("bad: %v", obj)
341		}
342	})
343}
344
345func TestAgent_Service(t *testing.T) {
346	if testing.Short() {
347		t.Skip("too slow for testing.Short")
348	}
349
350	t.Parallel()
351
352	a := NewTestAgent(t, TestACLConfig()+`
353	services {
354		name = "web"
355		port = 8181
356		tagged_addresses {
357			wan {
358				address = "198.18.0.1"
359				port = 1818
360			}
361		}
362	}
363	`)
364	defer a.Shutdown()
365	testrpc.WaitForLeader(t, a.RPC, "dc1")
366
367	proxy := structs.TestConnectProxyConfig(t)
368	proxy.DestinationServiceID = "web1"
369
370	// Define a valid local sidecar proxy service
371	sidecarProxy := &structs.ServiceDefinition{
372		Kind: structs.ServiceKindConnectProxy,
373		Name: "web-sidecar-proxy",
374		Check: structs.CheckType{
375			TCP:      "127.0.0.1:8000",
376			Interval: 10 * time.Second,
377		},
378		Port:  8000,
379		Proxy: &proxy,
380		Weights: &structs.Weights{
381			Passing: 1,
382			Warning: 1,
383		},
384		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
385	}
386
387	// Define an updated version. Be careful to copy it.
388	updatedProxy := *sidecarProxy
389	updatedProxy.Port = 9999
390
	// Mangle the proxy config/upstreams into the expected form with defaults
	// and API struct types.
393	expectProxy := proxy
394	expectProxy.Upstreams =
395		structs.TestAddDefaultsToUpstreams(t, sidecarProxy.Proxy.Upstreams, *structs.DefaultEnterpriseMeta())
396
397	expectedResponse := &api.AgentService{
398		Kind:    api.ServiceKindConnectProxy,
399		ID:      "web-sidecar-proxy",
400		Service: "web-sidecar-proxy",
401		Port:    8000,
402		Proxy:   expectProxy.ToAPI(),
403		Weights: api.AgentWeights{
404			Passing: 1,
405			Warning: 1,
406		},
407		Meta:       map[string]string{},
408		Tags:       []string{},
409		Datacenter: "dc1",
410	}
411	fillAgentServiceEnterpriseMeta(expectedResponse, structs.DefaultEnterpriseMeta())
412	hash1, err := hashstructure.Hash(expectedResponse, nil)
413	require.NoError(t, err, "failed to generate hash")
414	expectedResponse.ContentHash = fmt.Sprintf("%x", hash1)
415
416	// Copy and modify
417	updatedResponse := *expectedResponse
418	updatedResponse.Port = 9999
419	updatedResponse.ContentHash = "" // clear field before hashing
420	hash2, err := hashstructure.Hash(updatedResponse, nil)
421	require.NoError(t, err, "failed to generate hash")
422	updatedResponse.ContentHash = fmt.Sprintf("%x", hash2)
423
424	// Simple response for non-proxy service registered in TestAgent config
425	expectWebResponse := &api.AgentService{
426		ID:      "web",
427		Service: "web",
428		Port:    8181,
429		Weights: api.AgentWeights{
430			Passing: 1,
431			Warning: 1,
432		},
433		TaggedAddresses: map[string]api.ServiceAddress{
434			"wan": {
435				Address: "198.18.0.1",
436				Port:    1818,
437			},
438		},
439		Meta:       map[string]string{},
440		Tags:       []string{},
441		Datacenter: "dc1",
442	}
443	fillAgentServiceEnterpriseMeta(expectWebResponse, structs.DefaultEnterpriseMeta())
444	hash3, err := hashstructure.Hash(expectWebResponse, nil)
445	require.NoError(t, err, "failed to generate hash")
446	expectWebResponse.ContentHash = fmt.Sprintf("%x", hash3)
447
448	tests := []struct {
449		name       string
450		tokenRules string
451		url        string
452		updateFunc func()
453		wantWait   time.Duration
454		wantCode   int
455		wantErr    string
456		wantResp   *api.AgentService
457	}{
458		{
459			name:     "simple fetch - proxy",
460			url:      "/v1/agent/service/web-sidecar-proxy",
461			wantCode: 200,
462			wantResp: expectedResponse,
463		},
464		{
465			name:     "simple fetch - non-proxy",
466			url:      "/v1/agent/service/web",
467			wantCode: 200,
468			wantResp: expectWebResponse,
469		},
470		{
471			name:     "blocking fetch timeout, no change",
472			url:      "/v1/agent/service/web-sidecar-proxy?hash=" + expectedResponse.ContentHash + "&wait=100ms",
473			wantWait: 100 * time.Millisecond,
474			wantCode: 200,
475			wantResp: expectedResponse,
476		},
477		{
478			name:     "blocking fetch old hash should return immediately",
479			url:      "/v1/agent/service/web-sidecar-proxy?hash=123456789abcd&wait=10m",
480			wantCode: 200,
481			wantResp: expectedResponse,
482		},
483		{
484			name: "blocking fetch returns change",
485			url:  "/v1/agent/service/web-sidecar-proxy?hash=" + expectedResponse.ContentHash,
486			updateFunc: func() {
487				time.Sleep(100 * time.Millisecond)
488				// Re-register with new proxy config, make sure we copy the struct so we
489				// don't alter it and affect later test cases.
490				req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(updatedProxy))
491				resp := httptest.NewRecorder()
492				_, err := a.srv.AgentRegisterService(resp, req)
493				require.NoError(t, err)
494				require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String())
495			},
496			wantWait: 100 * time.Millisecond,
497			wantCode: 200,
498			wantResp: &updatedResponse,
499		},
500		{
			// This test exercises a case that caused a busy loop to eat CPU for the
			// entire duration of the blocking query. If a service gets re-registered
			// with the same proxy config then the old proxy config chan is closed,
			// causing the blocked watchset.Watch to return false, indicating a
			// change. But since the hash is the same when the blocking fn is
			// re-called, we should just keep blocking on the next iteration. The bug
			// hit was that the WatchSet ws was not being reset in the loop, so when
			// you try to `Watch` it the second time it returns immediately, turning
			// the blocking loop into a busy-poll!
			//
			// This test doesn't catch that though, because a busy poll still has the
			// correct external behavior. I don't want to instrument the loop to
			// assert it's not executing too fast here as I can't think of a clean
			// way, and the issue is fixed now, so this test doesn't actually catch
			// the error, but it does provide an easy way to verify the behavior by
			// hand:
			//  1. Make this test fail, e.g. change wantErr to true.
			//  2. Add a log.Println or similar into the blocking loop/function.
			//  3. See whether it's called just once or many times in a tight loop.
519			name: "blocking fetch interrupted with no change (same hash)",
520			url:  "/v1/agent/service/web-sidecar-proxy?wait=200ms&hash=" + expectedResponse.ContentHash,
521			updateFunc: func() {
522				time.Sleep(100 * time.Millisecond)
523				// Re-register with _same_ proxy config
524				req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy))
525				resp := httptest.NewRecorder()
526				_, err := a.srv.AgentRegisterService(resp, req)
527				require.NoError(t, err)
528				require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String())
529			},
530			wantWait: 200 * time.Millisecond,
531			wantCode: 200,
532			wantResp: expectedResponse,
533		},
534		{
			// When we reload config, the agent pauses Anti-entropy, then clears all
			// services (which causes their watch chans to be closed) before loading
			// state from config/snapshot again. If we do that naively then we don't
			// just get a spurious wakeup on the watch if the service didn't change,
			// we get the wakeup and then race with the reload and probably see no
			// services, returning a 404 error, which is gross. This test exercises
			// that - even though the registrations were made via the API not config,
			// they are persisted and cleared/reloaded from the snapshot, which has
			// the same effect.
			//
			// The fix for this test is to allow the same mechanism that pauses
			// Anti-entropy during reload to also pause the hash blocking loop, so we
			// don't resume until the state is reloaded and we get a chance to see
			// whether it actually changed or not.
548			name: "blocking fetch interrupted by reload shouldn't 404 - no change",
549			url:  "/v1/agent/service/web-sidecar-proxy?wait=200ms&hash=" + expectedResponse.ContentHash,
550			updateFunc: func() {
551				time.Sleep(100 * time.Millisecond)
552				// Reload
553				require.NoError(t, a.reloadConfigInternal(a.Config))
554			},
555			// Should eventually timeout since there is no actual change
556			wantWait: 200 * time.Millisecond,
557			wantCode: 200,
558			wantResp: expectedResponse,
559		},
560		{
561			// As above but test actually altering the service with the config reload.
562			// This simulates the API registration being overridden by a different one
563			// on disk during reload.
564			name: "blocking fetch interrupted by reload shouldn't 404 - changes",
565			url:  "/v1/agent/service/web-sidecar-proxy?wait=10m&hash=" + expectedResponse.ContentHash,
566			updateFunc: func() {
567				time.Sleep(100 * time.Millisecond)
568				// Reload
569				newConfig := *a.Config
570				newConfig.Services = append(newConfig.Services, &updatedProxy)
571				require.NoError(t, a.reloadConfigInternal(&newConfig))
572			},
573			wantWait: 100 * time.Millisecond,
574			wantCode: 200,
575			wantResp: &updatedResponse,
576		},
577		{
578			name:     "err: non-existent proxy",
579			url:      "/v1/agent/service/nope",
580			wantCode: 404,
581		},
582		{
583			name: "err: bad ACL for service",
584			url:  "/v1/agent/service/web-sidecar-proxy",
585			// Limited token doesn't grant read to the service
586			tokenRules: `
587			key "" {
588				policy = "read"
589			}
590			`,
			// Note that because we return ErrPermissionDenied and writing the HTTP
			// status is handled by a higher-level helper, this actually gets a 200
			// in this test case, so just assert that it was an error.
594			wantErr: "Permission denied",
595		},
596		{
597			name: "good ACL for service",
598			url:  "/v1/agent/service/web-sidecar-proxy",
			// Limited token grants read to just this service
600			tokenRules: `
601			service "web-sidecar-proxy" {
602				policy = "read"
603			}
604			`,
605			wantCode: 200,
606			wantResp: expectedResponse,
607		},
608	}
609
610	for _, tt := range tests {
611		t.Run(tt.name, func(t *testing.T) {
612			assert := assert.New(t)
613			require := require.New(t)
614
615			// Register the basic service to ensure it's in a known state to start.
616			{
617				req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy))
618				resp := httptest.NewRecorder()
619				_, err := a.srv.AgentRegisterService(resp, req)
620				require.NoError(err)
621				require.Equal(200, resp.Code, "body: %s", resp.Body.String())
622			}
623
624			req, _ := http.NewRequest("GET", tt.url, nil)
625
626			// Inject the root token for tests that don't care about ACL
627			var token = "root"
628			if tt.tokenRules != "" {
629				// Create new token and use that.
630				token = testCreateToken(t, a, tt.tokenRules)
631			}
632			req.Header.Set("X-Consul-Token", token)
633			resp := httptest.NewRecorder()
634			if tt.updateFunc != nil {
635				go tt.updateFunc()
636			}
637			start := time.Now()
638			obj, err := a.srv.AgentService(resp, req)
639			elapsed := time.Since(start)
640
641			if tt.wantErr != "" {
642				require.Error(err)
643				require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr))
644			} else {
645				require.NoError(err)
646			}
647			if tt.wantCode != 0 {
648				require.Equal(tt.wantCode, resp.Code, "body: %s", resp.Body.String())
649			}
650			if tt.wantWait != 0 {
651				assert.True(elapsed >= tt.wantWait, "should have waited at least %s, "+
652					"took %s", tt.wantWait, elapsed)
653			} else {
654				assert.True(elapsed < 10*time.Millisecond, "should not have waited, "+
655					"took %s", elapsed)
656			}
657
658			if tt.wantResp != nil {
659				assert.Equal(tt.wantResp, obj)
660				assert.Equal(tt.wantResp.ContentHash, resp.Header().Get("X-Consul-ContentHash"))
661			} else {
662				// Janky but Equal doesn't help here because nil !=
663				// *api.AgentService((*api.AgentService)(nil))
664				assert.Nil(obj)
665			}
666		})
667	}
668}
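
// The blocking-fetch cases above rely on hash-based blocking: the endpoint
// returns the current content hash in the X-Consul-ContentHash header, and a
// client that passes that hash back via ?hash= blocks until the hash changes
// or the ?wait= timeout expires. The helper below is a minimal, hypothetical
// client-side sketch of that loop; it is not used by these tests and assumes
// only the query parameters and response header exercised above.
func exampleBlockingServiceFetch(client *http.Client, baseURL, serviceID string, rounds int) error {
	hash := ""
	for i := 0; i < rounds; i++ {
		// Pass the last seen hash back so the server blocks until it changes,
		// or until the wait timeout elapses and the same hash is returned.
		reqURL := fmt.Sprintf("%s/v1/agent/service/%s?hash=%s&wait=5s", baseURL, serviceID, hash)
		resp, err := client.Get(reqURL)
		if err != nil {
			return err
		}
		// Drain and close the body so the underlying connection can be reused.
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("unexpected status %d for %s", resp.StatusCode, reqURL)
		}
		if newHash := resp.Header.Get("X-Consul-ContentHash"); newHash != hash {
			// The service definition changed; a real client would decode the
			// response body here before recording the new hash.
			hash = newHash
		}
	}
	return nil
}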
669
670func TestAgent_Checks(t *testing.T) {
671	if testing.Short() {
672		t.Skip("too slow for testing.Short")
673	}
674
675	t.Parallel()
676	a := NewTestAgent(t, "")
677	defer a.Shutdown()
678
679	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
680	chk1 := &structs.HealthCheck{
681		Node:     a.Config.NodeName,
682		CheckID:  "mysql",
683		Name:     "mysql",
684		Interval: "30s",
685		Timeout:  "5s",
686		Status:   api.HealthPassing,
687	}
688	a.State.AddCheck(chk1, "")
689
690	req, _ := http.NewRequest("GET", "/v1/agent/checks", nil)
691	obj, err := a.srv.AgentChecks(nil, req)
692	if err != nil {
693		t.Fatalf("Err: %v", err)
694	}
695	val := obj.(map[types.CheckID]*structs.HealthCheck)
696	if len(val) != 1 {
697		t.Fatalf("bad checks: %v", obj)
698	}
699	if val["mysql"].Status != api.HealthPassing {
700		t.Fatalf("bad check: %v", obj)
701	}
702	if val["mysql"].Node != chk1.Node {
703		t.Fatalf("bad check: %v", obj)
704	}
705	if val["mysql"].Interval != chk1.Interval {
706		t.Fatalf("bad check: %v", obj)
707	}
708	if val["mysql"].Timeout != chk1.Timeout {
709		t.Fatalf("bad check: %v", obj)
710	}
711}
712
713func TestAgent_ChecksWithFilter(t *testing.T) {
714	if testing.Short() {
715		t.Skip("too slow for testing.Short")
716	}
717
718	t.Parallel()
719	a := NewTestAgent(t, "")
720	defer a.Shutdown()
721
722	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
723	chk1 := &structs.HealthCheck{
724		Node:    a.Config.NodeName,
725		CheckID: "mysql",
726		Name:    "mysql",
727		Status:  api.HealthPassing,
728	}
729	a.State.AddCheck(chk1, "")
730
731	chk2 := &structs.HealthCheck{
732		Node:    a.Config.NodeName,
733		CheckID: "redis",
734		Name:    "redis",
735		Status:  api.HealthPassing,
736	}
737	a.State.AddCheck(chk2, "")
738
739	req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape("Name == `redis`"), nil)
740	obj, err := a.srv.AgentChecks(nil, req)
741	require.NoError(t, err)
742	val := obj.(map[types.CheckID]*structs.HealthCheck)
743	require.Len(t, val, 1)
744	_, ok := val["redis"]
745	require.True(t, ok)
746}
747
748func TestAgent_HealthServiceByID(t *testing.T) {
749	if testing.Short() {
750		t.Skip("too slow for testing.Short")
751	}
752
753	t.Parallel()
754	a := NewTestAgent(t, "")
755	defer a.Shutdown()
756	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
757
758	service := &structs.NodeService{
759		ID:      "mysql",
760		Service: "mysql",
761	}
762	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
763		t.Fatalf("err: %v", err)
764	}
765	service = &structs.NodeService{
766		ID:      "mysql2",
767		Service: "mysql2",
768	}
769	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
770		t.Fatalf("err: %v", err)
771	}
772	service = &structs.NodeService{
773		ID:      "mysql3",
774		Service: "mysql3",
775	}
776	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
777		t.Fatalf("err: %v", err)
778	}
779
780	chk1 := &structs.HealthCheck{
781		Node:      a.Config.NodeName,
782		CheckID:   "mysql",
783		Name:      "mysql",
784		ServiceID: "mysql",
785		Status:    api.HealthPassing,
786	}
787	err := a.State.AddCheck(chk1, "")
788	if err != nil {
789		t.Fatalf("Err: %v", err)
790	}
791
792	chk2 := &structs.HealthCheck{
793		Node:      a.Config.NodeName,
794		CheckID:   "mysql",
795		Name:      "mysql",
796		ServiceID: "mysql",
797		Status:    api.HealthPassing,
798	}
799	err = a.State.AddCheck(chk2, "")
800	if err != nil {
801		t.Fatalf("Err: %v", err)
802	}
803
804	chk3 := &structs.HealthCheck{
805		Node:      a.Config.NodeName,
806		CheckID:   "mysql2",
807		Name:      "mysql2",
808		ServiceID: "mysql2",
809		Status:    api.HealthPassing,
810	}
811	err = a.State.AddCheck(chk3, "")
812	if err != nil {
813		t.Fatalf("Err: %v", err)
814	}
815
816	chk4 := &structs.HealthCheck{
817		Node:      a.Config.NodeName,
818		CheckID:   "mysql2",
819		Name:      "mysql2",
820		ServiceID: "mysql2",
821		Status:    api.HealthWarning,
822	}
823	err = a.State.AddCheck(chk4, "")
824	if err != nil {
825		t.Fatalf("Err: %v", err)
826	}
827
828	chk5 := &structs.HealthCheck{
829		Node:      a.Config.NodeName,
830		CheckID:   "mysql3",
831		Name:      "mysql3",
832		ServiceID: "mysql3",
833		Status:    api.HealthMaint,
834	}
835	err = a.State.AddCheck(chk5, "")
836	if err != nil {
837		t.Fatalf("Err: %v", err)
838	}
839
840	chk6 := &structs.HealthCheck{
841		Node:      a.Config.NodeName,
842		CheckID:   "mysql3",
843		Name:      "mysql3",
844		ServiceID: "mysql3",
845		Status:    api.HealthCritical,
846	}
847	err = a.State.AddCheck(chk6, "")
848	if err != nil {
849		t.Fatalf("Err: %v", err)
850	}
851
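	// eval queries the given health-by-id URL in both text and JSON formats and
	// asserts the expected HTTP status code and aggregated status string.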
852	eval := func(t *testing.T, url string, expectedCode int, expected string) {
853		t.Helper()
854		t.Run("format=text", func(t *testing.T) {
855			t.Helper()
856			req, _ := http.NewRequest("GET", url+"?format=text", nil)
857			resp := httptest.NewRecorder()
858			data, err := a.srv.AgentHealthServiceByID(resp, req)
859			codeWithPayload, ok := err.(CodeWithPayloadError)
860			if !ok {
861				t.Fatalf("Err: %v", err)
862			}
863			if got, want := codeWithPayload.StatusCode, expectedCode; got != want {
864				t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload)
865			}
866			body, ok := data.(string)
867			if !ok {
				t.Fatalf("Cannot get result as string: %#v", data)
869			}
870			if got, want := body, expected; got != want {
871				t.Fatalf("got body %q want %q", got, want)
872			}
873			if got, want := codeWithPayload.Reason, expected; got != want {
874				t.Fatalf("got body %q want %q", got, want)
875			}
876		})
877		t.Run("format=json", func(t *testing.T) {
878			req, _ := http.NewRequest("GET", url, nil)
879			resp := httptest.NewRecorder()
880			dataRaw, err := a.srv.AgentHealthServiceByID(resp, req)
881			codeWithPayload, ok := err.(CodeWithPayloadError)
882			if !ok {
883				t.Fatalf("Err: %v", err)
884			}
885			if got, want := codeWithPayload.StatusCode, expectedCode; got != want {
886				t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload)
887			}
888			data, ok := dataRaw.(*api.AgentServiceChecksInfo)
889			if !ok {
				t.Fatalf("Cannot convert result to JSON: %#v", dataRaw)
891			}
892			if codeWithPayload.StatusCode != http.StatusNotFound {
893				if data != nil && data.AggregatedStatus != expected {
894					t.Fatalf("got body %v want %v", data, expected)
895				}
896			}
897		})
898	}
899
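	// The endpoint maps the aggregated service status to an HTTP status code:
	// passing => 200, warning => 429, critical/maintenance => 503, and an
	// unknown service ID => 404, as exercised by the sub-tests below.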
900	t.Run("passing checks", func(t *testing.T) {
901		eval(t, "/v1/agent/health/service/id/mysql", http.StatusOK, "passing")
902	})
903	t.Run("warning checks", func(t *testing.T) {
904		eval(t, "/v1/agent/health/service/id/mysql2", http.StatusTooManyRequests, "warning")
905	})
906	t.Run("critical checks", func(t *testing.T) {
907		eval(t, "/v1/agent/health/service/id/mysql3", http.StatusServiceUnavailable, "critical")
908	})
909	t.Run("unknown serviceid", func(t *testing.T) {
910		eval(t, "/v1/agent/health/service/id/mysql1", http.StatusNotFound, fmt.Sprintf("ServiceId %s not found", structs.ServiceIDString("mysql1", nil)))
911	})
912
913	nodeCheck := &structs.HealthCheck{
914		Node:    a.Config.NodeName,
915		CheckID: "diskCheck",
916		Name:    "diskCheck",
917		Status:  api.HealthCritical,
918	}
919	err = a.State.AddCheck(nodeCheck, "")
920
921	if err != nil {
922		t.Fatalf("Err: %v", err)
923	}
924	t.Run("critical check on node", func(t *testing.T) {
925		eval(t, "/v1/agent/health/service/id/mysql", http.StatusServiceUnavailable, "critical")
926	})
927
928	err = a.State.RemoveCheck(nodeCheck.CompoundCheckID())
929	if err != nil {
930		t.Fatalf("Err: %v", err)
931	}
932	nodeCheck = &structs.HealthCheck{
933		Node:    a.Config.NodeName,
934		CheckID: "_node_maintenance",
935		Name:    "_node_maintenance",
936		Status:  api.HealthMaint,
937	}
938	err = a.State.AddCheck(nodeCheck, "")
939	if err != nil {
940		t.Fatalf("Err: %v", err)
941	}
942	t.Run("maintenance check on node", func(t *testing.T) {
943		eval(t, "/v1/agent/health/service/id/mysql", http.StatusServiceUnavailable, "maintenance")
944	})
945}
946
947func TestAgent_HealthServiceByName(t *testing.T) {
948	if testing.Short() {
949		t.Skip("too slow for testing.Short")
950	}
951
952	t.Parallel()
953	a := NewTestAgent(t, "")
954	defer a.Shutdown()
955
956	service := &structs.NodeService{
957		ID:      "mysql1",
958		Service: "mysql-pool-r",
959	}
960	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
961		t.Fatalf("err: %v", err)
962	}
963	service = &structs.NodeService{
964		ID:      "mysql2",
965		Service: "mysql-pool-r",
966	}
967	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
968		t.Fatalf("err: %v", err)
969	}
970	service = &structs.NodeService{
971		ID:      "mysql3",
972		Service: "mysql-pool-rw",
973	}
974	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
975		t.Fatalf("err: %v", err)
976	}
977	service = &structs.NodeService{
978		ID:      "mysql4",
979		Service: "mysql-pool-rw",
980	}
981	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
982		t.Fatalf("err: %v", err)
983	}
984	service = &structs.NodeService{
985		ID:      "httpd1",
986		Service: "httpd",
987	}
988	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
989		t.Fatalf("err: %v", err)
990	}
991	service = &structs.NodeService{
992		ID:      "httpd2",
993		Service: "httpd",
994	}
995	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
996		t.Fatalf("err: %v", err)
997	}
998
999	chk1 := &structs.HealthCheck{
1000		Node:        a.Config.NodeName,
1001		CheckID:     "mysql1",
1002		Name:        "mysql1",
1003		ServiceID:   "mysql1",
1004		ServiceName: "mysql-pool-r",
1005		Status:      api.HealthPassing,
1006	}
1007	err := a.State.AddCheck(chk1, "")
1008	if err != nil {
1009		t.Fatalf("Err: %v", err)
1010	}
1011
1012	chk2 := &structs.HealthCheck{
1013		Node:        a.Config.NodeName,
1014		CheckID:     "mysql1",
1015		Name:        "mysql1",
1016		ServiceID:   "mysql1",
1017		ServiceName: "mysql-pool-r",
1018		Status:      api.HealthWarning,
1019	}
1020	err = a.State.AddCheck(chk2, "")
1021	if err != nil {
1022		t.Fatalf("Err: %v", err)
1023	}
1024
1025	chk3 := &structs.HealthCheck{
1026		Node:        a.Config.NodeName,
1027		CheckID:     "mysql2",
1028		Name:        "mysql2",
1029		ServiceID:   "mysql2",
1030		ServiceName: "mysql-pool-r",
1031		Status:      api.HealthPassing,
1032	}
1033	err = a.State.AddCheck(chk3, "")
1034	if err != nil {
1035		t.Fatalf("Err: %v", err)
1036	}
1037
1038	chk4 := &structs.HealthCheck{
1039		Node:        a.Config.NodeName,
1040		CheckID:     "mysql2",
1041		Name:        "mysql2",
1042		ServiceID:   "mysql2",
1043		ServiceName: "mysql-pool-r",
1044		Status:      api.HealthCritical,
1045	}
1046	err = a.State.AddCheck(chk4, "")
1047	if err != nil {
1048		t.Fatalf("Err: %v", err)
1049	}
1050
1051	chk5 := &structs.HealthCheck{
1052		Node:        a.Config.NodeName,
1053		CheckID:     "mysql3",
1054		Name:        "mysql3",
1055		ServiceID:   "mysql3",
1056		ServiceName: "mysql-pool-rw",
1057		Status:      api.HealthWarning,
1058	}
1059	err = a.State.AddCheck(chk5, "")
1060	if err != nil {
1061		t.Fatalf("Err: %v", err)
1062	}
1063
1064	chk6 := &structs.HealthCheck{
1065		Node:        a.Config.NodeName,
1066		CheckID:     "mysql4",
1067		Name:        "mysql4",
1068		ServiceID:   "mysql4",
1069		ServiceName: "mysql-pool-rw",
1070		Status:      api.HealthPassing,
1071	}
1072	err = a.State.AddCheck(chk6, "")
1073	if err != nil {
1074		t.Fatalf("Err: %v", err)
1075	}
1076
1077	chk7 := &structs.HealthCheck{
1078		Node:        a.Config.NodeName,
1079		CheckID:     "httpd1",
1080		Name:        "httpd1",
1081		ServiceID:   "httpd1",
1082		ServiceName: "httpd",
1083		Status:      api.HealthPassing,
1084	}
1085	err = a.State.AddCheck(chk7, "")
1086	if err != nil {
1087		t.Fatalf("Err: %v", err)
1088	}
1089
1090	chk8 := &structs.HealthCheck{
1091		Node:        a.Config.NodeName,
1092		CheckID:     "httpd2",
1093		Name:        "httpd2",
1094		ServiceID:   "httpd2",
1095		ServiceName: "httpd",
1096		Status:      api.HealthPassing,
1097	}
1098	err = a.State.AddCheck(chk8, "")
1099	if err != nil {
1100		t.Fatalf("Err: %v", err)
1101	}
1102
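	// eval queries the given health-by-name URL in both text and JSON formats
	// and asserts the expected HTTP status code and aggregated status string.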
1103	eval := func(t *testing.T, url string, expectedCode int, expected string) {
1104		t.Helper()
1105		t.Run("format=text", func(t *testing.T) {
1106			t.Helper()
1107			req, _ := http.NewRequest("GET", url+"?format=text", nil)
1108			resp := httptest.NewRecorder()
1109			data, err := a.srv.AgentHealthServiceByName(resp, req)
1110			codeWithPayload, ok := err.(CodeWithPayloadError)
1111			if !ok {
1112				t.Fatalf("Err: %v", err)
1113			}
1114			if got, want := codeWithPayload.StatusCode, expectedCode; got != want {
1115				t.Fatalf("returned bad status: %d. Body: %q", resp.Code, resp.Body.String())
1116			}
1117			if got, want := codeWithPayload.Reason, expected; got != want {
1118				t.Fatalf("got reason %q want %q", got, want)
1119			}
1120			if got, want := data, expected; got != want {
1121				t.Fatalf("got body %q want %q", got, want)
1122			}
1123		})
1124		t.Run("format=json", func(t *testing.T) {
1125			t.Helper()
1126			req, _ := http.NewRequest("GET", url, nil)
1127			resp := httptest.NewRecorder()
1128			dataRaw, err := a.srv.AgentHealthServiceByName(resp, req)
1129			codeWithPayload, ok := err.(CodeWithPayloadError)
1130			if !ok {
1131				t.Fatalf("Err: %v", err)
1132			}
1133			data, ok := dataRaw.([]api.AgentServiceChecksInfo)
1134			if !ok {
				t.Fatalf("Cannot convert result to JSON")
1136			}
1137			if got, want := codeWithPayload.StatusCode, expectedCode; got != want {
1138				t.Fatalf("returned bad code: %d. Body: %#v", resp.Code, data)
1139			}
1140			if resp.Code != http.StatusNotFound {
1141				if codeWithPayload.Reason != expected {
1142					t.Fatalf("got wrong status %#v want %#v", codeWithPayload, expected)
1143				}
1144			}
1145		})
1146	}
1147
1148	t.Run("passing checks", func(t *testing.T) {
1149		eval(t, "/v1/agent/health/service/name/httpd", http.StatusOK, "passing")
1150	})
1151	t.Run("warning checks", func(t *testing.T) {
1152		eval(t, "/v1/agent/health/service/name/mysql-pool-rw", http.StatusTooManyRequests, "warning")
1153	})
1154	t.Run("critical checks", func(t *testing.T) {
1155		eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "critical")
1156	})
1157	t.Run("unknown serviceName", func(t *testing.T) {
1158		eval(t, "/v1/agent/health/service/name/test", http.StatusNotFound, "ServiceName test Not Found")
1159	})
1160	nodeCheck := &structs.HealthCheck{
1161		Node:    a.Config.NodeName,
1162		CheckID: "diskCheck",
1163		Name:    "diskCheck",
1164		Status:  api.HealthCritical,
1165	}
1166	err = a.State.AddCheck(nodeCheck, "")
1167
1168	if err != nil {
1169		t.Fatalf("Err: %v", err)
1170	}
1171	t.Run("critical check on node", func(t *testing.T) {
1172		eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "critical")
1173	})
1174
1175	err = a.State.RemoveCheck(nodeCheck.CompoundCheckID())
1176	if err != nil {
1177		t.Fatalf("Err: %v", err)
1178	}
1179	nodeCheck = &structs.HealthCheck{
1180		Node:    a.Config.NodeName,
1181		CheckID: "_node_maintenance",
1182		Name:    "_node_maintenance",
1183		Status:  api.HealthMaint,
1184	}
1185	err = a.State.AddCheck(nodeCheck, "")
1186	if err != nil {
1187		t.Fatalf("Err: %v", err)
1188	}
1189	t.Run("maintenance check on node", func(t *testing.T) {
1190		eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "maintenance")
1191	})
1192}
1193
1194func TestAgent_HealthServicesACLEnforcement(t *testing.T) {
1195	if testing.Short() {
1196		t.Skip("too slow for testing.Short")
1197	}
1198
1199	t.Parallel()
1200	a := NewTestAgent(t, TestACLConfigWithParams(nil))
1201	defer a.Shutdown()
1202
1203	service := &structs.NodeService{
1204		ID:      "mysql1",
1205		Service: "mysql",
1206	}
1207	require.NoError(t, a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal))
1208
1209	service = &structs.NodeService{
1210		ID:      "foo1",
1211		Service: "foo",
1212	}
1213	require.NoError(t, a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal))
1214
1215	// no token
1216	t.Run("no-token-health-by-id", func(t *testing.T) {
1217		req, err := http.NewRequest("GET", "/v1/agent/health/service/id/mysql1", nil)
1218		require.NoError(t, err)
1219		resp := httptest.NewRecorder()
1220		_, err = a.srv.AgentHealthServiceByID(resp, req)
1221		require.Equal(t, acl.ErrPermissionDenied, err)
1222	})
1223
1224	t.Run("no-token-health-by-name", func(t *testing.T) {
1225		req, err := http.NewRequest("GET", "/v1/agent/health/service/name/mysql", nil)
1226		require.NoError(t, err)
1227		resp := httptest.NewRecorder()
1228		_, err = a.srv.AgentHealthServiceByName(resp, req)
1229		require.Equal(t, acl.ErrPermissionDenied, err)
1230	})
1231
1232	t.Run("root-token-health-by-id", func(t *testing.T) {
1233		req, err := http.NewRequest("GET", "/v1/agent/health/service/id/foo1", nil)
1234		require.NoError(t, err)
1235		req.Header.Add("X-Consul-Token", TestDefaultMasterToken)
1236		resp := httptest.NewRecorder()
1237		_, err = a.srv.AgentHealthServiceByID(resp, req)
1238		require.NotEqual(t, acl.ErrPermissionDenied, err)
1239	})
1240
1241	t.Run("root-token-health-by-name", func(t *testing.T) {
1242		req, err := http.NewRequest("GET", "/v1/agent/health/service/name/foo", nil)
1243		require.NoError(t, err)
1244		req.Header.Add("X-Consul-Token", TestDefaultMasterToken)
1245		resp := httptest.NewRecorder()
1246		_, err = a.srv.AgentHealthServiceByName(resp, req)
1247		require.NotEqual(t, acl.ErrPermissionDenied, err)
1248	})
1249}
1250
1251func TestAgent_Checks_ACLFilter(t *testing.T) {
1252	if testing.Short() {
1253		t.Skip("too slow for testing.Short")
1254	}
1255
1256	t.Parallel()
1257	a := NewTestAgent(t, TestACLConfig())
1258	defer a.Shutdown()
1259
1260	testrpc.WaitForLeader(t, a.RPC, "dc1")
1261	chk1 := &structs.HealthCheck{
1262		Node:    a.Config.NodeName,
1263		CheckID: "mysql",
1264		Name:    "mysql",
1265		Status:  api.HealthPassing,
1266	}
1267	a.State.AddCheck(chk1, "")
1268
1269	t.Run("no token", func(t *testing.T) {
1270		req, _ := http.NewRequest("GET", "/v1/agent/checks", nil)
1271		obj, err := a.srv.AgentChecks(nil, req)
1272		if err != nil {
1273			t.Fatalf("Err: %v", err)
1274		}
1275		val := obj.(map[types.CheckID]*structs.HealthCheck)
1276		if len(val) != 0 {
1277			t.Fatalf("bad checks: %v", obj)
1278		}
1279	})
1280
1281	t.Run("root token", func(t *testing.T) {
1282		req, _ := http.NewRequest("GET", "/v1/agent/checks?token=root", nil)
1283		obj, err := a.srv.AgentChecks(nil, req)
1284		if err != nil {
1285			t.Fatalf("Err: %v", err)
1286		}
1287		val := obj.(map[types.CheckID]*structs.HealthCheck)
1288		if len(val) != 1 {
1289			t.Fatalf("bad checks: %v", obj)
1290		}
1291	})
1292}
1293
1294func TestAgent_Self(t *testing.T) {
1295	if testing.Short() {
1296		t.Skip("too slow for testing.Short")
1297	}
1298
1299	t.Parallel()
1300
1301	cases := map[string]struct {
1302		hcl       string
1303		expectXDS bool
1304	}{
1305		"normal": {
1306			hcl: `
1307			node_meta {
1308				somekey = "somevalue"
1309			}
1310			`,
1311			expectXDS: true,
1312		},
1313		"no grpc": {
1314			hcl: `
1315			node_meta {
1316				somekey = "somevalue"
1317			}
1318			ports = {
1319				grpc = -1
1320			}
1321			`,
1322			expectXDS: false,
1323		},
1324	}
1325
1326	for name, tc := range cases {
1327		tc := tc
1328		t.Run(name, func(t *testing.T) {
1329			a := NewTestAgent(t, tc.hcl)
1330			defer a.Shutdown()
1331
1332			testrpc.WaitForTestAgent(t, a.RPC, "dc1")
1333			req, _ := http.NewRequest("GET", "/v1/agent/self", nil)
1334			obj, err := a.srv.AgentSelf(nil, req)
1335			require.NoError(t, err)
1336
1337			val := obj.(Self)
1338			require.Equal(t, a.Config.SerfPortLAN, int(val.Member.Port))
1339			require.Equal(t, a.Config.SerfPortLAN, val.DebugConfig["SerfPortLAN"].(int))
1340
1341			cs, err := a.GetLANCoordinate()
1342			require.NoError(t, err)
1343			require.Equal(t, cs[a.config.SegmentName], val.Coord)
1344
1345			delete(val.Meta, structs.MetaSegmentKey) // Added later, not in config.
1346			require.Equal(t, a.config.NodeMeta, val.Meta)
1347
1348			if tc.expectXDS {
1349				require.NotNil(t, val.XDS, "xds component missing when gRPC is enabled")
1350				require.Equal(t,
1351					map[string][]string{"envoy": proxysupport.EnvoyVersions},
1352					val.XDS.SupportedProxies,
1353				)
1354
1355			} else {
1356				require.Nil(t, val.XDS, "xds component should be missing when gRPC is disabled")
1357			}
1358		})
1359	}
1360}
1361
1362func TestAgent_Self_ACLDeny(t *testing.T) {
1363	if testing.Short() {
1364		t.Skip("too slow for testing.Short")
1365	}
1366
1367	t.Parallel()
1368	a := NewTestAgent(t, TestACLConfig())
1369	defer a.Shutdown()
1370
1371	testrpc.WaitForLeader(t, a.RPC, "dc1")
1372	t.Run("no token", func(t *testing.T) {
1373		req, _ := http.NewRequest("GET", "/v1/agent/self", nil)
1374		if _, err := a.srv.AgentSelf(nil, req); !acl.IsErrPermissionDenied(err) {
1375			t.Fatalf("err: %v", err)
1376		}
1377	})
1378
1379	t.Run("agent master token", func(t *testing.T) {
1380		req, _ := http.NewRequest("GET", "/v1/agent/self?token=towel", nil)
1381		if _, err := a.srv.AgentSelf(nil, req); err != nil {
1382			t.Fatalf("err: %v", err)
1383		}
1384	})
1385
1386	t.Run("read-only token", func(t *testing.T) {
1387		ro := makeReadOnlyAgentACL(t, a.srv)
1388		req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/self?token=%s", ro), nil)
1389		if _, err := a.srv.AgentSelf(nil, req); err != nil {
1390			t.Fatalf("err: %v", err)
1391		}
1392	})
1393}
1394
1395func TestAgent_Metrics_ACLDeny(t *testing.T) {
1396	if testing.Short() {
1397		t.Skip("too slow for testing.Short")
1398	}
1399
1400	t.Parallel()
1401	a := NewTestAgent(t, TestACLConfig())
1402	defer a.Shutdown()
1403
1404	testrpc.WaitForLeader(t, a.RPC, "dc1")
1405	t.Run("no token", func(t *testing.T) {
1406		req, _ := http.NewRequest("GET", "/v1/agent/metrics", nil)
1407		if _, err := a.srv.AgentMetrics(nil, req); !acl.IsErrPermissionDenied(err) {
1408			t.Fatalf("err: %v", err)
1409		}
1410	})
1411
1412	t.Run("agent master token", func(t *testing.T) {
1413		req, _ := http.NewRequest("GET", "/v1/agent/metrics?token=towel", nil)
1414		if _, err := a.srv.AgentMetrics(nil, req); err != nil {
1415			t.Fatalf("err: %v", err)
1416		}
1417	})
1418
1419	t.Run("read-only token", func(t *testing.T) {
1420		ro := makeReadOnlyAgentACL(t, a.srv)
1421		req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/metrics?token=%s", ro), nil)
1422		if _, err := a.srv.AgentMetrics(nil, req); err != nil {
1423			t.Fatalf("err: %v", err)
1424		}
1425	})
1426}
1427
1428func TestAgent_Reload(t *testing.T) {
1429	if testing.Short() {
1430		t.Skip("too slow for testing.Short")
1431	}
1432
1433	t.Parallel()
1434	dc1 := "dc1"
1435	a := NewTestAgent(t, `
1436		services = [
1437			{
1438				name = "redis"
1439			}
1440		]
1441		watches = [
1442			{
1443				datacenter = "`+dc1+`"
1444				type = "key"
1445				key = "test"
1446				handler = "true"
1447			}
1448		]
1449    limits = {
1450      rpc_rate=1
1451      rpc_max_burst=100
1452    }
1453	`)
1454	defer a.Shutdown()
1455
1456	testrpc.WaitForTestAgent(t, a.RPC, dc1)
1457	if a.State.Service(structs.NewServiceID("redis", nil)) == nil {
1458		t.Fatal("missing redis service")
1459	}
1460
1461	cfg2 := TestConfig(testutil.Logger(t), config.FileSource{
1462		Name:   "reload",
1463		Format: "hcl",
1464		Data: `
1465			data_dir = "` + a.Config.DataDir + `"
1466			node_id = "` + string(a.Config.NodeID) + `"
1467			node_name = "` + a.Config.NodeName + `"
1468
1469			services = [
1470				{
1471					name = "redis-reloaded"
1472				}
1473			]
1474      limits = {
1475        rpc_rate=2
1476        rpc_max_burst=200
1477      }
1478		`,
1479	})
1480
1481	shim := &delegateConfigReloadShim{delegate: a.delegate}
1482	a.delegate = shim
1483	if err := a.reloadConfigInternal(cfg2); err != nil {
1484		t.Fatalf("got error %v want nil", err)
1485	}
1486	if a.State.Service(structs.NewServiceID("redis-reloaded", nil)) == nil {
1487		t.Fatal("missing redis-reloaded service")
1488	}
1489
1490	require.Equal(t, rate.Limit(2), shim.newCfg.RPCRateLimit)
1491	require.Equal(t, 200, shim.newCfg.RPCMaxBurst)
1492
1493	for _, wp := range a.watchPlans {
1494		if !wp.IsStopped() {
1495			t.Fatalf("Reloading configs should stop watch plans of the previous configuration")
1496		}
1497	}
1498}
1499
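// delegateConfigReloadShim wraps the agent delegate so TestAgent_Reload can
// capture the consul.ReloadableConfig passed to ReloadConfig during a reload
// while still forwarding the call to the real delegate.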
1500type delegateConfigReloadShim struct {
1501	delegate
1502	newCfg consul.ReloadableConfig
1503}
1504
1505func (s *delegateConfigReloadShim) ReloadConfig(cfg consul.ReloadableConfig) error {
1506	s.newCfg = cfg
1507	return s.delegate.ReloadConfig(cfg)
1508}
1509
// TestAgent_ReloadDoesNotTriggerWatch ensures watches are not triggered after a reload.
// See https://github.com/hashicorp/consul/issues/7446
1512func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) {
1513	if testing.Short() {
1514		t.Skip("too slow for testing.Short")
1515	}
1516
1517	dc1 := "dc1"
1518	tmpFileRaw, err := ioutil.TempFile("", "rexec")
1519	require.NoError(t, err)
1520	tmpFile := tmpFileRaw.Name()
1521	defer os.Remove(tmpFile)
1522	handlerShell := fmt.Sprintf("(cat ; echo CONSUL_INDEX $CONSUL_INDEX) | tee '%s.atomic' ; mv '%s.atomic' '%s'", tmpFile, tmpFile, tmpFile)
1523
1524	a := NewTestAgent(t, `
1525		services = [
1526			{
1527				name = "redis"
1528				checks = [
1529					{
1530						id =  "red-is-dead"
1531						ttl = "30s"
1532						notes = "initial check"
1533					}
1534				]
1535			}
1536		]
1537		watches = [
1538			{
1539				datacenter = "`+dc1+`"
1540				type = "service"
1541				service = "redis"
1542				args = ["bash", "-c", "`+handlerShell+`"]
1543			}
1544		]
1545	`)
1546	checkID := structs.NewCheckID("red-is-dead", nil)
1547	defer a.Shutdown()
1548
1549	testrpc.WaitForTestAgent(t, a.RPC, dc1)
1550	require.NoError(t, a.updateTTLCheck(checkID, api.HealthPassing, "testing-agent-reload-001"))
1551
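	// checkStr waits (with increasing sleeps) for the watch handler to write
	// output to tmpFile, then runs the supplied evaluator against the contents.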
1552	checkStr := func(r *retry.R, evaluator func(string) error) {
1553		t.Helper()
1554		contentsStr := ""
1555		// Wait for watch to be populated
1556		for i := 1; i < 7; i++ {
1557			contents, err := ioutil.ReadFile(tmpFile)
1558			if err != nil {
1559				t.Fatalf("should be able to read file, but had: %#v", err)
1560			}
1561			contentsStr = string(contents)
1562			if contentsStr != "" {
1563				break
1564			}
1565			time.Sleep(time.Duration(i) * time.Second)
1566			testutil.Logger(t).Info("Watch not yet populated, retrying")
1567		}
1568		if err := evaluator(contentsStr); err != nil {
1569			r.Errorf("ERROR: Test failing: %s", err)
1570		}
1571	}
1572	ensureNothingCritical := func(r *retry.R, mustContain string) {
1573		t.Helper()
1574		eval := func(contentsStr string) error {
1575			if strings.Contains(contentsStr, "critical") {
1576				return fmt.Errorf("MUST NOT contain critical:= %s", contentsStr)
1577			}
1578			if !strings.Contains(contentsStr, mustContain) {
1579				return fmt.Errorf("MUST contain '%s' := %s", mustContain, contentsStr)
1580			}
1581			return nil
1582		}
1583		checkStr(r, eval)
1584	}
1585
1586	retriesWithDelay := func() *retry.Counter {
1587		return &retry.Counter{Count: 10, Wait: 1 * time.Second}
1588	}
1589
1590	retry.RunWith(retriesWithDelay(), t, func(r *retry.R) {
1591		testutil.Logger(t).Info("Consul is now ready")
1592		// it should contain the output
1593		checkStr(r, func(contentStr string) error {
1594			if contentStr == "[]" {
1595				return fmt.Errorf("Consul is still starting up")
1596			}
1597			return nil
1598		})
1599	})
1600
1601	retry.RunWith(retriesWithDelay(), t, func(r *retry.R) {
1602		ensureNothingCritical(r, "testing-agent-reload-001")
1603	})
1604
1605	// Let's take almost the same config
1606	cfg2 := TestConfig(testutil.Logger(t), config.FileSource{
1607		Name:   "reload",
1608		Format: "hcl",
1609		Data: `
1610			data_dir = "` + a.Config.DataDir + `"
1611			node_id = "` + string(a.Config.NodeID) + `"
1612			node_name = "` + a.Config.NodeName + `"
1613
1614			services = [
1615				{
1616					name = "redis"
1617					checks = [
1618						{
1619							id  = "red-is-dead"
1620							ttl = "30s"
1621							notes = "initial check"
1622						}
1623					]
1624				}
1625			]
1626			watches = [
1627				{
1628					datacenter = "` + dc1 + `"
1629					type = "service"
1630					service = "redis"
1631					args = ["bash", "-c", "` + handlerShell + `"]
1632				}
1633			]
1634		`,
1635	})
1636
1637	justOnce := func() *retry.Counter {
1638		return &retry.Counter{Count: 1, Wait: 25 * time.Millisecond}
1639	}
1640
1641	retry.RunWith(justOnce(), t, func(r *retry.R) {
1642		// We check that reload does not go to critical
1643		ensureNothingCritical(r, "red-is-dead")
1644
1645		if err := a.reloadConfigInternal(cfg2); err != nil {
1646			t.Fatalf("got error %v want nil", err)
1647		}
1648
1649		// We check that reload does not go to critical
1650		ensureNothingCritical(r, "red-is-dead")
1651		ensureNothingCritical(r, "testing-agent-reload-001")
1652
1653		require.NoError(t, a.updateTTLCheck(checkID, api.HealthPassing, "testing-agent-reload-002"))
1654
1655		ensureNothingCritical(r, "red-is-dead")
1656	})
1657}
1658
1659func TestAgent_Reload_ACLDeny(t *testing.T) {
1660	if testing.Short() {
1661		t.Skip("too slow for testing.Short")
1662	}
1663
1664	t.Parallel()
1665	a := NewTestAgent(t, TestACLConfig())
1666	defer a.Shutdown()
1667
1668	testrpc.WaitForLeader(t, a.RPC, "dc1")
1669	t.Run("no token", func(t *testing.T) {
1670		req, _ := http.NewRequest("PUT", "/v1/agent/reload", nil)
1671		if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) {
1672			t.Fatalf("err: %v", err)
1673		}
1674	})
1675
1676	t.Run("read-only token", func(t *testing.T) {
1677		ro := makeReadOnlyAgentACL(t, a.srv)
1678		req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/reload?token=%s", ro), nil)
1679		if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) {
1680			t.Fatalf("err: %v", err)
1681		}
1682	})
1683
	// This proves we call the ACL function, and we've got the other reload
	// test to prove we do the reload, which should be sufficient.
	// The reload logic is a little complex to set up, so it isn't worth
	// repeating here.
1688}
1689
1690func TestAgent_Members(t *testing.T) {
1691	if testing.Short() {
1692		t.Skip("too slow for testing.Short")
1693	}
1694
1695	t.Parallel()
1696	a := NewTestAgent(t, "")
1697	defer a.Shutdown()
1698
1699	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
1700	req, _ := http.NewRequest("GET", "/v1/agent/members", nil)
1701	obj, err := a.srv.AgentMembers(nil, req)
1702	if err != nil {
1703		t.Fatalf("Err: %v", err)
1704	}
1705	val := obj.([]serf.Member)
1706	if len(val) == 0 {
1707		t.Fatalf("bad members: %v", obj)
1708	}
1709
1710	if int(val[0].Port) != a.Config.SerfPortLAN {
1711		t.Fatalf("not lan: %v", obj)
1712	}
1713}
1714
1715func TestAgent_Members_WAN(t *testing.T) {
1716	if testing.Short() {
1717		t.Skip("too slow for testing.Short")
1718	}
1719
1720	t.Parallel()
1721	a := NewTestAgent(t, "")
1722	defer a.Shutdown()
1723
1724	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
1725	req, _ := http.NewRequest("GET", "/v1/agent/members?wan=true", nil)
1726	obj, err := a.srv.AgentMembers(nil, req)
1727	if err != nil {
1728		t.Fatalf("Err: %v", err)
1729	}
1730	val := obj.([]serf.Member)
1731	if len(val) == 0 {
1732		t.Fatalf("bad members: %v", obj)
1733	}
1734
1735	if int(val[0].Port) != a.Config.SerfPortWAN {
1736		t.Fatalf("not wan: %v", obj)
1737	}
1738}
1739
1740func TestAgent_Members_ACLFilter(t *testing.T) {
1741	if testing.Short() {
1742		t.Skip("too slow for testing.Short")
1743	}
1744
1745	t.Parallel()
1746	a := NewTestAgent(t, TestACLConfig())
1747	defer a.Shutdown()
1748
1749	testrpc.WaitForLeader(t, a.RPC, "dc1")
1750	t.Run("no token", func(t *testing.T) {
1751		req, _ := http.NewRequest("GET", "/v1/agent/members", nil)
1752		obj, err := a.srv.AgentMembers(nil, req)
1753		if err != nil {
1754			t.Fatalf("Err: %v", err)
1755		}
1756		val := obj.([]serf.Member)
1757		if len(val) != 0 {
1758			t.Fatalf("bad members: %v", obj)
1759		}
1760	})
1761
1762	t.Run("root token", func(t *testing.T) {
1763		req, _ := http.NewRequest("GET", "/v1/agent/members?token=root", nil)
1764		obj, err := a.srv.AgentMembers(nil, req)
1765		if err != nil {
1766			t.Fatalf("Err: %v", err)
1767		}
1768		val := obj.([]serf.Member)
1769		if len(val) != 1 {
1770			t.Fatalf("bad members: %v", obj)
1771		}
1772	})
1773}
1774
1775func TestAgent_Join(t *testing.T) {
1776	if testing.Short() {
1777		t.Skip("too slow for testing.Short")
1778	}
1779
1780	t.Parallel()
1781	a1 := NewTestAgent(t, "")
1782	defer a1.Shutdown()
1783	a2 := NewTestAgent(t, "")
1784	defer a2.Shutdown()
1785	testrpc.WaitForLeader(t, a1.RPC, "dc1")
1786	testrpc.WaitForLeader(t, a2.RPC, "dc1")
1787
1788	addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
1789	req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil)
1790	obj, err := a1.srv.AgentJoin(nil, req)
1791	if err != nil {
1792		t.Fatalf("Err: %v", err)
1793	}
1794	if obj != nil {
1795		t.Fatalf("Err: %v", obj)
1796	}
1797
1798	if len(a1.LANMembers()) != 2 {
1799		t.Fatalf("should have 2 members")
1800	}
1801
1802	retry.Run(t, func(r *retry.R) {
1803		if got, want := len(a2.LANMembers()), 2; got != want {
1804			r.Fatalf("got %d LAN members want %d", got, want)
1805		}
1806	})
1807}
1808
1809func TestAgent_Join_WAN(t *testing.T) {
1810	if testing.Short() {
1811		t.Skip("too slow for testing.Short")
1812	}
1813
1814	t.Parallel()
1815	a1 := NewTestAgent(t, "")
1816	defer a1.Shutdown()
1817	a2 := NewTestAgent(t, "")
1818	defer a2.Shutdown()
1819	testrpc.WaitForLeader(t, a1.RPC, "dc1")
1820	testrpc.WaitForLeader(t, a2.RPC, "dc1")
1821
1822	addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortWAN)
1823	req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?wan=true", addr), nil)
1824	obj, err := a1.srv.AgentJoin(nil, req)
1825	if err != nil {
1826		t.Fatalf("Err: %v", err)
1827	}
1828	if obj != nil {
1829		t.Fatalf("Err: %v", obj)
1830	}
1831
1832	if len(a1.WANMembers()) != 2 {
1833		t.Fatalf("should have 2 members")
1834	}
1835
1836	retry.Run(t, func(r *retry.R) {
1837		if got, want := len(a2.WANMembers()), 2; got != want {
1838			r.Fatalf("got %d WAN members want %d", got, want)
1839		}
1840	})
1841}
1842
1843func TestAgent_Join_ACLDeny(t *testing.T) {
1844	if testing.Short() {
1845		t.Skip("too slow for testing.Short")
1846	}
1847
1848	t.Parallel()
1849	a1 := NewTestAgent(t, TestACLConfig())
1850	defer a1.Shutdown()
1851	a2 := NewTestAgent(t, "")
1852	defer a2.Shutdown()
1853	testrpc.WaitForLeader(t, a1.RPC, "dc1")
1854	testrpc.WaitForLeader(t, a2.RPC, "dc1")
1855
1856	addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
1857
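	// Joining requires agent write permission: the anonymous and read-only
	// tokens below are denied, while the agent master token configured by
	// TestACLConfig ("towel") is allowed.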
1858	t.Run("no token", func(t *testing.T) {
1859		req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil)
1860		if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) {
1861			t.Fatalf("err: %v", err)
1862		}
1863	})
1864
1865	t.Run("agent master token", func(t *testing.T) {
1866		req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=towel", addr), nil)
1867		_, err := a1.srv.AgentJoin(nil, req)
1868		if err != nil {
1869			t.Fatalf("err: %v", err)
1870		}
1871	})
1872
1873	t.Run("read-only token", func(t *testing.T) {
1874		ro := makeReadOnlyAgentACL(t, a1.srv)
1875		req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=%s", addr, ro), nil)
1876		if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) {
1877			t.Fatalf("err: %v", err)
1878		}
1879	})
1880}
1881
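// mockNotifier records the last state string passed to Notify so tests can
// assert on the join notification the agent emits (e.g. "READY=1").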
1882type mockNotifier struct{ s string }
1883
1884func (n *mockNotifier) Notify(state string) error {
1885	n.s = state
1886	return nil
1887}
1888
1889func TestAgent_JoinLANNotify(t *testing.T) {
1890	if testing.Short() {
1891		t.Skip("too slow for testing.Short")
1892	}
1893
1894	t.Parallel()
1895	a1 := NewTestAgent(t, "")
1896	defer a1.Shutdown()
1897	testrpc.WaitForLeader(t, a1.RPC, "dc1")
1898
1899	a2 := NewTestAgent(t, `
1900		server = false
1901		bootstrap = false
1902	`)
1903	defer a2.Shutdown()
1904
1905	notif := &mockNotifier{}
1906	a1.joinLANNotifier = notif
1907
1908	addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
1909	_, err := a1.JoinLAN([]string{addr})
1910	if err != nil {
1911		t.Fatalf("err: %v", err)
1912	}
1913
1914	if got, want := notif.s, "READY=1"; got != want {
1915		t.Fatalf("got joinLAN notification %q want %q", got, want)
1916	}
1917}
1918
1919func TestAgent_Leave(t *testing.T) {
1920	if testing.Short() {
1921		t.Skip("too slow for testing.Short")
1922	}
1923
1924	t.Parallel()
1925	a1 := NewTestAgent(t, "")
1926	defer a1.Shutdown()
1927	testrpc.WaitForLeader(t, a1.RPC, "dc1")
1928
1929	a2 := NewTestAgent(t, `
		server = false
		bootstrap = false
	`)
1933	defer a2.Shutdown()
1934
1935	// Join first
1936	addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
1937	_, err := a1.JoinLAN([]string{addr})
1938	if err != nil {
1939		t.Fatalf("err: %v", err)
1940	}
1941
1942	// Graceful leave now
1943	req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil)
1944	obj, err := a2.srv.AgentLeave(nil, req)
1945	if err != nil {
1946		t.Fatalf("Err: %v", err)
1947	}
1948	if obj != nil {
1949		t.Fatalf("Err: %v", obj)
1950	}
1951	retry.Run(t, func(r *retry.R) {
1952		m := a1.LANMembers()
1953		if got, want := m[1].Status, serf.StatusLeft; got != want {
1954			r.Fatalf("got status %q want %q", got, want)
1955		}
1956	})
1957}
1958
1959func TestAgent_Leave_ACLDeny(t *testing.T) {
1960	if testing.Short() {
1961		t.Skip("too slow for testing.Short")
1962	}
1963
1964	t.Parallel()
1965	a := NewTestAgent(t, TestACLConfig())
1966	defer a.Shutdown()
1967	testrpc.WaitForLeader(t, a.RPC, "dc1")
1968
1969	t.Run("no token", func(t *testing.T) {
1970		req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil)
1971		if _, err := a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) {
1972			t.Fatalf("err: %v", err)
1973		}
1974	})
1975
1976	t.Run("read-only token", func(t *testing.T) {
1977		ro := makeReadOnlyAgentACL(t, a.srv)
1978		req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/leave?token=%s", ro), nil)
1979		if _, err := a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) {
1980			t.Fatalf("err: %v", err)
1981		}
1982	})
1983
1984	// this sub-test will change the state so that there is no leader.
1985	// it must therefore be the last one in this list.
1986	t.Run("agent master token", func(t *testing.T) {
1987		req, _ := http.NewRequest("PUT", "/v1/agent/leave?token=towel", nil)
1988		if _, err := a.srv.AgentLeave(nil, req); err != nil {
1989			t.Fatalf("err: %v", err)
1990		}
1991	})
1992}
1993
1994func TestAgent_ForceLeave(t *testing.T) {
1995	if testing.Short() {
1996		t.Skip("too slow for testing.Short")
1997	}
1998
1999	t.Parallel()
2000	a1 := NewTestAgent(t, "")
2001	defer a1.Shutdown()
2002	a2 := NewTestAgent(t, "")
2003	testrpc.WaitForLeader(t, a1.RPC, "dc1")
2004	testrpc.WaitForLeader(t, a2.RPC, "dc1")
2005
2006	// Join first
2007	addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
2008	_, err := a1.JoinLAN([]string{addr})
2009	if err != nil {
2010		t.Fatalf("err: %v", err)
2011	}
2012
2013	// this test probably needs work
2014	a2.Shutdown()
	// Wait for the agent to be marked as failed so we know it has fully shut down.
2016	retry.Run(t, func(r *retry.R) {
2017		m := a1.LANMembers()
2018		if got, want := m[1].Status, serf.StatusFailed; got != want {
2019			r.Fatalf("got status %q want %q", got, want)
2020		}
2021	})
2022
2023	// Force leave now
2024	req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s", a2.Config.NodeName), nil)
2025	obj, err := a1.srv.AgentForceLeave(nil, req)
2026	if err != nil {
2027		t.Fatalf("Err: %v", err)
2028	}
2029	if obj != nil {
2030		t.Fatalf("Err: %v", obj)
2031	}
2032	retry.Run(t, func(r *retry.R) {
2033		m := a1.LANMembers()
2034		if got, want := m[1].Status, serf.StatusLeft; got != want {
2035			r.Fatalf("got status %q want %q", got, want)
2036		}
2037	})
}
2040
2041func TestOpenMetricsMimeTypeHeaders(t *testing.T) {
2042	t.Parallel()
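	// acceptsOpenMetricsMimeType should accept headers asking for the Prometheus
	// text format (text/plain;version=0.4.0) or application/openmetrics-text,
	// and reject plain text/plain or malformed Accept values.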
2043	assert.False(t, acceptsOpenMetricsMimeType(""))
2044	assert.False(t, acceptsOpenMetricsMimeType(";;;"))
2045	assert.False(t, acceptsOpenMetricsMimeType(",,,"))
2046	assert.False(t, acceptsOpenMetricsMimeType("text/plain"))
2047	assert.True(t, acceptsOpenMetricsMimeType("text/plain;version=0.4.0,"))
2048	assert.True(t, acceptsOpenMetricsMimeType("text/plain;version=0.4.0;q=1,*/*;q=0.1"))
2049	assert.True(t, acceptsOpenMetricsMimeType("text/plain   ;   version=0.4.0"))
2050	assert.True(t, acceptsOpenMetricsMimeType("*/*, application/openmetrics-text ;"))
2051	assert.True(t, acceptsOpenMetricsMimeType("*/*, application/openmetrics-text ;q=1"))
2052	assert.True(t, acceptsOpenMetricsMimeType("application/openmetrics-text, text/plain;version=0.4.0"))
2053	assert.True(t, acceptsOpenMetricsMimeType("application/openmetrics-text; version=0.0.1,text/plain;version=0.0.4;q=0.5,*/*;q=0.1"))
2054}
2055
2056func TestAgent_ForceLeave_ACLDeny(t *testing.T) {
2057	if testing.Short() {
2058		t.Skip("too slow for testing.Short")
2059	}
2060
2061	t.Parallel()
2062	a := NewTestAgent(t, TestACLConfig())
2063	defer a.Shutdown()
2064	testrpc.WaitForLeader(t, a.RPC, "dc1")
2065
2066	uri := fmt.Sprintf("/v1/agent/force-leave/%s", a.Config.NodeName)
2067
2068	t.Run("no token", func(t *testing.T) {
2069		req, _ := http.NewRequest("PUT", uri, nil)
2070		if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) {
2071			t.Fatalf("err: %v", err)
2072		}
2073	})
2074
2075	t.Run("agent master token", func(t *testing.T) {
2076		req, _ := http.NewRequest("PUT", uri+"?token=towel", nil)
2077		if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) {
2078			t.Fatalf("err: %v", err)
2079		}
2080	})
2081
2082	t.Run("read-only token", func(t *testing.T) {
2083		ro := makeReadOnlyAgentACL(t, a.srv)
2084		req, _ := http.NewRequest("PUT", fmt.Sprintf(uri+"?token=%s", ro), nil)
2085		if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) {
2086			t.Fatalf("err: %v", err)
2087		}
2088	})
2089
2090	t.Run("operator write token", func(t *testing.T) {
		// Create an ACL token with operator write permissions.
		var rules = `operator = "write"`
2095		opToken := testCreateToken(t, a, rules)
2096
2097		req, _ := http.NewRequest("PUT", fmt.Sprintf(uri+"?token=%s", opToken), nil)
2098		if _, err := a.srv.AgentForceLeave(nil, req); err != nil {
2099			t.Fatalf("err: %v", err)
2100		}
2101	})
2102}
2103
2104func TestAgent_ForceLeavePrune(t *testing.T) {
2105	if testing.Short() {
2106		t.Skip("too slow for testing.Short")
2107	}
2108
2109	t.Parallel()
2110	a1 := StartTestAgent(t, TestAgent{Name: "Agent1"})
2111	defer a1.Shutdown()
2112	a2 := StartTestAgent(t, TestAgent{Name: "Agent2"})
2113	testrpc.WaitForLeader(t, a1.RPC, "dc1")
2114	testrpc.WaitForLeader(t, a2.RPC, "dc1")
2115
2116	// Join first
2117	addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN)
2118	_, err := a1.JoinLAN([]string{addr})
2119	if err != nil {
2120		t.Fatalf("err: %v", err)
2121	}
2122
2123	// this test probably needs work
2124	a2.Shutdown()
	// Wait for the agent to be marked as failed so we know it has fully shut down.
2126	retry.Run(t, func(r *retry.R) {
2127		m := a1.LANMembers()
2128		for _, member := range m {
2129			if member.Name == a2.Config.NodeName {
2130				if member.Status != serf.StatusFailed {
2131					r.Fatalf("got status %q want %q", member.Status, serf.StatusFailed)
2132				}
2133
2134			}
2135		}
2136	})
2137
2138	// Force leave now
2139	req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s?prune=true", a2.Config.NodeName), nil)
2140	obj, err := a1.srv.AgentForceLeave(nil, req)
2141	if err != nil {
2142		t.Fatalf("Err: %v", err)
2143	}
2144	if obj != nil {
2145		t.Fatalf("Err: %v", obj)
2146	}
2147	retry.Run(t, func(r *retry.R) {
2148		m := len(a1.LANMembers())
2149		if m != 1 {
2150			r.Fatalf("want one member, got %v", m)
2151		}
2152	})
}
2155
2156func TestAgent_RegisterCheck(t *testing.T) {
2157	if testing.Short() {
2158		t.Skip("too slow for testing.Short")
2159	}
2160
2161	t.Parallel()
2162	a := NewTestAgent(t, "")
2163	defer a.Shutdown()
2164	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2165
2166	args := &structs.CheckDefinition{
2167		Name: "test",
2168		TTL:  15 * time.Second,
2169	}
2170	req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args))
2171	obj, err := a.srv.AgentRegisterCheck(nil, req)
2172	if err != nil {
2173		t.Fatalf("err: %v", err)
2174	}
2175	if obj != nil {
2176		t.Fatalf("bad: %v", obj)
2177	}
2178
2179	// Ensure we have a check mapping
2180	checkID := structs.NewCheckID("test", nil)
2181	if existing := a.State.Check(checkID); existing == nil {
2182		t.Fatalf("missing test check")
2183	}
2184
2185	if _, ok := a.checkTTLs[checkID]; !ok {
2186		t.Fatalf("missing test check ttl")
2187	}
2188
2189	// Ensure the token was configured
2190	if token := a.State.CheckToken(checkID); token == "" {
2191		t.Fatalf("missing token")
2192	}
2193
2194	// By default, checks start in critical state.
2195	state := a.State.Check(checkID)
2196	if state.Status != api.HealthCritical {
2197		t.Fatalf("bad: %v", state)
2198	}
2199}
2200
2201// This verifies all the forms of the new args-style check that we need to
2202// support as a result of https://github.com/hashicorp/consul/issues/3587.
2203func TestAgent_RegisterCheck_Scripts(t *testing.T) {
2204	if testing.Short() {
2205		t.Skip("too slow for testing.Short")
2206	}
2207
2208	t.Parallel()
2209	a := NewTestAgent(t, `
2210		enable_script_checks = true
	`)
2212	defer a.Shutdown()
2213	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2214
2215	tests := []struct {
2216		name  string
2217		check map[string]interface{}
2218	}{
2219		{
2220			"== Consul 1.0.0",
2221			map[string]interface{}{
2222				"Name":       "test",
2223				"Interval":   "2s",
2224				"ScriptArgs": []string{"true"},
2225			},
2226		},
2227		{
2228			"> Consul 1.0.0 (fixup)",
2229			map[string]interface{}{
2230				"Name":        "test",
2231				"Interval":    "2s",
2232				"script_args": []string{"true"},
2233			},
2234		},
2235		{
2236			"> Consul 1.0.0",
2237			map[string]interface{}{
2238				"Name":     "test",
2239				"Interval": "2s",
2240				"Args":     []string{"true"},
2241			},
2242		},
2243	}
2244	for _, tt := range tests {
2245		t.Run(tt.name+" as node check", func(t *testing.T) {
2246			req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(tt.check))
2247			resp := httptest.NewRecorder()
2248			if _, err := a.srv.AgentRegisterCheck(resp, req); err != nil {
2249				t.Fatalf("err: %v", err)
2250			}
2251			if resp.Code != http.StatusOK {
2252				t.Fatalf("bad: %d", resp.Code)
2253			}
2254		})
2255
2256		t.Run(tt.name+" as top-level service check", func(t *testing.T) {
2257			args := map[string]interface{}{
2258				"Name":  "a",
2259				"Port":  1234,
2260				"Check": tt.check,
2261			}
2262
2263			req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
2264			resp := httptest.NewRecorder()
2265			if _, err := a.srv.AgentRegisterService(resp, req); err != nil {
2266				t.Fatalf("err: %v", err)
2267			}
2268			if resp.Code != http.StatusOK {
2269				t.Fatalf("bad: %d", resp.Code)
2270			}
2271		})
2272
2273		t.Run(tt.name+" as slice-based service check", func(t *testing.T) {
2274			args := map[string]interface{}{
2275				"Name":   "a",
2276				"Port":   1234,
2277				"Checks": []map[string]interface{}{tt.check},
2278			}
2279
2280			req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
2281			resp := httptest.NewRecorder()
2282			if _, err := a.srv.AgentRegisterService(resp, req); err != nil {
2283				t.Fatalf("err: %v", err)
2284			}
2285			if resp.Code != http.StatusOK {
2286				t.Fatalf("bad: %d", resp.Code)
2287			}
2288		})
2289	}
2290}
2291
2292func TestAgent_RegisterCheckScriptsExecDisable(t *testing.T) {
2293	if testing.Short() {
2294		t.Skip("too slow for testing.Short")
2295	}
2296
2297	t.Parallel()
2298	a := NewTestAgent(t, "")
2299	defer a.Shutdown()
2300	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2301
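	// Script checks are disabled by default, so registering a check with
	// ScriptArgs over the HTTP API must be rejected.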
2302	args := &structs.CheckDefinition{
2303		Name:       "test",
2304		ScriptArgs: []string{"true"},
2305		Interval:   time.Second,
2306	}
2307	req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args))
2308	res := httptest.NewRecorder()
2309	_, err := a.srv.AgentRegisterCheck(res, req)
2310	if err == nil {
2311		t.Fatalf("expected error but got nil")
2312	}
2313	if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
2314		t.Fatalf("expected script disabled error, got: %s", err)
2315	}
2316	checkID := structs.NewCheckID("test", nil)
2317	require.Nil(t, a.State.Check(checkID), "check registered with exec disabled")
2318}
2319
2320func TestAgent_RegisterCheckScriptsExecRemoteDisable(t *testing.T) {
2321	if testing.Short() {
2322		t.Skip("too slow for testing.Short")
2323	}
2324
2325	t.Parallel()
2326	a := NewTestAgent(t, `
2327		enable_local_script_checks = true
2328	`)
2329	defer a.Shutdown()
2330	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2331
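	// enable_local_script_checks only allows script checks defined in local
	// config files, so an HTTP registration with ScriptArgs must still be rejected.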
2332	args := &structs.CheckDefinition{
2333		Name:       "test",
2334		ScriptArgs: []string{"true"},
2335		Interval:   time.Second,
2336	}
2337	req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args))
2338	res := httptest.NewRecorder()
2339	_, err := a.srv.AgentRegisterCheck(res, req)
2340	if err == nil {
2341		t.Fatalf("expected error but got nil")
2342	}
2343	if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
2344		t.Fatalf("expected script disabled error, got: %s", err)
2345	}
2346	checkID := structs.NewCheckID("test", nil)
2347	require.Nil(t, a.State.Check(checkID), "check registered with exec disabled")
2348}
2349
2350func TestAgent_RegisterCheck_Passing(t *testing.T) {
2351	if testing.Short() {
2352		t.Skip("too slow for testing.Short")
2353	}
2354
2355	t.Parallel()
2356	a := NewTestAgent(t, "")
2357	defer a.Shutdown()
2358	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2359
2360	args := &structs.CheckDefinition{
2361		Name:   "test",
2362		TTL:    15 * time.Second,
2363		Status: api.HealthPassing,
2364	}
2365	req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args))
2366	obj, err := a.srv.AgentRegisterCheck(nil, req)
2367	if err != nil {
2368		t.Fatalf("err: %v", err)
2369	}
2370	if obj != nil {
2371		t.Fatalf("bad: %v", obj)
2372	}
2373
2374	// Ensure we have a check mapping
2375	checkID := structs.NewCheckID("test", nil)
2376	if existing := a.State.Check(checkID); existing == nil {
2377		t.Fatalf("missing test check")
2378	}
2379
2380	if _, ok := a.checkTTLs[checkID]; !ok {
2381		t.Fatalf("missing test check ttl")
2382	}
2383
2384	state := a.State.Check(checkID)
2385	if state.Status != api.HealthPassing {
2386		t.Fatalf("bad: %v", state)
2387	}
2388}
2389
2390func TestAgent_RegisterCheck_BadStatus(t *testing.T) {
2391	if testing.Short() {
2392		t.Skip("too slow for testing.Short")
2393	}
2394
2395	t.Parallel()
2396	a := NewTestAgent(t, "")
2397	defer a.Shutdown()
2398	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2399
2400	args := &structs.CheckDefinition{
2401		Name:   "test",
2402		TTL:    15 * time.Second,
2403		Status: "fluffy",
2404	}
2405	req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args))
2406	resp := httptest.NewRecorder()
2407	if _, err := a.srv.AgentRegisterCheck(resp, req); err != nil {
2408		t.Fatalf("err: %v", err)
2409	}
2410	if resp.Code != 400 {
2411		t.Fatalf("accepted bad status")
2412	}
2413}
2414
2415func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
2416	if testing.Short() {
2417		t.Skip("too slow for testing.Short")
2418	}
2419
2420	t.Parallel()
2421	a := NewTestAgent(t, TestACLConfigNew())
2422	defer a.Shutdown()
2423	testrpc.WaitForLeader(t, a.RPC, "dc1")
2424
2425	nodeCheck := &structs.CheckDefinition{
2426		Name: "test",
2427		TTL:  15 * time.Second,
2428	}
2429
2430	svc := &structs.ServiceDefinition{
2431		ID:   "foo:1234",
2432		Name: "foo",
2433		Port: 1234,
2434	}
2435
2436	svcCheck := &structs.CheckDefinition{
2437		Name:      "test2",
2438		ServiceID: "foo:1234",
2439		TTL:       15 * time.Second,
2440	}
2441
	// Register the service first so that checks can be registered against it.
2443	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(svc))
2444	resp := httptest.NewRecorder()
2445	_, err := a.srv.AgentRegisterService(resp, req)
2446	require.NoError(t, err)
2447
2448	// create a policy that has write on service foo
2449	policyReq := &structs.ACLPolicy{
2450		Name:  "write-foo",
2451		Rules: `service "foo" { policy = "write"}`,
2452	}
2453
2454	req, _ = http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq))
2455	resp = httptest.NewRecorder()
2456	_, err = a.srv.ACLPolicyCreate(resp, req)
2457	require.NoError(t, err)
2458
2459	// create a policy that has write on the node name of the agent
2460	policyReq = &structs.ACLPolicy{
2461		Name:  "write-node",
2462		Rules: fmt.Sprintf(`node "%s" { policy = "write" }`, a.config.NodeName),
2463	}
2464
2465	req, _ = http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq))
2466	resp = httptest.NewRecorder()
2467	_, err = a.srv.ACLPolicyCreate(resp, req)
2468	require.NoError(t, err)
2469
2470	// create a token using the write-foo policy
2471	tokenReq := &structs.ACLToken{
2472		Description: "write-foo",
2473		Policies: []structs.ACLTokenPolicyLink{
2474			{
2475				Name: "write-foo",
2476			},
2477		},
2478	}
2479
2480	req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq))
2481	resp = httptest.NewRecorder()
2482	tokInf, err := a.srv.ACLTokenCreate(resp, req)
2483	require.NoError(t, err)
2484	svcToken, ok := tokInf.(*structs.ACLToken)
2485	require.True(t, ok)
2486	require.NotNil(t, svcToken)
2487
2488	// create a token using the write-node policy
2489	tokenReq = &structs.ACLToken{
2490		Description: "write-node",
2491		Policies: []structs.ACLTokenPolicyLink{
2492			{
2493				Name: "write-node",
2494			},
2495		},
2496	}
2497
2498	req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq))
2499	resp = httptest.NewRecorder()
2500	tokInf, err = a.srv.ACLTokenCreate(resp, req)
2501	require.NoError(t, err)
2502	nodeToken, ok := tokInf.(*structs.ACLToken)
2503	require.True(t, ok)
2504	require.NotNil(t, nodeToken)
2505
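	// Node-level checks require write permission on the agent's node, while
	// service-level checks require write permission on the referenced service;
	// the wrong kind of token should be denied in each case below.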
2506	t.Run("no token - node check", func(t *testing.T) {
2507		retry.Run(t, func(r *retry.R) {
2508			req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(nodeCheck))
2509			_, err := a.srv.AgentRegisterCheck(nil, req)
2510			require.True(r, acl.IsErrPermissionDenied(err))
2511		})
2512	})
2513
2514	t.Run("svc token - node check", func(t *testing.T) {
2515		retry.Run(t, func(r *retry.R) {
2516			req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+svcToken.SecretID, jsonReader(nodeCheck))
2517			_, err := a.srv.AgentRegisterCheck(nil, req)
2518			require.True(r, acl.IsErrPermissionDenied(err))
2519		})
2520	})
2521
2522	t.Run("node token - node check", func(t *testing.T) {
2523		retry.Run(t, func(r *retry.R) {
2524			req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+nodeToken.SecretID, jsonReader(nodeCheck))
2525			_, err := a.srv.AgentRegisterCheck(nil, req)
2526			require.NoError(r, err)
2527		})
2528	})
2529
2530	t.Run("no token - svc check", func(t *testing.T) {
2531		retry.Run(t, func(r *retry.R) {
2532			req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(svcCheck))
2533			_, err := a.srv.AgentRegisterCheck(nil, req)
2534			require.True(r, acl.IsErrPermissionDenied(err))
2535		})
2536	})
2537
2538	t.Run("node token - svc check", func(t *testing.T) {
2539		retry.Run(t, func(r *retry.R) {
2540			req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+nodeToken.SecretID, jsonReader(svcCheck))
2541			_, err := a.srv.AgentRegisterCheck(nil, req)
2542			require.True(r, acl.IsErrPermissionDenied(err))
2543		})
2544	})
2545
2546	t.Run("svc token - svc check", func(t *testing.T) {
2547		retry.Run(t, func(r *retry.R) {
2548			req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+svcToken.SecretID, jsonReader(svcCheck))
2549			_, err := a.srv.AgentRegisterCheck(nil, req)
2550			require.NoError(r, err)
2551		})
2552	})
}
2555
2556func TestAgent_DeregisterCheck(t *testing.T) {
2557	if testing.Short() {
2558		t.Skip("too slow for testing.Short")
2559	}
2560
2561	t.Parallel()
2562	a := NewTestAgent(t, "")
2563	defer a.Shutdown()
2564	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2565
2566	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2567	if err := a.AddCheck(chk, nil, false, "", ConfigSourceLocal); err != nil {
2568		t.Fatalf("err: %v", err)
2569	}
2570
2571	req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil)
2572	obj, err := a.srv.AgentDeregisterCheck(nil, req)
2573	if err != nil {
2574		t.Fatalf("err: %v", err)
2575	}
2576	if obj != nil {
2577		t.Fatalf("bad: %v", obj)
2578	}
2579
	// Ensure the check is removed
2581	requireCheckMissing(t, a, "test")
2582}
2583
2584func TestAgent_DeregisterCheckACLDeny(t *testing.T) {
2585	if testing.Short() {
2586		t.Skip("too slow for testing.Short")
2587	}
2588
2589	t.Parallel()
2590	a := NewTestAgent(t, TestACLConfig())
2591	defer a.Shutdown()
2592	testrpc.WaitForTestAgent(t, a.RPC, "dc1", testrpc.WithToken("root"))
2593
2594	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2595	if err := a.AddCheck(chk, nil, false, "", ConfigSourceLocal); err != nil {
2596		t.Fatalf("err: %v", err)
2597	}
2598
2599	t.Run("no token", func(t *testing.T) {
2600		req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil)
2601		if _, err := a.srv.AgentDeregisterCheck(nil, req); !acl.IsErrPermissionDenied(err) {
2602			t.Fatalf("err: %v", err)
2603		}
2604	})
2605
2606	t.Run("root token", func(t *testing.T) {
2607		req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test?token=root", nil)
2608		if _, err := a.srv.AgentDeregisterCheck(nil, req); err != nil {
2609			t.Fatalf("err: %v", err)
2610		}
2611	})
2612}
2613
2614func TestAgent_PassCheck(t *testing.T) {
2615	if testing.Short() {
2616		t.Skip("too slow for testing.Short")
2617	}
2618
2619	t.Parallel()
2620	a := NewTestAgent(t, "")
2621	defer a.Shutdown()
2622	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2623
2624	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2625	chkType := &structs.CheckType{TTL: 15 * time.Second}
2626	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
2627		t.Fatalf("err: %v", err)
2628	}
2629
2630	req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil)
2631	obj, err := a.srv.AgentCheckPass(nil, req)
2632	if err != nil {
2633		t.Fatalf("err: %v", err)
2634	}
2635	if obj != nil {
2636		t.Fatalf("bad: %v", obj)
2637	}
2638
	// Ensure the check status was updated
2640	state := a.State.Check(structs.NewCheckID("test", nil))
2641	if state.Status != api.HealthPassing {
2642		t.Fatalf("bad: %v", state)
2643	}
2644}
2645
2646func TestAgent_PassCheck_ACLDeny(t *testing.T) {
2647	if testing.Short() {
2648		t.Skip("too slow for testing.Short")
2649	}
2650
2651	t.Parallel()
2652	a := NewTestAgent(t, TestACLConfig())
2653	defer a.Shutdown()
2654	testrpc.WaitForLeader(t, a.RPC, "dc1")
2655
2656	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2657	chkType := &structs.CheckType{TTL: 15 * time.Second}
2658	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
2659		t.Fatalf("err: %v", err)
2660	}
2661
2662	t.Run("no token", func(t *testing.T) {
2663		req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil)
2664		if _, err := a.srv.AgentCheckPass(nil, req); !acl.IsErrPermissionDenied(err) {
2665			t.Fatalf("err: %v", err)
2666		}
2667	})
2668
2669	t.Run("root token", func(t *testing.T) {
2670		req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test?token=root", nil)
2671		if _, err := a.srv.AgentCheckPass(nil, req); err != nil {
2672			t.Fatalf("err: %v", err)
2673		}
2674	})
2675}
2676
2677func TestAgent_WarnCheck(t *testing.T) {
2678	if testing.Short() {
2679		t.Skip("too slow for testing.Short")
2680	}
2681
2682	t.Parallel()
2683	a := NewTestAgent(t, "")
2684	defer a.Shutdown()
2685	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2686
2687	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2688	chkType := &structs.CheckType{TTL: 15 * time.Second}
2689	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
2690		t.Fatalf("err: %v", err)
2691	}
2692
2693	req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil)
2694	obj, err := a.srv.AgentCheckWarn(nil, req)
2695	if err != nil {
2696		t.Fatalf("err: %v", err)
2697	}
2698	if obj != nil {
2699		t.Fatalf("bad: %v", obj)
2700	}
2701
	// Ensure the check status was updated
2703	state := a.State.Check(structs.NewCheckID("test", nil))
2704	if state.Status != api.HealthWarning {
2705		t.Fatalf("bad: %v", state)
2706	}
2707}
2708
2709func TestAgent_WarnCheck_ACLDeny(t *testing.T) {
2710	if testing.Short() {
2711		t.Skip("too slow for testing.Short")
2712	}
2713
2714	t.Parallel()
2715	a := NewTestAgent(t, TestACLConfig())
2716	defer a.Shutdown()
2717	testrpc.WaitForLeader(t, a.RPC, "dc1")
2718
2719	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2720	chkType := &structs.CheckType{TTL: 15 * time.Second}
2721	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
2722		t.Fatalf("err: %v", err)
2723	}
2724
2725	t.Run("no token", func(t *testing.T) {
2726		req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil)
2727		if _, err := a.srv.AgentCheckWarn(nil, req); !acl.IsErrPermissionDenied(err) {
2728			t.Fatalf("err: %v", err)
2729		}
2730	})
2731
2732	t.Run("root token", func(t *testing.T) {
2733		req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test?token=root", nil)
2734		if _, err := a.srv.AgentCheckWarn(nil, req); err != nil {
2735			t.Fatalf("err: %v", err)
2736		}
2737	})
2738}
2739
2740func TestAgent_FailCheck(t *testing.T) {
2741	if testing.Short() {
2742		t.Skip("too slow for testing.Short")
2743	}
2744
2745	t.Parallel()
2746	a := NewTestAgent(t, "")
2747	defer a.Shutdown()
2748	testrpc.WaitForLeader(t, a.RPC, "dc1")
2749
2750	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2751	chkType := &structs.CheckType{TTL: 15 * time.Second}
2752	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
2753		t.Fatalf("err: %v", err)
2754	}
2755
2756	req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil)
2757	obj, err := a.srv.AgentCheckFail(nil, req)
2758	if err != nil {
2759		t.Fatalf("err: %v", err)
2760	}
2761	if obj != nil {
2762		t.Fatalf("bad: %v", obj)
2763	}
2764
	// Ensure the check status was updated
2766	state := a.State.Check(structs.NewCheckID("test", nil))
2767	if state.Status != api.HealthCritical {
2768		t.Fatalf("bad: %v", state)
2769	}
2770}
2771
2772func TestAgent_FailCheck_ACLDeny(t *testing.T) {
2773	if testing.Short() {
2774		t.Skip("too slow for testing.Short")
2775	}
2776
2777	t.Parallel()
2778	a := NewTestAgent(t, TestACLConfig())
2779	defer a.Shutdown()
2780	testrpc.WaitForLeader(t, a.RPC, "dc1")
2781
2782	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2783	chkType := &structs.CheckType{TTL: 15 * time.Second}
2784	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
2785		t.Fatalf("err: %v", err)
2786	}
2787
2788	t.Run("no token", func(t *testing.T) {
2789		req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil)
2790		if _, err := a.srv.AgentCheckFail(nil, req); !acl.IsErrPermissionDenied(err) {
2791			t.Fatalf("err: %v", err)
2792		}
2793	})
2794
2795	t.Run("root token", func(t *testing.T) {
2796		req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test?token=root", nil)
2797		if _, err := a.srv.AgentCheckFail(nil, req); err != nil {
2798			t.Fatalf("err: %v", err)
2799		}
2800	})
2801}
2802
2803func TestAgent_UpdateCheck(t *testing.T) {
2804	if testing.Short() {
2805		t.Skip("too slow for testing.Short")
2806	}
2807
2808	t.Parallel()
2809	maxChecksSize := 256
2810	a := NewTestAgent(t, fmt.Sprintf("check_output_max_size=%d", maxChecksSize))
2811	defer a.Shutdown()
2812	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2813
2814	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2815	chkType := &structs.CheckType{TTL: 15 * time.Second}
2816	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
2817		t.Fatalf("err: %v", err)
2818	}
2819
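	// The update endpoint takes a status plus output: exercise each valid
	// status, then the output size limit, then an invalid status.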
2820	cases := []checkUpdate{
2821		{api.HealthPassing, "hello-passing"},
2822		{api.HealthCritical, "hello-critical"},
2823		{api.HealthWarning, "hello-warning"},
2824	}
2825
2826	for _, c := range cases {
2827		t.Run(c.Status, func(t *testing.T) {
2828			req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(c))
2829			resp := httptest.NewRecorder()
2830			obj, err := a.srv.AgentCheckUpdate(resp, req)
2831			if err != nil {
2832				t.Fatalf("err: %v", err)
2833			}
2834			if obj != nil {
2835				t.Fatalf("bad: %v", obj)
2836			}
2837			if resp.Code != 200 {
2838				t.Fatalf("expected 200, got %d", resp.Code)
2839			}
2840
2841			state := a.State.Check(structs.NewCheckID("test", nil))
2842			if state.Status != c.Status || state.Output != c.Output {
2843				t.Fatalf("bad: %v", state)
2844			}
2845		})
2846	}
2847
2848	t.Run("log output limit", func(t *testing.T) {
2849		args := checkUpdate{
2850			Status: api.HealthPassing,
2851			Output: strings.Repeat("-= bad -=", 5*maxChecksSize),
2852		}
2853		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args))
2854		resp := httptest.NewRecorder()
2855		obj, err := a.srv.AgentCheckUpdate(resp, req)
2856		if err != nil {
2857			t.Fatalf("err: %v", err)
2858		}
2859		if obj != nil {
2860			t.Fatalf("bad: %v", obj)
2861		}
2862		if resp.Code != 200 {
2863			t.Fatalf("expected 200, got %d", resp.Code)
2864		}
2865
2866		// Since we append some notes about truncating, we just do a
2867		// rough check that the output buffer was cut down so this test
2868		// isn't super brittle.
2869		state := a.State.Check(structs.NewCheckID("test", nil))
2870		if state.Status != api.HealthPassing || len(state.Output) > 2*maxChecksSize {
2871			t.Fatalf("bad: %v, (len:=%d)", state, len(state.Output))
2872		}
2873	})
2874
2875	t.Run("bogus status", func(t *testing.T) {
2876		args := checkUpdate{Status: "itscomplicated"}
2877		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args))
2878		resp := httptest.NewRecorder()
2879		obj, err := a.srv.AgentCheckUpdate(resp, req)
2880		if err != nil {
2881			t.Fatalf("err: %v", err)
2882		}
2883		if obj != nil {
2884			t.Fatalf("bad: %v", obj)
2885		}
2886		if resp.Code != 400 {
2887			t.Fatalf("expected 400, got %d", resp.Code)
2888		}
2889	})
2890}
2891
2892func TestAgent_UpdateCheck_ACLDeny(t *testing.T) {
2893	if testing.Short() {
2894		t.Skip("too slow for testing.Short")
2895	}
2896
2897	t.Parallel()
2898	a := NewTestAgent(t, TestACLConfig())
2899	defer a.Shutdown()
2900	testrpc.WaitForLeader(t, a.RPC, "dc1")
2901
2902	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
2903	chkType := &structs.CheckType{TTL: 15 * time.Second}
2904	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
2905		t.Fatalf("err: %v", err)
2906	}
2907
2908	t.Run("no token", func(t *testing.T) {
2909		args := checkUpdate{api.HealthPassing, "hello-passing"}
2910		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args))
2911		if _, err := a.srv.AgentCheckUpdate(nil, req); !acl.IsErrPermissionDenied(err) {
2912			t.Fatalf("err: %v", err)
2913		}
2914	})
2915
2916	t.Run("root token", func(t *testing.T) {
2917		args := checkUpdate{api.HealthPassing, "hello-passing"}
2918		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test?token=root", jsonReader(args))
2919		if _, err := a.srv.AgentCheckUpdate(nil, req); err != nil {
2920			t.Fatalf("err: %v", err)
2921		}
2922	})
2923}
2924
2925func TestAgent_RegisterService(t *testing.T) {
2926	if testing.Short() {
2927		t.Skip("too slow for testing.Short")
2928	}
2929
2930	t.Run("normal", func(t *testing.T) {
2931		t.Parallel()
2932		testAgent_RegisterService(t, "enable_central_service_config = false")
2933	})
2934	t.Run("service manager", func(t *testing.T) {
2935		t.Parallel()
2936		testAgent_RegisterService(t, "enable_central_service_config = true")
2937	})
2938}
2939
2940func testAgent_RegisterService(t *testing.T, extraHCL string) {
2941	t.Helper()
2942
2943	a := NewTestAgent(t, extraHCL)
2944	defer a.Shutdown()
2945	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
2946
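	// Register a service with one inline check and two more in Checks; all
	// three should be created as TTL checks and carry the request token.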
2947	args := &structs.ServiceDefinition{
2948		Name: "test",
2949		Meta: map[string]string{"hello": "world"},
2950		Tags: []string{"master"},
2951		Port: 8000,
2952		Check: structs.CheckType{
2953			TTL: 15 * time.Second,
2954		},
2955		Checks: []*structs.CheckType{
2956			{
2957				TTL: 20 * time.Second,
2958			},
2959			{
2960				TTL: 30 * time.Second,
2961			},
2962		},
2963		Weights: &structs.Weights{
2964			Passing: 100,
2965			Warning: 3,
2966		},
2967	}
2968	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
2969
2970	obj, err := a.srv.AgentRegisterService(nil, req)
2971	if err != nil {
2972		t.Fatalf("err: %v", err)
2973	}
2974	if obj != nil {
2975		t.Fatalf("bad: %v", obj)
2976	}
2977
2978	// Ensure the service
2979	sid := structs.NewServiceID("test", nil)
2980	svc := a.State.Service(sid)
2981	if svc == nil {
2982		t.Fatalf("missing test service")
2983	}
2984	if val := svc.Meta["hello"]; val != "world" {
2985		t.Fatalf("Missing meta: %v", svc.Meta)
2986	}
2987	if val := svc.Weights.Passing; val != 100 {
2988		t.Fatalf("Expected 100 for Weights.Passing, got: %v", val)
2989	}
2990	if val := svc.Weights.Warning; val != 3 {
2991		t.Fatalf("Expected 3 for Weights.Warning, got: %v", val)
2992	}
2993
2994	// Ensure we have a check mapping
2995	checks := a.State.Checks(structs.WildcardEnterpriseMeta())
2996	if len(checks) != 3 {
2997		t.Fatalf("bad: %v", checks)
2998	}
2999	for _, c := range checks {
3000		if c.Type != "ttl" {
3001			t.Fatalf("expected ttl check type, got %s", c.Type)
3002		}
3003	}
3004
3005	if len(a.checkTTLs) != 3 {
3006		t.Fatalf("missing test check ttls: %v", a.checkTTLs)
3007	}
3008
3009	// Ensure the token was configured
3010	if token := a.State.ServiceToken(sid); token == "" {
3011		t.Fatalf("missing token")
3012	}
3013}
3014
3015func TestAgent_RegisterService_ReRegister(t *testing.T) {
3016	if testing.Short() {
3017		t.Skip("too slow for testing.Short")
3018	}
3019
3020	t.Run("normal", func(t *testing.T) {
3021		t.Parallel()
3022		testAgent_RegisterService_ReRegister(t, "enable_central_service_config = false")
3023	})
3024	t.Run("service manager", func(t *testing.T) {
3025		t.Parallel()
3026		testAgent_RegisterService_ReRegister(t, "enable_central_service_config = true")
3027	})
3028}
3029
3030func testAgent_RegisterService_ReRegister(t *testing.T, extraHCL string) {
3031	t.Helper()
3032
3033	a := NewTestAgent(t, extraHCL)
3034	defer a.Shutdown()
3035	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
3036
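	// Without replace-existing-checks, re-registering the service merges checks:
	// check_1 and check_2 from the first registration plus check_3 from the second.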
3037	args := &structs.ServiceDefinition{
3038		Name: "test",
3039		Meta: map[string]string{"hello": "world"},
3040		Tags: []string{"master"},
3041		Port: 8000,
3042		Checks: []*structs.CheckType{
3043			{
3044				CheckID: types.CheckID("check_1"),
3045				TTL:     20 * time.Second,
3046			},
3047			{
3048				CheckID: types.CheckID("check_2"),
3049				TTL:     30 * time.Second,
3050			},
3051		},
3052		Weights: &structs.Weights{
3053			Passing: 100,
3054			Warning: 3,
3055		},
3056	}
3057	req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
3058	_, err := a.srv.AgentRegisterService(nil, req)
3059	require.NoError(t, err)
3060
3061	args = &structs.ServiceDefinition{
3062		Name: "test",
3063		Meta: map[string]string{"hello": "world"},
3064		Tags: []string{"master"},
3065		Port: 8000,
3066		Checks: []*structs.CheckType{
3067			{
3068				CheckID: types.CheckID("check_1"),
3069				TTL:     20 * time.Second,
3070			},
3071			{
3072				CheckID: types.CheckID("check_3"),
3073				TTL:     30 * time.Second,
3074			},
3075		},
3076		Weights: &structs.Weights{
3077			Passing: 100,
3078			Warning: 3,
3079		},
3080	}
3081	req, _ = http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
3082	_, err = a.srv.AgentRegisterService(nil, req)
3083	require.NoError(t, err)
3084
3085	checks := a.State.Checks(structs.DefaultEnterpriseMeta())
3086	require.Equal(t, 3, len(checks))
3087
3088	checkIDs := []string{}
3089	for id := range checks {
3090		checkIDs = append(checkIDs, string(id.ID))
3091	}
3092	require.ElementsMatch(t, []string{"check_1", "check_2", "check_3"}, checkIDs)
3093}
3094
3095func TestAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T) {
3096	if testing.Short() {
3097		t.Skip("too slow for testing.Short")
3098	}
3099
3100	t.Run("normal", func(t *testing.T) {
3101		t.Parallel()
3102		testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t, "enable_central_service_config = false")
3103	})
3104	t.Run("service manager", func(t *testing.T) {
3105		t.Parallel()
3106		testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t, "enable_central_service_config = true")
3107	})
3108}
3109
3110func testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T, extraHCL string) {
3111	t.Helper()
3112	a := NewTestAgent(t, extraHCL)
3113	defer a.Shutdown()
3114	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
3115
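	// With replace-existing-checks, the second registration's checks fully
	// replace the first's: only the auto-named TTL check and check_3 remain.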
3116	args := &structs.ServiceDefinition{
3117		Name: "test",
3118		Meta: map[string]string{"hello": "world"},
3119		Tags: []string{"master"},
3120		Port: 8000,
3121		Checks: []*structs.CheckType{
3122			{
				// Explicitly not setting the check ID so it is auto-generated;
				// we want to cover the auto-generated name/ID case.
3125				TTL: 20 * time.Second,
3126			},
3127			{
3128				CheckID: types.CheckID("check_2"),
3129				TTL:     30 * time.Second,
3130			},
3131		},
3132		Weights: &structs.Weights{
3133			Passing: 100,
3134			Warning: 3,
3135		},
3136	}
3137	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?replace-existing-checks", jsonReader(args))
3138	_, err := a.srv.AgentRegisterService(nil, req)
3139	require.NoError(t, err)
3140
3141	args = &structs.ServiceDefinition{
3142		Name: "test",
3143		Meta: map[string]string{"hello": "world"},
3144		Tags: []string{"master"},
3145		Port: 8000,
3146		Checks: []*structs.CheckType{
3147			{
3148				TTL: 20 * time.Second,
3149			},
3150			{
3151				CheckID: types.CheckID("check_3"),
3152				TTL:     30 * time.Second,
3153			},
3154		},
3155		Weights: &structs.Weights{
3156			Passing: 100,
3157			Warning: 3,
3158		},
3159	}
3160	req, _ = http.NewRequest("PUT", "/v1/agent/service/register?replace-existing-checks", jsonReader(args))
3161	_, err = a.srv.AgentRegisterService(nil, req)
3162	require.NoError(t, err)
3163
3164	checks := a.State.Checks(structs.DefaultEnterpriseMeta())
3165	require.Len(t, checks, 2)
3166
3167	checkIDs := []string{}
3168	for id := range checks {
3169		checkIDs = append(checkIDs, string(id.ID))
3170	}
3171	require.ElementsMatch(t, []string{"service:test:1", "check_3"}, checkIDs)
3172}
3173
3174func TestAgent_RegisterService_TranslateKeys(t *testing.T) {
3175	if testing.Short() {
3176		t.Skip("too slow for testing.Short")
3177	}
3178
3179	t.Run("normal", func(t *testing.T) {
3180		t.Parallel()
3181		testAgent_RegisterService_TranslateKeys(t, "enable_central_service_config = false")
3182	})
3183	t.Run("service manager", func(t *testing.T) {
3184		t.Parallel()
3185		testAgent_RegisterService_TranslateKeys(t, "enable_central_service_config = true")
3186	})
3187}
3188
3189func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
3190	t.Helper()
3191
3192	tests := []struct {
3193		ip                    string
3194		expectedTCPCheckStart string
3195	}{
		{"127.0.0.1", "127.0.0.1:"}, // IPv4 loopback address
		{"::1", "[::1]:"},           // IPv6 loopback address
3198	}
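	// Each case registers the service using snake_case field names and verifies
	// the endpoint translates them to the canonical fields in local state.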
3199	for _, tt := range tests {
3200		t.Run(tt.ip, func(t *testing.T) {
3201			a := NewTestAgent(t, `
3202	connect {}
3203`+extraHCL)
3204			defer a.Shutdown()
3205			testrpc.WaitForTestAgent(t, a.RPC, "dc1")
3206
3207			json := `
3208	{
3209		"name":"test",
3210		"port":8000,
3211		"enable_tag_override": true,
3212		"tagged_addresses": {
3213			"lan": {
3214				"address": "1.2.3.4",
3215				"port": 5353
3216			},
3217			"wan": {
3218				"address": "2.3.4.5",
3219				"port": 53
3220			}
3221		},
3222		"meta": {
3223			"some": "meta",
3224			"enable_tag_override": "meta is 'opaque' so should not get translated"
3225		},
3226		"kind": "connect-proxy",` +
3227				// Note the uppercase P is important here - it ensures translation works
				// correctly in a case-insensitive way. Without it this test can pass even
3229				// when translation is broken for other valid inputs.
3230				`"Proxy": {
3231			"destination_service_name": "web",
3232			"destination_service_id": "web",
3233			"local_service_port": 1234,
3234			"local_service_address": "` + tt.ip + `",
3235			"config": {
3236				"destination_type": "proxy.config is 'opaque' so should not get translated"
3237			},
3238			"upstreams": [
3239				{
3240					"destination_type": "service",
3241					"destination_namespace": "default",
3242					"destination_name": "db",
					"local_bind_address": "` + tt.ip + `",
					"local_bind_port": 1234,
3245					"config": {
3246						"destination_type": "proxy.upstreams.config is 'opaque' so should not get translated"
3247					}
3248				}
3249			]
3250		},
3251		"connect": {
3252			"sidecar_service": {
3253				"name":"test-proxy",
3254				"port":8001,
3255				"enable_tag_override": true,
3256				"meta": {
3257					"some": "meta",
3258					"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated"
3259				},
3260				"kind": "connect-proxy",
3261				"proxy": {
3262					"destination_service_name": "test",
3263					"destination_service_id": "test",
3264					"local_service_port": 4321,
3265					"local_service_address": "` + tt.ip + `",
3266					"upstreams": [
3267						{
3268							"destination_type": "service",
3269							"destination_namespace": "default",
3270							"destination_name": "db",
3271							"local_bind_address": "` + tt.ip + `",
3272							"local_bind_port": 1234,
3273							"config": {
3274								"destination_type": "sidecar_service.proxy.upstreams.config is 'opaque' so should not get translated"
3275							}
3276						}
3277					]
3278				}
3279			}
3280		},
3281		"weights":{
3282			"passing": 16
3283		}
3284	}`
3285			req, _ := http.NewRequest("PUT", "/v1/agent/service/register", strings.NewReader(json))
3286
3287			rr := httptest.NewRecorder()
3288			obj, err := a.srv.AgentRegisterService(rr, req)
3289			require.NoError(t, err)
3290			require.Nil(t, obj)
3291			require.Equal(t, 200, rr.Code, "body: %s", rr.Body)
3292
3293			svc := &structs.NodeService{
3294				ID:      "test",
3295				Service: "test",
3296				TaggedAddresses: map[string]structs.ServiceAddress{
3297					"lan": {
3298						Address: "1.2.3.4",
3299						Port:    5353,
3300					},
3301					"wan": {
3302						Address: "2.3.4.5",
3303						Port:    53,
3304					},
3305				},
3306				Meta: map[string]string{
3307					"some":                "meta",
3308					"enable_tag_override": "meta is 'opaque' so should not get translated",
3309				},
3310				Port:              8000,
3311				EnableTagOverride: true,
3312				Weights:           &structs.Weights{Passing: 16, Warning: 0},
3313				Kind:              structs.ServiceKindConnectProxy,
3314				Proxy: structs.ConnectProxyConfig{
3315					DestinationServiceName: "web",
3316					DestinationServiceID:   "web",
3317					LocalServiceAddress:    tt.ip,
3318					LocalServicePort:       1234,
3319					Config: map[string]interface{}{
3320						"destination_type": "proxy.config is 'opaque' so should not get translated",
3321					},
3322					Upstreams: structs.Upstreams{
3323						{
3324							DestinationType:      structs.UpstreamDestTypeService,
3325							DestinationName:      "db",
3326							DestinationNamespace: "default",
3327							LocalBindAddress:     tt.ip,
3328							LocalBindPort:        1234,
3329							Config: map[string]interface{}{
3330								"destination_type": "proxy.upstreams.config is 'opaque' so should not get translated",
3331							},
3332						},
3333					},
3334				},
3335				Connect: structs.ServiceConnect{
3336					// The sidecar service is nilled since it is only config sugar and
3337					// shouldn't be represented in state. We assert that the translations
3338					// there worked by inspecting the registered sidecar below.
3339					SidecarService: nil,
3340				},
3341				EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
3342			}
3343
3344			got := a.State.Service(structs.NewServiceID("test", nil))
3345			require.Equal(t, svc, got)
3346
3347			sidecarSvc := &structs.NodeService{
3348				Kind:    structs.ServiceKindConnectProxy,
3349				ID:      "test-sidecar-proxy",
3350				Service: "test-proxy",
3351				Meta: map[string]string{
3352					"some":                "meta",
3353					"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated",
3354				},
3355				TaggedAddresses:            map[string]structs.ServiceAddress{},
3356				Port:                       8001,
3357				EnableTagOverride:          true,
3358				Weights:                    &structs.Weights{Passing: 1, Warning: 1},
3359				LocallyRegisteredAsSidecar: true,
3360				Proxy: structs.ConnectProxyConfig{
3361					DestinationServiceName: "test",
3362					DestinationServiceID:   "test",
3363					LocalServiceAddress:    tt.ip,
3364					LocalServicePort:       4321,
3365					Upstreams: structs.Upstreams{
3366						{
3367							DestinationType:      structs.UpstreamDestTypeService,
3368							DestinationName:      "db",
3369							DestinationNamespace: "default",
3370							LocalBindAddress:     tt.ip,
3371							LocalBindPort:        1234,
3372							Config: map[string]interface{}{
3373								"destination_type": "sidecar_service.proxy.upstreams.config is 'opaque' so should not get translated",
3374							},
3375						},
3376					},
3377				},
3378				EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
3379			}
3380			gotSidecar := a.State.Service(structs.NewServiceID("test-sidecar-proxy", nil))
			foundExpectedTCPCheck := false
			for _, v := range a.checkTCPs {
				if strings.HasPrefix(v.TCP, tt.expectedTCPCheckStart) {
					foundExpectedTCPCheck = true
					break
				}
				fmt.Println("TCP Check: ", v)
			}
			if !foundExpectedTCPCheck {
				t.Fatalf("did not find the expected TCP healthcheck %q in %#v", tt.expectedTCPCheckStart, a.checkTCPs)
			}
3392			require.Equal(t, sidecarSvc, gotSidecar)
3393		})
3394	}
3395}
3396
3397func TestAgent_RegisterService_ACLDeny(t *testing.T) {
3398	if testing.Short() {
3399		t.Skip("too slow for testing.Short")
3400	}
3401
3402	t.Run("normal", func(t *testing.T) {
3403		t.Parallel()
3404		testAgent_RegisterService_ACLDeny(t, "enable_central_service_config = false")
3405	})
3406	t.Run("service manager", func(t *testing.T) {
3407		t.Parallel()
3408		testAgent_RegisterService_ACLDeny(t, "enable_central_service_config = true")
3409	})
3410}
3411
3412func testAgent_RegisterService_ACLDeny(t *testing.T, extraHCL string) {
3413	t.Helper()
3414
3415	a := NewTestAgent(t, TestACLConfig()+" "+extraHCL)
3416	defer a.Shutdown()
3417	testrpc.WaitForLeader(t, a.RPC, "dc1")
3418
3419	args := &structs.ServiceDefinition{
3420		Name: "test",
3421		Tags: []string{"master"},
3422		Port: 8000,
3423		Check: structs.CheckType{
3424			TTL: 15 * time.Second,
3425		},
3426		Checks: []*structs.CheckType{
3427			{
3428				TTL: 20 * time.Second,
3429			},
3430			{
3431				TTL: 30 * time.Second,
3432			},
3433		},
3434	}
3435
3436	t.Run("no token", func(t *testing.T) {
3437		req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
3438		if _, err := a.srv.AgentRegisterService(nil, req); !acl.IsErrPermissionDenied(err) {
3439			t.Fatalf("err: %v", err)
3440		}
3441	})
3442
3443	t.Run("root token", func(t *testing.T) {
3444		req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(args))
3445		if _, err := a.srv.AgentRegisterService(nil, req); err != nil {
3446			t.Fatalf("err: %v", err)
3447		}
3448	})
3449}
3450
3451func TestAgent_RegisterService_InvalidAddress(t *testing.T) {
3452	if testing.Short() {
3453		t.Skip("too slow for testing.Short")
3454	}
3455
3456	t.Run("normal", func(t *testing.T) {
3457		t.Parallel()
3458		testAgent_RegisterService_InvalidAddress(t, "enable_central_service_config = false")
3459	})
3460	t.Run("service manager", func(t *testing.T) {
3461		t.Parallel()
3462		testAgent_RegisterService_InvalidAddress(t, "enable_central_service_config = true")
3463	})
3464}
3465
3466func testAgent_RegisterService_InvalidAddress(t *testing.T, extraHCL string) {
3467	t.Helper()
3468
3469	a := NewTestAgent(t, extraHCL)
3470	defer a.Shutdown()
3471	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
3472
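	// Wildcard/unspecified addresses are not valid service addresses and should
	// be rejected with a 400.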
3473	for _, addr := range []string{"0.0.0.0", "::", "[::]"} {
3474		t.Run("addr "+addr, func(t *testing.T) {
3475			args := &structs.ServiceDefinition{
3476				Name:    "test",
3477				Address: addr,
3478				Port:    8000,
3479			}
3480			req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
3481			resp := httptest.NewRecorder()
3482			_, err := a.srv.AgentRegisterService(resp, req)
3483			if err != nil {
3484				t.Fatalf("got error %v want nil", err)
3485			}
3486			if got, want := resp.Code, 400; got != want {
3487				t.Fatalf("got code %d want %d", got, want)
3488			}
3489			if got, want := resp.Body.String(), "Invalid service address"; got != want {
3490				t.Fatalf("got body %q want %q", got, want)
3491			}
3492		})
3493	}
3494}
3495
// This tests local agent service registration of an unmanaged connect proxy.
3497// This verifies that it is put in the local state store properly for syncing
3498// later.
3499func TestAgent_RegisterService_UnmanagedConnectProxy(t *testing.T) {
3500	if testing.Short() {
3501		t.Skip("too slow for testing.Short")
3502	}
3503
3504	t.Run("normal", func(t *testing.T) {
3505		t.Parallel()
3506		testAgent_RegisterService_UnmanagedConnectProxy(t, "enable_central_service_config = false")
3507	})
3508	t.Run("service manager", func(t *testing.T) {
3509		t.Parallel()
3510		testAgent_RegisterService_UnmanagedConnectProxy(t, "enable_central_service_config = true")
3511	})
3512}
3513
3514func testAgent_RegisterService_UnmanagedConnectProxy(t *testing.T, extraHCL string) {
3515	t.Helper()
3516
3517	a := NewTestAgent(t, extraHCL)
3518	defer a.Shutdown()
3519	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
3520
3521	// Register a proxy. Note that the destination doesn't exist here on this
3522	// agent or in the catalog at all. This is intended and part of the design.
3523	args := &api.AgentServiceRegistration{
3524		Kind: api.ServiceKindConnectProxy,
3525		Name: "connect-proxy",
3526		Port: 8000,
3527		Proxy: &api.AgentServiceConnectProxyConfig{
3528			DestinationServiceName: "web",
3529			Upstreams: []api.Upstream{
3530				{
3531					// No type to force default
3532					DestinationName: "db",
3533					LocalBindPort:   1234,
3534				},
3535				{
3536					DestinationType: "prepared_query",
3537					DestinationName: "geo-cache",
3538					LocalBindPort:   1235,
3539				},
3540			},
3541			Mode: api.ProxyModeTransparent,
3542			TransparentProxy: &api.TransparentProxyConfig{
3543				OutboundListenerPort: 808,
3544			},
3545		},
3546	}
3547
3548	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
3549	resp := httptest.NewRecorder()
3550	obj, err := a.srv.AgentRegisterService(resp, req)
3551	require.NoError(t, err)
3552	require.Nil(t, obj)
3553
3554	// Ensure the service
3555	sid := structs.NewServiceID("connect-proxy", nil)
3556	svc := a.State.Service(sid)
3557	require.NotNil(t, svc, "has service")
3558	require.Equal(t, structs.ServiceKindConnectProxy, svc.Kind)
3559
3560	// Registration sets default types and namespaces
3561	for i := range args.Proxy.Upstreams {
3562		if args.Proxy.Upstreams[i].DestinationType == "" {
3563			args.Proxy.Upstreams[i].DestinationType = api.UpstreamDestTypeService
3564		}
3565		if args.Proxy.Upstreams[i].DestinationNamespace == "" {
3566			args.Proxy.Upstreams[i].DestinationNamespace =
3567				structs.DefaultEnterpriseMeta().NamespaceOrEmpty()
3568		}
3569	}
3570
3571	require.Equal(t, args.Proxy, svc.Proxy.ToAPI())
3572
3573	// Ensure the token was configured
3574	require.Equal(t, "abc123", a.State.ServiceToken(structs.NewServiceID("connect-proxy", nil)))
3575}
3576
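// testDefaultSidecar returns the NodeService we expect the agent to create for
// a default sidecar registration of svc, proxying to the given local service
// port, with any provided mutator funcs applied.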
3577func testDefaultSidecar(svc string, port int, fns ...func(*structs.NodeService)) *structs.NodeService {
3578	ns := &structs.NodeService{
3579		ID:              svc + "-sidecar-proxy",
3580		Kind:            structs.ServiceKindConnectProxy,
3581		Service:         svc + "-sidecar-proxy",
3582		Port:            2222,
3583		TaggedAddresses: map[string]structs.ServiceAddress{},
3584		Weights: &structs.Weights{
3585			Passing: 1,
3586			Warning: 1,
3587		},
3588		// Note that LocallyRegisteredAsSidecar should be true on the internal
3589		// NodeService, but that we never want to see it in the HTTP response as
3590		// it's internal only state. This is being compared directly to local state
3591		// so should be present here.
3592		LocallyRegisteredAsSidecar: true,
3593		Proxy: structs.ConnectProxyConfig{
3594			DestinationServiceName: svc,
3595			DestinationServiceID:   svc,
3596			LocalServiceAddress:    "127.0.0.1",
3597			LocalServicePort:       port,
3598		},
3599		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
3600	}
3601	for _, fn := range fns {
3602		fn(ns)
3603	}
3604	return ns
3605}
3606
3607// testCreateToken creates a Policy for the provided rules and a Token linked to that Policy.
3608func testCreateToken(t *testing.T, a *TestAgent, rules string) string {
3609	policyName, err := uuid.GenerateUUID() // we just need a unique name for the test and UUIDs are definitely unique
3610	require.NoError(t, err)
3611
3612	policyID := testCreatePolicy(t, a, policyName, rules)
3613
3614	args := map[string]interface{}{
3615		"Description": "User Token",
3616		"Policies": []map[string]interface{}{
3617			{
3618				"ID": policyID,
3619			},
3620		},
3621		"Local": false,
3622	}
3623	req, _ := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(args))
3624	resp := httptest.NewRecorder()
3625	obj, err := a.srv.ACLTokenCreate(resp, req)
3626	require.NoError(t, err)
3627	require.NotNil(t, obj)
3628	aclResp := obj.(*structs.ACLToken)
3629	return aclResp.SecretID
3630}
3631
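// testCreatePolicy creates an ACL policy with the given name and rules via the
// HTTP API and returns the policy ID.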
3632func testCreatePolicy(t *testing.T, a *TestAgent, name, rules string) string {
3633	args := map[string]interface{}{
3634		"Name":  name,
3635		"Rules": rules,
3636	}
3637	req, _ := http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(args))
3638	resp := httptest.NewRecorder()
3639	obj, err := a.srv.ACLPolicyCreate(resp, req)
3640	require.NoError(t, err)
3641	require.NotNil(t, obj)
3642	aclResp := obj.(*structs.ACLPolicy)
3643	return aclResp.ID
3644}
3645
// This tests local agent service registration with a sidecar service. Note we
// only test simple defaults for the sidecar here since the actual logic for
// handling sidecar defaults and port assignment is tested thoroughly in
// TestAgent_sidecarServiceFromNodeService. It also tests Deregister explicitly
// since the setup is identical.
3651func TestAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T) {
3652	if testing.Short() {
3653		t.Skip("too slow for testing.Short")
3654	}
3655
3656	t.Run("normal", func(t *testing.T) {
3657		t.Parallel()
3658		testAgent_RegisterServiceDeregisterService_Sidecar(t, "enable_central_service_config = false")
3659	})
3660	t.Run("service manager", func(t *testing.T) {
3661		t.Parallel()
3662		testAgent_RegisterServiceDeregisterService_Sidecar(t, "enable_central_service_config = true")
3663	})
3664}
3665
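// testAgent_RegisterServiceDeregisterService_Sidecar runs the sidecar
// registration/deregistration table test against an agent started with the
// given extra HCL, so the same cases cover both the normal and the
// service-manager (central service config) code paths.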
3666func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL string) {
3667	t.Helper()
3668
3669	tests := []struct {
3670		name                      string
3671		preRegister, preRegister2 *structs.NodeService
3672		// Use raw JSON payloads rather than encoding to avoid subtleties with some
3673		// internal representations and different ways they encode and decode. We
3674		// rely on the payload being Unmarshalable to structs.ServiceDefinition
3675		// directly.
3676		json                        string
3677		enableACL                   bool
3678		tokenRules                  string
3679		wantNS                      *structs.NodeService
3680		wantErr                     string
3681		wantSidecarIDLeftAfterDereg bool
3682		assertStateFn               func(t *testing.T, state *local.State)
3683	}{
3684		{
3685			name: "sanity check no sidecar case",
3686			json: `
3687			{
3688				"name": "web",
3689				"port": 1111
3690			}
3691			`,
3692			wantNS:  nil,
3693			wantErr: "",
3694		},
3695		{
3696			name: "default sidecar",
3697			json: `
3698			{
3699				"name": "web",
3700				"port": 1111,
3701				"connect": {
3702					"SidecarService": {}
3703				}
3704			}
3705			`,
3706			wantNS:  testDefaultSidecar("web", 1111),
3707			wantErr: "",
3708		},
3709		{
3710			name: "ACL OK defaults",
3711			json: `
3712			{
3713				"name": "web",
3714				"port": 1111,
3715				"connect": {
3716					"SidecarService": {}
3717				}
3718			}
3719			`,
3720			enableACL: true,
3721			tokenRules: `
3722			service "web-sidecar-proxy" {
3723				policy = "write"
3724			}
3725			service "web" {
3726				policy = "write"
3727			}`,
3728			wantNS:  testDefaultSidecar("web", 1111),
3729			wantErr: "",
3730		},
3731		{
3732			name: "ACL denied",
3733			json: `
3734			{
3735				"name": "web",
3736				"port": 1111,
3737				"connect": {
3738					"SidecarService": {}
3739				}
3740			}
3741			`,
3742			enableACL:  true,
3743			tokenRules: ``, // No token rules means no valid token
3744			wantNS:     nil,
3745			wantErr:    "Permission denied",
3746		},
3747		{
3748			name: "ACL OK for service but not for sidecar",
3749			json: `
3750			{
3751				"name": "web",
3752				"port": 1111,
3753				"connect": {
3754					"SidecarService": {}
3755				}
3756			}
3757			`,
3758			enableACL: true,
3759			// This will become more common/reasonable when ACLs support exact match.
3760			tokenRules: `
3761			service "web-sidecar-proxy" {
3762				policy = "deny"
3763			}
3764			service "web" {
3765				policy = "write"
3766			}`,
3767			wantNS:  nil,
3768			wantErr: "Permission denied",
3769		},
3770		{
3771			name: "ACL OK for service and sidecar but not sidecar's overridden destination",
3772			json: `
3773			{
3774				"name": "web",
3775				"port": 1111,
3776				"connect": {
3777					"SidecarService": {
3778						"proxy": {
3779							"DestinationServiceName": "foo"
3780						}
3781					}
3782				}
3783			}
3784			`,
3785			enableACL: true,
3786			tokenRules: `
3787			service "web-sidecar-proxy" {
3788				policy = "write"
3789			}
3790			service "web" {
3791				policy = "write"
3792			}`,
3793			wantNS:  nil,
3794			wantErr: "Permission denied",
3795		},
3796		{
3797			name: "ACL OK for service but not for overridden sidecar",
3798			json: `
3799			{
3800				"name": "web",
3801				"port": 1111,
3802				"connect": {
3803					"SidecarService": {
3804						"name": "foo-sidecar-proxy"
3805					}
3806				}
3807			}
3808			`,
3809			enableACL: true,
3810			tokenRules: `
3811			service "web-sidecar-proxy" {
3812				policy = "write"
3813			}
3814			service "web" {
3815				policy = "write"
3816			}`,
3817			wantNS:  nil,
3818			wantErr: "Permission denied",
3819		},
3820		{
			name: "ACL OK for service and overridden for sidecar",
			// This test ensures that if the sidecar embeds its own token with
3823			// different privs from the main request token it will be honored for the
3824			// sidecar registration. We use the test root token since that should have
3825			// permission.
3826			json: `
3827			{
3828				"name": "web",
3829				"port": 1111,
3830				"connect": {
3831					"SidecarService": {
3832						"name": "foo",
3833						"token": "root"
3834					}
3835				}
3836			}
3837			`,
3838			enableACL: true,
3839			tokenRules: `
3840			service "web-sidecar-proxy" {
3841				policy = "write"
3842			}
3843			service "web" {
3844				policy = "write"
3845			}`,
3846			wantNS: testDefaultSidecar("web", 1111, func(ns *structs.NodeService) {
3847				ns.Service = "foo"
3848			}),
3849			wantErr: "",
3850		},
3851		{
3852			name: "invalid check definition in sidecar",
			// Note: the TCP check has no interval, which should fail validation
3854			json: `
3855			{
3856				"name": "web",
3857				"port": 1111,
3858				"connect": {
3859					"SidecarService": {
3860						"check": {
3861							"TCP": "foo"
3862						}
3863					}
3864				}
3865			}
3866			`,
3867			wantNS:  nil,
3868			wantErr: "invalid check in sidecar_service",
3869		},
3870		{
3871			name: "invalid checks definitions in sidecar",
			// Note: the TCP check has no interval, which should fail validation
3873			json: `
3874			{
3875				"name": "web",
3876				"port": 1111,
3877				"connect": {
3878					"SidecarService": {
3879						"checks": [{
3880							"TCP": "foo"
3881						}]
3882					}
3883				}
3884			}
3885			`,
3886			wantNS:  nil,
3887			wantErr: "invalid check in sidecar_service",
3888		},
3889		{
3890			name: "invalid check status in sidecar",
			// Note the unsupported status should fail validation
3892			json: `
3893			{
3894				"name": "web",
3895				"port": 1111,
3896				"connect": {
3897					"SidecarService": {
3898						"check": {
3899							"TCP": "foo",
3900							"Interval": 10,
3901							"Status": "unsupported-status"
3902						}
3903					}
3904				}
3905			}
3906			`,
3907			wantNS:  nil,
3908			wantErr: "Status for checks must 'passing', 'warning', 'critical'",
3909		},
3910		{
3911			name: "invalid checks status in sidecar",
			// Note the unsupported status should fail validation
3913			json: `
3914			{
3915				"name": "web",
3916				"port": 1111,
3917				"connect": {
3918					"SidecarService": {
3919						"checks": [{
3920							"TCP": "foo",
3921							"Interval": 10,
3922							"Status": "unsupported-status"
3923						}]
3924					}
3925				}
3926			}
3927			`,
3928			wantNS:  nil,
3929			wantErr: "Status for checks must 'passing', 'warning', 'critical'",
3930		},
3931		{
3932			name: "another service registered with same ID as a sidecar should not be deregistered",
3933			// Add another service with the same ID that a sidecar for web would have
3934			preRegister: &structs.NodeService{
3935				ID:      "web-sidecar-proxy",
3936				Service: "fake-sidecar",
3937				Port:    9999,
3938			},
3939			// Register web with NO SIDECAR
3940			json: `
3941			{
3942				"name": "web",
3943				"port": 1111
3944			}
3945			`,
			// Note that although this registration doesn't include a sidecar, we
			// should still see the NodeService we pre-registered above.
3948			wantNS: &structs.NodeService{
3949				ID:              "web-sidecar-proxy",
3950				Service:         "fake-sidecar",
3951				Port:            9999,
3952				TaggedAddresses: map[string]structs.ServiceAddress{},
3953				Weights: &structs.Weights{
3954					Passing: 1,
3955					Warning: 1,
3956				},
3957				EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
3958			},
3959			// After we deregister the web service above, the fake sidecar with
3960			// clashing ID SHOULD NOT have been removed since it wasn't part of the
3961			// original registration.
3962			wantSidecarIDLeftAfterDereg: true,
3963		},
3964		{
3965			name: "updates to sidecar should work",
3966			// Add a valid sidecar already registered
3967			preRegister: &structs.NodeService{
3968				ID:                         "web-sidecar-proxy",
3969				Service:                    "web-sidecar-proxy",
3970				LocallyRegisteredAsSidecar: true,
3971				Port:                       9999,
3972			},
3973			// Register web with Sidecar on different port
3974			json: `
3975			{
3976				"name": "web",
3977				"port": 1111,
3978				"connect": {
3979					"SidecarService": {
3980						"Port": 6666
3981					}
3982				}
3983			}
3984			`,
			// The pre-registered sidecar should be updated in place: it keeps its ID
			// but picks up the new port and proxy config from this registration.
3987			wantNS: &structs.NodeService{
3988				Kind:                       "connect-proxy",
3989				ID:                         "web-sidecar-proxy",
3990				Service:                    "web-sidecar-proxy",
3991				LocallyRegisteredAsSidecar: true,
3992				Port:                       6666,
3993				TaggedAddresses:            map[string]structs.ServiceAddress{},
3994				Weights: &structs.Weights{
3995					Passing: 1,
3996					Warning: 1,
3997				},
3998				Proxy: structs.ConnectProxyConfig{
3999					DestinationServiceName: "web",
4000					DestinationServiceID:   "web",
4001					LocalServiceAddress:    "127.0.0.1",
4002					LocalServicePort:       1111,
4003				},
4004				EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
4005			},
4006		},
4007		{
4008			name: "update that removes sidecar should NOT deregister it",
4009			// Add web with a valid sidecar already registered
4010			preRegister: &structs.NodeService{
4011				ID:      "web",
4012				Service: "web",
4013				Port:    1111,
4014			},
4015			preRegister2: testDefaultSidecar("web", 1111),
4016			// Register (update) web and remove sidecar (and port for sanity check)
4017			json: `
4018			{
4019				"name": "web",
4020				"port": 2222
4021			}
4022			`,
4023			// Sidecar should still be there such that API can update registration
4024			// without accidentally removing a sidecar. This is equivalent to embedded
4025			// checks which are not removed by just not being included in an update.
			// We will document that sidecar registrations via API must be explicitly
4027			// deregistered.
4028			wantNS: testDefaultSidecar("web", 1111),
4029			// Sanity check the rest of the update happened though.
4030			assertStateFn: func(t *testing.T, state *local.State) {
4031				svc := state.Service(structs.NewServiceID("web", nil))
4032				require.NotNil(t, svc)
4033				require.Equal(t, 2222, svc.Port)
4034			},
4035		},
4036	}
4037
4038	for _, tt := range tests {
4039		t.Run(tt.name, func(t *testing.T) {
4040			assert := assert.New(t)
4041			require := require.New(t)
4042
4043			// Constrain auto ports to 1 available to make it deterministic
4044			hcl := `ports {
4045				sidecar_min_port = 2222
4046				sidecar_max_port = 2222
4047			}
4048			`
4049			if tt.enableACL {
4050				hcl = hcl + TestACLConfig()
4051			}
4052
4053			a := NewTestAgent(t, hcl+" "+extraHCL)
4054			defer a.Shutdown()
4055			testrpc.WaitForLeader(t, a.RPC, "dc1")
4056
4057			if tt.preRegister != nil {
4058				require.NoError(a.addServiceFromSource(tt.preRegister, nil, false, "", ConfigSourceLocal))
4059			}
4060			if tt.preRegister2 != nil {
4061				require.NoError(a.addServiceFromSource(tt.preRegister2, nil, false, "", ConfigSourceLocal))
4062			}
4063
			// Create an ACL token with the required policy
4065			var token string
4066			if tt.enableACL && tt.tokenRules != "" {
4067				token = testCreateToken(t, a, tt.tokenRules)
4068			}
4069
4070			br := bytes.NewBufferString(tt.json)
4071
4072			req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token="+token, br)
4073			resp := httptest.NewRecorder()
4074			obj, err := a.srv.AgentRegisterService(resp, req)
4075			if tt.wantErr != "" {
4076				require.Error(err, "response code=%d, body:\n%s",
4077					resp.Code, resp.Body.String())
4078				require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr))
4079				return
4080			}
4081			require.NoError(err)
4082			assert.Nil(obj)
4083			require.Equal(200, resp.Code, "request failed with body: %s",
4084				resp.Body.String())
4085
			// Sanity check the target service registration
4087			svcs := a.State.Services(nil)
4088
4089			// Parse the expected definition into a ServiceDefinition
4090			var sd structs.ServiceDefinition
4091			err = json.Unmarshal([]byte(tt.json), &sd)
4092			require.NoError(err)
4093			require.NotEmpty(sd.Name)
4094
4095			svcID := sd.ID
4096			if svcID == "" {
4097				svcID = sd.Name
4098			}
4099			sid := structs.NewServiceID(svcID, nil)
4100			svc, ok := svcs[sid]
4101			require.True(ok, "has service "+sid.String())
4102			assert.Equal(sd.Name, svc.Service)
4103			assert.Equal(sd.Port, svc.Port)
			// Ensure that the actual registered service _doesn't_ still have its
			// sidecar info since it's a duplicate and we don't want that synced up to
			// the catalog or included in responses particularly - it's just
			// registration syntax sugar.
4108			assert.Nil(svc.Connect.SidecarService)
4109
4110			if tt.wantNS == nil {
				// Sanity check that no sidecar was registered. We rely on there being
				// no other services at the start of the test so we can just use the count.
4113				assert.Len(svcs, 1, "should be no sidecar registered")
4114				return
4115			}
4116
4117			// Ensure sidecar
4118			svc, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)]
4119			require.True(ok, "no sidecar registered at "+tt.wantNS.ID)
4120			assert.Equal(tt.wantNS, svc)
4121
4122			if tt.assertStateFn != nil {
4123				tt.assertStateFn(t, a.State)
4124			}
4125
4126			// Now verify deregistration also removes sidecar (if there was one and it
4127			// was added via sidecar not just coincidental ID clash)
4128			{
4129				req := httptest.NewRequest("PUT",
4130					"/v1/agent/service/deregister/"+svcID+"?token="+token, nil)
4131				resp := httptest.NewRecorder()
4132				obj, err := a.srv.AgentDeregisterService(resp, req)
4133				require.NoError(err)
4134				require.Nil(obj)
4135
4136				svcs := a.State.Services(nil)
4137				_, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)]
4138				if tt.wantSidecarIDLeftAfterDereg {
4139					require.True(ok, "removed non-sidecar service at "+tt.wantNS.ID)
4140				} else {
4141					require.False(ok, "sidecar not deregistered with service "+svcID)
4142				}
4143			}
4144		})
4145	}
4146}
4147
4148// This tests that connect proxy validation is done for local agent
4149// registration. This doesn't need to test validation exhaustively since
4150// that is done via a table test in the structs package.
4151func TestAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T) {
4152	if testing.Short() {
4153		t.Skip("too slow for testing.Short")
4154	}
4155
4156	t.Run("normal", func(t *testing.T) {
4157		t.Parallel()
4158		testAgent_RegisterService_UnmanagedConnectProxyInvalid(t, "enable_central_service_config = false")
4159	})
4160	t.Run("service manager", func(t *testing.T) {
4161		t.Parallel()
4162		testAgent_RegisterService_UnmanagedConnectProxyInvalid(t, "enable_central_service_config = true")
4163	})
4164}
4165
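// testAgent_RegisterService_UnmanagedConnectProxyInvalid registers a
// connect-proxy service definition that is missing a Port and expects the agent
// to reject it with a 400 without registering anything.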
4166func testAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T, extraHCL string) {
4167	t.Helper()
4168
4169	assert := assert.New(t)
4170	a := NewTestAgent(t, extraHCL)
4171	defer a.Shutdown()
4172	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4173
4174	args := &structs.ServiceDefinition{
4175		Kind: structs.ServiceKindConnectProxy,
4176		Name: "connect-proxy",
4177		Proxy: &structs.ConnectProxyConfig{
4178			DestinationServiceName: "db",
4179		},
4180		Check: structs.CheckType{
4181			TTL: 15 * time.Second,
4182		},
4183	}
4184
4185	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
4186	resp := httptest.NewRecorder()
4187	obj, err := a.srv.AgentRegisterService(resp, req)
4188	assert.Nil(err)
4189	assert.Nil(obj)
4190	assert.Equal(http.StatusBadRequest, resp.Code)
4191	assert.Contains(resp.Body.String(), "Port")
4192
4193	// Ensure the service doesn't exist
4194	assert.Nil(a.State.Service(structs.NewServiceID("connect-proxy", nil)))
4195}
4196
4197// Tests agent registration of a service that is connect native.
4198func TestAgent_RegisterService_ConnectNative(t *testing.T) {
4199	if testing.Short() {
4200		t.Skip("too slow for testing.Short")
4201	}
4202
4203	t.Run("normal", func(t *testing.T) {
4204		t.Parallel()
4205		testAgent_RegisterService_ConnectNative(t, "enable_central_service_config = false")
4206	})
4207	t.Run("service manager", func(t *testing.T) {
4208		t.Parallel()
4209		testAgent_RegisterService_ConnectNative(t, "enable_central_service_config = true")
4210	})
4211}
4212
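// testAgent_RegisterService_ConnectNative registers a service with
// Connect.Native = true and verifies the flag is preserved in local state.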
4213func testAgent_RegisterService_ConnectNative(t *testing.T, extraHCL string) {
4214	t.Helper()
4215
4216	assert := assert.New(t)
4217	a := NewTestAgent(t, extraHCL)
4218	defer a.Shutdown()
4219	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4220
	// Register a connect-native service. No proxy is registered here; the
	// service itself handles Connect natively.
4224	args := &structs.ServiceDefinition{
4225		Name: "web",
4226		Port: 8000,
4227		Check: structs.CheckType{
4228			TTL: 15 * time.Second,
4229		},
4230		Connect: &structs.ServiceConnect{
4231			Native: true,
4232		},
4233	}
4234
4235	req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
4236	resp := httptest.NewRecorder()
4237	obj, err := a.srv.AgentRegisterService(resp, req)
4238	assert.Nil(err)
4239	assert.Nil(obj)
4240
	// Ensure the service was registered and marked as connect native
4242	svc := a.State.Service(structs.NewServiceID("web", nil))
4243	require.NotNil(t, svc)
4244	assert.True(svc.Connect.Native)
4245}
4246
4247func TestAgent_RegisterService_ScriptCheck_ExecDisable(t *testing.T) {
4248	if testing.Short() {
4249		t.Skip("too slow for testing.Short")
4250	}
4251
4252	t.Run("normal", func(t *testing.T) {
4253		t.Parallel()
4254		testAgent_RegisterService_ScriptCheck_ExecDisable(t, "enable_central_service_config = false")
4255	})
4256	t.Run("service manager", func(t *testing.T) {
4257		t.Parallel()
4258		testAgent_RegisterService_ScriptCheck_ExecDisable(t, "enable_central_service_config = true")
4259	})
4260}
4261
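// testAgent_RegisterService_ScriptCheck_ExecDisable verifies that registering a
// service with a script check fails when script execution is disabled on the
// agent (the default), and that no check is left behind.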
4262func testAgent_RegisterService_ScriptCheck_ExecDisable(t *testing.T, extraHCL string) {
4263	t.Helper()
4264
4265	a := NewTestAgent(t, extraHCL)
4266	defer a.Shutdown()
4267	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4268
4269	args := &structs.ServiceDefinition{
4270		Name: "test",
4271		Meta: map[string]string{"hello": "world"},
4272		Tags: []string{"master"},
4273		Port: 8000,
4274		Check: structs.CheckType{
4275			Name:       "test-check",
4276			Interval:   time.Second,
4277			ScriptArgs: []string{"true"},
4278		},
4279		Weights: &structs.Weights{
4280			Passing: 100,
4281			Warning: 3,
4282		},
4283	}
4284	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
4285
4286	_, err := a.srv.AgentRegisterService(nil, req)
4287	if err == nil {
4288		t.Fatalf("expected error but got nil")
4289	}
4290	if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
4291		t.Fatalf("expected script disabled error, got: %s", err)
4292	}
4293	checkID := types.CheckID("test-check")
4294	require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled")
4295}
4296
4297func TestAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T) {
4298	if testing.Short() {
4299		t.Skip("too slow for testing.Short")
4300	}
4301
4302	t.Run("normal", func(t *testing.T) {
4303		t.Parallel()
4304		testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t, "enable_central_service_config = false")
4305	})
4306	t.Run("service manager", func(t *testing.T) {
4307		t.Parallel()
4308		testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t, "enable_central_service_config = true")
4309	})
4310}
4311
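// testAgent_RegisterService_ScriptCheck_ExecRemoteDisable verifies that with
// only enable_local_script_checks set, a script check submitted via the HTTP
// API is still rejected and no check is registered.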
4312func testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T, extraHCL string) {
4313	t.Helper()
4314
4315	a := NewTestAgent(t, `
4316		enable_local_script_checks = true
4317	`+extraHCL)
4318	defer a.Shutdown()
4319	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4320
4321	args := &structs.ServiceDefinition{
4322		Name: "test",
4323		Meta: map[string]string{"hello": "world"},
4324		Tags: []string{"master"},
4325		Port: 8000,
4326		Check: structs.CheckType{
4327			Name:       "test-check",
4328			Interval:   time.Second,
4329			ScriptArgs: []string{"true"},
4330		},
4331		Weights: &structs.Weights{
4332			Passing: 100,
4333			Warning: 3,
4334		},
4335	}
4336	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
4337
4338	_, err := a.srv.AgentRegisterService(nil, req)
4339	if err == nil {
4340		t.Fatalf("expected error but got nil")
4341	}
4342	if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
4343		t.Fatalf("expected script disabled error, got: %s", err)
4344	}
4345	checkID := types.CheckID("test-check")
4346	require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled")
4347}
4348
4349func TestAgent_DeregisterService(t *testing.T) {
4350	if testing.Short() {
4351		t.Skip("too slow for testing.Short")
4352	}
4353
4354	t.Parallel()
4355	a := NewTestAgent(t, "")
4356	defer a.Shutdown()
4357	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4358
4359	service := &structs.NodeService{
4360		ID:      "test",
4361		Service: "test",
4362	}
4363	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
4364		t.Fatalf("err: %v", err)
4365	}
4366
4367	req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil)
4368	obj, err := a.srv.AgentDeregisterService(nil, req)
4369	if err != nil {
4370		t.Fatalf("err: %v", err)
4371	}
4372	if obj != nil {
4373		t.Fatalf("bad: %v", obj)
4374	}
4375
4376	// Ensure we have a check mapping
4377	assert.Nil(t, a.State.Service(structs.NewServiceID("test", nil)), "have test service")
4378	assert.Nil(t, a.State.Check(structs.NewCheckID("test", nil)), "have test check")
4379}
4380
4381func TestAgent_DeregisterService_ACLDeny(t *testing.T) {
4382	if testing.Short() {
4383		t.Skip("too slow for testing.Short")
4384	}
4385
4386	t.Parallel()
4387	a := NewTestAgent(t, TestACLConfig())
4388	defer a.Shutdown()
4389	testrpc.WaitForLeader(t, a.RPC, "dc1")
4390
4391	service := &structs.NodeService{
4392		ID:      "test",
4393		Service: "test",
4394	}
4395	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
4396		t.Fatalf("err: %v", err)
4397	}
4398
4399	t.Run("no token", func(t *testing.T) {
4400		req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil)
4401		if _, err := a.srv.AgentDeregisterService(nil, req); !acl.IsErrPermissionDenied(err) {
4402			t.Fatalf("err: %v", err)
4403		}
4404	})
4405
4406	t.Run("root token", func(t *testing.T) {
4407		req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test?token=root", nil)
4408		if _, err := a.srv.AgentDeregisterService(nil, req); err != nil {
4409			t.Fatalf("err: %v", err)
4410		}
4411	})
4412}
4413
4414func TestAgent_ServiceMaintenance_BadRequest(t *testing.T) {
4415	if testing.Short() {
4416		t.Skip("too slow for testing.Short")
4417	}
4418
4419	t.Parallel()
4420	a := NewTestAgent(t, "")
4421	defer a.Shutdown()
4422	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4423
4424	t.Run("not enabled", func(t *testing.T) {
4425		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test", nil)
4426		resp := httptest.NewRecorder()
4427		if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
4428			t.Fatalf("err: %s", err)
4429		}
4430		if resp.Code != 400 {
4431			t.Fatalf("expected 400, got %d", resp.Code)
4432		}
4433	})
4434
4435	t.Run("no service id", func(t *testing.T) {
4436		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/?enable=true", nil)
4437		resp := httptest.NewRecorder()
4438		if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
4439			t.Fatalf("err: %s", err)
4440		}
4441		if resp.Code != 400 {
4442			t.Fatalf("expected 400, got %d", resp.Code)
4443		}
4444	})
4445
4446	t.Run("bad service id", func(t *testing.T) {
4447		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/_nope_?enable=true", nil)
4448		resp := httptest.NewRecorder()
4449		if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
4450			t.Fatalf("err: %s", err)
4451		}
4452		if resp.Code != 404 {
4453			t.Fatalf("expected 404, got %d", resp.Code)
4454		}
4455	})
4456}
4457
4458func TestAgent_ServiceMaintenance_Enable(t *testing.T) {
4459	if testing.Short() {
4460		t.Skip("too slow for testing.Short")
4461	}
4462
4463	t.Parallel()
4464	a := NewTestAgent(t, "")
4465	defer a.Shutdown()
4466	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4467
4468	// Register the service
4469	service := &structs.NodeService{
4470		ID:      "test",
4471		Service: "test",
4472	}
4473	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
4474		t.Fatalf("err: %v", err)
4475	}
4476
4477	// Force the service into maintenance mode
4478	req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=mytoken", nil)
4479	resp := httptest.NewRecorder()
4480	if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
4481		t.Fatalf("err: %s", err)
4482	}
4483	if resp.Code != 200 {
4484		t.Fatalf("expected 200, got %d", resp.Code)
4485	}
4486
4487	// Ensure the maintenance check was registered
4488	checkID := serviceMaintCheckID(structs.NewServiceID("test", nil))
4489	check := a.State.Check(checkID)
4490	if check == nil {
4491		t.Fatalf("should have registered maintenance check")
4492	}
4493
4494	// Ensure the token was added
4495	if token := a.State.CheckToken(checkID); token != "mytoken" {
4496		t.Fatalf("expected 'mytoken', got '%s'", token)
4497	}
4498
4499	// Ensure the reason was set in notes
4500	if check.Notes != "broken" {
4501		t.Fatalf("bad: %#v", check)
4502	}
4503}
4504
4505func TestAgent_ServiceMaintenance_Disable(t *testing.T) {
4506	if testing.Short() {
4507		t.Skip("too slow for testing.Short")
4508	}
4509
4510	t.Parallel()
4511	a := NewTestAgent(t, "")
4512	defer a.Shutdown()
4513	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4514
4515	// Register the service
4516	service := &structs.NodeService{
4517		ID:      "test",
4518		Service: "test",
4519	}
4520	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
4521		t.Fatalf("err: %v", err)
4522	}
4523
4524	// Force the service into maintenance mode
4525	if err := a.EnableServiceMaintenance(structs.NewServiceID("test", nil), "", ""); err != nil {
4526		t.Fatalf("err: %s", err)
4527	}
4528
4529	// Leave maintenance mode
4530	req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=false", nil)
4531	resp := httptest.NewRecorder()
4532	if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
4533		t.Fatalf("err: %s", err)
4534	}
4535	if resp.Code != 200 {
4536		t.Fatalf("expected 200, got %d", resp.Code)
4537	}
4538
4539	// Ensure the maintenance check was removed
4540	checkID := serviceMaintCheckID(structs.NewServiceID("test", nil))
4541	if existing := a.State.Check(checkID); existing != nil {
4542		t.Fatalf("should have removed maintenance check")
4543	}
4544}
4545
4546func TestAgent_ServiceMaintenance_ACLDeny(t *testing.T) {
4547	if testing.Short() {
4548		t.Skip("too slow for testing.Short")
4549	}
4550
4551	t.Parallel()
4552	a := NewTestAgent(t, TestACLConfig())
4553	defer a.Shutdown()
4554	testrpc.WaitForLeader(t, a.RPC, "dc1")
4555
4556	// Register the service.
4557	service := &structs.NodeService{
4558		ID:      "test",
4559		Service: "test",
4560	}
4561	if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil {
4562		t.Fatalf("err: %v", err)
4563	}
4564
4565	t.Run("no token", func(t *testing.T) {
4566		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken", nil)
4567		if _, err := a.srv.AgentServiceMaintenance(nil, req); !acl.IsErrPermissionDenied(err) {
4568			t.Fatalf("err: %v", err)
4569		}
4570	})
4571
4572	t.Run("root token", func(t *testing.T) {
4573		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=root", nil)
4574		if _, err := a.srv.AgentServiceMaintenance(nil, req); err != nil {
4575			t.Fatalf("err: %v", err)
4576		}
4577	})
4578}
4579
4580func TestAgent_NodeMaintenance_BadRequest(t *testing.T) {
4581	if testing.Short() {
4582		t.Skip("too slow for testing.Short")
4583	}
4584
4585	t.Parallel()
4586	a := NewTestAgent(t, "")
4587	defer a.Shutdown()
4588	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4589
4590	// Fails when no enable flag provided
4591	req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance", nil)
4592	resp := httptest.NewRecorder()
4593	if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil {
4594		t.Fatalf("err: %s", err)
4595	}
4596	if resp.Code != 400 {
4597		t.Fatalf("expected 400, got %d", resp.Code)
4598	}
4599}
4600
4601func TestAgent_NodeMaintenance_Enable(t *testing.T) {
4602	if testing.Short() {
4603		t.Skip("too slow for testing.Short")
4604	}
4605
4606	t.Parallel()
4607	a := NewTestAgent(t, "")
4608	defer a.Shutdown()
4609	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4610
4611	// Force the node into maintenance mode
4612	req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=true&reason=broken&token=mytoken", nil)
4613	resp := httptest.NewRecorder()
4614	if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil {
4615		t.Fatalf("err: %s", err)
4616	}
4617	if resp.Code != 200 {
4618		t.Fatalf("expected 200, got %d", resp.Code)
4619	}
4620
4621	// Ensure the maintenance check was registered
4622	check := a.State.Check(structs.NodeMaintCheckID)
4623	if check == nil {
4624		t.Fatalf("should have registered maintenance check")
4625	}
4626
4627	// Check that the token was used
4628	if token := a.State.CheckToken(structs.NodeMaintCheckID); token != "mytoken" {
4629		t.Fatalf("expected 'mytoken', got '%s'", token)
4630	}
4631
4632	// Ensure the reason was set in notes
4633	if check.Notes != "broken" {
4634		t.Fatalf("bad: %#v", check)
4635	}
4636}
4637
4638func TestAgent_NodeMaintenance_Disable(t *testing.T) {
4639	if testing.Short() {
4640		t.Skip("too slow for testing.Short")
4641	}
4642
4643	t.Parallel()
4644	a := NewTestAgent(t, "")
4645	defer a.Shutdown()
4646	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4647
4648	// Force the node into maintenance mode
4649	a.EnableNodeMaintenance("", "")
4650
4651	// Leave maintenance mode
4652	req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=false", nil)
4653	resp := httptest.NewRecorder()
4654	if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil {
4655		t.Fatalf("err: %s", err)
4656	}
4657	if resp.Code != 200 {
4658		t.Fatalf("expected 200, got %d", resp.Code)
4659	}
4660
4661	// Ensure the maintenance check was removed
4662	if existing := a.State.Check(structs.NodeMaintCheckID); existing != nil {
4663		t.Fatalf("should have removed maintenance check")
4664	}
4665}
4666
4667func TestAgent_NodeMaintenance_ACLDeny(t *testing.T) {
4668	if testing.Short() {
4669		t.Skip("too slow for testing.Short")
4670	}
4671
4672	t.Parallel()
4673	a := NewTestAgent(t, TestACLConfig())
4674	defer a.Shutdown()
4675	testrpc.WaitForLeader(t, a.RPC, "dc1")
4676
4677	t.Run("no token", func(t *testing.T) {
4678		req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=true&reason=broken", nil)
4679		if _, err := a.srv.AgentNodeMaintenance(nil, req); !acl.IsErrPermissionDenied(err) {
4680			t.Fatalf("err: %v", err)
4681		}
4682	})
4683
4684	t.Run("root token", func(t *testing.T) {
4685		req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=true&reason=broken&token=root", nil)
4686		if _, err := a.srv.AgentNodeMaintenance(nil, req); err != nil {
4687			t.Fatalf("err: %v", err)
4688		}
4689	})
4690}
4691
4692func TestAgent_RegisterCheck_Service(t *testing.T) {
4693	if testing.Short() {
4694		t.Skip("too slow for testing.Short")
4695	}
4696
4697	t.Parallel()
4698	a := NewTestAgent(t, "")
4699	defer a.Shutdown()
4700	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4701
4702	args := &structs.ServiceDefinition{
4703		Name: "memcache",
4704		Port: 8000,
4705		Check: structs.CheckType{
4706			TTL: 15 * time.Second,
4707		},
4708	}
4709
4710	// First register the service
4711	req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
4712	if _, err := a.srv.AgentRegisterService(nil, req); err != nil {
4713		t.Fatalf("err: %v", err)
4714	}
4715
4716	// Now register an additional check
4717	checkArgs := &structs.CheckDefinition{
4718		Name:      "memcache_check2",
4719		ServiceID: "memcache",
4720		TTL:       15 * time.Second,
4721	}
4722	req, _ = http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(checkArgs))
4723	if _, err := a.srv.AgentRegisterCheck(nil, req); err != nil {
4724		t.Fatalf("err: %v", err)
4725	}
4726
4727	// Ensure we have a check mapping
4728	result := a.State.Checks(nil)
4729	if _, ok := result[structs.NewCheckID("service:memcache", nil)]; !ok {
		t.Fatalf("missing memcache check")
4731	}
4732	if _, ok := result[structs.NewCheckID("memcache_check2", nil)]; !ok {
4733		t.Fatalf("missing memcache_check2 check")
4734	}
4735
4736	// Make sure the new check is associated with the service
4737	if result[structs.NewCheckID("memcache_check2", nil)].ServiceID != "memcache" {
		t.Fatalf("bad: %#v", result[structs.NewCheckID("memcache_check2", nil)])
4739	}
4740
4741	// Make sure the new check has the right type
4742	if result[structs.NewCheckID("memcache_check2", nil)].Type != "ttl" {
4743		t.Fatalf("expected TTL type, got %s", result[structs.NewCheckID("memcache_check2", nil)].Type)
4744	}
4745}
4746
4747func TestAgent_Monitor(t *testing.T) {
4748	if testing.Short() {
4749		t.Skip("too slow for testing.Short")
4750	}
4751
4752	t.Parallel()
4753	a := NewTestAgent(t, "")
4754	defer a.Shutdown()
4755	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
4756
4757	t.Run("unknown log level", func(t *testing.T) {
4758		// Try passing an invalid log level
4759		req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=invalid", nil)
4760		resp := httptest.NewRecorder()
4761		_, err := a.srv.AgentMonitor(resp, req)
4762		if err == nil {
4763			t.Fatal("expected BadRequestError to have occurred, got nil")
4764		}
4765
4766		// Note that BadRequestError is handled outside the endpoint handler so we
4767		// still see a 200 if we check here.
4768		if _, ok := err.(BadRequestError); !ok {
4769			t.Fatalf("expected BadRequestError to have occurred, got %#v", err)
4770		}
4771
4772		substring := "Unknown log level"
4773		if !strings.Contains(err.Error(), substring) {
4774			t.Fatalf("got: %s, wanted message containing: %s", err.Error(), substring)
4775		}
4776	})
4777
4778	t.Run("stream unstructured logs", func(t *testing.T) {
4779		// Try to stream logs until we see the expected log line
4780		retry.Run(t, func(r *retry.R) {
4781			req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil)
4782			cancelCtx, cancelFunc := context.WithCancel(context.Background())
4783			req = req.WithContext(cancelCtx)
4784
4785			resp := httptest.NewRecorder()
4786			errCh := make(chan error)
4787			go func() {
4788				_, err := a.srv.AgentMonitor(resp, req)
4789				errCh <- err
4790			}()
4791
4792			args := &structs.ServiceDefinition{
4793				Name: "monitor",
4794				Port: 8000,
4795				Check: structs.CheckType{
4796					TTL: 15 * time.Second,
4797				},
4798			}
4799
4800			registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
4801			if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil {
4802				t.Fatalf("err: %v", err)
4803			}
4804
4805			// Wait until we have received some type of logging output
4806			require.Eventually(t, func() bool {
4807				return len(resp.Body.Bytes()) > 0
4808			}, 3*time.Second, 100*time.Millisecond)
4809
4810			cancelFunc()
4811			err := <-errCh
4812			require.NoError(t, err)
4813
4814			got := resp.Body.String()
4815
4816			// Only check a substring that we are highly confident in finding
4817			want := "Synced service: service="
4818			if !strings.Contains(got, want) {
4819				r.Fatalf("got %q and did not find %q", got, want)
4820			}
4821		})
4822	})
4823
4824	t.Run("stream compressed unstructured logs", func(t *testing.T) {
4825		// The only purpose of this test is to see something being
4826		// logged. Because /v1/agent/monitor is streaming the response
4827		// it needs special handling with the compression.
4828		retry.Run(t, func(r *retry.R) {
4829			req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil)
4830			// Usually this would be automatically set by transport content
4831			// negotiation, but since this call doesn't go through a real
			// transport, the header has to be set manually.
4833			req.Header["Accept-Encoding"] = []string{"gzip"}
4834			cancelCtx, cancelFunc := context.WithCancel(context.Background())
4835			req = req.WithContext(cancelCtx)
4836
4837			resp := httptest.NewRecorder()
4838			handler := a.srv.handler(true)
4839			go handler.ServeHTTP(resp, req)
4840
4841			args := &structs.ServiceDefinition{
4842				Name: "monitor",
4843				Port: 8000,
4844				Check: structs.CheckType{
4845					TTL: 15 * time.Second,
4846				},
4847			}
4848
4849			registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
4850			if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil {
4851				t.Fatalf("err: %v", err)
4852			}
4853
4854			// Wait until we have received some type of logging output
4855			require.Eventually(t, func() bool {
4856				return len(resp.Body.Bytes()) > 0
4857			}, 3*time.Second, 100*time.Millisecond)
4858			cancelFunc()
4859		})
4860	})
4861
4862	t.Run("stream JSON logs", func(t *testing.T) {
4863		// Try to stream logs until we see the expected log line
4864		retry.Run(t, func(r *retry.R) {
4865			req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug&logjson", nil)
4866			cancelCtx, cancelFunc := context.WithCancel(context.Background())
4867			req = req.WithContext(cancelCtx)
4868
4869			resp := httptest.NewRecorder()
4870			errCh := make(chan error)
4871			go func() {
4872				_, err := a.srv.AgentMonitor(resp, req)
4873				errCh <- err
4874			}()
4875
4876			args := &structs.ServiceDefinition{
4877				Name: "monitor",
4878				Port: 8000,
4879				Check: structs.CheckType{
4880					TTL: 15 * time.Second,
4881				},
4882			}
4883
4884			registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
4885			if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil {
4886				t.Fatalf("err: %v", err)
4887			}
4888
4889			// Wait until we have received some type of logging output
4890			require.Eventually(t, func() bool {
4891				return len(resp.Body.Bytes()) > 0
4892			}, 3*time.Second, 100*time.Millisecond)
4893
4894			cancelFunc()
4895			err := <-errCh
4896			require.NoError(t, err)
4897
4898			// Each line is output as a separate JSON object, we grab the first and
4899			// make sure it can be unmarshalled.
4900			firstLine := bytes.Split(resp.Body.Bytes(), []byte("\n"))[0]
4901			var output map[string]interface{}
4902			if err := json.Unmarshal(firstLine, &output); err != nil {
4903				t.Fatalf("err: %v", err)
4904			}
4905		})
4906	})
4907
4908	// hopefully catch any potential regression in serf/memberlist logging setup.
4909	t.Run("serf shutdown logging", func(t *testing.T) {
4910		req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil)
4911		cancelCtx, cancelFunc := context.WithCancel(context.Background())
4912		req = req.WithContext(cancelCtx)
4913
4914		resp := httptest.NewRecorder()
4915		chErr := make(chan error)
4916		chStarted := make(chan struct{})
4917		go func() {
4918			close(chStarted)
4919			_, err := a.srv.AgentMonitor(resp, req)
4920			chErr <- err
4921		}()
4922
4923		<-chStarted
4924		require.NoError(t, a.Shutdown())
4925
4926		// Wait until we have received some type of logging output
4927		require.Eventually(t, func() bool {
4928			return len(resp.Body.Bytes()) > 0
4929		}, 3*time.Second, 100*time.Millisecond)
4930
4931		cancelFunc()
4932		err := <-chErr
4933		require.NoError(t, err)
4934
4935		got := resp.Body.String()
4936		want := "serf: Shutdown without a Leave"
4937		if !strings.Contains(got, want) {
4938			t.Fatalf("got %q and did not find %q", got, want)
4939		}
4940	})
4941}
4942
4943func TestAgent_Monitor_ACLDeny(t *testing.T) {
4944	if testing.Short() {
4945		t.Skip("too slow for testing.Short")
4946	}
4947
4948	t.Parallel()
4949	a := NewTestAgent(t, TestACLConfig())
4950	defer a.Shutdown()
4951	testrpc.WaitForLeader(t, a.RPC, "dc1")
4952
4953	// Try without a token.
4954	req, _ := http.NewRequest("GET", "/v1/agent/monitor", nil)
4955	if _, err := a.srv.AgentMonitor(nil, req); !acl.IsErrPermissionDenied(err) {
4956		t.Fatalf("err: %v", err)
4957	}
4958
4959	// This proves we call the ACL function, and we've got the other monitor
4960	// test to prove monitor works, which should be sufficient. The monitor
	// logic is a little complex to set up, so it isn't worth repeating
	// here.
4963}
4964
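// TestAgent_TokenTriggersFullSync sets agent and default ACL tokens via the
// /v1/agent/token endpoint and then waits for a full anti-entropy sync,
// verifying that a token update takes effect for subsequent registration.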
4965func TestAgent_TokenTriggersFullSync(t *testing.T) {
4966	if testing.Short() {
4967		t.Skip("too slow for testing.Short")
4968	}
4969
4970	t.Parallel()
4971
4972	body := func(token string) io.Reader {
4973		return jsonReader(&api.AgentToken{Token: token})
4974	}
4975
4976	createNodePolicy := func(t *testing.T, a *TestAgent, policyName string) *structs.ACLPolicy {
4977		policy := &structs.ACLPolicy{
4978			Name:  policyName,
4979			Rules: `node_prefix "" { policy = "write" }`,
4980		}
4981
4982		req, err := http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonBody(policy))
4983		require.NoError(t, err)
4984
4985		resp := httptest.NewRecorder()
4986		obj, err := a.srv.ACLPolicyCreate(resp, req)
4987		require.NoError(t, err)
4988
4989		policy, ok := obj.(*structs.ACLPolicy)
4990		require.True(t, ok)
4991		return policy
4992	}
4993
4994	createNodeToken := func(t *testing.T, a *TestAgent, policyName string) *structs.ACLToken {
4995		createNodePolicy(t, a, policyName)
4996
4997		token := &structs.ACLToken{
4998			Description: "test",
4999			Policies: []structs.ACLTokenPolicyLink{
5000				{Name: policyName},
5001			},
5002		}
5003
5004		req, err := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonBody(token))
5005		require.NoError(t, err)
5006
5007		resp := httptest.NewRecorder()
5008		obj, err := a.srv.ACLTokenCreate(resp, req)
5009		require.NoError(t, err)
5010
5011		token, ok := obj.(*structs.ACLToken)
5012		require.True(t, ok)
5013		return token
5014	}
5015
5016	cases := []struct {
5017		path       string
5018		tokenGetFn func(*token.Store) string
5019	}{
5020		{
5021			path:       "acl_agent_token",
5022			tokenGetFn: (*token.Store).AgentToken,
5023		},
5024		{
5025			path:       "agent",
5026			tokenGetFn: (*token.Store).AgentToken,
5027		},
5028		{
5029			path:       "acl_token",
5030			tokenGetFn: (*token.Store).UserToken,
5031		},
5032		{
5033			path:       "default",
5034			tokenGetFn: (*token.Store).UserToken,
5035		},
5036	}
5037
5038	for _, tt := range cases {
5039		tt := tt
5040		t.Run(tt.path, func(t *testing.T) {
5041			url := fmt.Sprintf("/v1/agent/token/%s?token=root", tt.path)
5042
5043			a := NewTestAgent(t, TestACLConfig()+`
5044				acl {
5045					tokens {
5046						default = ""
5047						agent = ""
5048						agent_master = ""
5049						replication = ""
5050					}
5051				}
5052			`)
5053			defer a.Shutdown()
5054			testrpc.WaitForLeader(t, a.RPC, "dc1")
5055
5056			// create node policy and token
5057			token := createNodeToken(t, a, "test")
5058
5059			req, err := http.NewRequest("PUT", url, body(token.SecretID))
5060			require.NoError(t, err)
5061
5062			resp := httptest.NewRecorder()
5063			_, err = a.srv.AgentToken(resp, req)
5064			require.NoError(t, err)
5065
5066			require.Equal(t, http.StatusOK, resp.Code)
5067			require.Equal(t, token.SecretID, tt.tokenGetFn(a.tokens))
5068
5069			testrpc.WaitForTestAgent(t, a.RPC, "dc1",
5070				testrpc.WithToken("root"),
5071				testrpc.WaitForAntiEntropySync())
5072		})
5073	}
5074}
5075
5076func TestAgent_Token(t *testing.T) {
5077	if testing.Short() {
5078		t.Skip("too slow for testing.Short")
5079	}
5080
5081	t.Parallel()
5082
5083	// The behavior of this handler when ACLs are disabled is vetted over
5084	// in TestACL_Disabled_Response since there's already good infra set
5085	// up over there to test this, and it calls the common function.
5086	a := NewTestAgent(t, TestACLConfig()+`
5087		acl {
5088			tokens {
5089				default = ""
5090				agent = ""
5091				agent_master = ""
5092				replication = ""
5093			}
5094		}
5095	`)
5096	defer a.Shutdown()
5097	testrpc.WaitForLeader(t, a.RPC, "dc1")
5098
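	// tokens captures one set of token store values. In the table below, "raw"
	// is what should be stored verbatim (along with its source), while
	// "effective" is what the getters should return after defaulting, e.g. the
	// agent token falls back to the user/default token when unset.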
5099	type tokens struct {
5100		user         string
5101		userSource   tokenStore.TokenSource
5102		agent        string
5103		agentSource  tokenStore.TokenSource
5104		master       string
5105		masterSource tokenStore.TokenSource
5106		repl         string
5107		replSource   tokenStore.TokenSource
5108	}
5109
5110	resetTokens := func(init tokens) {
5111		a.tokens.UpdateUserToken(init.user, init.userSource)
5112		a.tokens.UpdateAgentToken(init.agent, init.agentSource)
5113		a.tokens.UpdateAgentMasterToken(init.master, init.masterSource)
5114		a.tokens.UpdateReplicationToken(init.repl, init.replSource)
5115	}
5116
5117	body := func(token string) io.Reader {
5118		return jsonReader(&api.AgentToken{Token: token})
5119	}
5120
5121	badJSON := func() io.Reader {
5122		return jsonReader(false)
5123	}
5124
5125	tests := []struct {
5126		name        string
5127		method, url string
5128		body        io.Reader
5129		code        int
5130		init        tokens
5131		raw         tokens
5132		effective   tokens
5133		expectedErr error
5134	}{
5135		{
5136			name:        "bad token name",
5137			method:      "PUT",
5138			url:         "nope?token=root",
5139			body:        body("X"),
5140			expectedErr: NotFoundError{Reason: `Token "nope" is unknown`},
5141		},
5142		{
5143			name:   "bad JSON",
5144			method: "PUT",
5145			url:    "acl_token?token=root",
5146			body:   badJSON(),
5147			code:   http.StatusBadRequest,
5148		},
5149		{
5150			name:      "set user legacy",
5151			method:    "PUT",
5152			url:       "acl_token?token=root",
5153			body:      body("U"),
5154			code:      http.StatusOK,
5155			raw:       tokens{user: "U", userSource: tokenStore.TokenSourceAPI},
5156			effective: tokens{user: "U", agent: "U"},
5157		},
5158		{
5159			name:      "set default",
5160			method:    "PUT",
5161			url:       "default?token=root",
5162			body:      body("U"),
5163			code:      http.StatusOK,
5164			raw:       tokens{user: "U", userSource: tokenStore.TokenSourceAPI},
5165			effective: tokens{user: "U", agent: "U"},
5166		},
5167		{
5168			name:      "set agent legacy",
5169			method:    "PUT",
5170			url:       "acl_agent_token?token=root",
5171			body:      body("A"),
5172			code:      http.StatusOK,
5173			init:      tokens{user: "U", agent: "U"},
5174			raw:       tokens{user: "U", agent: "A", agentSource: tokenStore.TokenSourceAPI},
5175			effective: tokens{user: "U", agent: "A"},
5176		},
5177		{
5178			name:      "set agent",
5179			method:    "PUT",
5180			url:       "agent?token=root",
5181			body:      body("A"),
5182			code:      http.StatusOK,
5183			init:      tokens{user: "U", agent: "U"},
5184			raw:       tokens{user: "U", agent: "A", agentSource: tokenStore.TokenSourceAPI},
5185			effective: tokens{user: "U", agent: "A"},
5186		},
5187		{
5188			name:      "set master legacy",
5189			method:    "PUT",
5190			url:       "acl_agent_master_token?token=root",
5191			body:      body("M"),
5192			code:      http.StatusOK,
5193			raw:       tokens{master: "M", masterSource: tokenStore.TokenSourceAPI},
5194			effective: tokens{master: "M"},
5195		},
5196		{
			name:      "set master",
5198			method:    "PUT",
5199			url:       "agent_master?token=root",
5200			body:      body("M"),
5201			code:      http.StatusOK,
5202			raw:       tokens{master: "M", masterSource: tokenStore.TokenSourceAPI},
5203			effective: tokens{master: "M"},
5204		},
5205		{
5206			name:      "set repl legacy",
5207			method:    "PUT",
5208			url:       "acl_replication_token?token=root",
5209			body:      body("R"),
5210			code:      http.StatusOK,
5211			raw:       tokens{repl: "R", replSource: tokenStore.TokenSourceAPI},
5212			effective: tokens{repl: "R"},
5213		},
5214		{
5215			name:      "set repl",
5216			method:    "PUT",
5217			url:       "replication?token=root",
5218			body:      body("R"),
5219			code:      http.StatusOK,
5220			raw:       tokens{repl: "R", replSource: tokenStore.TokenSourceAPI},
5221			effective: tokens{repl: "R"},
5222		},
5223		{
5224			name:   "clear user legacy",
5225			method: "PUT",
5226			url:    "acl_token?token=root",
5227			body:   body(""),
5228			code:   http.StatusOK,
5229			init:   tokens{user: "U"},
5230			raw:    tokens{userSource: tokenStore.TokenSourceAPI},
5231		},
5232		{
5233			name:   "clear default",
5234			method: "PUT",
5235			url:    "default?token=root",
5236			body:   body(""),
5237			code:   http.StatusOK,
5238			init:   tokens{user: "U"},
5239			raw:    tokens{userSource: tokenStore.TokenSourceAPI},
5240		},
5241		{
5242			name:   "clear agent legacy",
5243			method: "PUT",
5244			url:    "acl_agent_token?token=root",
5245			body:   body(""),
5246			code:   http.StatusOK,
5247			init:   tokens{agent: "A"},
5248			raw:    tokens{agentSource: tokenStore.TokenSourceAPI},
5249		},
5250		{
5251			name:   "clear agent",
5252			method: "PUT",
5253			url:    "agent?token=root",
5254			body:   body(""),
5255			code:   http.StatusOK,
5256			init:   tokens{agent: "A"},
5257			raw:    tokens{agentSource: tokenStore.TokenSourceAPI},
5258		},
5259		{
5260			name:   "clear master legacy",
5261			method: "PUT",
5262			url:    "acl_agent_master_token?token=root",
5263			body:   body(""),
5264			code:   http.StatusOK,
5265			init:   tokens{master: "M"},
5266			raw:    tokens{masterSource: tokenStore.TokenSourceAPI},
5267		},
5268		{
5269			name:   "clear master",
5270			method: "PUT",
5271			url:    "agent_master?token=root",
5272			body:   body(""),
5273			code:   http.StatusOK,
5274			init:   tokens{master: "M"},
5275			raw:    tokens{masterSource: tokenStore.TokenSourceAPI},
5276		},
5277		{
5278			name:   "clear repl legacy",
5279			method: "PUT",
5280			url:    "acl_replication_token?token=root",
5281			body:   body(""),
5282			code:   http.StatusOK,
5283			init:   tokens{repl: "R"},
5284			raw:    tokens{replSource: tokenStore.TokenSourceAPI},
5285		},
5286		{
5287			name:   "clear repl",
5288			method: "PUT",
5289			url:    "replication?token=root",
5290			body:   body(""),
5291			code:   http.StatusOK,
5292			init:   tokens{repl: "R"},
5293			raw:    tokens{replSource: tokenStore.TokenSourceAPI},
5294		},
5295	}
5296	for _, tt := range tests {
5297		t.Run(tt.name, func(t *testing.T) {
5298			resetTokens(tt.init)
5299			url := fmt.Sprintf("/v1/agent/token/%s", tt.url)
5300			resp := httptest.NewRecorder()
5301			req, _ := http.NewRequest(tt.method, url, tt.body)
5302
5303			_, err := a.srv.AgentToken(resp, req)
5304			if tt.expectedErr != nil {
5305				require.Equal(t, tt.expectedErr, err)
5306				return
5307			}
5308			require.NoError(t, err)
5309			require.Equal(t, tt.code, resp.Code)
5310			require.Equal(t, tt.effective.user, a.tokens.UserToken())
5311			require.Equal(t, tt.effective.agent, a.tokens.AgentToken())
5312			require.Equal(t, tt.effective.master, a.tokens.AgentMasterToken())
5313			require.Equal(t, tt.effective.repl, a.tokens.ReplicationToken())
5314
5315			tok, src := a.tokens.UserTokenAndSource()
5316			require.Equal(t, tt.raw.user, tok)
5317			require.Equal(t, tt.raw.userSource, src)
5318
5319			tok, src = a.tokens.AgentTokenAndSource()
5320			require.Equal(t, tt.raw.agent, tok)
5321			require.Equal(t, tt.raw.agentSource, src)
5322
5323			tok, src = a.tokens.AgentMasterTokenAndSource()
5324			require.Equal(t, tt.raw.master, tok)
5325			require.Equal(t, tt.raw.masterSource, src)
5326
5327			tok, src = a.tokens.ReplicationTokenAndSource()
5328			require.Equal(t, tt.raw.repl, tok)
5329			require.Equal(t, tt.raw.replSource, src)
5330		})
5331	}
5332
5333	// This one returns an error that is interpreted by the HTTP wrapper, so
5334	// doesn't fit into our table above.
5335	t.Run("permission denied", func(t *testing.T) {
5336		resetTokens(tokens{})
5337		req, _ := http.NewRequest("PUT", "/v1/agent/token/acl_token", body("X"))
5338		_, err := a.srv.AgentToken(nil, req)
5339		require.True(t, acl.IsErrPermissionDenied(err))
5340		require.Equal(t, "", a.tokens.UserToken())
5341	})
5342}
5343
5344func TestAgentConnectCARoots_empty(t *testing.T) {
5345	if testing.Short() {
5346		t.Skip("too slow for testing.Short")
5347	}
5348
5349	t.Parallel()
5350
5351	require := require.New(t)
5352	a := NewTestAgent(t, "connect { enabled = false }")
5353	defer a.Shutdown()
5354	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
5355
5356	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil)
5357	resp := httptest.NewRecorder()
5358	_, err := a.srv.AgentConnectCARoots(resp, req)
5359	require.Error(err)
5360	require.Contains(err.Error(), "Connect must be enabled")
5361}
5362
5363func TestAgentConnectCARoots_list(t *testing.T) {
5364	if testing.Short() {
5365		t.Skip("too slow for testing.Short")
5366	}
5367
5368	t.Parallel()
5369
5370	assert := assert.New(t)
5371	require := require.New(t)
5372	a := NewTestAgent(t, "")
5373	defer a.Shutdown()
5374	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
5375
5376	// Set some CAs. Note that NewTestAgent already bootstraps one CA so this just
5377	// adds a second and makes it active.
5378	ca2 := connect.TestCAConfigSet(t, a, nil)
5379
5380	// List
5381	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil)
5382	resp := httptest.NewRecorder()
5383	obj, err := a.srv.AgentConnectCARoots(resp, req)
5384	require.NoError(err)
5385
5386	value := obj.(structs.IndexedCARoots)
5387	assert.Equal(value.ActiveRootID, ca2.ID)
5388	// Would like to assert that it's the same as the TestAgent domain but the
5389	// only way to access that state via this package is by RPC to the server
5390	// implementation running in TestAgent which is more or less a tautology.
5391	assert.NotEmpty(value.TrustDomain)
5392	assert.Len(value.Roots, 2)
5393
5394	// We should never have the secret information
5395	for _, r := range value.Roots {
5396		assert.Equal("", r.SigningCert)
5397		assert.Equal("", r.SigningKey)
5398	}
5399
5400	assert.Equal("MISS", resp.Header().Get("X-Cache"))
5401
5402	// Test caching
5403	{
5404		// List it again
5405		resp2 := httptest.NewRecorder()
5406		obj2, err := a.srv.AgentConnectCARoots(resp2, req)
5407		require.NoError(err)
5408		assert.Equal(obj, obj2)
5409
5410		// Should cache hit this time and not make request
5411		assert.Equal("HIT", resp2.Header().Get("X-Cache"))
5412	}
5413
5414	// Test that caching is updated in the background
5415	{
5416		// Set a new CA
5417		ca := connect.TestCAConfigSet(t, a, nil)
5418
5419		retry.Run(t, func(r *retry.R) {
5420			// List it again
5421			resp := httptest.NewRecorder()
5422			obj, err := a.srv.AgentConnectCARoots(resp, req)
5423			r.Check(err)
5424
5425			value := obj.(structs.IndexedCARoots)
5426			if ca.ID != value.ActiveRootID {
5427				r.Fatalf("%s != %s", ca.ID, value.ActiveRootID)
5428			}
			// There are now 3 CAs because we didn't complete rotation on the
			// original 2.
5431			if len(value.Roots) != 3 {
5432				r.Fatalf("bad len: %d", len(value.Roots))
5433			}
5434
5435			// Should be a cache hit! The data should've updated in the cache
5436			// in the background so this should've been fetched directly from
5437			// the cache.
5438			if resp.Header().Get("X-Cache") != "HIT" {
5439				r.Fatalf("should be a cache hit")
5440			}
5441		})
5442	}
5443}
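
// The X-Cache MISS/HIT assertions above are repeated in most of the caching
// tests below. A minimal helper sketch for that check (hypothetical; the tests
// in this file inline it instead) could look like this:
func assertXCache(t *testing.T, resp *httptest.ResponseRecorder, want string) {
	t.Helper()
	// The agent cache records whether the result was served from the cache
	// ("HIT") or required a fetch ("MISS") in the X-Cache response header.
	require.Equal(t, want, resp.Header().Get("X-Cache"))
}

// Usage would be e.g. assertXCache(t, resp, "MISS") after calling the handler.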
5444
5445func TestAgentConnectCALeafCert_aclDefaultDeny(t *testing.T) {
5446	if testing.Short() {
5447		t.Skip("too slow for testing.Short")
5448	}
5449
5450	t.Parallel()
5451
5452	require := require.New(t)
5453	a := NewTestAgent(t, TestACLConfig())
5454	defer a.Shutdown()
5455	testrpc.WaitForLeader(t, a.RPC, "dc1")
5456	testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)
5457
	// Register a service with Connect enabled
5459	{
5460		reg := &structs.ServiceDefinition{
5461			ID:      "test-id",
5462			Name:    "test",
5463			Address: "127.0.0.1",
5464			Port:    8000,
5465			Check: structs.CheckType{
5466				TTL: 15 * time.Second,
5467			},
5468			Connect: &structs.ServiceConnect{},
5469		}
5470
5471		req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg))
5472		resp := httptest.NewRecorder()
5473		_, err := a.srv.AgentRegisterService(resp, req)
5474		require.NoError(err)
5475		require.Equal(200, resp.Code, "body: %s", resp.Body.String())
5476	}
5477
5478	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
5479	resp := httptest.NewRecorder()
5480	_, err := a.srv.AgentConnectCALeafCert(resp, req)
5481	require.Error(err)
5482	require.True(acl.IsErrPermissionDenied(err))
5483}
5484
5485func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) {
5486	if testing.Short() {
5487		t.Skip("too slow for testing.Short")
5488	}
5489
5490	t.Parallel()
5491
5492	require := require.New(t)
5493	a := NewTestAgent(t, TestACLConfig())
5494	defer a.Shutdown()
5495	testrpc.WaitForLeader(t, a.RPC, "dc1")
5496	testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)
5497
	// Register a service with Connect enabled
5499	{
5500		reg := &structs.ServiceDefinition{
5501			ID:      "test-id",
5502			Name:    "test",
5503			Address: "127.0.0.1",
5504			Port:    8000,
5505			Check: structs.CheckType{
5506				TTL: 15 * time.Second,
5507			},
5508			Connect: &structs.ServiceConnect{},
5509		}
5510
5511		req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg))
5512		resp := httptest.NewRecorder()
5513		_, err := a.srv.AgentRegisterService(resp, req)
5514		require.NoError(err)
5515		require.Equal(200, resp.Code, "body: %s", resp.Body.String())
5516	}
5517
5518	// Create an ACL with service:write for our service
5519	var token string
5520	{
5521		args := map[string]interface{}{
5522			"Name":  "User Token",
5523			"Type":  "client",
5524			"Rules": `service "test" { policy = "write" }`,
5525		}
5526		req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
5527		resp := httptest.NewRecorder()
5528		obj, err := a.srv.ACLCreate(resp, req)
5529		if err != nil {
5530			t.Fatalf("err: %v", err)
5531		}
5532		aclResp := obj.(aclCreateResponse)
5533		token = aclResp.ID
5534	}
5535
5536	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil)
5537	resp := httptest.NewRecorder()
5538	obj, err := a.srv.AgentConnectCALeafCert(resp, req)
5539	require.NoError(err)
5540
5541	// Get the issued cert
5542	_, ok := obj.(*structs.IssuedCert)
5543	require.True(ok)
5544}
5545
5546func TestAgentConnectCALeafCert_aclServiceReadDeny(t *testing.T) {
5547	if testing.Short() {
5548		t.Skip("too slow for testing.Short")
5549	}
5550
5551	t.Parallel()
5552
5553	require := require.New(t)
5554	a := NewTestAgent(t, TestACLConfig())
5555	defer a.Shutdown()
5556	testrpc.WaitForLeader(t, a.RPC, "dc1")
5557	testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)
5558
	// Register a service with Connect enabled
5560	{
5561		reg := &structs.ServiceDefinition{
5562			ID:      "test-id",
5563			Name:    "test",
5564			Address: "127.0.0.1",
5565			Port:    8000,
5566			Check: structs.CheckType{
5567				TTL: 15 * time.Second,
5568			},
5569			Connect: &structs.ServiceConnect{},
5570		}
5571
5572		req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg))
5573		resp := httptest.NewRecorder()
5574		_, err := a.srv.AgentRegisterService(resp, req)
5575		require.NoError(err)
5576		require.Equal(200, resp.Code, "body: %s", resp.Body.String())
5577	}
5578
5579	// Create an ACL with service:read for our service
5580	var token string
5581	{
5582		args := map[string]interface{}{
5583			"Name":  "User Token",
5584			"Type":  "client",
5585			"Rules": `service "test" { policy = "read" }`,
5586		}
5587		req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
5588		resp := httptest.NewRecorder()
5589		obj, err := a.srv.ACLCreate(resp, req)
5590		if err != nil {
5591			t.Fatalf("err: %v", err)
5592		}
5593		aclResp := obj.(aclCreateResponse)
5594		token = aclResp.ID
5595	}
5596
5597	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil)
5598	resp := httptest.NewRecorder()
5599	_, err := a.srv.AgentConnectCALeafCert(resp, req)
5600	require.Error(err)
5601	require.True(acl.IsErrPermissionDenied(err))
5602}
5603
5604func TestAgentConnectCALeafCert_good(t *testing.T) {
5605	if testing.Short() {
5606		t.Skip("too slow for testing.Short")
5607	}
5608
5609	t.Parallel()
5610
5611	assert := assert.New(t)
5612	require := require.New(t)
5613	a := StartTestAgent(t, TestAgent{Overrides: `
5614		connect {
5615			test_ca_leaf_root_change_spread = "1ns"
5616		}
5617	`})
5618	defer a.Shutdown()
5619	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
5620	testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)
5621
	// A CA is already set up by default by NewTestAgent, but force a new one so
	// we can easily verify that issued certs were signed by it.
5624	ca1 := connect.TestCAConfigSet(t, a, nil)
5625
5626	{
5627		// Register a local service
5628		args := &structs.ServiceDefinition{
5629			ID:      "foo",
5630			Name:    "test",
5631			Address: "127.0.0.1",
5632			Port:    8000,
5633			Check: structs.CheckType{
5634				TTL: 15 * time.Second,
5635			},
5636		}
5637		req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
5638		resp := httptest.NewRecorder()
5639		_, err := a.srv.AgentRegisterService(resp, req)
5640		require.NoError(err)
5641		if !assert.Equal(200, resp.Code) {
5642			t.Log("Body: ", resp.Body.String())
5643		}
5644	}
5645
5646	// List
5647	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
5648	resp := httptest.NewRecorder()
5649	obj, err := a.srv.AgentConnectCALeafCert(resp, req)
5650	require.NoError(err)
5651	require.Equal("MISS", resp.Header().Get("X-Cache"))
5652
5653	// Get the issued cert
5654	issued, ok := obj.(*structs.IssuedCert)
5655	assert.True(ok)
5656
5657	// Verify that the cert is signed by the CA
5658	requireLeafValidUnderCA(t, issued, ca1)
5659
5660	// Verify blocking index
5661	assert.True(issued.ModifyIndex > 0)
5662	assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex),
5663		resp.Header().Get("X-Consul-Index"))
5664
5665	index := resp.Header().Get("X-Consul-Index")
5666
5667	// Test caching
5668	{
5669		// Fetch it again
5670		resp := httptest.NewRecorder()
5671		obj2, err := a.srv.AgentConnectCALeafCert(resp, req)
5672		require.NoError(err)
5673		require.Equal(obj, obj2)
5674
		// Should be a cache hit this time and not make a request
5676		require.Equal("HIT", resp.Header().Get("X-Cache"))
5677	}
5678
5679	// Issue a blocking query to ensure that the cert gets updated appropriately
5680	{
5681		// Set a new CA
5682		ca := connect.TestCAConfigSet(t, a, nil)
5683
5684		resp := httptest.NewRecorder()
5685		req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+index, nil)
5686		obj, err := a.srv.AgentConnectCALeafCert(resp, req)
5687		require.NoError(err)
5688		issued2 := obj.(*structs.IssuedCert)
5689		require.NotEqual(issued.CertPEM, issued2.CertPEM)
5690		require.NotEqual(issued.PrivateKeyPEM, issued2.PrivateKeyPEM)
5691
5692		// Verify that the cert is signed by the new CA
5693		requireLeafValidUnderCA(t, issued2, ca)
5694
5695		// Should not be a cache hit! The data was updated in response to the blocking
5696		// query being made.
5697		require.Equal("MISS", resp.Header().Get("X-Cache"))
5698	}
5699}
5700
// Test that we can request a leaf cert for a service we have permission for,
// but that is not local to this agent.
5703func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) {
5704	if testing.Short() {
5705		t.Skip("too slow for testing.Short")
5706	}
5707
5708	t.Parallel()
5709
5710	assert := assert.New(t)
5711	require := require.New(t)
5712	a := StartTestAgent(t, TestAgent{Overrides: `
5713		connect {
5714			test_ca_leaf_root_change_spread = "1ns"
5715		}
5716	`})
5717	defer a.Shutdown()
5718	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
5719	testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)
5720
	// A CA is already set up by default by NewTestAgent, but force a new one so
	// we can easily verify that issued certs were signed by it.
5723	ca1 := connect.TestCAConfigSet(t, a, nil)
5724
5725	{
5726		// Register a non-local service (central catalog)
5727		args := &structs.RegisterRequest{
5728			Node:    "foo",
5729			Address: "127.0.0.1",
5730			Service: &structs.NodeService{
5731				Service: "test",
5732				Address: "127.0.0.1",
5733				Port:    8080,
5734			},
5735		}
5736		req, _ := http.NewRequest("PUT", "/v1/catalog/register", jsonReader(args))
5737		resp := httptest.NewRecorder()
5738		_, err := a.srv.CatalogRegister(resp, req)
5739		require.NoError(err)
5740		if !assert.Equal(200, resp.Code) {
5741			t.Log("Body: ", resp.Body.String())
5742		}
5743	}
5744
5745	// List
5746	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
5747	resp := httptest.NewRecorder()
5748	obj, err := a.srv.AgentConnectCALeafCert(resp, req)
5749	require.NoError(err)
5750	require.Equal("MISS", resp.Header().Get("X-Cache"))
5751
5752	// Get the issued cert
5753	issued, ok := obj.(*structs.IssuedCert)
5754	assert.True(ok)
5755
5756	// Verify that the cert is signed by the CA
5757	requireLeafValidUnderCA(t, issued, ca1)
5758
5759	// Verify blocking index
5760	assert.True(issued.ModifyIndex > 0)
5761	assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex),
5762		resp.Header().Get("X-Consul-Index"))
5763
5764	// Test caching
5765	{
5766		// Fetch it again
5767		resp := httptest.NewRecorder()
5768		obj2, err := a.srv.AgentConnectCALeafCert(resp, req)
5769		require.NoError(err)
5770		require.Equal(obj, obj2)
5771
		// Should be a cache hit this time and not make a request
5773		require.Equal("HIT", resp.Header().Get("X-Cache"))
5774	}
5775
5776	// Test Blocking - see https://github.com/hashicorp/consul/issues/4462
5777	{
5778		// Fetch it again
5779		resp := httptest.NewRecorder()
5780		blockingReq, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/connect/ca/leaf/test?wait=125ms&index=%d", issued.ModifyIndex), nil)
5781		doneCh := make(chan struct{})
5782		go func() {
5783			a.srv.AgentConnectCALeafCert(resp, blockingReq)
5784			close(doneCh)
5785		}()
5786
5787		select {
5788		case <-time.After(500 * time.Millisecond):
5789			require.FailNow("Shouldn't block for this long - not respecting wait parameter in the query")
5790
5791		case <-doneCh:
5792		}
5793	}
5794
5795	// Test that caching is updated in the background
5796	{
5797		// Set a new CA
5798		ca := connect.TestCAConfigSet(t, a, nil)
5799
5800		retry.Run(t, func(r *retry.R) {
5801			resp := httptest.NewRecorder()
			// Try to sign again (note there is no index/wait arg since the cache
			// should update in the background even if we aren't actively blocking)
5804			obj, err := a.srv.AgentConnectCALeafCert(resp, req)
5805			r.Check(err)
5806
5807			issued2 := obj.(*structs.IssuedCert)
5808			if issued.CertPEM == issued2.CertPEM {
5809				r.Fatalf("leaf has not updated")
5810			}
5811
5812			// Got a new leaf. Sanity check it's a whole new key as well as different
5813			// cert.
5814			if issued.PrivateKeyPEM == issued2.PrivateKeyPEM {
5815				r.Fatalf("new leaf has same private key as before")
5816			}
5817
5818			// Verify that the cert is signed by the new CA
5819			requireLeafValidUnderCA(t, issued2, ca)
5820
5821			// Should be a cache hit! The data should've updated in the cache
5822			// in the background so this should've been fetched directly from
5823			// the cache.
5824			if resp.Header().Get("X-Cache") != "HIT" {
5825				r.Fatalf("should be a cache hit")
5826			}
5827		})
5828	}
5829}
5830
5831func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) {
5832	ca.SkipIfVaultNotPresent(t)
5833
5834	if testing.Short() {
5835		t.Skip("too slow for testing.Short")
5836	}
5837
5838	t.Parallel()
5839
5840	testVault := ca.NewTestVaultServer(t)
5841	defer testVault.Stop()
5842
5843	assert := assert.New(t)
5844	require := require.New(t)
5845	a := StartTestAgent(t, TestAgent{Overrides: fmt.Sprintf(`
5846		connect {
5847			test_ca_leaf_root_change_spread = "1ns"
5848			ca_provider = "vault"
5849			ca_config {
5850				address = %[1]q
5851				token = %[2]q
5852				root_pki_path = "pki-root/"
5853				intermediate_pki_path = "pki-intermediate/"
5854			}
5855		}
5856	`, testVault.Addr, testVault.RootToken)})
5857	defer a.Shutdown()
5858	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
5859	testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)
5860
5861	var ca1 *structs.CARoot
5862	{
5863		args := &structs.DCSpecificRequest{Datacenter: "dc1"}
5864		var reply structs.IndexedCARoots
5865		require.NoError(a.RPC("ConnectCA.Roots", args, &reply))
5866		for _, r := range reply.Roots {
5867			if r.ID == reply.ActiveRootID {
5868				ca1 = r
5869				break
5870			}
5871		}
5872		require.NotNil(ca1)
5873	}
5874
5875	{
5876		// Register a local service
5877		args := &structs.ServiceDefinition{
5878			ID:      "foo",
5879			Name:    "test",
5880			Address: "127.0.0.1",
5881			Port:    8000,
5882			Check: structs.CheckType{
5883				TTL: 15 * time.Second,
5884			},
5885		}
5886		req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
5887		resp := httptest.NewRecorder()
5888		_, err := a.srv.AgentRegisterService(resp, req)
5889		require.NoError(err)
5890		if !assert.Equal(200, resp.Code) {
5891			t.Log("Body: ", resp.Body.String())
5892		}
5893	}
5894
5895	// List
5896	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
5897	resp := httptest.NewRecorder()
5898	obj, err := a.srv.AgentConnectCALeafCert(resp, req)
5899	require.NoError(err)
5900	require.Equal("MISS", resp.Header().Get("X-Cache"))
5901
5902	// Get the issued cert
5903	issued, ok := obj.(*structs.IssuedCert)
5904	assert.True(ok)
5905
5906	// Verify that the cert is signed by the CA
5907	requireLeafValidUnderCA(t, issued, ca1)
5908
5909	// Verify blocking index
5910	assert.True(issued.ModifyIndex > 0)
5911	assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex),
5912		resp.Header().Get("X-Consul-Index"))
5913
5914	// Test caching
5915	{
5916		// Fetch it again
5917		resp := httptest.NewRecorder()
5918		obj2, err := a.srv.AgentConnectCALeafCert(resp, req)
5919		require.NoError(err)
5920		require.Equal(obj, obj2)
5921
		// Should be a cache hit this time and not make a request
5923		require.Equal("HIT", resp.Header().Get("X-Cache"))
5924	}
5925
5926	// Test that we aren't churning leaves for no reason at idle.
5927	{
5928		ch := make(chan error, 1)
5929		go func() {
5930			req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+strconv.Itoa(int(issued.ModifyIndex)), nil)
5931			resp := httptest.NewRecorder()
5932			obj, err := a.srv.AgentConnectCALeafCert(resp, req)
5933			if err != nil {
5934				ch <- err
5935			} else {
5936				issued2 := obj.(*structs.IssuedCert)
5937				if issued.CertPEM == issued2.CertPEM {
5938					ch <- fmt.Errorf("leaf woke up unexpectedly with same cert")
5939				} else {
5940					ch <- fmt.Errorf("leaf woke up unexpectedly with new cert")
5941				}
5942			}
5943		}()
5944
5945		start := time.Now()
5946
5947		select {
5948		case <-time.After(5 * time.Second):
5949		case err := <-ch:
5950			dur := time.Since(start)
5951			t.Fatalf("unexpected return from blocking query; leaf churned during idle period, took %s: %v", dur, err)
5952		}
5953	}
5954}
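
// The idle no-churn check above is repeated almost verbatim in the secondary
// datacenter test below. A sketch of a shared helper (hypothetical; the tests
// in this file inline this logic, and the helper assumes the "test" service
// those tests register) might look like:
func assertNoLeafChurnAtIdle(t *testing.T, srv *HTTPHandlers, issued *structs.IssuedCert, wait time.Duration) {
	t.Helper()

	ch := make(chan error, 1)
	go func() {
		// Block on the cert's current index; this should only return within the
		// idle window if the leaf was re-issued for some reason.
		req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+strconv.Itoa(int(issued.ModifyIndex)), nil)
		resp := httptest.NewRecorder()
		obj, err := srv.AgentConnectCALeafCert(resp, req)
		if err != nil {
			ch <- err
			return
		}
		issued2 := obj.(*structs.IssuedCert)
		if issued.CertPEM == issued2.CertPEM {
			ch <- fmt.Errorf("leaf woke up unexpectedly with same cert")
		} else {
			ch <- fmt.Errorf("leaf woke up unexpectedly with new cert")
		}
	}()

	start := time.Now()
	select {
	case <-time.After(wait):
		// No churn observed during the idle window.
	case err := <-ch:
		t.Fatalf("unexpected return from blocking query; leaf churned during idle period, took %s: %v", time.Since(start), err)
	}
}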
5955
5956func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) {
5957	if testing.Short() {
5958		t.Skip("too slow for testing.Short")
5959	}
5960
5961	t.Parallel()
5962
5963	assert := assert.New(t)
5964	require := require.New(t)
5965
5966	a1 := StartTestAgent(t, TestAgent{Name: "dc1", HCL: `
5967		datacenter = "dc1"
5968		primary_datacenter = "dc1"
5969	`, Overrides: `
5970		connect {
5971			test_ca_leaf_root_change_spread = "1ns"
5972		}
5973	`})
5974	defer a1.Shutdown()
5975	testrpc.WaitForTestAgent(t, a1.RPC, "dc1")
5976
5977	a2 := StartTestAgent(t, TestAgent{Name: "dc2", HCL: `
5978		datacenter = "dc2"
5979		primary_datacenter = "dc1"
5980	`, Overrides: `
5981		connect {
5982			test_ca_leaf_root_change_spread = "1ns"
5983		}
5984	`})
5985	defer a2.Shutdown()
5986	testrpc.WaitForTestAgent(t, a2.RPC, "dc2")
5987
5988	// Wait for the WAN join.
5989	addr := fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortWAN)
5990	_, err := a2.JoinWAN([]string{addr})
5991	require.NoError(err)
5992
5993	testrpc.WaitForLeader(t, a1.RPC, "dc1")
5994	testrpc.WaitForLeader(t, a2.RPC, "dc2")
5995	retry.Run(t, func(r *retry.R) {
5996		if got, want := len(a1.WANMembers()), 2; got < want {
5997			r.Fatalf("got %d WAN members want at least %d", got, want)
5998		}
5999	})
6000
	// A CA is already set up by default by NewTestAgent, but force a new one so
	// we can easily verify that issued certs were signed by it.
6003	dc1_ca1 := connect.TestCAConfigSet(t, a1, nil)
6004
6005	// Wait until root is updated in both dcs.
6006	waitForActiveCARoot(t, a1.srv, dc1_ca1)
6007	waitForActiveCARoot(t, a2.srv, dc1_ca1)
6008
6009	{
6010		// Register a local service in the SECONDARY
6011		args := &structs.ServiceDefinition{
6012			ID:      "foo",
6013			Name:    "test",
6014			Address: "127.0.0.1",
6015			Port:    8000,
6016			Check: structs.CheckType{
6017				TTL: 15 * time.Second,
6018			},
6019		}
6020		req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
6021		resp := httptest.NewRecorder()
6022		_, err := a2.srv.AgentRegisterService(resp, req)
6023		require.NoError(err)
6024		if !assert.Equal(200, resp.Code) {
6025			t.Log("Body: ", resp.Body.String())
6026		}
6027	}
6028
6029	// List
6030	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
6031	resp := httptest.NewRecorder()
6032	obj, err := a2.srv.AgentConnectCALeafCert(resp, req)
6033	require.NoError(err)
6034	require.Equal("MISS", resp.Header().Get("X-Cache"))
6035
6036	// Get the issued cert
6037	issued, ok := obj.(*structs.IssuedCert)
6038	assert.True(ok)
6039
6040	// Verify that the cert is signed by the CA
6041	requireLeafValidUnderCA(t, issued, dc1_ca1)
6042
6043	// Verify blocking index
6044	assert.True(issued.ModifyIndex > 0)
6045	assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex),
6046		resp.Header().Get("X-Consul-Index"))
6047
6048	// Test caching
6049	{
6050		// Fetch it again
6051		resp := httptest.NewRecorder()
6052		obj2, err := a2.srv.AgentConnectCALeafCert(resp, req)
6053		require.NoError(err)
6054		require.Equal(obj, obj2)
6055
		// Should be a cache hit this time and not make a request
6057		require.Equal("HIT", resp.Header().Get("X-Cache"))
6058	}
6059
6060	// Test that we aren't churning leaves for no reason at idle.
6061	{
6062		ch := make(chan error, 1)
6063		go func() {
6064			req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+strconv.Itoa(int(issued.ModifyIndex)), nil)
6065			resp := httptest.NewRecorder()
6066			obj, err := a2.srv.AgentConnectCALeafCert(resp, req)
6067			if err != nil {
6068				ch <- err
6069			} else {
6070				issued2 := obj.(*structs.IssuedCert)
6071				if issued.CertPEM == issued2.CertPEM {
6072					ch <- fmt.Errorf("leaf woke up unexpectedly with same cert")
6073				} else {
6074					ch <- fmt.Errorf("leaf woke up unexpectedly with new cert")
6075				}
6076			}
6077		}()
6078
6079		start := time.Now()
6080
		// Before the fix from PR-6513 this would reliably wake up after ~20ms
		// with a new cert. Since this test is necessarily a bit timing-dependent,
		// we'll chill out for 5 seconds, which should be enough time to disprove
		// the original bug.
6085		select {
6086		case <-time.After(5 * time.Second):
6087		case err := <-ch:
6088			dur := time.Since(start)
6089			t.Fatalf("unexpected return from blocking query; leaf churned during idle period, took %s: %v", dur, err)
6090		}
6091	}
6092
6093	// Set a new CA
6094	dc1_ca2 := connect.TestCAConfigSet(t, a2, nil)
6095
6096	// Wait until root is updated in both dcs.
6097	waitForActiveCARoot(t, a1.srv, dc1_ca2)
6098	waitForActiveCARoot(t, a2.srv, dc1_ca2)
6099
6100	// Test that caching is updated in the background
6101	retry.Run(t, func(r *retry.R) {
6102		resp := httptest.NewRecorder()
		// Try to sign again (note there is no index/wait arg since the cache
		// should update in the background even if we aren't actively blocking)
6105		obj, err := a2.srv.AgentConnectCALeafCert(resp, req)
6106		r.Check(err)
6107
6108		issued2 := obj.(*structs.IssuedCert)
6109		if issued.CertPEM == issued2.CertPEM {
6110			r.Fatalf("leaf has not updated")
6111		}
6112
6113		// Got a new leaf. Sanity check it's a whole new key as well as different
6114		// cert.
6115		if issued.PrivateKeyPEM == issued2.PrivateKeyPEM {
6116			r.Fatalf("new leaf has same private key as before")
6117		}
6118
6119		// Verify that the cert is signed by the new CA
6120		requireLeafValidUnderCA(t, issued2, dc1_ca2)
6121
6122		// Should be a cache hit! The data should've updated in the cache
6123		// in the background so this should've been fetched directly from
6124		// the cache.
6125		if resp.Header().Get("X-Cache") != "HIT" {
6126			r.Fatalf("should be a cache hit")
6127		}
6128	})
6129}
6130
6131func waitForActiveCARoot(t *testing.T, srv *HTTPHandlers, expect *structs.CARoot) {
6132	retry.Run(t, func(r *retry.R) {
6133		req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil)
6134		resp := httptest.NewRecorder()
6135		obj, err := srv.AgentConnectCARoots(resp, req)
6136		if err != nil {
6137			r.Fatalf("err: %v", err)
6138		}
6139
6140		roots, ok := obj.(structs.IndexedCARoots)
6141		if !ok {
6142			r.Fatalf("response is wrong type %T", obj)
6143		}
6144
6145		var root *structs.CARoot
6146		for _, r := range roots.Roots {
6147			if r.ID == roots.ActiveRootID {
6148				root = r
6149				break
6150			}
6151		}
6152		if root == nil {
6153			r.Fatal("no active root")
6154		}
6155		if root.ID != expect.ID {
6156			r.Fatalf("current active root is %s; waiting for %s", root.ID, expect.ID)
6157		}
6158	})
6159}
6160
6161func requireLeafValidUnderCA(t *testing.T, issued *structs.IssuedCert, ca *structs.CARoot) {
6162	leaf, intermediates, err := connect.ParseLeafCerts(issued.CertPEM)
6163	require.NoError(t, err)
6164
6165	roots := x509.NewCertPool()
6166	require.True(t, roots.AppendCertsFromPEM([]byte(ca.RootCert)))
6167
6168	_, err = leaf.Verify(x509.VerifyOptions{
6169		Roots:         roots,
6170		Intermediates: intermediates,
6171	})
6172	require.NoError(t, err)
6173
	// Verify the private key matches the cert. tls.X509KeyPair does this for us!
6175	_, err = tls.X509KeyPair([]byte(issued.CertPEM), []byte(issued.PrivateKeyPEM))
6176	require.NoError(t, err)
6177}
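
// A hypothetical negative counterpart to requireLeafValidUnderCA (not used by
// the tests above): verification of a leaf against a CA whose root is not part
// of the leaf's chain of trust should fail.
func requireLeafNotValidUnderCA(t *testing.T, issued *structs.IssuedCert, ca *structs.CARoot) {
	leaf, intermediates, err := connect.ParseLeafCerts(issued.CertPEM)
	require.NoError(t, err)

	roots := x509.NewCertPool()
	require.True(t, roots.AppendCertsFromPEM([]byte(ca.RootCert)))

	// Verifying against an unrelated root should return an error.
	_, err = leaf.Verify(x509.VerifyOptions{
		Roots:         roots,
		Intermediates: intermediates,
	})
	require.Error(t, err)
}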
6178
6179func TestAgentConnectAuthorize_badBody(t *testing.T) {
6180	if testing.Short() {
6181		t.Skip("too slow for testing.Short")
6182	}
6183
6184	t.Parallel()
6185
6186	assert := assert.New(t)
6187	require := require.New(t)
6188	a := NewTestAgent(t, "")
6189	defer a.Shutdown()
6190
6191	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
6192	args := []string{}
6193	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6194	resp := httptest.NewRecorder()
6195	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6196	require.Error(err)
6197	assert.Nil(respRaw)
6198	// Note that BadRequestError is handled outside the endpoint handler so we
6199	// still see a 200 if we check here.
6200	assert.Contains(err.Error(), "decode failed")
6201}
6202
6203func TestAgentConnectAuthorize_noTarget(t *testing.T) {
6204	if testing.Short() {
6205		t.Skip("too slow for testing.Short")
6206	}
6207
6208	t.Parallel()
6209
6210	assert := assert.New(t)
6211	require := require.New(t)
6212	a := NewTestAgent(t, "")
6213	defer a.Shutdown()
6214
6215	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
6216	args := &structs.ConnectAuthorizeRequest{}
6217	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6218	resp := httptest.NewRecorder()
6219	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6220	require.Error(err)
6221	assert.Nil(respRaw)
6222	// Note that BadRequestError is handled outside the endpoint handler so we
6223	// still see a 200 if we check here.
6224	assert.Contains(err.Error(), "Target service must be specified")
6225}
6226
// Client ID is not in a valid URI format
6228func TestAgentConnectAuthorize_idInvalidFormat(t *testing.T) {
6229	if testing.Short() {
6230		t.Skip("too slow for testing.Short")
6231	}
6232
6233	t.Parallel()
6234
6235	assert := assert.New(t)
6236	require := require.New(t)
6237	a := NewTestAgent(t, "")
6238	defer a.Shutdown()
6239
6240	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
6241	args := &structs.ConnectAuthorizeRequest{
6242		Target:        "web",
6243		ClientCertURI: "tubes",
6244	}
6245	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6246	resp := httptest.NewRecorder()
6247	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6248	require.Error(err)
6249	assert.Nil(respRaw)
6250	// Note that BadRequestError is handled outside the endpoint handler so we
6251	// still see a 200 if we check here.
6252	assert.Contains(err.Error(), "ClientCertURI not a valid Connect identifier")
6253}
6254
// Client ID is a valid URI but it's not a service URI
6256func TestAgentConnectAuthorize_idNotService(t *testing.T) {
6257	if testing.Short() {
6258		t.Skip("too slow for testing.Short")
6259	}
6260
6261	t.Parallel()
6262
6263	assert := assert.New(t)
6264	require := require.New(t)
6265	a := NewTestAgent(t, "")
6266	defer a.Shutdown()
6267
6268	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
6269	args := &structs.ConnectAuthorizeRequest{
6270		Target:        "web",
6271		ClientCertURI: "spiffe://1234.consul",
6272	}
6273	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6274	resp := httptest.NewRecorder()
6275	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6276	require.Error(err)
6277	assert.Nil(respRaw)
6278	// Note that BadRequestError is handled outside the endpoint handler so we
6279	// still see a 200 if we check here.
6280	assert.Contains(err.Error(), "ClientCertURI not a valid Service identifier")
6281}
6282
6283// Test when there is an intention allowing the connection
6284func TestAgentConnectAuthorize_allow(t *testing.T) {
6285	if testing.Short() {
6286		t.Skip("too slow for testing.Short")
6287	}
6288
6289	t.Parallel()
6290
6291	require := require.New(t)
6292	a := NewTestAgent(t, "")
6293	defer a.Shutdown()
6294
6295	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
6296	target := "db"
6297
6298	// Create some intentions
6299	var ixnId string
6300	{
6301		req := structs.IntentionRequest{
6302			Datacenter: "dc1",
6303			Op:         structs.IntentionOpCreate,
6304			Intention:  structs.TestIntention(t),
6305		}
6306		req.Intention.SourceNS = structs.IntentionDefaultNamespace
6307		req.Intention.SourceName = "web"
6308		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
6309		req.Intention.DestinationName = target
6310		req.Intention.Action = structs.IntentionActionAllow
6311
6312		require.Nil(a.RPC("Intention.Apply", &req, &ixnId))
6313	}
6314
6315	args := &structs.ConnectAuthorizeRequest{
6316		Target:        target,
6317		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
6318	}
6319	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6320	resp := httptest.NewRecorder()
6321	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6322	require.Nil(err)
6323	require.Equal(200, resp.Code)
6324	require.Equal("MISS", resp.Header().Get("X-Cache"))
6325
6326	obj := respRaw.(*connectAuthorizeResp)
6327	require.True(obj.Authorized)
6328	require.Contains(obj.Reason, "Matched")
6329
6330	// Make the request again
6331	{
6332		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6333		resp := httptest.NewRecorder()
6334		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6335		require.Nil(err)
6336		require.Equal(200, resp.Code)
6337
6338		obj := respRaw.(*connectAuthorizeResp)
6339		require.True(obj.Authorized)
6340		require.Contains(obj.Reason, "Matched")
6341
6342		// That should've been a cache hit.
6343		require.Equal("HIT", resp.Header().Get("X-Cache"))
6344	}
6345
6346	// Change the intention
6347	{
6348		req := structs.IntentionRequest{
6349			Datacenter: "dc1",
6350			Op:         structs.IntentionOpUpdate,
6351			Intention:  structs.TestIntention(t),
6352		}
6353		req.Intention.ID = ixnId
6354		req.Intention.SourceNS = structs.IntentionDefaultNamespace
6355		req.Intention.SourceName = "web"
6356		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
6357		req.Intention.DestinationName = target
6358		req.Intention.Action = structs.IntentionActionDeny
6359
6360		require.Nil(a.RPC("Intention.Apply", &req, &ixnId))
6361	}
6362
6363	// Short sleep lets the cache background refresh happen
6364	time.Sleep(100 * time.Millisecond)
6365
6366	// Make the request again
6367	{
6368		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6369		resp := httptest.NewRecorder()
6370		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6371		require.Nil(err)
6372		require.Equal(200, resp.Code)
6373
6374		obj := respRaw.(*connectAuthorizeResp)
6375		require.False(obj.Authorized)
6376		require.Contains(obj.Reason, "Matched")
6377
6378		// That should've been a cache hit, too, since it updated in the
6379		// background.
6380		require.Equal("HIT", resp.Header().Get("X-Cache"))
6381	}
6382}
6383
6384// Test when there is an intention denying the connection
6385func TestAgentConnectAuthorize_deny(t *testing.T) {
6386	if testing.Short() {
6387		t.Skip("too slow for testing.Short")
6388	}
6389
6390	t.Parallel()
6391
6392	assert := assert.New(t)
6393	a := NewTestAgent(t, "")
6394	defer a.Shutdown()
6395
6396	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
6397	target := "db"
6398
6399	// Create some intentions
6400	{
6401		req := structs.IntentionRequest{
6402			Datacenter: "dc1",
6403			Op:         structs.IntentionOpCreate,
6404			Intention:  structs.TestIntention(t),
6405		}
6406		req.Intention.SourceNS = structs.IntentionDefaultNamespace
6407		req.Intention.SourceName = "web"
6408		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
6409		req.Intention.DestinationName = target
6410		req.Intention.Action = structs.IntentionActionDeny
6411
6412		var reply string
6413		assert.Nil(a.RPC("Intention.Apply", &req, &reply))
6414	}
6415
6416	args := &structs.ConnectAuthorizeRequest{
6417		Target:        target,
6418		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
6419	}
6420	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6421	resp := httptest.NewRecorder()
6422	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6423	assert.Nil(err)
6424	assert.Equal(200, resp.Code)
6425
6426	obj := respRaw.(*connectAuthorizeResp)
6427	assert.False(obj.Authorized)
6428	assert.Contains(obj.Reason, "Matched")
6429}
6430
// Test when there is an intention allowing a service with a different trust
// domain. We allow this because migration between trust domains shouldn't cause
// an outage even if we have stale info about the current trust domain. It's
// safe because the CA root is either unique to this cluster and not used to
// sign anything external, or path validation can be used to ensure that the CA
// can only issue certs that are valid for the specific cluster trust domain at
// the x509 level, which is enforced by the TLS handshake.
6438func TestAgentConnectAuthorize_allowTrustDomain(t *testing.T) {
6439	if testing.Short() {
6440		t.Skip("too slow for testing.Short")
6441	}
6442
6443	t.Parallel()
6444
6445	assert := assert.New(t)
6446	require := require.New(t)
6447	a := NewTestAgent(t, "")
6448	defer a.Shutdown()
6449
6450	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
6451	target := "db"
6452
6453	// Create some intentions
6454	{
6455		req := structs.IntentionRequest{
6456			Datacenter: "dc1",
6457			Op:         structs.IntentionOpCreate,
6458			Intention:  structs.TestIntention(t),
6459		}
6460		req.Intention.SourceNS = structs.IntentionDefaultNamespace
6461		req.Intention.SourceName = "web"
6462		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
6463		req.Intention.DestinationName = target
6464		req.Intention.Action = structs.IntentionActionAllow
6465
6466		var reply string
6467		require.NoError(a.RPC("Intention.Apply", &req, &reply))
6468	}
6469
6470	{
6471		args := &structs.ConnectAuthorizeRequest{
6472			Target:        target,
6473			ClientCertURI: "spiffe://fake-domain.consul/ns/default/dc/dc1/svc/web",
6474		}
6475		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6476		resp := httptest.NewRecorder()
6477		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6478		require.NoError(err)
6479		assert.Equal(200, resp.Code)
6480
6481		obj := respRaw.(*connectAuthorizeResp)
6482		require.True(obj.Authorized)
6483		require.Contains(obj.Reason, "Matched")
6484	}
6485}
6486
6487func TestAgentConnectAuthorize_denyWildcard(t *testing.T) {
6488	if testing.Short() {
6489		t.Skip("too slow for testing.Short")
6490	}
6491
6492	t.Parallel()
6493
6494	assert := assert.New(t)
6495	require := require.New(t)
6496	a := NewTestAgent(t, "")
6497	defer a.Shutdown()
6498	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
6499
6500	target := "db"
6501
6502	// Create some intentions
6503	{
6504		// Deny wildcard to DB
6505		req := structs.IntentionRequest{
6506			Datacenter: "dc1",
6507			Op:         structs.IntentionOpCreate,
6508			Intention:  structs.TestIntention(t),
6509		}
6510		req.Intention.SourceNS = structs.IntentionDefaultNamespace
6511		req.Intention.SourceName = "*"
6512		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
6513		req.Intention.DestinationName = target
6514		req.Intention.Action = structs.IntentionActionDeny
6515
6516		var reply string
6517		require.NoError(a.RPC("Intention.Apply", &req, &reply))
6518	}
6519	{
6520		// Allow web to DB
6521		req := structs.IntentionRequest{
6522			Datacenter: "dc1",
6523			Op:         structs.IntentionOpCreate,
6524			Intention:  structs.TestIntention(t),
6525		}
6526		req.Intention.SourceNS = structs.IntentionDefaultNamespace
6527		req.Intention.SourceName = "web"
6528		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
6529		req.Intention.DestinationName = target
6530		req.Intention.Action = structs.IntentionActionAllow
6531
6532		var reply string
6533		assert.Nil(a.RPC("Intention.Apply", &req, &reply))
6534	}
6535
6536	// Web should be allowed
6537	{
6538		args := &structs.ConnectAuthorizeRequest{
6539			Target:        target,
6540			ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
6541		}
6542		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6543		resp := httptest.NewRecorder()
6544		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6545		require.NoError(err)
6546		assert.Equal(200, resp.Code)
6547
6548		obj := respRaw.(*connectAuthorizeResp)
6549		assert.True(obj.Authorized)
6550		assert.Contains(obj.Reason, "Matched")
6551	}
6552
6553	// API should be denied
6554	{
6555		args := &structs.ConnectAuthorizeRequest{
6556			Target:        target,
6557			ClientCertURI: connect.TestSpiffeIDService(t, "api").URI().String(),
6558		}
6559		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
6560		resp := httptest.NewRecorder()
6561		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6562		require.NoError(err)
6563		assert.Equal(200, resp.Code)
6564
6565		obj := respRaw.(*connectAuthorizeResp)
6566		assert.False(obj.Authorized)
6567		assert.Contains(obj.Reason, "Matched")
6568	}
6569}
6570
6571// Test that authorize fails without service:write for the target service.
6572func TestAgentConnectAuthorize_serviceWrite(t *testing.T) {
6573	if testing.Short() {
6574		t.Skip("too slow for testing.Short")
6575	}
6576
6577	t.Parallel()
6578
6579	assert := assert.New(t)
6580	a := NewTestAgent(t, TestACLConfig())
6581	defer a.Shutdown()
6582	testrpc.WaitForLeader(t, a.RPC, "dc1")
6583
6584	// Create an ACL
6585	var token string
6586	{
6587		args := map[string]interface{}{
6588			"Name":  "User Token",
6589			"Type":  "client",
6590			"Rules": `service "foo" { policy = "read" }`,
6591		}
6592		req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
6593		resp := httptest.NewRecorder()
6594		obj, err := a.srv.ACLCreate(resp, req)
6595		if err != nil {
6596			t.Fatalf("err: %v", err)
6597		}
6598		aclResp := obj.(aclCreateResponse)
6599		token = aclResp.ID
6600	}
6601
6602	args := &structs.ConnectAuthorizeRequest{
6603		Target:        "foo",
6604		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
6605	}
6606	req, _ := http.NewRequest("POST",
6607		"/v1/agent/connect/authorize?token="+token, jsonReader(args))
6608	resp := httptest.NewRecorder()
6609	_, err := a.srv.AgentConnectAuthorize(resp, req)
6610	assert.True(acl.IsErrPermissionDenied(err))
6611}
6612
6613// Test when no intentions match w/ a default deny policy
6614func TestAgentConnectAuthorize_defaultDeny(t *testing.T) {
6615	if testing.Short() {
6616		t.Skip("too slow for testing.Short")
6617	}
6618
6619	t.Parallel()
6620
6621	assert := assert.New(t)
6622	a := NewTestAgent(t, TestACLConfig())
6623	defer a.Shutdown()
6624	testrpc.WaitForLeader(t, a.RPC, "dc1")
6625
6626	args := &structs.ConnectAuthorizeRequest{
6627		Target:        "foo",
6628		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
6629	}
6630	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args))
6631	resp := httptest.NewRecorder()
6632	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6633	assert.Nil(err)
6634	assert.Equal(200, resp.Code)
6635
6636	obj := respRaw.(*connectAuthorizeResp)
6637	assert.False(obj.Authorized)
6638	assert.Contains(obj.Reason, "Default behavior")
6639}
6640
6641// Test when no intentions match w/ a default allow policy
6642func TestAgentConnectAuthorize_defaultAllow(t *testing.T) {
6643	if testing.Short() {
6644		t.Skip("too slow for testing.Short")
6645	}
6646
6647	t.Parallel()
6648
6649	assert := assert.New(t)
6650	dc1 := "dc1"
6651	a := NewTestAgent(t, `
6652		acl_datacenter = "`+dc1+`"
6653		acl_default_policy = "allow"
6654		acl_master_token = "root"
6655		acl_agent_token = "root"
6656		acl_agent_master_token = "towel"
6657	`)
6658	defer a.Shutdown()
6659	testrpc.WaitForTestAgent(t, a.RPC, dc1)
6660
6661	args := &structs.ConnectAuthorizeRequest{
6662		Target:        "foo",
6663		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
6664	}
6665	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args))
6666	resp := httptest.NewRecorder()
6667	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
6668	assert.Nil(err)
6669	assert.Equal(200, resp.Code)
6670	assert.NotNil(respRaw)
6671
6672	obj := respRaw.(*connectAuthorizeResp)
6673	assert.True(obj.Authorized)
6674	assert.Contains(obj.Reason, "Default behavior")
6675}
6676
6677func TestAgent_Host(t *testing.T) {
6678	if testing.Short() {
6679		t.Skip("too slow for testing.Short")
6680	}
6681
6682	t.Parallel()
6683	assert := assert.New(t)
6684
6685	dc1 := "dc1"
6686	a := NewTestAgent(t, `
6687	acl_datacenter = "`+dc1+`"
6688	acl_default_policy = "allow"
6689	acl_master_token = "master"
6690	acl_agent_token = "agent"
6691	acl_agent_master_token = "towel"
6692`)
6693	defer a.Shutdown()
6694
6695	testrpc.WaitForLeader(t, a.RPC, "dc1")
6696	req, _ := http.NewRequest("GET", "/v1/agent/host?token=master", nil)
6697	resp := httptest.NewRecorder()
6698	respRaw, err := a.srv.AgentHost(resp, req)
6699	assert.Nil(err)
6700	assert.Equal(http.StatusOK, resp.Code)
6701	assert.NotNil(respRaw)
6702
6703	obj := respRaw.(*debug.HostInfo)
6704	assert.NotNil(obj.CollectionTime)
6705	assert.Empty(obj.Errors)
6706}
6707
6708func TestAgent_HostBadACL(t *testing.T) {
6709	if testing.Short() {
6710		t.Skip("too slow for testing.Short")
6711	}
6712
6713	t.Parallel()
6714	assert := assert.New(t)
6715
6716	dc1 := "dc1"
6717	a := NewTestAgent(t, `
6718	acl_datacenter = "`+dc1+`"
6719	acl_default_policy = "deny"
6720	acl_master_token = "root"
6721	acl_agent_token = "agent"
6722	acl_agent_master_token = "towel"
6723`)
6724	defer a.Shutdown()
6725
6726	testrpc.WaitForLeader(t, a.RPC, "dc1")
6727	req, _ := http.NewRequest("GET", "/v1/agent/host?token=agent", nil)
6728	resp := httptest.NewRecorder()
6729	respRaw, err := a.srv.AgentHost(resp, req)
6730	assert.EqualError(err, "ACL not found")
6731	assert.Equal(http.StatusOK, resp.Code)
6732	assert.Nil(respRaw)
6733}
6734
// This tests that a proxy with an ExposeConfig is returned as expected.
6736func TestAgent_Services_ExposeConfig(t *testing.T) {
6737	if testing.Short() {
6738		t.Skip("too slow for testing.Short")
6739	}
6740
6741	t.Parallel()
6742
6743	a := NewTestAgent(t, "")
6744	defer a.Shutdown()
6745
6746	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
6747	srv1 := &structs.NodeService{
6748		Kind:    structs.ServiceKindConnectProxy,
6749		ID:      "proxy-id",
6750		Service: "proxy-name",
6751		Port:    8443,
6752		Proxy: structs.ConnectProxyConfig{
6753			Expose: structs.ExposeConfig{
6754				Checks: true,
6755				Paths: []structs.ExposePath{
6756					{
6757						ListenerPort:  8080,
6758						LocalPathPort: 21500,
6759						Protocol:      "http2",
6760						Path:          "/metrics",
6761					},
6762				},
6763			},
6764		},
6765	}
	require.NoError(t, a.State.AddService(srv1, ""))
6767
6768	req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
6769	obj, err := a.srv.AgentServices(nil, req)
6770	require.NoError(t, err)
6771	val := obj.(map[string]*api.AgentService)
6772	require.Len(t, val, 1)
6773	actual := val["proxy-id"]
6774	require.NotNil(t, actual)
6775	require.Equal(t, api.ServiceKindConnectProxy, actual.Kind)
6776	require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy)
6777}
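
// For reference, the ExposeConfig registered above corresponds roughly to the
// following service definition in agent configuration. This is a sketch that
// assumes the usual snake_case HCL mapping of these struct fields and is not
// taken from this test suite (a real connect-proxy registration would also set
// proxy.destination_service_name, omitted here to mirror the fields the test
// sets):
//
//	service {
//	  kind = "connect-proxy"
//	  name = "proxy-name"
//	  port = 8443
//	  proxy {
//	    expose {
//	      checks = true
//	      paths = [
//	        {
//	          listener_port   = 8080
//	          local_path_port = 21500
//	          protocol        = "http2"
//	          path            = "/metrics"
//	        }
//	      ]
//	    }
//	  }
//	}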
6778