1package promtail
2
3import (
4	"context"
5	"fmt"
6	"io"
7	"io/ioutil"
8	"math"
9	"math/rand"
10	"net/http"
11	"net/url"
12	"os"
13	"path/filepath"
14	"sync"
15	"testing"
16	"time"
17
18	"github.com/cortexproject/cortex/pkg/util"
19	util_log "github.com/cortexproject/cortex/pkg/util/log"
20	"github.com/go-kit/kit/log"
21	"github.com/go-kit/kit/log/level"
22	"github.com/grafana/dskit/flagext"
23	"github.com/pkg/errors"
24	"github.com/prometheus/client_golang/prometheus"
25	"github.com/prometheus/common/model"
26	"github.com/prometheus/prometheus/discovery"
27	"github.com/prometheus/prometheus/discovery/targetgroup"
28	"github.com/prometheus/prometheus/pkg/labels"
29	"github.com/prometheus/prometheus/pkg/textparse"
30	"github.com/prometheus/prometheus/promql/parser"
31	"github.com/stretchr/testify/assert"
32	"github.com/stretchr/testify/require"
33	serverww "github.com/weaveworks/common/server"
34
35	"github.com/grafana/loki/clients/pkg/logentry/stages"
36	"github.com/grafana/loki/clients/pkg/promtail/client"
37	"github.com/grafana/loki/clients/pkg/promtail/config"
38	"github.com/grafana/loki/clients/pkg/promtail/positions"
39	"github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"
40	"github.com/grafana/loki/clients/pkg/promtail/server"
41	file2 "github.com/grafana/loki/clients/pkg/promtail/targets/file"
42
43	"github.com/grafana/loki/pkg/logproto"
44)
45
46const httpTestPort = 9080
47
// TestPromtail is an end-to-end test: it starts an in-process HTTP server
// acting as the Loki push endpoint, runs a full promtail instance against a
// temporary log directory, exercises several tailing scenarios (single file,
// rolled file, symlinked file, subdirectory, pipeline stages), and finally
// verifies the received entries, labels, and promtail's own metrics.
func TestPromtail(t *testing.T) {
	// Setup.
	w := log.NewSyncWriter(os.Stderr)
	logger := log.NewLogfmtLogger(w)
	logger = level.NewFilter(logger, level.AllowInfo())
	util_log.Logger = logger

	initRandom()
	// Unique temp dir per run so parallel/failed runs don't collide.
	dirName := "/tmp/promtail_test_" + randName()
	positionsFileName := dirName + "/positions.yml"

	err := os.MkdirAll(dirName, 0750)
	if err != nil {
		t.Error(err)
		return
	}

	defer func() { _ = os.RemoveAll(dirName) }()

	testDir := dirName + "/logs"
	err = os.MkdirAll(testDir, 0750)
	if err != nil {
		t.Error(err)
		return
	}

	// Fake Loki push endpoint that records everything promtail sends.
	handler := &testServerHandler{
		receivedMap:    map[string][]logproto.Entry{},
		receivedLabels: map[string][]labels.Labels{},
		recMtx:         sync.Mutex{},
		t:              t,
	}
	http.Handle("/loki/api/v1/push", handler)
	var (
		wg        sync.WaitGroup
		listenErr error
		server    = &http.Server{Addr: "localhost:3100", Handler: nil}
	)
	// This deferred block runs last (defers are LIFO): it waits for the
	// server and promtail goroutines to finish and surfaces their errors.
	defer func() {
		fmt.Fprintf(os.Stdout, "wait close")
		wg.Wait()
		if err != nil {
			t.Fatal(err)
		}
		if listenErr != nil && listenErr != http.ErrServerClosed {
			t.Fatal(listenErr)
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		listenErr = server.ListenAndServe()
	}()
	defer func() {
		_ = server.Shutdown(context.Background())
	}()
	// Run.

	p, err := New(buildTestConfig(t, positionsFileName, testDir), false)
	if err != nil {
		t.Error("error creating promtail", err)
		return
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		err = p.Run()
		if err != nil {
			err = errors.Wrap(err, "Failed to start promtail")
		}
	}()

	// Map of file path -> number of log lines we expect to receive for it.
	expectedCounts := map[string]int{}

	startupMarkerFile := testDir + "/startupMarker.log"
	expectedCounts[startupMarkerFile] = createStartupFile(t, startupMarkerFile)

	// Wait for promtail to startup and send entry from our startup marker file.
	if err := waitForEntries(10, handler, expectedCounts); err != nil {
		t.Fatal("Timed out waiting for promtail to start")
	}

	// Run test file scenarios.

	logFile1 := testDir + "/testSingle.log"
	prefix1 := "single"
	expectedCounts[logFile1] = singleFile(t, logFile1, prefix1)

	logFile2 := testDir + "/testFileRoll.log"
	prefix2 := "roll"
	expectedCounts[logFile2] = fileRoll(t, logFile2, prefix2)

	logFile3 := testDir + "/testSymlinkRoll.log"
	prefix3 := "sym"
	expectedCounts[logFile3] = symlinkRoll(t, testDir, logFile3, prefix3)

	logFile4 := testDir + "/testsubdir/testFile.log"
	prefix4 := "sub"
	expectedCounts[logFile4] = subdirSingleFile(t, logFile4, prefix4)

	// Docker-format JSON lines fed through the pipeline stages configured in
	// buildTestConfig (docker -> regex -> timestamp -> labels).
	logFile5 := testDir + "/testPipeline.log"
	entries := []string{
		`{"log":"11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] \"GET /1986.js HTTP/1.1\" 200 932 \"-\" \"Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6\"","stream":"stderr","time":"2019-04-30T02:12:41.8443515Z"}`,
		`{"log":"11.11.11.12 - - [19/May/2015:04:05:16 -0500] \"POST /blog HTTP/1.1\" 200 10975 \"http://grafana.com/test/\" \"Mozilla/5.0 (Windows NT 6.1; WOW64) Gecko/20091221 Firefox/3.5.7 GTB6\"","stream":"stdout","time":"2019-04-30T02:12:42.8443515Z"}`,
	}
	expectedCounts[logFile5] = pipelineFile(t, logFile5, entries)
	// The docker stage should have unwrapped the JSON, leaving the raw log
	// lines below as the entry content.
	expectedEntries := make(map[string]int)
	entriesArray := []string{
		`11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"`,
		`11.11.11.12 - - [19/May/2015:04:05:16 -0500] "POST /blog HTTP/1.1" 200 10975 "http://grafana.com/test/" "Mozilla/5.0 (Windows NT 6.1; WOW64) Gecko/20091221 Firefox/3.5.7 GTB6"`,
	}
	for i, entry := range entriesArray {
		expectedEntries[entry] = i
	}
	// Label sets the pipeline is expected to attach to each stream.
	lbls := []labels.Labels{}
	lbls = append(lbls, labels.Labels{
		labels.Label{Name: "action", Value: "GET"},
		labels.Label{Name: "filename", Value: dirName + "/logs/testPipeline.log"},
		labels.Label{Name: "job", Value: "varlogs"},
		labels.Label{Name: "localhost", Value: ""},
		labels.Label{Name: "match", Value: "true"},
		labels.Label{Name: "stream", Value: "stderr"},
	})

	lbls = append(lbls, labels.Labels{
		labels.Label{Name: "action", Value: "POST"},
		labels.Label{Name: "filename", Value: dirName + "/logs/testPipeline.log"},
		labels.Label{Name: "job", Value: "varlogs"},
		labels.Label{Name: "localhost", Value: ""},
		labels.Label{Name: "match", Value: "true"},
		labels.Label{Name: "stream", Value: "stdout"},
	})
	expectedLabels := make(map[string]int)
	for i, label := range lbls {
		expectedLabels[label.String()] = i
	}

	// Wait for all lines to be received.
	if err := waitForEntries(20, handler, expectedCounts); err != nil {
		t.Fatal("Timed out waiting for log entries: ", err)
	}

	// Delete one of the log files so we can verify metrics are clean up
	err = os.Remove(logFile1)
	if err != nil {
		t.Fatal("Could not delete a log file to verify metrics are removed: ", err)
	}

	// Sync period is 500ms in tests, need to wait for at least one sync period for tailer to be cleaned up
	<-time.After(500 * time.Millisecond)

	// Pull out some prometheus metrics before shutting down
	metricsBytes, contentType := getPromMetrics(t)

	p.Shutdown()

	// Verify.
	verifyFile(t, expectedCounts[logFile1], prefix1, handler.receivedMap[logFile1])
	verifyFile(t, expectedCounts[logFile2], prefix2, handler.receivedMap[logFile2])
	verifyFile(t, expectedCounts[logFile3], prefix3, handler.receivedMap[logFile3])
	verifyFile(t, expectedCounts[logFile4], prefix4, handler.receivedMap[logFile4])
	verifyPipeline(t, expectedCounts[logFile5], expectedEntries, handler.receivedMap[logFile5], handler.receivedLabels[logFile5], expectedLabels)

	if len(handler.receivedMap) != len(expectedCounts) {
		t.Error("Somehow we ended up tailing more files than we were supposed to, this is likely a bug")
	}

	readBytesMetrics := parsePromMetrics(t, metricsBytes, contentType, "promtail_read_bytes_total", "path")
	fileBytesMetrics := parsePromMetrics(t, metricsBytes, contentType, "promtail_file_bytes_total", "path")

	// logFile1 was deleted above, so its per-file metrics must be gone.
	verifyMetricAbsent(t, readBytesMetrics, "promtail_read_bytes_total", logFile1)
	verifyMetricAbsent(t, fileBytesMetrics, "promtail_file_bytes_total", logFile1)

	// Expected byte counts below are the total size of the lines written by
	// the corresponding scenario helpers.
	verifyMetric(t, readBytesMetrics, "promtail_read_bytes_total", logFile2, 800)
	verifyMetric(t, fileBytesMetrics, "promtail_file_bytes_total", logFile2, 800)

	verifyMetric(t, readBytesMetrics, "promtail_read_bytes_total", logFile3, 700)
	verifyMetric(t, fileBytesMetrics, "promtail_file_bytes_total", logFile3, 700)

	verifyMetric(t, readBytesMetrics, "promtail_read_bytes_total", logFile4, 590)
	verifyMetric(t, fileBytesMetrics, "promtail_file_bytes_total", logFile4, 590)
}
230
// createStartupFile writes a single "marker" line to filename so the test can
// detect when promtail has started tailing. Returns the number of log lines
// written (always 1).
func createStartupFile(t *testing.T, filename string) int {
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	_, err = f.WriteString("marker\n")
	if err != nil {
		t.Fatal(err)
	}
	// Close the handle instead of leaking it for the rest of the test; a
	// deferred write error would also surface here.
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
	return 1
}
242
243func verifyFile(t *testing.T, expected int, prefix string, entries []logproto.Entry) {
244	for i := 0; i < expected; i++ {
245		if entries[i].Line != fmt.Sprintf("%s%d", prefix, i) {
246			t.Errorf("Received out of order or incorrect log event, expected test%d, received %s", i, entries[i].Line)
247		}
248	}
249}
250
251func verifyPipeline(t *testing.T, expected int, expectedEntries map[string]int, entries []logproto.Entry, labels []labels.Labels, expectedLabels map[string]int) {
252	for i := 0; i < expected; i++ {
253		if _, ok := expectedLabels[labels[i].String()]; !ok {
254			t.Errorf("Did not receive expected labels, expected %v, received %s", expectedLabels, labels[i])
255		}
256	}
257
258	for i := 0; i < expected; i++ {
259		if _, ok := expectedEntries[entries[i].Line]; !ok {
260			t.Errorf("Did not receive expected log entry, expected %v, received %s", expectedEntries, entries[i].Line)
261		}
262	}
263}
264
// verifyMetricAbsent asserts that no series of the given metric was exported
// with the given label value (used after a tailed file is deleted).
func verifyMetricAbsent(t *testing.T, metrics map[string]float64, metric string, label string) {
	_, present := metrics[label]
	if !present {
		return
	}
	t.Error("Found metric", metric, "with label", label, "which was not expected, "+
		"this metric should not be present")
}
271
272func verifyMetric(t *testing.T, metrics map[string]float64, metric string, label string, expected float64) {
273	if _, ok := metrics[label]; !ok {
274		t.Error("Expected to find metric ", metric, " with", label, "but it was not present")
275	} else {
276		actualBytes := metrics[label]
277		assert.Equal(t, expected, actualBytes, "found incorrect value for metric %s and label %s", metric, label)
278	}
279}
280
// singleFile writes 100 sequential lines of the form "<prefix><n>" to a fresh
// file, pausing briefly between writes so the tailer can keep up. Returns the
// number of lines written.
func singleFile(t *testing.T, filename string, prefix string) int {
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	entries := 100
	for i := 0; i < entries; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}
	// Close to release the handle (previously leaked); a flush error would
	// surface here.
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	return entries
}
298
// pipelineFile writes the given pre-formatted entries, one per line, to a
// fresh file, pausing briefly between writes. Returns the number of lines
// written.
func pipelineFile(t *testing.T, filename string, entries []string) int {
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}

	for _, entry := range entries {
		line := fmt.Sprintf("%s\n", entry)
		_, err = f.WriteString(line)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}

	// Close to release the handle (previously leaked).
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	return len(entries)
}
316
// fileRoll writes 100 lines, simulates a log rotation by renaming the file
// and creating a new one under the original name, then writes 100 more
// lines. Returns the total number of lines written (200).
func fileRoll(t *testing.T, filename string, prefix string) int {
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 100; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}
	// Close the old handle before renaming so it isn't leaked (and so the
	// rename also works on platforms that forbid renaming open files).
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	if err = os.Rename(filename, filename+".1"); err != nil {
		t.Fatal("Failed to rename file for test: ", err)
	}
	f, err = os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	for i := 100; i < 200; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}
	// Close the post-roll file as well.
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	return 200
}
349
// symlinkRoll simulates rotation behind a symlink: it writes 100 lines to a
// real file reachable only through the symlink `filename`, then re-points the
// symlink at a second file and writes 100 more lines. The real files do not
// end in .log so only the symlink matches the scrape glob. Returns the total
// number of lines written (200).
func symlinkRoll(t *testing.T, testDir string, filename string, prefix string) int {
	symlinkDir := testDir + "/symlink"
	if err := os.Mkdir(symlinkDir, 0750); err != nil {
		t.Fatal(err)
	}

	// Create a file for the logs, make sure it doesn't end in .log
	symlinkFile := symlinkDir + "/log1.notail"
	f, err := os.Create(symlinkFile)
	if err != nil {
		t.Fatal(err)
	}

	// Link to that file with the provided file name.
	if err := os.Symlink(symlinkFile, filename); err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 100; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}
	// Close the first target before re-pointing the symlink (previously
	// leaked).
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	// Remove the link, make a new file, link to the new file.
	if err := os.Remove(filename); err != nil {
		t.Fatal(err)
	}
	symlinkFile2 := symlinkDir + "/log2.notail"
	f, err = os.Create(symlinkFile2)
	if err != nil {
		t.Fatal(err)
	}
	if err := os.Symlink(symlinkFile2, filename); err != nil {
		t.Fatal(err)
	}
	for i := 100; i < 200; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}
	// Close the second target as well.
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	return 200
}
399
// subdirSingleFile creates the parent directory of filename, then writes 100
// sequential "<prefix><n>" lines, verifying that files in nested directories
// are picked up by the "**" glob. Returns the number of lines written.
func subdirSingleFile(t *testing.T, filename string, prefix string) int {
	if err := os.MkdirAll(filepath.Dir(filename), 0750); err != nil {
		t.Fatal(err)
	}
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	entries := 100
	for i := 0; i < entries; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}
	// Close to release the handle (previously leaked).
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	return entries
}
420
421func waitForEntries(timeoutSec int, handler *testServerHandler, expectedCounts map[string]int) error {
422	timeout := timeoutSec * 10
423	for timeout > 0 {
424		countReady := 0
425		for file, expectedCount := range expectedCounts {
426			handler.recMtx.Lock()
427			if rcvd, ok := handler.receivedMap[file]; ok && len(rcvd) == expectedCount {
428				countReady++
429			}
430			handler.recMtx.Unlock()
431		}
432		if countReady == len(expectedCounts) {
433			break
434		}
435		time.Sleep(100 * time.Millisecond)
436		timeout--
437	}
438
439	if timeout <= 0 {
440		waiting := ""
441		for file, expectedCount := range expectedCounts {
442			if rcvd, ok := handler.receivedMap[file]; !ok || len(rcvd) != expectedCount {
443				waiting = waiting + " " + file
444				for _, e := range rcvd {
445					level.Info(util_log.Logger).Log("file", file, "entry", e.Line)
446				}
447			}
448		}
449		return errors.New("still waiting for logs from" + waiting)
450	}
451	return nil
452}
453
// testServerHandler is an http.Handler that emulates the Loki push API for
// tests, recording every entry and label set it receives, keyed by the
// value of the "filename" label.
type testServerHandler struct {
	receivedMap    map[string][]logproto.Entry // received entries per source file
	receivedLabels map[string][]labels.Labels  // parsed label sets per source file
	recMtx         sync.Mutex                  // guards receivedMap and receivedLabels
	t              *testing.T
}
460
461func (h *testServerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
462	var req logproto.PushRequest
463	if err := util.ParseProtoReader(r.Context(), r.Body, int(r.ContentLength), math.MaxInt32, &req, util.RawSnappy); err != nil {
464		http.Error(w, err.Error(), http.StatusBadRequest)
465		return
466	}
467	h.recMtx.Lock()
468	for _, s := range req.Streams {
469		parsedLabels, err := parser.ParseMetric(s.Labels)
470		if err != nil {
471			h.t.Error("Failed to parse incoming labels", err)
472			return
473		}
474		file := ""
475		for _, label := range parsedLabels {
476			if label.Name == file2.FilenameLabel {
477				file = label.Value
478				continue
479			}
480		}
481		if file == "" {
482			h.t.Error("Expected to find a label with name `filename` but did not!")
483			return
484		}
485
486		h.receivedMap[file] = append(h.receivedMap[file], s.Entries...)
487		h.receivedLabels[file] = append(h.receivedLabels[file], parsedLabels)
488
489	}
490
491	h.recMtx.Unlock()
492}
493
494func getPromMetrics(t *testing.T) ([]byte, string) {
495	resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", httpTestPort))
496	if err != nil {
497		t.Fatal("Could not query metrics endpoint", err)
498	}
499
500	if resp.StatusCode != http.StatusOK {
501		t.Fatal("Received a non 200 status code from /metrics endpoint", resp.StatusCode)
502	}
503
504	b, err := ioutil.ReadAll(resp.Body)
505	if err != nil {
506		t.Fatal("Error reading response body from /metrics endpoint", err)
507	}
508	ct := resp.Header.Get("Content-Type")
509	return b, ct
510}
511
512func parsePromMetrics(t *testing.T, bytes []byte, contentType string, metricName string, label string) map[string]float64 {
513	rb := map[string]float64{}
514
515	pr := textparse.New(bytes, contentType)
516	for {
517		et, err := pr.Next()
518		if err == io.EOF {
519			break
520		}
521		if err != nil {
522			t.Fatal("Failed to parse prometheus metrics", err)
523		}
524		switch et {
525		case textparse.EntrySeries:
526			var res labels.Labels
527			_, _, v := pr.Series()
528			pr.Metric(&res)
529			switch res.Get(labels.MetricName) {
530			case metricName:
531				rb[res.Get(label)] = v
532				continue
533			default:
534				continue
535			}
536		default:
537			continue
538		}
539	}
540	return rb
541}
542
// buildTestConfig assembles a complete promtail config for the integration
// test: it points the client at the local fake Loki server, binds promtail's
// own HTTP server to httpTestPort, and installs a single static scrape job
// tailing every *.log under logDirName with a docker/regex/timestamp/labels
// pipeline gated behind the {match="true"} selector.
func buildTestConfig(t *testing.T, positionsFileName string, logDirName string) config.Config {
	var clientURL flagext.URLValue
	err := clientURL.Set("http://localhost:3100/loki/api/v1/push")
	if err != nil {
		t.Fatal("Failed to parse client URL")
	}

	cfg := config.Config{}
	// Init everything with default values.
	flagext.RegisterFlags(&cfg)

	const hostname = "localhost"
	cfg.ServerConfig.HTTPListenAddress = hostname
	cfg.ServerConfig.ExternalURL = hostname
	cfg.ServerConfig.GRPCListenAddress = hostname
	cfg.ServerConfig.HTTPListenPort = httpTestPort

	// Override some of those defaults
	// Small batches and a short batch wait keep end-to-end latency low so
	// the test's polling loops finish quickly.
	cfg.ClientConfig.URL = clientURL
	cfg.ClientConfig.BatchWait = 10 * time.Millisecond
	cfg.ClientConfig.BatchSize = 10 * 1024

	cfg.PositionsConfig.SyncPeriod = 100 * time.Millisecond
	cfg.PositionsConfig.PositionsFile = positionsFileName

	// Pipeline: a match stage that, for lines from streams labeled
	// match="true", unwraps docker JSON, extracts fields with a regex,
	// parses the timestamp, and promotes "action" to a label.
	pipeline := stages.PipelineStages{
		stages.PipelineStage{
			stages.StageTypeMatch: stages.MatcherConfig{
				PipelineName: nil,
				Selector:     "{match=\"true\"}",
				Stages: stages.PipelineStages{
					stages.PipelineStage{
						stages.StageTypeDocker: nil,
					},
					stages.PipelineStage{
						stages.StageTypeRegex: stages.RegexConfig{
							Expression: "^(?P<ip>\\S+) (?P<identd>\\S+) (?P<user>\\S+) \\[(?P<timestamp>[\\w:/]+\\s[+\\-]\\d{4})\\] \"(?P<action>\\S+)\\s?(?P<path>\\S+)?\\s?(?P<protocol>\\S+)?\" (?P<status>\\d{3}|-) (?P<size>\\d+|-)\\s?\"?(?P<referer>[^\"]*)\"?\\s?\"?(?P<useragent>[^\"]*)?\"?$",
							Source:     nil,
						},
					},
					stages.PipelineStage{
						stages.StageTypeTimestamp: stages.TimestampConfig{
							Source: "timestamp",
							Format: "02/Jan/2006:15:04:05 -0700",
						},
					},
					stages.PipelineStage{
						stages.StageTypeLabel: stages.LabelsConfig{
							"action": nil,
						},
					},
				},
			},
		},
	}

	// Static discovery target: tail every .log file (recursively) under the
	// test's log directory.
	targetGroup := targetgroup.Group{
		Targets: []model.LabelSet{{
			"localhost": "",
		}},
		Labels: model.LabelSet{
			"job":      "varlogs",
			"match":    "true",
			"__path__": model.LabelValue(logDirName + "/**/*.log"),
		},
		Source: "",
	}
	scrapeConfig := scrapeconfig.Config{
		JobName:        "",
		PipelineStages: pipeline,
		RelabelConfigs: nil,
		ServiceDiscoveryConfig: scrapeconfig.ServiceDiscoveryConfig{
			StaticConfigs: discovery.StaticConfig{
				&targetGroup,
			},
		},
	}

	cfg.ScrapeConfig = append(cfg.ScrapeConfig, scrapeConfig)

	// Make sure the SyncPeriod is fast for test purposes, but not faster than the poll interval (250ms)
	// to avoid a race between the sync() function and the tailers noticing when files are deleted
	cfg.TargetConfig.SyncPeriod = 500 * time.Millisecond

	return cfg
}
629
// initRandom seeds math/rand so each test run produces a unique temp
// directory name via randName.
func initRandom() {
	rand.Seed(time.Now().UnixNano())
}
633
// letters is the alphabet randName draws from.
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

// randName returns a 10-character random string of ASCII letters, used to
// build unique temp directory names.
func randName() string {
	name := make([]rune, 10)
	for i := 0; i < len(name); i++ {
		name[i] = letters[rand.Intn(len(letters))]
	}
	return string(name)
}
643
644func Test_DryRun(t *testing.T) {
645	f, err := ioutil.TempFile("/tmp", "Test_DryRun")
646	require.NoError(t, err)
647	defer os.Remove(f.Name())
648
649	_, err = New(config.Config{}, true)
650	require.Error(t, err)
651
652	// Set the minimum config needed to start a server. We need to do this since we
653	// aren't doing any CLI parsing ala RegisterFlags and thus don't get the defaults.
654	// Required because a hardcoded value became a configuration setting in this commit
655	// https://github.com/weaveworks/common/commit/c44eeb028a671c5931b047976f9a0171910571ce
656	serverCfg := server.Config{
657		Config: serverww.Config{
658			HTTPListenNetwork: serverww.DefaultNetwork,
659			GRPCListenNetwork: serverww.DefaultNetwork,
660		},
661	}
662
663	prometheus.DefaultRegisterer = prometheus.NewRegistry() // reset registry, otherwise you can't create 2 weavework server.
664	_, err = New(config.Config{
665		ServerConfig: serverCfg,
666		ClientConfig: client.Config{URL: flagext.URLValue{URL: &url.URL{Host: "string"}}},
667		PositionsConfig: positions.Config{
668			PositionsFile: f.Name(),
669			SyncPeriod:    time.Second,
670		},
671	}, true)
672	require.NoError(t, err)
673
674	prometheus.DefaultRegisterer = prometheus.NewRegistry()
675
676	p, err := New(config.Config{
677		ServerConfig: serverCfg,
678		ClientConfig: client.Config{URL: flagext.URLValue{URL: &url.URL{Host: "string"}}},
679		PositionsConfig: positions.Config{
680			PositionsFile: f.Name(),
681			SyncPeriod:    time.Second,
682		},
683	}, false)
684	require.NoError(t, err)
685	require.IsType(t, &client.MultiClient{}, p.client)
686}
687