1package scheduler
2
3import (
4	"fmt"
5	"reflect"
6	"testing"
7
8	"github.com/stretchr/testify/require"
9
10	"github.com/hashicorp/nomad/helper"
11	"github.com/hashicorp/nomad/helper/testlog"
12	"github.com/hashicorp/nomad/helper/uuid"
13	"github.com/hashicorp/nomad/nomad/mock"
14	"github.com/hashicorp/nomad/nomad/state"
15	"github.com/hashicorp/nomad/nomad/structs"
16)
17
18func TestMaterializeTaskGroups(t *testing.T) {
19	job := mock.Job()
20	index := materializeTaskGroups(job)
21	require.Equal(t, 10, len(index))
22
23	for i := 0; i < 10; i++ {
24		name := fmt.Sprintf("my-job.web[%d]", i)
25		require.Contains(t, index, name)
26		require.Equal(t, job.TaskGroups[0], index[name])
27	}
28}
29
// TestDiffSystemAllocsForNode verifies the per-node diff of required versus
// existing allocations: allocs running an old job version are updated,
// up-to-date allocs are ignored, allocs whose name is no longer required are
// stopped, allocs on draining nodes are migrated, allocs on down nodes are
// marked lost, and replacements for terminal allocs carry the terminal alloc
// as an annotation.
func TestDiffSystemAllocsForNode(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	eligibleNode := mock.Node()
	eligibleNode.ID = "zip"

	drainNode := mock.DrainNode()

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		"dead":      deadNode,
		"drainNode": drainNode,
	}

	eligible := map[string]*structs.Node{
		eligibleNode.ID: eligibleNode,
	}

	allocs := []*structs.Allocation{
		// Update the 1st (it runs the old job version)
		{
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd (already up to date)
		{
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Stop the 11th (its name is not in the required set; count is 10)
		{
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
			Job:    oldJob,
		},

		// Migrate the 3rd (on a draining node with a migrate transition)
		{
			ID:     uuid.Generate(),
			NodeID: "drainNode",
			Name:   "my-job.web[2]",
			Job:    oldJob,
			DesiredTransition: structs.DesiredTransition{
				Migrate: helper.BoolToPtr(true),
			},
		},
		// Mark the 4th lost (its node is down)
		{
			ID:     uuid.Generate(),
			NodeID: "dead",
			Name:   "my-job.web[3]",
			Job:    oldJob,
		},
	}

	// Have three terminal allocs
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[4]": {
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[4]",
			Job:    job,
		},
		"my-job.web[5]": {
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[5]",
			Job:    job,
		},
		"my-job.web[6]": {
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[6]",
			Job:    job,
		},
	}

	diff := diffSystemAllocsForNode(job, "zip", eligible, tainted, required, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	require.True(t, len(update) == 1 && update[0].Alloc == allocs[0])

	// We should ignore the second alloc
	require.True(t, len(ignore) == 1 && ignore[0].Alloc == allocs[1])

	// We should stop the 3rd alloc (my-job.web[10], which is not required)
	require.True(t, len(stop) == 1 && stop[0].Alloc == allocs[2])

	// We should migrate the 4th alloc (it is on the draining node)
	require.True(t, len(migrate) == 1 && migrate[0].Alloc == allocs[3])

	// We should mark the 5th alloc as lost (it is on the down node)
	require.True(t, len(lost) == 1 && lost[0].Alloc == allocs[4])

	// We should place 6: names web[4] through web[9] have no existing alloc
	require.Equal(t, 6, len(place))

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for name, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if name == allocTuple.Name {
				require.True(t, reflect.DeepEqual(alloc, allocTuple.Alloc),
					"expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
			}
		}
	}
}
159
160// Test the desired diff for an updated system job running on a
161// ineligible node
162func TestDiffSystemAllocsForNode_ExistingAllocIneligibleNode(t *testing.T) {
163	job := mock.Job()
164	job.TaskGroups[0].Count = 1
165	required := materializeTaskGroups(job)
166
167	// The "old" job has a previous modify index
168	oldJob := new(structs.Job)
169	*oldJob = *job
170	oldJob.JobModifyIndex -= 1
171
172	eligibleNode := mock.Node()
173	ineligibleNode := mock.Node()
174	ineligibleNode.SchedulingEligibility = structs.NodeSchedulingIneligible
175
176	tainted := map[string]*structs.Node{}
177
178	eligible := map[string]*structs.Node{
179		eligibleNode.ID: eligibleNode,
180	}
181
182	allocs := []*structs.Allocation{
183		// Update the TG alloc running on eligible node
184		{
185			ID:     uuid.Generate(),
186			NodeID: eligibleNode.ID,
187			Name:   "my-job.web[0]",
188			Job:    oldJob,
189		},
190
191		// Ignore the TG alloc running on ineligible node
192		{
193			ID:     uuid.Generate(),
194			NodeID: ineligibleNode.ID,
195			Name:   "my-job.web[0]",
196			Job:    job,
197		},
198	}
199
200	// No terminal allocs
201	terminalAllocs := map[string]*structs.Allocation{}
202
203	diff := diffSystemAllocsForNode(job, eligibleNode.ID, eligible, tainted, required, allocs, terminalAllocs)
204	place := diff.place
205	update := diff.update
206	migrate := diff.migrate
207	stop := diff.stop
208	ignore := diff.ignore
209	lost := diff.lost
210
211	require.Len(t, place, 0)
212	require.Len(t, update, 1)
213	require.Len(t, migrate, 0)
214	require.Len(t, stop, 0)
215	require.Len(t, ignore, 1)
216	require.Len(t, lost, 0)
217}
218
// TestDiffSystemAllocs verifies the cluster-wide diff for a system job: one
// desired allocation per node, with updates for stale allocs, ignores for
// current ones, a migration for the draining node, a lost alloc on the down
// node, and placements on nodes without a live alloc.
func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	drainNode := mock.DrainNode()

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		deadNode.ID:  deadNode,
		drainNode.ID: drainNode,
	}

	// Create four alive nodes plus the draining and dead nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"},
		{ID: "pipe"}, {ID: drainNode.ID}, {ID: deadNode.ID}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	allocs := []*structs.Allocation{
		// Update allocation on baz (it runs the old job version)
		{
			ID:     uuid.Generate(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar (already up to date)
		{
			ID:     uuid.Generate(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Migrate allocation on the draining node (migrate transition set).
		{
			ID:     uuid.Generate(),
			NodeID: drainNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
			DesiredTransition: structs.DesiredTransition{
				Migrate: helper.BoolToPtr(true),
			},
		},
		// Mark as lost on a dead node
		{
			ID:     uuid.Generate(),
			NodeID: deadNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
	}

	// Have one terminal alloc, on node "pipe"
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[0]": {
			ID:     uuid.Generate(),
			NodeID: "pipe",
			Name:   "my-job.web[0]",
			Job:    job,
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	require.True(t, len(update) == 1 && update[0].Alloc == allocs[0])

	// We should ignore the second alloc
	require.True(t, len(ignore) == 1 && ignore[0].Alloc == allocs[1])

	// There should be no stops; the draining alloc is migrated instead
	require.Empty(t, stop)

	// We should migrate the third alloc (draining node)
	require.True(t, len(migrate) == 1 && migrate[0].Alloc == allocs[2])

	// We should mark the 4th alloc as lost (down node)
	require.True(t, len(lost) == 1 && lost[0].Alloc == allocs[3])

	// We should place 2: nodes "foo" and "pipe" have no live alloc
	require.Equal(t, 2, len(place))

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for _, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if alloc.NodeID == allocTuple.Alloc.NodeID {
				require.True(t, reflect.DeepEqual(alloc, allocTuple.Alloc),
					"expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
			}
		}
	}
}
324
325func TestReadyNodesInDCs(t *testing.T) {
326	state := state.TestStateStore(t)
327	node1 := mock.Node()
328	node2 := mock.Node()
329	node2.Datacenter = "dc2"
330	node3 := mock.Node()
331	node3.Datacenter = "dc2"
332	node3.Status = structs.NodeStatusDown
333	node4 := mock.DrainNode()
334
335	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1))
336	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2))
337	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3))
338	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1003, node4))
339
340	nodes, dc, err := readyNodesInDCs(state, []string{"dc1", "dc2"})
341	require.NoError(t, err)
342	require.Equal(t, 2, len(nodes))
343	require.True(t, nodes[0].ID != node3.ID && nodes[1].ID != node3.ID)
344
345	require.Contains(t, dc, "dc1")
346	require.Equal(t, 1, dc["dc1"])
347	require.Contains(t, dc, "dc2")
348	require.Equal(t, 1, dc["dc2"])
349}
350
351func TestRetryMax(t *testing.T) {
352	calls := 0
353	bad := func() (bool, error) {
354		calls += 1
355		return false, nil
356	}
357	err := retryMax(3, bad, nil)
358	require.Error(t, err)
359	require.Equal(t, 3, calls, "mis match")
360
361	calls = 0
362	first := true
363	reset := func() bool {
364		if calls == 3 && first {
365			first = false
366			return true
367		}
368		return false
369	}
370	err = retryMax(3, bad, reset)
371	require.Error(t, err)
372	require.Equal(t, 6, calls, "mis match")
373
374	calls = 0
375	good := func() (bool, error) {
376		calls += 1
377		return true, nil
378	}
379	err = retryMax(3, good, nil)
380	require.NoError(t, err)
381	require.Equal(t, 1, calls, "mis match")
382}
383
384func TestTaintedNodes(t *testing.T) {
385	state := state.TestStateStore(t)
386	node1 := mock.Node()
387	node2 := mock.Node()
388	node2.Datacenter = "dc2"
389	node3 := mock.Node()
390	node3.Datacenter = "dc2"
391	node3.Status = structs.NodeStatusDown
392	node4 := mock.DrainNode()
393	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1))
394	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2))
395	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3))
396	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1003, node4))
397
398	allocs := []*structs.Allocation{
399		{NodeID: node1.ID},
400		{NodeID: node2.ID},
401		{NodeID: node3.ID},
402		{NodeID: node4.ID},
403		{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
404	}
405	tainted, err := taintedNodes(state, allocs)
406	require.NoError(t, err)
407	require.Equal(t, 3, len(tainted))
408	require.NotContains(t, tainted, node1.ID)
409	require.NotContains(t, tainted, node2.ID)
410
411	require.Contains(t, tainted, node3.ID)
412	require.NotNil(t, tainted[node3.ID])
413
414	require.Contains(t, tainted, node4.ID)
415	require.NotNil(t, tainted[node4.ID])
416
417	require.Contains(t, tainted, "12345678-abcd-efab-cdef-123456789abc")
418	require.Nil(t, tainted["12345678-abcd-efab-cdef-123456789abc"])
419}
420
421func TestShuffleNodes(t *testing.T) {
422	// Use a large number of nodes to make the probability of shuffling to the
423	// original order very low.
424	nodes := []*structs.Node{
425		mock.Node(),
426		mock.Node(),
427		mock.Node(),
428		mock.Node(),
429		mock.Node(),
430		mock.Node(),
431		mock.Node(),
432		mock.Node(),
433		mock.Node(),
434		mock.Node(),
435	}
436	orig := make([]*structs.Node, len(nodes))
437	copy(orig, nodes)
438	shuffleNodes(nodes)
439	require.False(t, reflect.DeepEqual(nodes, orig))
440}
441
// TestTaskUpdatedAffinity verifies that adding an affinity at the task group
// or task level makes tasksUpdated report a change, while moving the same
// affinity between the job and group level does not.
func TestTaskUpdatedAffinity(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name

	// Identical jobs: no update required.
	require.False(t, tasksUpdated(j1, j2, name))

	// TaskGroup Affinity
	j2.TaskGroups[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}
	require.True(t, tasksUpdated(j1, j2, name))

	// TaskGroup Task Affinity
	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}

	require.True(t, tasksUpdated(j1, j3, name))

	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}

	require.True(t, tasksUpdated(j1, j4, name))

	// check different level of same affinity: the same affinity expressed at
	// the job level (j5) versus the group level (j6) is not an update
	j5 := mock.Job()
	j5.Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}

	j6 := mock.Job()
	j6.Affinities = make([]*structs.Affinity, 0)
	j6.TaskGroups[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}

	require.False(t, tasksUpdated(j5, j6, name))
}
509
// TestTaskUpdatedSpread verifies that adding a spread at the task group
// level makes tasksUpdated report a change, while moving the same spread
// between the job and group level does not.
func TestTaskUpdatedSpread(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name

	// Identical jobs: no update required.
	require.False(t, tasksUpdated(j1, j2, name))

	// TaskGroup Spread
	j2.TaskGroups[0].Spreads = []*structs.Spread{
		{
			Attribute: "node.datacenter",
			Weight:    100,
			SpreadTarget: []*structs.SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
				{
					Value:   "r2",
					Percent: 50,
				},
			},
		},
	}
	require.True(t, tasksUpdated(j1, j2, name))

	// check different level of same spread: the same spread expressed at the
	// job level (j5) versus the group level (j6) is not an update
	j5 := mock.Job()
	j5.Spreads = []*structs.Spread{
		{
			Attribute: "node.datacenter",
			Weight:    100,
			SpreadTarget: []*structs.SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
				{
					Value:   "r2",
					Percent: 50,
				},
			},
		},
	}

	j6 := mock.Job()
	j6.TaskGroups[0].Spreads = []*structs.Spread{
		{
			Attribute: "node.datacenter",
			Weight:    100,
			SpreadTarget: []*structs.SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
				{
					Value:   "r2",
					Percent: 50,
				},
			},
		},
	}

	require.False(t, tasksUpdated(j5, j6, name))
}
// TestTasksUpdated walks through the individual task and group fields that
// should make tasksUpdated report a destructive change: task config, name,
// driver, task count, networks/ports, env, user, artifacts, meta (task,
// group, and job level), resources (CPU, devices, cores), Vault policies,
// and ephemeral disk stickiness.
func TestTasksUpdated(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name
	// Identical jobs: no update required.
	require.False(t, tasksUpdated(j1, j2, name))

	// Changed task config.
	j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	require.True(t, tasksUpdated(j1, j2, name))

	// Changed task name.
	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Name = "foo"
	require.True(t, tasksUpdated(j1, j3, name))

	// Changed task driver.
	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Driver = "foo"
	require.True(t, tasksUpdated(j1, j4, name))

	// Changed number of tasks in the group.
	j5 := mock.Job()
	j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks,
		j5.TaskGroups[0].Tasks[0])
	require.True(t, tasksUpdated(j1, j5, name))

	// Changed dynamic port set.
	j6 := mock.Job()
	j6.TaskGroups[0].Networks[0].DynamicPorts = []structs.Port{
		{Label: "http", Value: 0},
		{Label: "https", Value: 0},
		{Label: "admin", Value: 0},
	}
	require.True(t, tasksUpdated(j1, j6, name))

	// Changed task environment.
	j7 := mock.Job()
	j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE"
	require.True(t, tasksUpdated(j1, j7, name))

	// Changed task user.
	j8 := mock.Job()
	j8.TaskGroups[0].Tasks[0].User = "foo"
	require.True(t, tasksUpdated(j1, j8, name))

	// Added a task artifact.
	j9 := mock.Job()
	j9.TaskGroups[0].Tasks[0].Artifacts = []*structs.TaskArtifact{
		{
			GetterSource: "http://foo.com/bar",
		},
	}
	require.True(t, tasksUpdated(j1, j9, name))

	// Changed task meta.
	j10 := mock.Job()
	j10.TaskGroups[0].Tasks[0].Meta["baz"] = "boom"
	require.True(t, tasksUpdated(j1, j10, name))

	// Changed CPU resources.
	j11 := mock.Job()
	j11.TaskGroups[0].Tasks[0].Resources.CPU = 1337
	require.True(t, tasksUpdated(j1, j11, name))

	// Changed device count.
	j11d1 := mock.Job()
	j11d1.TaskGroups[0].Tasks[0].Resources.Devices = structs.ResourceDevices{
		&structs.RequestedDevice{
			Name:  "gpu",
			Count: 1,
		},
	}
	j11d2 := mock.Job()
	j11d2.TaskGroups[0].Tasks[0].Resources.Devices = structs.ResourceDevices{
		&structs.RequestedDevice{
			Name:  "gpu",
			Count: 2,
		},
	}
	require.True(t, tasksUpdated(j11d1, j11d2, name))

	// Changed dynamic port label.
	j13 := mock.Job()
	j13.TaskGroups[0].Networks[0].DynamicPorts[0].Label = "foobar"
	require.True(t, tasksUpdated(j1, j13, name))

	// Added a reserved port.
	j14 := mock.Job()
	j14.TaskGroups[0].Networks[0].ReservedPorts = []structs.Port{{Label: "foo", Value: 1312}}
	require.True(t, tasksUpdated(j1, j14, name))

	// Added Vault policies.
	j15 := mock.Job()
	j15.TaskGroups[0].Tasks[0].Vault = &structs.Vault{Policies: []string{"foo"}}
	require.True(t, tasksUpdated(j1, j15, name))

	// Changed ephemeral disk stickiness.
	j16 := mock.Job()
	j16.TaskGroups[0].EphemeralDisk.Sticky = true
	require.True(t, tasksUpdated(j1, j16, name))

	// Change group meta
	j17 := mock.Job()
	j17.TaskGroups[0].Meta["j17_test"] = "roll_baby_roll"
	require.True(t, tasksUpdated(j1, j17, name))

	// Change job meta
	j18 := mock.Job()
	j18.Meta["j18_test"] = "roll_baby_roll"
	require.True(t, tasksUpdated(j1, j18, name))

	// Change network mode
	j19 := mock.Job()
	j19.TaskGroups[0].Networks[0].Mode = "bridge"
	require.True(t, tasksUpdated(j1, j19, name))

	// Change cores resource
	j20 := mock.Job()
	j20.TaskGroups[0].Tasks[0].Resources.CPU = 0
	j20.TaskGroups[0].Tasks[0].Resources.Cores = 2
	j21 := mock.Job()
	j21.TaskGroups[0].Tasks[0].Resources.CPU = 0
	j21.TaskGroups[0].Tasks[0].Resources.Cores = 4
	require.True(t, tasksUpdated(j20, j21, name))

}
686
// TestTasksUpdated_connectServiceUpdated verifies which Connect service
// changes are destructive: sidecar tag changes are in-place (no update),
// while sidecar port and service port label changes require a new
// allocation.
func TestTasksUpdated_connectServiceUpdated(t *testing.T) {
	servicesA := []*structs.Service{{
		Name:      "service1",
		PortLabel: "1111",
		Connect: &structs.ConsulConnect{
			SidecarService: &structs.ConsulSidecarService{
				Tags: []string{"a"},
			},
		},
	}}

	t.Run("service not updated", func(t *testing.T) {
		// Same connect service definition; extra non-connect services are
		// irrelevant to the comparison.
		servicesB := []*structs.Service{{
			Name: "service0",
		}, {
			Name:      "service1",
			PortLabel: "1111",
			Connect: &structs.ConsulConnect{
				SidecarService: &structs.ConsulSidecarService{
					Tags: []string{"a"},
				},
			},
		}, {
			Name: "service2",
		}}
		updated := connectServiceUpdated(servicesA, servicesB)
		require.False(t, updated)
	})

	t.Run("service connect tags updated", func(t *testing.T) {
		servicesB := []*structs.Service{{
			Name: "service0",
		}, {
			Name:      "service1",
			PortLabel: "1111",
			Connect: &structs.ConsulConnect{
				SidecarService: &structs.ConsulSidecarService{
					Tags: []string{"b"}, // in-place update
				},
			},
		}}
		updated := connectServiceUpdated(servicesA, servicesB)
		require.False(t, updated)
	})

	t.Run("service connect port updated", func(t *testing.T) {
		servicesB := []*structs.Service{{
			Name: "service0",
		}, {
			Name:      "service1",
			PortLabel: "1111",
			Connect: &structs.ConsulConnect{
				SidecarService: &structs.ConsulSidecarService{
					Tags: []string{"a"},
					Port: "2222", // destructive update
				},
			},
		}}
		updated := connectServiceUpdated(servicesA, servicesB)
		require.True(t, updated)
	})

	t.Run("service port label updated", func(t *testing.T) {
		servicesB := []*structs.Service{{
			Name: "service0",
		}, {
			Name:      "service1",
			PortLabel: "1112", // destructive update
			Connect: &structs.ConsulConnect{
				SidecarService: &structs.ConsulSidecarService{
					Tags: []string{"1"},
				},
			},
		}}
		updated := connectServiceUpdated(servicesA, servicesB)
		require.True(t, updated)
	})
}
765
766func TestNetworkUpdated(t *testing.T) {
767	t.Parallel()
768	cases := []struct {
769		name    string
770		a       []*structs.NetworkResource
771		b       []*structs.NetworkResource
772		updated bool
773	}{
774		{
775			name: "mode updated",
776			a: []*structs.NetworkResource{
777				{Mode: "host"},
778			},
779			b: []*structs.NetworkResource{
780				{Mode: "bridge"},
781			},
782			updated: true,
783		},
784		{
785			name: "host_network updated",
786			a: []*structs.NetworkResource{
787				{DynamicPorts: []structs.Port{
788					{Label: "http", To: 8080},
789				}},
790			},
791			b: []*structs.NetworkResource{
792				{DynamicPorts: []structs.Port{
793					{Label: "http", To: 8080, HostNetwork: "public"},
794				}},
795			},
796			updated: true,
797		},
798		{
799			name: "port.To updated",
800			a: []*structs.NetworkResource{
801				{DynamicPorts: []structs.Port{
802					{Label: "http", To: 8080},
803				}},
804			},
805			b: []*structs.NetworkResource{
806				{DynamicPorts: []structs.Port{
807					{Label: "http", To: 8088},
808				}},
809			},
810			updated: true,
811		},
812	}
813
814	for i := range cases {
815		c := cases[i]
816		t.Run(c.name, func(tc *testing.T) {
817			tc.Parallel()
818			require.Equal(tc, c.updated, networkUpdated(c.a, c.b), "unexpected network updated result")
819		})
820	}
821}
822
823func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) {
824	_, ctx := testContext(t)
825	allocs := []allocTuple{
826		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
827		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
828		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
829		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
830	}
831	diff := &diffResult{}
832
833	limit := 2
834	require.True(t, evictAndPlace(ctx, diff, allocs, "", &limit), "evictAndReplace() should have returned true")
835	require.Zero(t, limit, "evictAndReplace() should decremented limit; got %v; want 0", limit)
836	require.Equal(t, 2, len(diff.place), "evictAndReplace() didn't insert into diffResult properly: %v", diff.place)
837}
838
839func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) {
840	_, ctx := testContext(t)
841	allocs := []allocTuple{
842		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
843		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
844		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
845		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
846	}
847	diff := &diffResult{}
848
849	limit := 4
850	require.False(t, evictAndPlace(ctx, diff, allocs, "", &limit), "evictAndReplace() should have returned false")
851	require.Zero(t, limit, "evictAndReplace() should decremented limit; got %v; want 0", limit)
852	require.Equal(t, 4, len(diff.place), "evictAndReplace() didn't insert into diffResult properly: %v", diff.place)
853}
854
855func TestSetStatus(t *testing.T) {
856	h := NewHarness(t)
857	logger := testlog.HCLogger(t)
858	eval := mock.Eval()
859	status := "a"
860	desc := "b"
861	require.NoError(t, setStatus(logger, h, eval, nil, nil, nil, status, desc, nil, ""))
862	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
863
864	newEval := h.Evals[0]
865	require.True(t, newEval.ID == eval.ID && newEval.Status == status && newEval.StatusDescription == desc,
866		"setStatus() submited invalid eval: %v", newEval)
867
868	// Test next evals
869	h = NewHarness(t)
870	next := mock.Eval()
871	require.NoError(t, setStatus(logger, h, eval, next, nil, nil, status, desc, nil, ""))
872	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
873
874	newEval = h.Evals[0]
875	require.Equal(t, next.ID, newEval.NextEval, "setStatus() didn't set nextEval correctly: %v", newEval)
876
877	// Test blocked evals
878	h = NewHarness(t)
879	blocked := mock.Eval()
880	require.NoError(t, setStatus(logger, h, eval, nil, blocked, nil, status, desc, nil, ""))
881	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
882
883	newEval = h.Evals[0]
884	require.Equal(t, blocked.ID, newEval.BlockedEval, "setStatus() didn't set BlockedEval correctly: %v", newEval)
885
886	// Test metrics
887	h = NewHarness(t)
888	metrics := map[string]*structs.AllocMetric{"foo": nil}
889	require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, nil, ""))
890	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
891
892	newEval = h.Evals[0]
893	require.True(t, reflect.DeepEqual(newEval.FailedTGAllocs, metrics),
894		"setStatus() didn't set failed task group metrics correctly: %v", newEval)
895
896	// Test queued allocations
897	h = NewHarness(t)
898	queuedAllocs := map[string]int{"web": 1}
899
900	require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, ""))
901	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
902
903	newEval = h.Evals[0]
904	require.True(t, reflect.DeepEqual(newEval.QueuedAllocations, queuedAllocs), "setStatus() didn't set failed task group metrics correctly: %v", newEval)
905
906	h = NewHarness(t)
907	dID := uuid.Generate()
908	require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, dID))
909	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)
910
911	newEval = h.Evals[0]
912	require.Equal(t, dID, newEval.DeploymentID, "setStatus() didn't set deployment id correctly: %v", newEval)
913}
914
// TestInplaceUpdate_ChangedTaskGroup ensures that a destructive task group
// change (a completely different task set) prevents an in-place update: the
// alloc is returned as unplaced and nothing is added to the plan.
func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 2048,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 2048,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

	// Create a new task group that prevents in-place updates.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	task := &structs.Task{
		Name:      "FOO",
		Resources: &structs.Resources{},
	}
	tg.Tasks = nil
	tg.Tasks = append(tg.Tasks, task)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	// The alloc must fall through to destructive update: one unplaced, zero
	// in-place, and an empty plan.
	require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update")
	require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
}
969
970func TestInplaceUpdate_AllocatedResources(t *testing.T) {
971	state, ctx := testContext(t)
972	eval := mock.Eval()
973	job := mock.Job()
974
975	node := mock.Node()
976	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))
977
978	// Register an alloc
979	alloc := &structs.Allocation{
980		Namespace: structs.DefaultNamespace,
981		ID:        uuid.Generate(),
982		EvalID:    eval.ID,
983		NodeID:    node.ID,
984		JobID:     job.ID,
985		Job:       job,
986		AllocatedResources: &structs.AllocatedResources{
987			Shared: structs.AllocatedSharedResources{
988				Ports: structs.AllocatedPorts{
989					{
990						Label: "api-port",
991						Value: 19910,
992						To:    8080,
993					},
994				},
995			},
996		},
997		DesiredStatus: structs.AllocDesiredStatusRun,
998		TaskGroup:     "web",
999	}
1000	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
1001	require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
1002	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))
1003
1004	// Update TG to add a new service (inplace)
1005	tg := job.TaskGroups[0]
1006	tg.Services = []*structs.Service{
1007		{
1008			Name: "tg-service",
1009		},
1010	}
1011
1012	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
1013	stack := NewGenericStack(false, ctx)
1014
1015	// Do the inplace update.
1016	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)
1017
1018	require.True(t, len(unplaced) == 0 && len(inplace) == 1, "inplaceUpdate incorrectly did not perform an inplace update")
1019	require.NotEmpty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
1020	require.NotEmpty(t, ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports)
1021
1022	port, ok := ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports.Get("api-port")
1023	require.True(t, ok)
1024	require.Equal(t, 19910, port.Value)
1025}
1026
// TestInplaceUpdate_NoMatch ensures that an in-place update is rejected when
// the node cannot satisfy the task group's increased resource ask: the alloc
// is returned as unplaced and nothing is added to the plan.
func TestInplaceUpdate_NoMatch(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 2048,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 2048,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

	// Create a new task group that requires too much resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 9999}
	tg.Tasks[0].Resources = resource

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	// The oversized ask cannot fit in place: one unplaced, zero in-place,
	// and an empty plan.
	require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update")
	require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
}
1077
// TestInplaceUpdate_Success asserts that inplaceUpdate applies a task group
// change in place when the updated group still fits on the allocation's
// current node, and that the updated allocation's job carries the new
// service set.
func TestInplaceUpdate_Success(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	// A node for the existing allocation to live on.
	node := mock.Node()
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

	// Register an alloc that is currently running the job's first task group.
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		TaskGroup: job.TaskGroups[0].Name,
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 2048,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 2048,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	// NOTE(review): alloc.Resources was never set, so "web" maps to a nil
	// *structs.Resources here — presumably this path only reads
	// AllocatedResources; confirm TaskResources is not dereferenced.
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

	// Create a new task group that updates the resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 737}
	tg.Tasks[0].Resources = resource
	newServices := []*structs.Service{
		{
			Name:      "dummy-service",
			PortLabel: "http",
		},
		{
			Name:      "dummy-service2",
			PortLabel: "http",
		},
	}

	// Delete service 2
	tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

	// Add the new services
	tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)
	stack.SetJob(job)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	// The change fits on the current node, so nothing should need a new
	// placement and exactly one alloc should be updated in place.
	require.True(t, len(unplaced) == 0 && len(inplace) == 1, "inplaceUpdate did not do an inplace update")
	require.Equal(t, 1, len(ctx.plan.NodeAllocation), "inplaceUpdate did not do an inplace update")
	require.Equal(t, alloc.ID, inplace[0].Alloc.ID, "inplaceUpdate returned the wrong, inplace updated alloc: %#v", inplace)

	// Get the alloc we inserted.
	a := inplace[0].Alloc // TODO(sean@): Verify this is correct vs: ctx.plan.NodeAllocation[alloc.NodeID][0]
	require.NotNil(t, a.Job)
	require.Equal(t, 1, len(a.Job.TaskGroups))
	require.Equal(t, 1, len(a.Job.TaskGroups[0].Tasks))
	// One service kept from the original group plus the two new ones.
	require.Equal(t, 3, len(a.Job.TaskGroups[0].Tasks[0].Services),
		"Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services))

	serviceNames := make(map[string]struct{}, 3)
	for _, consulService := range a.Job.TaskGroups[0].Tasks[0].Services {
		serviceNames[consulService.Name] = struct{}{}
	}
	require.Equal(t, 3, len(serviceNames))

	for _, name := range []string{"dummy-service", "dummy-service2", "web-frontend"} {
		if _, found := serviceNames[name]; !found {
			t.Errorf("Expected consul service name missing: %v", name)
		}
	}
}
1166
1167func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) {
1168	_, ctx := testContext(t)
1169	allocs := []allocTuple{
1170		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
1171		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
1172		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
1173		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
1174	}
1175	diff := &diffResult{}
1176
1177	limit := 6
1178	require.False(t, evictAndPlace(ctx, diff, allocs, "", &limit))
1179	require.Equal(t, 2, limit, "evictAndReplace() should decremented limit")
1180	require.Equal(t, 4, len(diff.place), "evictAndReplace() didn't insert into diffResult properly: %v", diff.place)
1181}
1182
1183func TestTaskGroupConstraints(t *testing.T) {
1184	constr := &structs.Constraint{RTarget: "bar"}
1185	constr2 := &structs.Constraint{LTarget: "foo"}
1186	constr3 := &structs.Constraint{Operand: "<"}
1187
1188	tg := &structs.TaskGroup{
1189		Name:          "web",
1190		Count:         10,
1191		Constraints:   []*structs.Constraint{constr},
1192		EphemeralDisk: &structs.EphemeralDisk{},
1193		Tasks: []*structs.Task{
1194			{
1195				Driver: "exec",
1196				Resources: &structs.Resources{
1197					CPU:      500,
1198					MemoryMB: 256,
1199				},
1200				Constraints: []*structs.Constraint{constr2},
1201			},
1202			{
1203				Driver: "docker",
1204				Resources: &structs.Resources{
1205					CPU:      500,
1206					MemoryMB: 256,
1207				},
1208				Constraints: []*structs.Constraint{constr3},
1209			},
1210		},
1211	}
1212
1213	// Build the expected values.
1214	expConstr := []*structs.Constraint{constr, constr2, constr3}
1215	expDrivers := map[string]struct{}{"exec": {}, "docker": {}}
1216
1217	actConstrains := taskGroupConstraints(tg)
1218	require.True(t, reflect.DeepEqual(actConstrains.constraints, expConstr),
1219		"taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.constraints, expConstr)
1220	require.True(t, reflect.DeepEqual(actConstrains.drivers, expDrivers),
1221		"taskGroupConstraints(%v) returned %v; want %v", tg, actConstrains.drivers, expDrivers)
1222}
1223
1224func TestProgressMade(t *testing.T) {
1225	noopPlan := &structs.PlanResult{}
1226	require.False(t, progressMade(nil) || progressMade(noopPlan), "no progress plan marked as making progress")
1227
1228	m := map[string][]*structs.Allocation{
1229		"foo": {mock.Alloc()},
1230	}
1231	both := &structs.PlanResult{
1232		NodeAllocation: m,
1233		NodeUpdate:     m,
1234	}
1235	update := &structs.PlanResult{NodeUpdate: m}
1236	alloc := &structs.PlanResult{NodeAllocation: m}
1237	deployment := &structs.PlanResult{Deployment: mock.Deployment()}
1238	deploymentUpdates := &structs.PlanResult{
1239		DeploymentUpdates: []*structs.DeploymentStatusUpdate{
1240			{DeploymentID: uuid.Generate()},
1241		},
1242	}
1243
1244	require.True(t, progressMade(both) && progressMade(update) && progressMade(alloc) &&
1245		progressMade(deployment) && progressMade(deploymentUpdates))
1246}
1247
1248func TestDesiredUpdates(t *testing.T) {
1249	tg1 := &structs.TaskGroup{Name: "foo"}
1250	tg2 := &structs.TaskGroup{Name: "bar"}
1251	a2 := &structs.Allocation{TaskGroup: "bar"}
1252
1253	place := []allocTuple{
1254		{TaskGroup: tg1},
1255		{TaskGroup: tg1},
1256		{TaskGroup: tg1},
1257		{TaskGroup: tg2},
1258	}
1259	stop := []allocTuple{
1260		{TaskGroup: tg2, Alloc: a2},
1261		{TaskGroup: tg2, Alloc: a2},
1262	}
1263	ignore := []allocTuple{
1264		{TaskGroup: tg1},
1265	}
1266	migrate := []allocTuple{
1267		{TaskGroup: tg2},
1268	}
1269	inplace := []allocTuple{
1270		{TaskGroup: tg1},
1271		{TaskGroup: tg1},
1272	}
1273	destructive := []allocTuple{
1274		{TaskGroup: tg1},
1275		{TaskGroup: tg2},
1276		{TaskGroup: tg2},
1277	}
1278	diff := &diffResult{
1279		place:   place,
1280		stop:    stop,
1281		ignore:  ignore,
1282		migrate: migrate,
1283	}
1284
1285	expected := map[string]*structs.DesiredUpdates{
1286		"foo": {
1287			Place:             3,
1288			Ignore:            1,
1289			InPlaceUpdate:     2,
1290			DestructiveUpdate: 1,
1291		},
1292		"bar": {
1293			Place:             1,
1294			Stop:              2,
1295			Migrate:           1,
1296			DestructiveUpdate: 2,
1297		},
1298	}
1299
1300	desired := desiredUpdates(diff, inplace, destructive)
1301	require.True(t, reflect.DeepEqual(desired, expected), "desiredUpdates() returned %#v; want %#v", desired, expected)
1302}
1303
1304func TestUtil_AdjustQueuedAllocations(t *testing.T) {
1305	logger := testlog.HCLogger(t)
1306	alloc1 := mock.Alloc()
1307	alloc2 := mock.Alloc()
1308	alloc2.CreateIndex = 4
1309	alloc2.ModifyIndex = 4
1310	alloc3 := mock.Alloc()
1311	alloc3.CreateIndex = 3
1312	alloc3.ModifyIndex = 5
1313	alloc4 := mock.Alloc()
1314	alloc4.CreateIndex = 6
1315	alloc4.ModifyIndex = 8
1316
1317	planResult := structs.PlanResult{
1318		NodeUpdate: map[string][]*structs.Allocation{
1319			"node-1": {alloc1},
1320		},
1321		NodeAllocation: map[string][]*structs.Allocation{
1322			"node-1": {
1323				alloc2,
1324			},
1325			"node-2": {
1326				alloc3, alloc4,
1327			},
1328		},
1329		RefreshIndex: 3,
1330		AllocIndex:   16, // Should not be considered
1331	}
1332
1333	queuedAllocs := map[string]int{"web": 2}
1334	adjustQueuedAllocations(logger, &planResult, queuedAllocs)
1335
1336	require.Equal(t, 1, queuedAllocs["web"])
1337}
1338
1339func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) {
1340	node := mock.Node()
1341	node.Status = structs.NodeStatusDown
1342	alloc1 := mock.Alloc()
1343	alloc1.NodeID = node.ID
1344	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
1345
1346	alloc2 := mock.Alloc()
1347	alloc2.NodeID = node.ID
1348	alloc2.DesiredStatus = structs.AllocDesiredStatusStop
1349	alloc2.ClientStatus = structs.AllocClientStatusRunning
1350
1351	alloc3 := mock.Alloc()
1352	alloc3.NodeID = node.ID
1353	alloc3.DesiredStatus = structs.AllocDesiredStatusStop
1354	alloc3.ClientStatus = structs.AllocClientStatusComplete
1355
1356	alloc4 := mock.Alloc()
1357	alloc4.NodeID = node.ID
1358	alloc4.DesiredStatus = structs.AllocDesiredStatusStop
1359	alloc4.ClientStatus = structs.AllocClientStatusFailed
1360
1361	allocs := []*structs.Allocation{alloc1, alloc2, alloc3, alloc4}
1362	plan := structs.Plan{
1363		NodeUpdate: make(map[string][]*structs.Allocation),
1364	}
1365	tainted := map[string]*structs.Node{node.ID: node}
1366
1367	updateNonTerminalAllocsToLost(&plan, tainted, allocs)
1368
1369	allocsLost := make([]string, 0, 2)
1370	for _, alloc := range plan.NodeUpdate[node.ID] {
1371		allocsLost = append(allocsLost, alloc.ID)
1372	}
1373	expected := []string{alloc1.ID, alloc2.ID}
1374	require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected)
1375
1376	// Update the node status to ready and try again
1377	plan = structs.Plan{
1378		NodeUpdate: make(map[string][]*structs.Allocation),
1379	}
1380	node.Status = structs.NodeStatusReady
1381	updateNonTerminalAllocsToLost(&plan, tainted, allocs)
1382
1383	allocsLost = make([]string, 0, 2)
1384	for _, alloc := range plan.NodeUpdate[node.ID] {
1385		allocsLost = append(allocsLost, alloc.ID)
1386	}
1387	expected = []string{}
1388	require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected)
1389}
1390
1391func TestUtil_connectUpdated(t *testing.T) {
1392	t.Run("both nil", func(t *testing.T) {
1393		require.False(t, connectUpdated(nil, nil))
1394	})
1395
1396	t.Run("one nil", func(t *testing.T) {
1397		require.True(t, connectUpdated(nil, new(structs.ConsulConnect)))
1398	})
1399
1400	t.Run("native differ", func(t *testing.T) {
1401		a := &structs.ConsulConnect{Native: true}
1402		b := &structs.ConsulConnect{Native: false}
1403		require.True(t, connectUpdated(a, b))
1404	})
1405
1406	t.Run("gateway differ", func(t *testing.T) {
1407		a := &structs.ConsulConnect{Gateway: &structs.ConsulGateway{
1408			Ingress: new(structs.ConsulIngressConfigEntry),
1409		}}
1410		b := &structs.ConsulConnect{Gateway: &structs.ConsulGateway{
1411			Terminating: new(structs.ConsulTerminatingConfigEntry),
1412		}}
1413		require.True(t, connectUpdated(a, b))
1414	})
1415
1416	t.Run("sidecar task differ", func(t *testing.T) {
1417		a := &structs.ConsulConnect{SidecarTask: &structs.SidecarTask{
1418			Driver: "exec",
1419		}}
1420		b := &structs.ConsulConnect{SidecarTask: &structs.SidecarTask{
1421			Driver: "docker",
1422		}}
1423		require.True(t, connectUpdated(a, b))
1424	})
1425
1426	t.Run("sidecar service differ", func(t *testing.T) {
1427		a := &structs.ConsulConnect{SidecarService: &structs.ConsulSidecarService{
1428			Port: "1111",
1429		}}
1430		b := &structs.ConsulConnect{SidecarService: &structs.ConsulSidecarService{
1431			Port: "2222",
1432		}}
1433		require.True(t, connectUpdated(a, b))
1434	})
1435
1436	t.Run("same", func(t *testing.T) {
1437		a := new(structs.ConsulConnect)
1438		b := new(structs.ConsulConnect)
1439		require.False(t, connectUpdated(a, b))
1440	})
1441}
1442
1443func TestUtil_connectSidecarServiceUpdated(t *testing.T) {
1444	t.Run("both nil", func(t *testing.T) {
1445		require.False(t, connectSidecarServiceUpdated(nil, nil))
1446	})
1447
1448	t.Run("one nil", func(t *testing.T) {
1449		require.True(t, connectSidecarServiceUpdated(nil, new(structs.ConsulSidecarService)))
1450	})
1451
1452	t.Run("ports differ", func(t *testing.T) {
1453		a := &structs.ConsulSidecarService{Port: "1111"}
1454		b := &structs.ConsulSidecarService{Port: "2222"}
1455		require.True(t, connectSidecarServiceUpdated(a, b))
1456	})
1457
1458	t.Run("same", func(t *testing.T) {
1459		a := &structs.ConsulSidecarService{Port: "1111"}
1460		b := &structs.ConsulSidecarService{Port: "1111"}
1461		require.False(t, connectSidecarServiceUpdated(a, b))
1462	})
1463}
1464