1/*
2Copyright 2016 The Kubernetes Authors.
3
4Licensed under the Apache License, Version 2.0 (the "License");
5you may not use this file except in compliance with the License.
6You may obtain a copy of the License at
7
8    http://www.apache.org/licenses/LICENSE-2.0
9
10Unless required by applicable law or agreed to in writing, software
11distributed under the License is distributed on an "AS IS" BASIS,
12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13See the License for the specific language governing permissions and
14limitations under the License.
15*/
16
17package disruption
18
19import (
20	"context"
21	"flag"
22	"fmt"
23	"os"
24	"runtime/debug"
25	"strings"
26	"sync"
27	"testing"
28	"time"
29
30	apps "k8s.io/api/apps/v1"
31	autoscalingapi "k8s.io/api/autoscaling/v1"
32	v1 "k8s.io/api/core/v1"
33	policy "k8s.io/api/policy/v1"
34	apiequality "k8s.io/apimachinery/pkg/api/equality"
35	"k8s.io/apimachinery/pkg/api/errors"
36	apimeta "k8s.io/apimachinery/pkg/api/meta"
37	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
38	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
39	"k8s.io/apimachinery/pkg/runtime"
40	"k8s.io/apimachinery/pkg/runtime/schema"
41	"k8s.io/apimachinery/pkg/types"
42	"k8s.io/apimachinery/pkg/util/intstr"
43	"k8s.io/apimachinery/pkg/util/uuid"
44	"k8s.io/apimachinery/pkg/util/wait"
45	discoveryfake "k8s.io/client-go/discovery/fake"
46	"k8s.io/client-go/informers"
47	"k8s.io/client-go/kubernetes/fake"
48	scalefake "k8s.io/client-go/scale/fake"
49	core "k8s.io/client-go/testing"
50	"k8s.io/client-go/tools/cache"
51	"k8s.io/client-go/util/workqueue"
52	"k8s.io/klog/v2"
53	_ "k8s.io/kubernetes/pkg/apis/core/install"
54	"k8s.io/kubernetes/pkg/controller"
55	utilpointer "k8s.io/utils/pointer"
56)
57
// pdbStates records the most recent status written for each PDB (by store
// key), acting as a fake in place of the controller's real status updater.
type pdbStates map[string]policy.PodDisruptionBudget

// alwaysReady is used as the informer-synced function so syncs never block on caches.
var alwaysReady = func() bool { return true }
61
62func (ps *pdbStates) Set(pdb *policy.PodDisruptionBudget) error {
63	key, err := controller.KeyFunc(pdb)
64	if err != nil {
65		return err
66	}
67	(*ps)[key] = *pdb.DeepCopy()
68	return nil
69}
70
71func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
72	return (*ps)[key]
73}
74
75func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32,
76	disruptedPodMap map[string]metav1.Time) {
77	actualPDB := ps.Get(key)
78	actualConditions := actualPDB.Status.Conditions
79	actualPDB.Status.Conditions = nil
80	expectedStatus := policy.PodDisruptionBudgetStatus{
81		DisruptionsAllowed: disruptionsAllowed,
82		CurrentHealthy:     currentHealthy,
83		DesiredHealthy:     desiredHealthy,
84		ExpectedPods:       expectedPods,
85		DisruptedPods:      disruptedPodMap,
86		ObservedGeneration: actualPDB.Generation,
87	}
88	actualStatus := actualPDB.Status
89	if !apiequality.Semantic.DeepEqual(actualStatus, expectedStatus) {
90		debug.PrintStack()
91		t.Fatalf("PDB %q status mismatch.  Expected %+v but got %+v.", key, expectedStatus, actualStatus)
92	}
93
94	cond := apimeta.FindStatusCondition(actualConditions, policy.DisruptionAllowedCondition)
95	if cond == nil {
96		t.Fatalf("Expected condition %q, but didn't find it", policy.DisruptionAllowedCondition)
97	}
98	if disruptionsAllowed > 0 {
99		if cond.Status != metav1.ConditionTrue {
100			t.Fatalf("Expected condition %q to have status %q, but was %q",
101				policy.DisruptionAllowedCondition, metav1.ConditionTrue, cond.Status)
102		}
103	} else {
104		if cond.Status != metav1.ConditionFalse {
105			t.Fatalf("Expected condition %q to have status %q, but was %q",
106				policy.DisruptionAllowedCondition, metav1.ConditionFalse, cond.Status)
107		}
108	}
109}
110
111func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionsAllowed int32) {
112	pdb := ps.Get(key)
113	if pdb.Status.DisruptionsAllowed != disruptionsAllowed {
114		debug.PrintStack()
115		t.Fatalf("PodDisruptionAllowed mismatch for PDB %q.  Expected %v but got %v.", key, disruptionsAllowed, pdb.Status.DisruptionsAllowed)
116	}
117}
118
119func (ps *pdbStates) VerifyNoStatusError(t *testing.T, key string) {
120	pdb := ps.Get(key)
121	for _, condition := range pdb.Status.Conditions {
122		if strings.Contains(condition.Message, "found no controller ref") && condition.Reason == policy.SyncFailedReason {
123			t.Fatalf("PodDisruption Controller should not error when unmanaged pods are found but it failed for %q", key)
124		}
125	}
126}
127
// disruptionController bundles a DisruptionController under test with direct
// handles to its informer stores and fake clients, so tests can seed objects
// and stub API reactions without a real API server.
type disruptionController struct {
	*DisruptionController

	// Informer-backed stores that tests populate directly via add/update.
	podStore cache.Store
	pdbStore cache.Store
	rcStore  cache.Store
	rsStore  cache.Store
	dStore   cache.Store
	ssStore  cache.Store

	// Fake clients, exposed so tests can register reactors (e.g. scale "get").
	coreClient      *fake.Clientset
	scaleClient     *scalefake.FakeScaleClient
	discoveryClient *discoveryfake.FakeDiscovery
}
142
// customGVK identifies a fake custom resource kind used to exercise the
// scale-subresource lookup path for arbitrary (non-built-in) controllers.
var customGVK = schema.GroupVersionKind{
	Group:   "custom.k8s.io",
	Version: "v1",
	Kind:    "customresource",
}
148
149func newFakeDisruptionController() (*disruptionController, *pdbStates) {
150	ps := &pdbStates{}
151
152	coreClient := fake.NewSimpleClientset()
153	informerFactory := informers.NewSharedInformerFactory(coreClient, controller.NoResyncPeriodFunc())
154
155	scheme := runtime.NewScheme()
156	scheme.AddKnownTypeWithName(customGVK, &v1.Service{})
157	fakeScaleClient := &scalefake.FakeScaleClient{}
158	fakeDiscovery := &discoveryfake.FakeDiscovery{
159		Fake: &core.Fake{},
160	}
161
162	dc := NewDisruptionController(
163		informerFactory.Core().V1().Pods(),
164		informerFactory.Policy().V1().PodDisruptionBudgets(),
165		informerFactory.Core().V1().ReplicationControllers(),
166		informerFactory.Apps().V1().ReplicaSets(),
167		informerFactory.Apps().V1().Deployments(),
168		informerFactory.Apps().V1().StatefulSets(),
169		coreClient,
170		testrestmapper.TestOnlyStaticRESTMapper(scheme),
171		fakeScaleClient,
172		fakeDiscovery,
173	)
174	dc.getUpdater = func() updater { return ps.Set }
175	dc.podListerSynced = alwaysReady
176	dc.pdbListerSynced = alwaysReady
177	dc.rcListerSynced = alwaysReady
178	dc.rsListerSynced = alwaysReady
179	dc.dListerSynced = alwaysReady
180	dc.ssListerSynced = alwaysReady
181
182	informerFactory.Start(context.TODO().Done())
183	informerFactory.WaitForCacheSync(nil)
184
185	return &disruptionController{
186		dc,
187		informerFactory.Core().V1().Pods().Informer().GetStore(),
188		informerFactory.Policy().V1().PodDisruptionBudgets().Informer().GetStore(),
189		informerFactory.Core().V1().ReplicationControllers().Informer().GetStore(),
190		informerFactory.Apps().V1().ReplicaSets().Informer().GetStore(),
191		informerFactory.Apps().V1().Deployments().Informer().GetStore(),
192		informerFactory.Apps().V1().StatefulSets().Informer().GetStore(),
193		coreClient,
194		fakeScaleClient,
195		fakeDiscovery,
196	}, ps
197}
198
// fooBar returns a fresh copy of the canonical test label set {"foo": "bar"}.
func fooBar() map[string]string {
	labels := make(map[string]string, 1)
	labels["foo"] = "bar"
	return labels
}
202
203func newSel(labels map[string]string) *metav1.LabelSelector {
204	return &metav1.LabelSelector{MatchLabels: labels}
205}
206
207func newSelFooBar() *metav1.LabelSelector {
208	return newSel(map[string]string{"foo": "bar"})
209}
210
211func newMinAvailablePodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
212
213	pdb := &policy.PodDisruptionBudget{
214		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
215		ObjectMeta: metav1.ObjectMeta{
216			UID:             uuid.NewUUID(),
217			Name:            "foobar",
218			Namespace:       metav1.NamespaceDefault,
219			ResourceVersion: "18",
220		},
221		Spec: policy.PodDisruptionBudgetSpec{
222			MinAvailable: &minAvailable,
223			Selector:     newSelFooBar(),
224		},
225	}
226
227	pdbName, err := controller.KeyFunc(pdb)
228	if err != nil {
229		t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
230	}
231
232	return pdb, pdbName
233}
234
235func newMaxUnavailablePodDisruptionBudget(t *testing.T, maxUnavailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
236	pdb := &policy.PodDisruptionBudget{
237		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
238		ObjectMeta: metav1.ObjectMeta{
239			UID:             uuid.NewUUID(),
240			Name:            "foobar",
241			Namespace:       metav1.NamespaceDefault,
242			ResourceVersion: "18",
243		},
244		Spec: policy.PodDisruptionBudgetSpec{
245			MaxUnavailable: &maxUnavailable,
246			Selector:       newSelFooBar(),
247		},
248	}
249
250	pdbName, err := controller.KeyFunc(pdb)
251	if err != nil {
252		t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
253	}
254
255	return pdb, pdbName
256}
257
258func updatePodOwnerToRc(t *testing.T, pod *v1.Pod, rc *v1.ReplicationController) {
259	var controllerReference metav1.OwnerReference
260	var trueVar = true
261	controllerReference = metav1.OwnerReference{UID: rc.UID, APIVersion: controllerKindRC.GroupVersion().String(), Kind: controllerKindRC.Kind, Name: rc.Name, Controller: &trueVar}
262	pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
263}
264
265func updatePodOwnerToRs(t *testing.T, pod *v1.Pod, rs *apps.ReplicaSet) {
266	var controllerReference metav1.OwnerReference
267	var trueVar = true
268	controllerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: controllerKindRS.GroupVersion().String(), Kind: controllerKindRS.Kind, Name: rs.Name, Controller: &trueVar}
269	pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
270}
271
272//	pod, podName := newPod(t, name)
273func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) {
274	var controllerReference metav1.OwnerReference
275	var trueVar = true
276	controllerReference = metav1.OwnerReference{UID: ss.UID, APIVersion: controllerKindSS.GroupVersion().String(), Kind: controllerKindSS.Kind, Name: ss.Name, Controller: &trueVar}
277	pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
278}
279
280func newPod(t *testing.T, name string) (*v1.Pod, string) {
281	pod := &v1.Pod{
282		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
283		ObjectMeta: metav1.ObjectMeta{
284			UID:             uuid.NewUUID(),
285			Annotations:     make(map[string]string),
286			Name:            name,
287			Namespace:       metav1.NamespaceDefault,
288			ResourceVersion: "18",
289			Labels:          fooBar(),
290		},
291		Spec: v1.PodSpec{},
292		Status: v1.PodStatus{
293			Conditions: []v1.PodCondition{
294				{Type: v1.PodReady, Status: v1.ConditionTrue},
295			},
296		},
297	}
298
299	podName, err := controller.KeyFunc(pod)
300	if err != nil {
301		t.Fatalf("Unexpected error naming pod %q: %v", pod.Name, err)
302	}
303
304	return pod, podName
305}
306
307func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
308	rc := &v1.ReplicationController{
309		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
310		ObjectMeta: metav1.ObjectMeta{
311			UID:             uuid.NewUUID(),
312			Name:            "foobar",
313			Namespace:       metav1.NamespaceDefault,
314			ResourceVersion: "18",
315			Labels:          fooBar(),
316		},
317		Spec: v1.ReplicationControllerSpec{
318			Replicas: &size,
319			Selector: fooBar(),
320		},
321	}
322
323	rcName, err := controller.KeyFunc(rc)
324	if err != nil {
325		t.Fatalf("Unexpected error naming RC %q", rc.Name)
326	}
327
328	return rc, rcName
329}
330
331func newDeployment(t *testing.T, size int32) (*apps.Deployment, string) {
332	d := &apps.Deployment{
333		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
334		ObjectMeta: metav1.ObjectMeta{
335			UID:             uuid.NewUUID(),
336			Name:            "foobar",
337			Namespace:       metav1.NamespaceDefault,
338			ResourceVersion: "18",
339			Labels:          fooBar(),
340		},
341		Spec: apps.DeploymentSpec{
342			Replicas: &size,
343			Selector: newSelFooBar(),
344		},
345	}
346
347	dName, err := controller.KeyFunc(d)
348	if err != nil {
349		t.Fatalf("Unexpected error naming Deployment %q: %v", d.Name, err)
350	}
351
352	return d, dName
353}
354
355func newReplicaSet(t *testing.T, size int32) (*apps.ReplicaSet, string) {
356	rs := &apps.ReplicaSet{
357		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
358		ObjectMeta: metav1.ObjectMeta{
359			UID:             uuid.NewUUID(),
360			Name:            "foobar",
361			Namespace:       metav1.NamespaceDefault,
362			ResourceVersion: "18",
363			Labels:          fooBar(),
364		},
365		Spec: apps.ReplicaSetSpec{
366			Replicas: &size,
367			Selector: newSelFooBar(),
368		},
369	}
370
371	rsName, err := controller.KeyFunc(rs)
372	if err != nil {
373		t.Fatalf("Unexpected error naming ReplicaSet %q: %v", rs.Name, err)
374	}
375
376	return rs, rsName
377}
378
379func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) {
380	ss := &apps.StatefulSet{
381		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
382		ObjectMeta: metav1.ObjectMeta{
383			UID:             uuid.NewUUID(),
384			Name:            "foobar",
385			Namespace:       metav1.NamespaceDefault,
386			ResourceVersion: "18",
387			Labels:          fooBar(),
388		},
389		Spec: apps.StatefulSetSpec{
390			Replicas: &size,
391			Selector: newSelFooBar(),
392		},
393	}
394
395	ssName, err := controller.KeyFunc(ss)
396	if err != nil {
397		t.Fatalf("Unexpected error naming StatefulSet %q: %v", ss.Name, err)
398	}
399
400	return ss, ssName
401}
402
403func update(t *testing.T, store cache.Store, obj interface{}) {
404	if err := store.Update(obj); err != nil {
405		t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
406	}
407}
408
409func add(t *testing.T, store cache.Store, obj interface{}) {
410	if err := store.Add(obj); err != nil {
411		t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
412	}
413}
414
415// Create one with no selector.  Verify it matches all pods
416func TestNoSelector(t *testing.T) {
417	dc, ps := newFakeDisruptionController()
418
419	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
420	pdb.Spec.Selector = &metav1.LabelSelector{}
421	pod, _ := newPod(t, "yo-yo-yo")
422
423	add(t, dc.pdbStore, pdb)
424	dc.sync(pdbName)
425	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})
426
427	add(t, dc.podStore, pod)
428	dc.sync(pdbName)
429	ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 1, map[string]metav1.Time{})
430}
431
// Verify that available/expected counts go up as we add pods, then verify that
// available count goes down when we make a pod unavailable.
func TestUnavailable(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)

	// Add four pods, verifying that the counts go up at each step.
	pods := []*v1.Pod{}
	for i := int32(0); i < 4; i++ {
		ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]metav1.Time{})
		pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
		pods = append(pods, pod)
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
	}
	// Four healthy pods with minAvailable=3 leaves room for one disruption.
	ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]metav1.Time{})

	// Now set one pod as unavailable
	pods[0].Status.Conditions = []v1.PodCondition{}
	update(t, dc.podStore, pods[0])
	dc.sync(pdbName)

	// Verify expected update: 3 healthy of 4 expected, no disruption allowed.
	ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4, map[string]metav1.Time{})
}
460
461// Verify that an integer MaxUnavailable won't
462// allow a disruption for pods with no controller.
463func TestIntegerMaxUnavailable(t *testing.T) {
464	dc, ps := newFakeDisruptionController()
465
466	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(1))
467	add(t, dc.pdbStore, pdb)
468	dc.sync(pdbName)
469	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
470	ps.VerifyDisruptionAllowed(t, pdbName, 0)
471
472	pod, _ := newPod(t, "naked")
473	add(t, dc.podStore, pod)
474	dc.sync(pdbName)
475
476	ps.VerifyDisruptionAllowed(t, pdbName, 0)
477}
478
// Verify that an integer MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestIntegerMaxUnavailableWithScaling(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(2))
	add(t, dc.pdbStore, pdb)

	rs, _ := newReplicaSet(t, 7)
	add(t, dc.rsStore, rs)

	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	// 7 expected (from the RS scale) - maxUnavailable 2 = 5 desired healthy;
	// only 1 pod is actually healthy, so no disruptions are allowed.
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 5, 7, map[string]metav1.Time{})

	// Update scale of ReplicaSet and check PDB
	rs.Spec.Replicas = utilpointer.Int32Ptr(5)
	update(t, dc.rsStore, rs)

	dc.sync(pdbName)
	// Desired healthy recomputed: 5 expected - 2 maxUnavailable = 3.
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 5, map[string]metav1.Time{})
}
503
// Verify that a percentage MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestPercentageMaxUnavailableWithScaling(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromString("30%"))
	add(t, dc.pdbStore, pdb)

	rs, _ := newReplicaSet(t, 7)
	add(t, dc.rsStore, rs)

	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	// With 7 expected pods and maxUnavailable 30%, 4 must remain healthy;
	// only 1 is healthy, so no disruptions are allowed.
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 4, 7, map[string]metav1.Time{})

	// Update scale of ReplicaSet and check PDB
	rs.Spec.Replicas = utilpointer.Int32Ptr(3)
	update(t, dc.rsStore, rs)

	dc.sync(pdbName)
	// Recomputed for 3 expected pods: 2 must remain healthy.
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 3, map[string]metav1.Time{})
}
528
529// Create a pod  with no controller, and verify that a PDB with a percentage
530// specified won't allow a disruption.
531func TestNakedPod(t *testing.T) {
532	dc, ps := newFakeDisruptionController()
533
534	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
535	add(t, dc.pdbStore, pdb)
536	dc.sync(pdbName)
537	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
538	ps.VerifyDisruptionAllowed(t, pdbName, 0)
539
540	pod, _ := newPod(t, "naked")
541	add(t, dc.podStore, pod)
542	dc.sync(pdbName)
543
544	ps.VerifyDisruptionAllowed(t, pdbName, 0)
545}
546
547// Verify that disruption controller is not erroring when unmanaged pods are found
548func TestStatusForUnmanagedPod(t *testing.T) {
549	dc, ps := newFakeDisruptionController()
550
551	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
552	add(t, dc.pdbStore, pdb)
553	dc.sync(pdbName)
554	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
555	ps.VerifyDisruptionAllowed(t, pdbName, 0)
556
557	pod, _ := newPod(t, "unmanaged")
558	add(t, dc.podStore, pod)
559	dc.sync(pdbName)
560
561	ps.VerifyNoStatusError(t, pdbName)
562
563}
564
// Check if the unmanaged pods are correctly collected or not
func TestTotalUnmanagedPods(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)
	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
	ps.VerifyDisruptionAllowed(t, pdbName, 0)

	pod, _ := newPod(t, "unmanaged")
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	// getExpectedScale should report the controller-less pod as unmanaged.
	var pods []*v1.Pod
	pods = append(pods, pod)
	_, unmanagedPods, _ := dc.getExpectedScale(pdb, pods)
	if len(unmanagedPods) != 1 {
		t.Fatalf("expected one pod to be unmanaged pod but found %d", len(unmanagedPods))
	}
	// And the sync above must not have recorded a sync-failed condition.
	ps.VerifyNoStatusError(t, pdbName)

}
587
// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	// 20% of 10 replicas => 2 desired healthy.
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("20%"))
	add(t, dc.pdbStore, pdb)

	rs, _ := newReplicaSet(t, 10)
	add(t, dc.rsStore, rs)

	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	// ExpectedPods (10) comes from the ReplicaSet scale, not the single live pod.
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]metav1.Time{})
}
604
// TestScaleResource verifies that the expected pod count for a PDB can be
// derived from the scale subresource of an arbitrary custom-resource owner.
func TestScaleResource(t *testing.T) {
	customResourceUID := uuid.NewUUID()
	replicas := int32(10)
	pods := int32(4)
	maxUnavailable := int32(5)

	dc, ps := newFakeDisruptionController()

	// Stub the scale client so "get" on the custom resource reports 10 replicas.
	dc.scaleClient.AddReactor("get", "customresources", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := &autoscalingapi.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: metav1.NamespaceDefault,
				UID:       customResourceUID,
			},
			Spec: autoscalingapi.ScaleSpec{
				Replicas: replicas,
			},
		}
		return true, obj, nil
	})

	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(int(maxUnavailable)))
	add(t, dc.pdbStore, pdb)

	// Create 4 pods, each with the custom resource as its controller owner.
	trueVal := true
	for i := 0; i < int(pods); i++ {
		pod, _ := newPod(t, fmt.Sprintf("pod-%d", i))
		pod.SetOwnerReferences([]metav1.OwnerReference{
			{
				Kind:       customGVK.Kind,
				APIVersion: customGVK.GroupVersion().String(),
				Controller: &trueVal,
				UID:        customResourceUID,
			},
		})
		add(t, dc.podStore, pod)
	}

	dc.sync(pdbName)
	// 10 expected - 4 healthy = 6 already unavailable, which is >= the
	// maxUnavailable of 5, so disruptionsAllowed remains 0 here.
	disruptionsAllowed := int32(0)
	if replicas-pods < maxUnavailable {
		disruptionsAllowed = maxUnavailable - (replicas - pods)
	}
	ps.VerifyPdbStatus(t, pdbName, disruptionsAllowed, pods, replicas-maxUnavailable, replicas, map[string]metav1.Time{})
}
650
// TestScaleFinderNoResource verifies getScaleController's behavior when the
// scale "get" returns NotFound: it should succeed (treating the resource as
// absent) only if discovery shows the resource implements the scale
// subresource, and error otherwise.
func TestScaleFinderNoResource(t *testing.T) {
	resourceName := "customresources"
	testCases := map[string]struct {
		apiResources []metav1.APIResource
		expectError  bool
	}{
		"resource implements scale": {
			apiResources: []metav1.APIResource{
				{
					Kind: customGVK.Kind,
					Name: resourceName,
				},
				{
					Kind: customGVK.Kind,
					Name: resourceName + "/scale",
				},
			},
			expectError: false,
		},
		"resource does not implement scale": {
			apiResources: []metav1.APIResource{
				{
					Kind: customGVK.Kind,
					Name: resourceName,
				},
			},
			expectError: true,
		},
	}

	for tn, tc := range testCases {
		t.Run(tn, func(t *testing.T) {
			customResourceUID := uuid.NewUUID()

			dc, _ := newFakeDisruptionController()

			// The scale client always reports the resource as missing.
			dc.scaleClient.AddReactor("get", resourceName, func(action core.Action) (handled bool, ret runtime.Object, err error) {
				gr := schema.GroupResource{
					Group:    customGVK.Group,
					Resource: resourceName,
				}
				return true, nil, errors.NewNotFound(gr, "name")
			})
			// Discovery advertises the per-case API resource list.
			dc.discoveryClient.Resources = []*metav1.APIResourceList{
				{
					GroupVersion: customGVK.GroupVersion().String(),
					APIResources: tc.apiResources,
				},
			}

			trueVal := true
			ownerRef := &metav1.OwnerReference{
				Kind:       customGVK.Kind,
				APIVersion: customGVK.GroupVersion().String(),
				Controller: &trueVal,
				UID:        customResourceUID,
			}

			_, err := dc.getScaleController(ownerRef, "default")

			if tc.expectError && err == nil {
				t.Error("expected error, but didn't get one")
			}

			if !tc.expectError && err != nil {
				t.Errorf("did not expect error, but got %v", err)
			}
		})
	}
}
721
// Verify that multiple controllers doesn't allow the PDB to be set true.
func TestMultipleControllers(t *testing.T) {
	const podCount = 2

	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("1%"))
	add(t, dc.pdbStore, pdb)

	// Two matching pods, initially without any owning controller.
	pods := []*v1.Pod{}
	for i := 0; i < podCount; i++ {
		pod, _ := newPod(t, fmt.Sprintf("pod %d", i))
		pods = append(pods, pod)
		add(t, dc.podStore, pod)
	}

	dc.sync(pdbName)

	// No controllers yet => no disruption allowed
	ps.VerifyDisruptionAllowed(t, pdbName, 0)

	// Attach both pods to a first RC of size 1.
	rc, _ := newReplicationController(t, 1)
	rc.Name = "rc 1"
	for i := 0; i < podCount; i++ {
		updatePodOwnerToRc(t, pods[i], rc)
	}
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)
	// One RC and 200%>1% healthy => disruption allowed
	ps.VerifyDisruptionAllowed(t, pdbName, 1)

	// Attach both pods to a second RC as well.
	rc, _ = newReplicationController(t, 1)
	rc.Name = "rc 2"
	for i := 0; i < podCount; i++ {
		updatePodOwnerToRc(t, pods[i], rc)
	}
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)

	// 100%>1% healthy BUT two RCs => no disruption allowed
	// TODO: Find out if this assert is still needed
	//ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
765
// TestReplicationController walks a PDB through an RC whose pods are added
// one by one, then adds a controller-less "rogue" pod matching the budget.
func TestReplicationController(t *testing.T) {
	// The budget in this test matches foo=bar, but the RC and its pods match
	// {foo=bar, baz=quux}.  Later, when we add a rogue pod with only a foo=bar
	// label, it will match the budget but have no controllers, which should
	// trigger the controller to set PodDisruptionAllowed to false.
	labels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}

	dc, ps := newFakeDisruptionController()

	// 34% should round up to 2
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
	add(t, dc.pdbStore, pdb)
	rc, _ := newReplicationController(t, 3)
	rc.Spec.Selector = labels
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)

	// It starts out at 0 expected because, with no pods, the PDB doesn't know
	// about the RC.  This is a known bug.  TODO(mml): file issue
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

	for i := int32(0); i < 3; i++ {
		pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
		updatePodOwnerToRc(t, pod, rc)
		pod.Labels = labels
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		// Only once all 3 pods are healthy does one disruption become allowed.
		if i < 2 {
			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
		}
	}

	rogue, _ := newPod(t, "rogue")
	add(t, dc.podStore, rogue)
	dc.sync(pdbName)
	// NOTE(review): despite the comment at the top, the assertion here is 2
	// allowed disruptions, not 0 — the unmanaged rogue pod evidently no
	// longer zeroes out DisruptionsAllowed. Confirm the comment is stale.
	ps.VerifyDisruptionAllowed(t, pdbName, 2)
}
808
// TestStatefulSetController verifies that PDB status tracks pods owned by a
// StatefulSet as they are added one at a time.
func TestStatefulSetController(t *testing.T) {
	labels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}

	dc, ps := newFakeDisruptionController()

	// 34% should round up to 2
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
	add(t, dc.pdbStore, pdb)
	ss, _ := newStatefulSet(t, 3)
	add(t, dc.ssStore, ss)
	dc.sync(pdbName)

	// It starts out at 0 expected because, with no pods, the PDB doesn't know
	// about the SS.  This is a known bug.  TODO(mml): file issue
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

	for i := int32(0); i < 3; i++ {
		pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
		updatePodOwnerToSs(t, pod, ss)
		pod.Labels = labels
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		// Only once all 3 pods are healthy does one disruption become allowed.
		if i < 2 {
			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
		}
	}
}
841
// TestTwoControllers exercises a PDB whose selector spans pods from two
// different controllers (an RC and a ReplicaSet), verifying intermediate
// status at each step as pods are added and toggled ready/unready.
func TestTwoControllers(t *testing.T) {
	// Most of this test is in verifying intermediate cases as we define the
	// three controllers and create the pods.
	rcLabels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}
	dLabels := map[string]string{
		"foo": "bar",
		"baz": "quuux",
	}
	dc, ps := newFakeDisruptionController()

	// These constants are related, but I avoid calculating the correct values in
	// code.  If you update a parameter here, recalculate the correct values for
	// all of them.  Further down in the test, we use these to control loops, and
	// that level of logic is enough complexity for me.
	const collectionSize int32 = 11 // How big each collection is
	const minimumOne int32 = 4      // integer minimum with one controller
	const minimumTwo int32 = 7      // integer minimum with two controllers

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbStore, pdb)
	rc, _ := newReplicationController(t, collectionSize)
	rc.Spec.Selector = rcLabels
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)

	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

	pods := []*v1.Pod{}

	// First collection: RC-owned pods, with an initial run of unavailable ones.
	unavailablePods := collectionSize - minimumOne - 1
	for i := int32(1); i <= collectionSize; i++ {
		pod, _ := newPod(t, fmt.Sprintf("quux %d", i))
		updatePodOwnerToRc(t, pod, rc)
		pods = append(pods, pod)
		pod.Labels = rcLabels
		if i <= unavailablePods {
			pod.Status.Conditions = []v1.PodCondition{}
		}
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i <= unavailablePods {
			ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize, map[string]metav1.Time{})
		} else if i-unavailablePods <= minimumOne {
			ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
		}
	}

	// A Deployment with a non-matching selector doesn't change the status.
	d, _ := newDeployment(t, collectionSize)
	d.Spec.Selector = newSel(dLabels)
	add(t, dc.dStore, d)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})

	// Nor does a ReplicaSet with no pods yet.
	rs, _ := newReplicaSet(t, collectionSize)
	rs.Spec.Selector = newSel(dLabels)
	rs.Labels = dLabels
	add(t, dc.rsStore, rs)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})

	// By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
	unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
	for i := int32(1); i <= collectionSize; i++ {
		pod, _ := newPod(t, fmt.Sprintf("quuux %d", i))
		updatePodOwnerToRs(t, pod, rs)
		pods = append(pods, pod)
		pod.Labels = dLabels
		if i <= unavailablePods {
			pod.Status.Conditions = []v1.PodCondition{}
		}
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i <= unavailablePods {
			ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
		} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
			ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, i-unavailablePods-(minimumTwo-(minimumOne+1)),
				(minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
		}
	}

	// Now we verify we can bring down 1 pod and a disruption is still permitted,
	// but if we bring down two, it's not.  Then we make the pod ready again and
	// verify that a disruption is permitted again.
	ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
	pods[collectionSize-1].Status.Conditions = []v1.PodCondition{}
	update(t, dc.podStore, pods[collectionSize-1])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})

	pods[collectionSize-2].Status.Conditions = []v1.PodCondition{}
	update(t, dc.podStore, pods[collectionSize-2])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})

	pods[collectionSize-1].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
	update(t, dc.podStore, pods[collectionSize-1])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
}
948
949// Test pdb doesn't exist
950func TestPDBNotExist(t *testing.T) {
951	dc, _ := newFakeDisruptionController()
952	pdb, _ := newMinAvailablePodDisruptionBudget(t, intstr.FromString("67%"))
953	add(t, dc.pdbStore, pdb)
954	if err := dc.sync("notExist"); err != nil {
955		t.Errorf("Unexpected error: %v, expect nil", err)
956	}
957}
958
959func TestUpdateDisruptedPods(t *testing.T) {
960	dc, ps := newFakeDisruptionController()
961	dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb_queue")
962	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
963	currentTime := time.Now()
964	pdb.Status.DisruptedPods = map[string]metav1.Time{
965		"p1":       {Time: currentTime},                       // Should be removed, pod deletion started.
966		"p2":       {Time: currentTime.Add(-5 * time.Minute)}, // Should be removed, expired.
967		"p3":       {Time: currentTime},                       // Should remain, pod untouched.
968		"notthere": {Time: currentTime},                       // Should be removed, pod deleted.
969	}
970	add(t, dc.pdbStore, pdb)
971
972	pod1, _ := newPod(t, "p1")
973	pod1.DeletionTimestamp = &metav1.Time{Time: time.Now()}
974	pod2, _ := newPod(t, "p2")
975	pod3, _ := newPod(t, "p3")
976
977	add(t, dc.podStore, pod1)
978	add(t, dc.podStore, pod2)
979	add(t, dc.podStore, pod3)
980
981	dc.sync(pdbName)
982
983	ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]metav1.Time{"p3": {Time: currentTime}})
984}
985
986func TestBasicFinderFunctions(t *testing.T) {
987	dc, _ := newFakeDisruptionController()
988
989	rs, _ := newReplicaSet(t, 10)
990	add(t, dc.rsStore, rs)
991	rc, _ := newReplicationController(t, 12)
992	add(t, dc.rcStore, rc)
993	ss, _ := newStatefulSet(t, 14)
994	add(t, dc.ssStore, ss)
995
996	testCases := map[string]struct {
997		finderFunc    podControllerFinder
998		apiVersion    string
999		kind          string
1000		name          string
1001		uid           types.UID
1002		findsScale    bool
1003		expectedScale int32
1004	}{
1005		"replicaset controller with apps group": {
1006			finderFunc:    dc.getPodReplicaSet,
1007			apiVersion:    "apps/v1",
1008			kind:          controllerKindRS.Kind,
1009			name:          rs.Name,
1010			uid:           rs.UID,
1011			findsScale:    true,
1012			expectedScale: 10,
1013		},
1014		"replicaset controller with invalid group": {
1015			finderFunc: dc.getPodReplicaSet,
1016			apiVersion: "invalid/v1",
1017			kind:       controllerKindRS.Kind,
1018			name:       rs.Name,
1019			uid:        rs.UID,
1020			findsScale: false,
1021		},
1022		"replicationcontroller with empty group": {
1023			finderFunc:    dc.getPodReplicationController,
1024			apiVersion:    "/v1",
1025			kind:          controllerKindRC.Kind,
1026			name:          rc.Name,
1027			uid:           rc.UID,
1028			findsScale:    true,
1029			expectedScale: 12,
1030		},
1031		"replicationcontroller with invalid group": {
1032			finderFunc: dc.getPodReplicationController,
1033			apiVersion: "apps/v1",
1034			kind:       controllerKindRC.Kind,
1035			name:       rc.Name,
1036			uid:        rc.UID,
1037			findsScale: false,
1038		},
1039		"statefulset controller with extensions group": {
1040			finderFunc:    dc.getPodStatefulSet,
1041			apiVersion:    "apps/v1",
1042			kind:          controllerKindSS.Kind,
1043			name:          ss.Name,
1044			uid:           ss.UID,
1045			findsScale:    true,
1046			expectedScale: 14,
1047		},
1048		"statefulset controller with invalid kind": {
1049			finderFunc: dc.getPodStatefulSet,
1050			apiVersion: "apps/v1",
1051			kind:       controllerKindRS.Kind,
1052			name:       ss.Name,
1053			uid:        ss.UID,
1054			findsScale: false,
1055		},
1056	}
1057
1058	for tn, tc := range testCases {
1059		t.Run(tn, func(t *testing.T) {
1060			controllerRef := &metav1.OwnerReference{
1061				APIVersion: tc.apiVersion,
1062				Kind:       tc.kind,
1063				Name:       tc.name,
1064				UID:        tc.uid,
1065			}
1066
1067			controllerAndScale, _ := tc.finderFunc(controllerRef, metav1.NamespaceDefault)
1068
1069			if controllerAndScale == nil {
1070				if tc.findsScale {
1071					t.Error("Expected scale, but got nil")
1072				}
1073				return
1074			}
1075
1076			if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
1077				t.Errorf("Expected scale %d, but got %d", want, got)
1078			}
1079
1080			if got, want := controllerAndScale.UID, tc.uid; got != want {
1081				t.Errorf("Expected uid %s, but got %s", want, got)
1082			}
1083		})
1084	}
1085}
1086
1087func TestDeploymentFinderFunction(t *testing.T) {
1088	labels := map[string]string{
1089		"foo": "bar",
1090	}
1091
1092	testCases := map[string]struct {
1093		rsApiVersion  string
1094		rsKind        string
1095		depApiVersion string
1096		depKind       string
1097		findsScale    bool
1098		expectedScale int32
1099	}{
1100		"happy path": {
1101			rsApiVersion:  "apps/v1",
1102			rsKind:        controllerKindRS.Kind,
1103			depApiVersion: "extensions/v1",
1104			depKind:       controllerKindDep.Kind,
1105			findsScale:    true,
1106			expectedScale: 10,
1107		},
1108		"invalid rs apiVersion": {
1109			rsApiVersion:  "invalid/v1",
1110			rsKind:        controllerKindRS.Kind,
1111			depApiVersion: "apps/v1",
1112			depKind:       controllerKindDep.Kind,
1113			findsScale:    false,
1114		},
1115		"invalid rs kind": {
1116			rsApiVersion:  "apps/v1",
1117			rsKind:        "InvalidKind",
1118			depApiVersion: "apps/v1",
1119			depKind:       controllerKindDep.Kind,
1120			findsScale:    false,
1121		},
1122		"invalid deployment apiVersion": {
1123			rsApiVersion:  "extensions/v1",
1124			rsKind:        controllerKindRS.Kind,
1125			depApiVersion: "deployment/v1",
1126			depKind:       controllerKindDep.Kind,
1127			findsScale:    false,
1128		},
1129		"invalid deployment kind": {
1130			rsApiVersion:  "apps/v1",
1131			rsKind:        controllerKindRS.Kind,
1132			depApiVersion: "extensions/v1",
1133			depKind:       "InvalidKind",
1134			findsScale:    false,
1135		},
1136	}
1137
1138	for tn, tc := range testCases {
1139		t.Run(tn, func(t *testing.T) {
1140			dc, _ := newFakeDisruptionController()
1141
1142			dep, _ := newDeployment(t, 10)
1143			dep.Spec.Selector = newSel(labels)
1144			add(t, dc.dStore, dep)
1145
1146			rs, _ := newReplicaSet(t, 5)
1147			rs.Labels = labels
1148			trueVal := true
1149			rs.OwnerReferences = append(rs.OwnerReferences, metav1.OwnerReference{
1150				APIVersion: tc.depApiVersion,
1151				Kind:       tc.depKind,
1152				Name:       dep.Name,
1153				UID:        dep.UID,
1154				Controller: &trueVal,
1155			})
1156			add(t, dc.rsStore, rs)
1157
1158			controllerRef := &metav1.OwnerReference{
1159				APIVersion: tc.rsApiVersion,
1160				Kind:       tc.rsKind,
1161				Name:       rs.Name,
1162				UID:        rs.UID,
1163			}
1164
1165			controllerAndScale, _ := dc.getPodDeployment(controllerRef, metav1.NamespaceDefault)
1166
1167			if controllerAndScale == nil {
1168				if tc.findsScale {
1169					t.Error("Expected scale, but got nil")
1170				}
1171				return
1172			}
1173
1174			if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
1175				t.Errorf("Expected scale %d, but got %d", want, got)
1176			}
1177
1178			if got, want := controllerAndScale.UID, dep.UID; got != want {
1179				t.Errorf("Expected uid %s, but got %s", want, got)
1180			}
1181		})
1182	}
1183}
1184
// This test checks that the disruption controller does not write stale data to
// a PDB status during race conditions with the eviction handler. Specifically,
// failed updates due to ResourceVersion conflict should not cause a stale value
// of DisruptionsAllowed to be written.
//
// In this test, DisruptionsAllowed starts at 2.
// (A) We will delete 1 pod and trigger DisruptionController to set
// DisruptionsAllowed to 1.
// (B) As the DisruptionController attempts this write, we will evict the
// remaining 2 pods and update DisruptionsAllowed to 0. (The real eviction
// handler would allow this because it still sees DisruptionsAllowed=2.)
// (C) If the DisruptionController writes DisruptionsAllowed=1 despite the
// resource conflict error, then there is a bug.
func TestUpdatePDBStatusRetries(t *testing.T) {
	dc, _ := newFakeDisruptionController()
	// Inject the production code over our fake impl, so the real
	// conflict-handling path in writePdbStatus is what gets exercised.
	dc.getUpdater = func() updater { return dc.writePdbStatus }

	// Create a PDB and 3 pods that match it.
	pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
	pdb, err := dc.coreClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Failed to create PDB: %v", err)
	}
	podNames := []string{"moe", "larry", "curly"}
	for _, name := range podNames {
		pod, _ := newPod(t, name)
		_, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create pod: %v", err)
		}
	}

	// Block until the fake clientset writes are observable in the informer caches.
	// FUN FACT: This guarantees that the informer caches have updated, but it does
	// not guarantee that informer event handlers have completed. Fortunately,
	// DisruptionController does most of its logic by reading from informer
	// listers, so this guarantee is sufficient.
	if err := waitForCacheCount(dc.pdbStore, 1); err != nil {
		t.Fatalf("Failed to verify PDB in informer cache: %v", err)
	}
	if err := waitForCacheCount(dc.podStore, len(podNames)); err != nil {
		t.Fatalf("Failed to verify pods in informer cache: %v", err)
	}

	// Sync DisruptionController once to update PDB status.
	if err := dc.sync(pdbKey); err != nil {
		t.Fatalf("Failed initial sync: %v", err)
	}

	// Evict simulates the visible effects of eviction in our fake client.
	evict := func(podNames ...string) {
		// These GVRs are copied from the generated fake code because they are not exported.
		var (
			podsResource                 = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
			poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1", Resource: "poddisruptionbudgets"}
		)

		// Bypass the coreClient.Fake and write directly to the ObjectTracker, because
		// this helper will be called while the Fake is holding a lock.
		obj, err := dc.coreClient.Tracker().Get(poddisruptionbudgetsResource, pdb.Namespace, pdb.Name)
		if err != nil {
			t.Fatalf("Failed to get PDB: %v", err)
		}
		updatedPDB := obj.(*policy.PodDisruptionBudget)
		// Each eviction,
		// - decrements DisruptionsAllowed
		// - adds the pod to DisruptedPods
		// - deletes the pod
		updatedPDB.Status.DisruptionsAllowed -= int32(len(podNames))
		updatedPDB.Status.DisruptedPods = make(map[string]metav1.Time)
		for _, name := range podNames {
			updatedPDB.Status.DisruptedPods[name] = metav1.NewTime(time.Now())
		}
		if err := dc.coreClient.Tracker().Update(poddisruptionbudgetsResource, updatedPDB, updatedPDB.Namespace); err != nil {
			t.Fatalf("Eviction (PDB update) failed: %v", err)
		}
		for _, name := range podNames {
			if err := dc.coreClient.Tracker().Delete(podsResource, "default", name); err != nil {
				t.Fatalf("Eviction (pod delete) failed: %v", err)
			}
		}
	}

	// The fake kube client does not update ResourceVersion or check for conflicts.
	// Instead, we add a reactor that returns a conflict error on the first PDB
	// update and success after that.
	// failOnce guarantees the conflict is injected exactly once, on the first
	// update attempt; subsequent updates fall through to the default reaction.
	var failOnce sync.Once
	dc.coreClient.Fake.PrependReactor("update", "poddisruptionbudgets", func(a core.Action) (handled bool, obj runtime.Object, err error) {
		failOnce.Do(func() {
			// (B) Evict two pods and fail this update.
			evict(podNames[1], podNames[2])
			// handled=true short-circuits the default tracker reaction, so
			// the controller's write is dropped and it sees only the conflict.
			handled = true
			err = errors.NewConflict(a.GetResource().GroupResource(), pdb.Name, fmt.Errorf("conflict"))
		})
		return handled, obj, err
	})

	// (A) Delete one pod
	if err := dc.coreClient.CoreV1().Pods("default").Delete(context.TODO(), podNames[0], metav1.DeleteOptions{}); err != nil {
		t.Fatal(err)
	}
	if err := waitForCacheCount(dc.podStore, len(podNames)-1); err != nil {
		t.Fatalf("Failed to verify pods in informer cache: %v", err)
	}

	// The sync() function should either write a correct status which takes the
	// evictions into account, or re-queue the PDB for another sync (by returning
	// an error)
	if err := dc.sync(pdbKey); err != nil {
		t.Logf("sync() returned with error: %v", err)
	} else {
		t.Logf("sync() returned with no error")
	}

	// (C) Whether or not sync() returned an error, the PDB status should reflect
	// the evictions that took place.
	finalPDB, err := dc.coreClient.PolicyV1().PodDisruptionBudgets("default").Get(context.TODO(), pdb.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get PDB: %v", err)
	}
	if expected, actual := int32(0), finalPDB.Status.DisruptionsAllowed; expected != actual {
		t.Errorf("DisruptionsAllowed should be %d, got %d", expected, actual)
	}
}
1310
1311func TestInvalidSelectors(t *testing.T) {
1312	testCases := map[string]struct {
1313		labelSelector *metav1.LabelSelector
1314	}{
1315		"illegal value key": {
1316			labelSelector: &metav1.LabelSelector{
1317				MatchLabels: map[string]string{
1318					"k8s.io/too/many/slashes": "value",
1319				},
1320			},
1321		},
1322		"illegal operator": {
1323			labelSelector: &metav1.LabelSelector{
1324				MatchExpressions: []metav1.LabelSelectorRequirement{
1325					{
1326						Key:      "foo",
1327						Operator: metav1.LabelSelectorOperator("illegal"),
1328						Values:   []string{"bar"},
1329					},
1330				},
1331			},
1332		},
1333	}
1334
1335	for tn, tc := range testCases {
1336		t.Run(tn, func(t *testing.T) {
1337			dc, ps := newFakeDisruptionController()
1338
1339			pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
1340			pdb.Spec.Selector = tc.labelSelector
1341
1342			add(t, dc.pdbStore, pdb)
1343			dc.sync(pdbName)
1344			ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})
1345		})
1346	}
1347}
1348
1349// waitForCacheCount blocks until the given cache store has the desired number
1350// of items in it. This will return an error if the condition is not met after a
1351// 10 second timeout.
1352func waitForCacheCount(store cache.Store, n int) error {
1353	return wait.Poll(10*time.Millisecond, 10*time.Second, func() (bool, error) {
1354		return len(store.List()) == n, nil
1355	})
1356}
1357
1358// TestMain adds klog flags to make debugging tests easier.
1359func TestMain(m *testing.M) {
1360	klog.InitFlags(flag.CommandLine)
1361	os.Exit(m.Run())
1362}
1363