/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"context"
	"encoding/json"
	"fmt"
	"path"
	"regexp"
	"strings"
	"time"

	authenticationv1 "k8s.io/api/authentication/v1"
	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	watch "k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	imageutils "k8s.io/kubernetes/test/utils/image"
	utilptr "k8s.io/utils/pointer"

	"github.com/onsi/ginkgo"
)

var _ = SIGDescribe("ServiceAccounts", func() {
	f := framework.NewDefaultFramework("svcaccounts")

	ginkgo.It("should ensure a single API token exists", func() {
		// wait for the service account to reference a single secret
		var secrets []v1.ObjectReference
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
			ginkgo.By("waiting for a single token reference")
			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				framework.Logf("default service account was not found")
				return false, nil
			}
			if err != nil {
				framework.Logf("error getting default service account: %v", err)
				return false, err
			}
			switch len(sa.Secrets) {
			case 0:
				framework.Logf("default service account has no secret references")
				return false, nil
			case 1:
				framework.Logf("default service account has a single secret reference")
				secrets = sa.Secrets
				return true, nil
			default:
				return false, fmt.Errorf("default service account has too many secret references: %#v", sa.Secrets)
			}
		}))

		// make sure the reference doesn't flutter
		{
			ginkgo.By("ensuring the single token reference persists")
			time.Sleep(2 * time.Second)
			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
			framework.ExpectNoError(err)
			framework.ExpectEqual(sa.Secrets, secrets)
		}

		// delete the referenced secret
		ginkgo.By("deleting the service account token")
		framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secrets[0].Name, metav1.DeleteOptions{}))

		// wait for the referenced secret to be removed, and another one autocreated
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
			ginkgo.By("waiting for a new token reference")
			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
			if err != nil {
				framework.Logf("error getting default service account: %v", err)
				return false, err
			}
			switch len(sa.Secrets) {
			case 0:
				framework.Logf("default service account has no secret references")
				return false, nil
			case 1:
				if sa.Secrets[0] == secrets[0] {
					framework.Logf("default service account still has the deleted secret reference")
					return false, nil
				}
				framework.Logf("default service account has a new single secret reference")
				secrets = sa.Secrets
				return true, nil
			default:
				return false, fmt.Errorf("default service account has too many secret references: %#v", sa.Secrets)
			}
		}))

		// make sure the reference doesn't flutter
		{
			ginkgo.By("ensuring the single token reference persists")
			time.Sleep(2 * time.Second)
			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
			framework.ExpectNoError(err)
			framework.ExpectEqual(sa.Secrets, secrets)
		}

		// delete the reference from the service account
		ginkgo.By("deleting the reference to the service account token")
		{
			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
			framework.ExpectNoError(err)
			sa.Secrets = nil
			_, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(context.TODO(), sa, metav1.UpdateOptions{})
			framework.ExpectNoError(updateErr)
		}

		// wait for another one to be autocreated
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
			ginkgo.By("waiting for a new token to be created and added")
			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
			if err != nil {
				framework.Logf("error getting default service account: %v", err)
				return false, err
			}
			switch len(sa.Secrets) {
			case 0:
				framework.Logf("default service account has no secret references")
				return false, nil
			case 1:
				framework.Logf("default service account has a new single secret reference")
				secrets = sa.Secrets
				return true, nil
			default:
				return false, fmt.Errorf("default service account has too many secret references: %#v", sa.Secrets)
			}
		}))

		// make sure the reference doesn't flutter
		{
			ginkgo.By("ensuring the single token reference persists")
			time.Sleep(2 * time.Second)
			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
			framework.ExpectNoError(err)
			framework.ExpectEqual(sa.Secrets, secrets)
		}
	})

	/*
	   Release: v1.9
	   Testname: Service Account Tokens Must AutoMount
	   Description: Ensure that Service Account keys are mounted into the Container.
	                The Pod's container reads the Service Account token, the root CA,
	                and the default namespace from the default API token mount path.
	                All three files MUST exist and the Service Account mount path
	                MUST be auto mounted into the Container.
	*/
	framework.ConformanceIt("should mount an API token into pods ", func() {
		var rootCAContent string

		sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		// Standard get, update retry loop
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
			ginkgo.By("getting the auto-created API token")
			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "mount-test", metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				framework.Logf("mount-test service account was not found")
				return false, nil
			}
			if err != nil {
				framework.Logf("error getting mount-test service account: %v", err)
				return false, err
			}
			if len(sa.Secrets) == 0 {
				framework.Logf("mount-test service account has no secret references")
				return false, nil
			}
			for _, secretRef := range sa.Secrets {
				secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
				if err != nil {
					framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
					continue
				}
				if secret.Type == v1.SecretTypeServiceAccountToken {
					rootCAContent = string(secret.Data[v1.ServiceAccountRootCAKey])
					return true, nil
				}
			}

			framework.Logf("mount-test service account has no secret references to valid service account tokens")
			return false, nil
		}))

		zero := int64(0)
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod-service-account-" + string(uuid.NewUUID()),
			},
			Spec: v1.PodSpec{
				ServiceAccountName: sa.Name,
				Containers: []v1.Container{{
					Name:    "test",
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"sleep", "100000"},
				}},
				TerminationGracePeriodSeconds: &zero,
				RestartPolicy:                 v1.RestartPolicyNever,
			},
		}, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod))

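		// Read the token, root CA, and namespace files back out of the running
		// container from the default API token mount path, so they can be
		// compared against the expected values below.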
		tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, f.Namespace.Name)
		mountedToken, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey))
		framework.ExpectNoError(err)
		mountedCA, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey))
		framework.ExpectNoError(err)
		mountedNamespace, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey))
		framework.ExpectNoError(err)

		// CA and namespace should be identical
		framework.ExpectEqual(mountedCA, rootCAContent)
		framework.ExpectEqual(mountedNamespace, f.Namespace.Name)
		// Token should be a valid credential that identifies the pod's service account
		tokenReview := &authenticationv1.TokenReview{Spec: authenticationv1.TokenReviewSpec{Token: mountedToken}}
		tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(context.TODO(), tokenReview, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(tokenReview.Status.Authenticated, true)
		framework.ExpectEqual(tokenReview.Status.Error, "")
		framework.ExpectEqual(tokenReview.Status.User.Username, "system:serviceaccount:"+f.Namespace.Name+":"+sa.Name)
		groups := sets.NewString(tokenReview.Status.User.Groups...)
		framework.ExpectEqual(groups.Has("system:authenticated"), true, fmt.Sprintf("expected system:authenticated group, had %v", groups.List()))
		framework.ExpectEqual(groups.Has("system:serviceaccounts"), true, fmt.Sprintf("expected system:serviceaccounts group, had %v", groups.List()))
		framework.ExpectEqual(groups.Has("system:serviceaccounts:"+f.Namespace.Name), true, fmt.Sprintf("expected system:serviceaccounts:"+f.Namespace.Name+" group, had %v", groups.List()))
	})

	/*
	   Release: v1.9
	   Testname: Service account tokens auto mount optionally
	   Description: Ensure that Service Account keys are mounted into the Pod only
	                when AutomountServiceAccountToken is not set to false. We test the
	                following scenarios here.
	   1. Create Pod, Pod Spec has AutomountServiceAccountToken set to nil
	      a) Service Account with default value,
	      b) Service Account is configured with AutomountServiceAccountToken set to true,
	      c) Service Account is configured with AutomountServiceAccountToken set to false
	   2. Create Pod, Pod Spec has AutomountServiceAccountToken set to true
	      a) Service Account with default value,
	      b) Service Account is configured with AutomountServiceAccountToken set to true,
	      c) Service Account is configured with AutomountServiceAccountToken set to false
	   3. Create Pod, Pod Spec has AutomountServiceAccountToken set to false
	      a) Service Account with default value,
	      b) Service Account is configured with AutomountServiceAccountToken set to true,
	      c) Service Account is configured with AutomountServiceAccountToken set to false

	   The Containers running in these pods MUST verify that the ServiceTokenVolume path is
	   auto mounted only when the Pod Spec has AutomountServiceAccountToken not set to false
	   and the ServiceAccount object has AutomountServiceAccountToken not set to false; this
	   includes test cases 1a, 1b, 2a, 2b and 2c.
	   In test cases 1c, 3a, 3b and 3c the ServiceTokenVolume MUST NOT be auto mounted.
	*/
	framework.ConformanceIt("should allow opting out of API token automount ", func() {

		var err error
		trueValue := true
		falseValue := false
		mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue}
		nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue}
		mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), mountSA, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), nomountSA, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		// Standard get, update retry loop
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
			ginkgo.By("getting the auto-created API token")
			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), mountSA.Name, metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				framework.Logf("mount service account was not found")
				return false, nil
			}
			if err != nil {
				framework.Logf("error getting mount service account: %v", err)
				return false, err
			}
			if len(sa.Secrets) == 0 {
				framework.Logf("mount service account has no secret references")
				return false, nil
			}
			for _, secretRef := range sa.Secrets {
				secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
				if err != nil {
					framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
					continue
				}
				if secret.Type == v1.SecretTypeServiceAccountToken {
					return true, nil
				}
			}

			framework.Logf("mount service account has no secret references to valid service account tokens")
			return false, nil
		}))

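		// Each case below pairs a ServiceAccount-level automount setting with a
		// pod-spec-level setting; when the pod spec sets AutomountServiceAccountToken
		// explicitly, it takes precedence over the ServiceAccount's setting.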
		testcases := []struct {
			PodName            string
			ServiceAccountName string
			AutomountPodSpec   *bool
			ExpectTokenVolume  bool
		}{
			{
				PodName:            "pod-service-account-defaultsa",
				ServiceAccountName: "default",
				AutomountPodSpec:   nil,
				ExpectTokenVolume:  true, // default is true
			},
			{
				PodName:            "pod-service-account-mountsa",
				ServiceAccountName: mountSA.Name,
				AutomountPodSpec:   nil,
				ExpectTokenVolume:  true,
			},
			{
				PodName:            "pod-service-account-nomountsa",
				ServiceAccountName: nomountSA.Name,
				AutomountPodSpec:   nil,
				ExpectTokenVolume:  false,
			},

			// Make sure pod spec trumps when opting in
			{
				PodName:            "pod-service-account-defaultsa-mountspec",
				ServiceAccountName: "default",
				AutomountPodSpec:   &trueValue,
				ExpectTokenVolume:  true,
			},
			{
				PodName:            "pod-service-account-mountsa-mountspec",
				ServiceAccountName: mountSA.Name,
				AutomountPodSpec:   &trueValue,
				ExpectTokenVolume:  true,
			},
			{
				PodName:            "pod-service-account-nomountsa-mountspec",
				ServiceAccountName: nomountSA.Name,
				AutomountPodSpec:   &trueValue,
				ExpectTokenVolume:  true, // pod spec trumps
			},

			// Make sure pod spec trumps when opting out
			{
				PodName:            "pod-service-account-defaultsa-nomountspec",
				ServiceAccountName: "default",
				AutomountPodSpec:   &falseValue,
				ExpectTokenVolume:  false, // pod spec trumps
			},
			{
				PodName:            "pod-service-account-mountsa-nomountspec",
				ServiceAccountName: mountSA.Name,
				AutomountPodSpec:   &falseValue,
				ExpectTokenVolume:  false, // pod spec trumps
			},
			{
				PodName:            "pod-service-account-nomountsa-nomountspec",
				ServiceAccountName: nomountSA.Name,
				AutomountPodSpec:   &falseValue,
				ExpectTokenVolume:  false, // pod spec trumps
			},
		}

		for _, tc := range testcases {
			pod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: tc.PodName},
				Spec: v1.PodSpec{
					Containers:                   []v1.Container{{Name: "token-test", Image: imageutils.GetE2EImage(imageutils.Agnhost)}},
					RestartPolicy:                v1.RestartPolicyNever,
					ServiceAccountName:           tc.ServiceAccountName,
					AutomountServiceAccountToken: tc.AutomountPodSpec,
				},
			}
			createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			framework.Logf("created pod %s", tc.PodName)

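			// The serviceaccount admission plugin injects the token volume mount;
			// detect it by checking every container for a mount at the default
			// API token mount path.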
			hasServiceAccountTokenVolume := false
			for _, c := range createdPod.Spec.Containers {
				for _, vm := range c.VolumeMounts {
					if vm.MountPath == serviceaccount.DefaultAPITokenMountPath {
						hasServiceAccountTokenVolume = true
					}
				}
			}

			if hasServiceAccountTokenVolume != tc.ExpectTokenVolume {
				framework.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod)
			} else {
				framework.Logf("pod %s service account token volume mount: %v", tc.PodName, hasServiceAccountTokenVolume)
			}
		}
	})

	/*
	  Release: v1.20
	  Testname: TokenRequestProjection should mount a projected volume with token using TokenRequest API.
	  Description: Ensure that a projected service account token is mounted.
	*/
	framework.ConformanceIt("should mount projected service account token", func() {

		var (
			podName         = "test-pod-" + string(uuid.NewUUID())
			volumeName      = "test-volume"
			volumeMountPath = "/test-volume"
			tokenVolumePath = "/test-volume/token"
		)

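		// A projected volume with a serviceAccountToken source asks the kubelet to
		// request a token via the TokenRequest API and write it at
		// <volumeMountPath>/token, refreshing it before the requested expiration.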
		volumes := []v1.Volume{
			{
				Name: volumeName,
				VolumeSource: v1.VolumeSource{
					Projected: &v1.ProjectedVolumeSource{
						Sources: []v1.VolumeProjection{
							{
								ServiceAccountToken: &v1.ServiceAccountTokenProjection{
									Path:              "token",
									ExpirationSeconds: utilptr.Int64Ptr(60 * 60),
								},
							},
						},
					},
				},
			},
		}
		volumeMounts := []v1.VolumeMount{
			{
				Name:      volumeName,
				MountPath: volumeMountPath,
				ReadOnly:  true,
			},
		}
		mounttestArgs := []string{
			"mounttest",
			fmt.Sprintf("--file_content=%v", tokenVolumePath),
		}

		pod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, volumes, volumeMounts, nil, mounttestArgs...)
		pod.Spec.RestartPolicy = v1.RestartPolicyNever

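		// The expected file content is a JWT: three base64url-encoded segments
		// (header.payload.signature) separated by dots, which is what the regexp
		// below matches.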
		output := []string{
			fmt.Sprintf("content of file \"%v\": %s", tokenVolumePath, `[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*`),
		}

		f.TestContainerOutputRegexp("service account token: ", pod, 0, output)
	})

	/*
	   Testname: Projected service account token file ownership and permission.
	   Description: Ensure that the projected service account token is mounted with
	                the correct file ownership and permissions. We test the
	                following scenarios here.
	   1. RunAsUser is set,
	   2. FsGroup is set,
	   3. RunAsUser and FsGroup are set,
	   4. Default, neither RunAsUser nor FsGroup is set.

	   Containers MUST verify that the projected service account token can be
	   read and has the correct file mode set, including ownership and permission.
	*/
	ginkgo.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]", func() {
		e2eskipper.SkipIfNodeOSDistroIs("windows")

		var (
			podName         = "test-pod-" + string(uuid.NewUUID())
			volumeName      = "test-volume"
			volumeMountPath = "/test-volume"
			tokenVolumePath = "/test-volume/token"
		)

		volumes := []v1.Volume{
			{
				Name: volumeName,
				VolumeSource: v1.VolumeSource{
					Projected: &v1.ProjectedVolumeSource{
						Sources: []v1.VolumeProjection{
							{
								ServiceAccountToken: &v1.ServiceAccountTokenProjection{
									Path:              "token",
									ExpirationSeconds: utilptr.Int64Ptr(60 * 60),
								},
							},
						},
					},
				},
			},
		}
		volumeMounts := []v1.VolumeMount{
			{
				Name:      volumeName,
				MountPath: volumeMountPath,
				ReadOnly:  true,
			},
		}
		mounttestArgs := []string{
			"mounttest",
			fmt.Sprintf("--file_perm=%v", tokenVolumePath),
			fmt.Sprintf("--file_owner=%v", tokenVolumePath),
			fmt.Sprintf("--file_content=%v", tokenVolumePath),
		}

		pod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, volumes, volumeMounts, nil, mounttestArgs...)
		pod.Spec.RestartPolicy = v1.RestartPolicyNever

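		// Each case sets RunAsUser and/or FSGroup on the pod security context and
		// checks the resulting mode, owner UID, and group GID of the projected
		// token file as reported by mounttest.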
		testcases := []struct {
			runAsUser bool
			fsGroup   bool
			wantPerm  string
			wantUID   int64
			wantGID   int64
		}{
			{
				runAsUser: true,
				wantPerm:  "-rw-------",
				wantUID:   1000,
				wantGID:   0,
			},
			{
				fsGroup:  true,
				wantPerm: "-rw-r-----",
				wantUID:  0,
				wantGID:  10000,
			},
			{
				runAsUser: true,
				fsGroup:   true,
				wantPerm:  "-rw-r-----",
				wantUID:   1000,
				wantGID:   10000,
			},
			{
				wantPerm: "-rw-r--r--",
				wantUID:  0,
				wantGID:  0,
			},
		}

		for _, tc := range testcases {
			pod.Spec.SecurityContext = &v1.PodSecurityContext{}
			if tc.runAsUser {
				pod.Spec.SecurityContext.RunAsUser = &tc.wantUID
			}
			if tc.fsGroup {
				pod.Spec.SecurityContext.FSGroup = &tc.wantGID
			}

			output := []string{
				fmt.Sprintf("perms of file \"%v\": %s", tokenVolumePath, tc.wantPerm),
				fmt.Sprintf("content of file \"%v\": %s", tokenVolumePath, `[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*`),
				fmt.Sprintf("owner UID of \"%v\": %d", tokenVolumePath, tc.wantUID),
				fmt.Sprintf("owner GID of \"%v\": %d", tokenVolumePath, tc.wantGID),
			}
			f.TestContainerOutputRegexp("service account token: ", pod, 0, output)
		}
	})

	ginkgo.It("should support InClusterConfig with token rotation [Slow]", func() {
		tenMin := int64(10 * 60)
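		// Request a short (10 minute) expiration so the kubelet has to refresh the
		// projected token while the test is still polling, letting the test observe
		// more than one unique token in the client logs.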
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "inclusterclient",
					Image: imageutils.GetE2EImage(imageutils.Agnhost),
					Args:  []string{"inclusterclient"},
					VolumeMounts: []v1.VolumeMount{{
						MountPath: "/var/run/secrets/kubernetes.io/serviceaccount",
						Name:      "kube-api-access-e2e",
						ReadOnly:  true,
					}},
				}},
				RestartPolicy:      v1.RestartPolicyNever,
				ServiceAccountName: "default",
				Volumes: []v1.Volume{{
					Name: "kube-api-access-e2e",
					VolumeSource: v1.VolumeSource{
						Projected: &v1.ProjectedVolumeSource{
							Sources: []v1.VolumeProjection{
								{
									ServiceAccountToken: &v1.ServiceAccountTokenProjection{
										Path:              "token",
										ExpirationSeconds: &tenMin,
									},
								},
								{
									ConfigMap: &v1.ConfigMapProjection{
										LocalObjectReference: v1.LocalObjectReference{
											Name: "kube-root-ca.crt",
										},
										Items: []v1.KeyToPath{
											{
												Key:  "ca.crt",
												Path: "ca.crt",
											},
										},
									},
								},
								{
									DownwardAPI: &v1.DownwardAPIProjection{
										Items: []v1.DownwardAPIVolumeFile{
											{
												Path: "namespace",
												FieldRef: &v1.ObjectFieldSelector{
													APIVersion: "v1",
													FieldPath:  "metadata.namespace",
												},
											},
										},
									},
								},
							},
						},
					},
				}},
			},
		}
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		framework.Logf("created pod")
		if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
			framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
		}

		framework.Logf("pod is ready")

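		// The agnhost "inclusterclient" subcommand logs the Authorization header it
		// uses for its requests; ParseInClusterClientLogs counts the distinct
		// tokens, and seeing at least two proves the mounted token was rotated.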
		var logs string
		if err := wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) {
			framework.Logf("polling logs")
			logs, err = e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient")
			if err != nil {
				framework.Logf("Error pulling logs: %v", err)
				return false, nil
			}
			tokenCount, err := ParseInClusterClientLogs(logs)
			if err != nil {
				return false, fmt.Errorf("inclusterclient reported an error: %v", err)
			}
			if tokenCount < 2 {
				framework.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount)
				return false, nil
			}
			return true, nil
		}); err != nil {
			framework.Failf("Unexpected error: %v\n%s", err, logs)
		}
	})

	/*
	   Release: v1.21
	   Testname: OIDC Discovery (ServiceAccountIssuerDiscovery)
	   Description: Ensure kube-apiserver serves correct OIDC discovery
	   endpoints by deploying a Pod that verifies its own
	   token against these endpoints.
	*/
	framework.ConformanceIt("ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer", func() {

		// Allow the test pod access to the OIDC discovery non-resource URLs.
		// The role should have already been automatically created as part of the
		// RBAC bootstrap policy, but not the role binding. If RBAC is disabled,
		// we skip creating the binding. We also make sure we clean up the
		// binding after the test.
		const clusterRoleName = "system:service-account-issuer-discovery"
		crbName := fmt.Sprintf("%s-%s", f.Namespace.Name, clusterRoleName)
		if crb, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(
			context.TODO(),
			&rbacv1.ClusterRoleBinding{
				ObjectMeta: metav1.ObjectMeta{
					Name: crbName,
				},
				Subjects: []rbacv1.Subject{
					{
						Kind:      rbacv1.ServiceAccountKind,
						APIGroup:  "",
						Name:      "default",
						Namespace: f.Namespace.Name,
					},
				},
				RoleRef: rbacv1.RoleRef{
					Name:     clusterRoleName,
					APIGroup: rbacv1.GroupName,
					Kind:     "ClusterRole",
				},
			},
			metav1.CreateOptions{}); err != nil {
			// Tolerate RBAC not being enabled
			framework.Logf("error granting ClusterRoleBinding %s: %v", crbName, err)
		} else {
			defer func() {
				framework.ExpectNoError(
					f.ClientSet.RbacV1().ClusterRoleBindings().Delete(
						context.TODO(),
						crb.Name, metav1.DeleteOptions{}))
			}()
		}

		// Create the pod with tokens.
		tokenPath := "/var/run/secrets/tokens"
		tokenName := "sa-token"
		audience := "oidc-discovery-test"
		tenMin := int64(10 * 60)

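		// The validator pod below mounts an audience-scoped projected token and runs
		// the agnhost "test-service-account-issuer-discovery" subcommand, which is
		// expected to resolve the issuer's OIDC discovery metadata and verify the
		// mounted token against it.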
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "oidc-discovery-validator"},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "oidc-discovery-validator",
					Image: imageutils.GetE2EImage(imageutils.Agnhost),
					Args: []string{
						"test-service-account-issuer-discovery",
						"--token-path", path.Join(tokenPath, tokenName),
						"--audience", audience,
					},
					VolumeMounts: []v1.VolumeMount{{
						MountPath: tokenPath,
						Name:      tokenName,
						ReadOnly:  true,
					}},
				}},
				RestartPolicy:      v1.RestartPolicyNever,
				ServiceAccountName: "default",
				Volumes: []v1.Volume{{
					Name: tokenName,
					VolumeSource: v1.VolumeSource{
						Projected: &v1.ProjectedVolumeSource{
							Sources: []v1.VolumeProjection{
								{
									ServiceAccountToken: &v1.ServiceAccountTokenProjection{
										Path:              tokenName,
										ExpirationSeconds: &tenMin,
										Audience:          audience,
									},
								},
							},
						},
					},
				}},
			},
		}
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		framework.Logf("created pod")
		podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)

		// Get the logs before calling ExpectNoError, so we can debug any errors.
		var logs string
		if err := wait.Poll(30*time.Second, 2*time.Minute, func() (done bool, err error) {
			framework.Logf("polling logs")
			logs, err = e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
			if err != nil {
				framework.Logf("Error pulling logs: %v", err)
				return false, nil
			}
			return true, nil
		}); err != nil {
			framework.Failf("Unexpected error getting pod logs: %v\n%s", err, logs)
		} else {
			framework.Logf("Pod logs: \n%v", logs)
		}

		framework.ExpectNoError(podErr)
		framework.Logf("completed pod")
	})

	/*
	   Release: v1.19
	   Testname: ServiceAccount lifecycle test
	   Description: Creates a ServiceAccount with a static label, which MUST be
	                observed in an added watch event. Patching the ServiceAccount
	                MUST return its new property. Listing the ServiceAccounts MUST
	                return the test ServiceAccount with its patched values. The
	                ServiceAccount MUST be deleted and the deletion MUST be
	                observed in a watch event.
	*/
	framework.ConformanceIt("should run through the lifecycle of a ServiceAccount", func() {
		testNamespaceName := f.Namespace.Name
		testServiceAccountName := "testserviceaccount"
		testServiceAccountStaticLabels := map[string]string{"test-serviceaccount-static": "true"}
		testServiceAccountStaticLabelsFlat := "test-serviceaccount-static=true"

		ginkgo.By("creating a ServiceAccount")
		testServiceAccount := v1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
				Name:   testServiceAccountName,
				Labels: testServiceAccountStaticLabels,
			},
		}
		_, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Create(context.TODO(), &testServiceAccount, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create a ServiceAccount")

		ginkgo.By("watching for the ServiceAccount to be added")
		resourceWatchTimeoutSeconds := int64(180)
		resourceWatch, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Watch(context.TODO(), metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat, TimeoutSeconds: &resourceWatchTimeoutSeconds})
		framework.ExpectNoError(err, "failed to setup watch on newly created ServiceAccount")

		resourceWatchChan := resourceWatch.ResultChan()
		eventFound := false
		for watchEvent := range resourceWatchChan {
			if watchEvent.Type == watch.Added {
				eventFound = true
				break
			}
		}
		framework.ExpectEqual(eventFound, true, "failed to find %v event", watch.Added)

		ginkgo.By("patching the ServiceAccount")
		boolFalse := false
		testServiceAccountPatchData, err := json.Marshal(v1.ServiceAccount{
			AutomountServiceAccountToken: &boolFalse,
		})
		framework.ExpectNoError(err, "failed to marshal JSON patch for the ServiceAccount")
		_, err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Patch(context.TODO(), testServiceAccountName, types.StrategicMergePatchType, []byte(testServiceAccountPatchData), metav1.PatchOptions{})
		framework.ExpectNoError(err, "failed to patch the ServiceAccount")
		eventFound = false
		for watchEvent := range resourceWatchChan {
			if watchEvent.Type == watch.Modified {
				eventFound = true
				break
			}
		}
		framework.ExpectEqual(eventFound, true, "failed to find %v event", watch.Modified)

		ginkgo.By("finding ServiceAccount in list of all ServiceAccounts (by LabelSelector)")
		serviceAccountList, err := f.ClientSet.CoreV1().ServiceAccounts("").List(context.TODO(), metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat})
		framework.ExpectNoError(err, "failed to list ServiceAccounts by LabelSelector")
		foundServiceAccount := false
		for _, serviceAccountItem := range serviceAccountList.Items {
			if serviceAccountItem.ObjectMeta.Name == testServiceAccountName && serviceAccountItem.ObjectMeta.Namespace == testNamespaceName && *serviceAccountItem.AutomountServiceAccountToken == boolFalse {
				foundServiceAccount = true
				break
			}
		}
		framework.ExpectEqual(foundServiceAccount, true, "failed to find the created ServiceAccount")

		ginkgo.By("deleting the ServiceAccount")
		err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat})
		framework.ExpectNoError(err, "failed to delete the ServiceAccount by Collection")
		eventFound = false
		for watchEvent := range resourceWatchChan {
			if watchEvent.Type == watch.Deleted {
				eventFound = true
				break
			}
		}
		framework.ExpectEqual(eventFound, true, "failed to find %v event", watch.Deleted)
	})

	/*
		Release: v1.21
		Testname: RootCA ConfigMap test
		Description: Ensure every namespace has a ConfigMap for the root CA cert.
			1. Created automatically
			2. Recreated if deleted
			3. Reconciled if modified
	*/
	framework.ConformanceIt("should guarantee kube-root-ca.crt exist in any namespace", func() {
		const rootCAConfigMapName = "kube-root-ca.crt"

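		// The root CA certificate publisher in kube-controller-manager is expected
		// to populate this ConfigMap in every namespace; wait for it to appear here.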
		framework.ExpectNoError(wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
			_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
			if err == nil {
				return true, nil
			}
			if apierrors.IsNotFound(err) {
				ginkgo.By("root ca configmap not found, retrying")
				return false, nil
			}
			return false, err
		}))
		framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name)

		framework.ExpectNoError(f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), rootCAConfigMapName, metav1.DeleteOptions{GracePeriodSeconds: utilptr.Int64Ptr(0)}))
		framework.Logf("Deleted root ca configmap in namespace %q", f.Namespace.Name)

		framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
			ginkgo.By("waiting for a new root ca configmap to be created")
			_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
			if err == nil {
				return true, nil
			}
			if apierrors.IsNotFound(err) {
				ginkgo.By("root ca configmap not found, retrying")
				return false, nil
			}
			return false, err
		}))
		framework.Logf("Recreated root ca configmap in namespace %q", f.Namespace.Name)

		_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name: rootCAConfigMapName,
			},
			Data: map[string]string{
				"ca.crt": "",
			},
		}, metav1.UpdateOptions{})
		framework.ExpectNoError(err)
		framework.Logf("Updated root ca configmap in namespace %q", f.Namespace.Name)

		framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
			ginkgo.By("waiting for the root ca configmap to be reconciled")
			cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
			if err != nil {
				if apierrors.IsNotFound(err) {
					ginkgo.By("root ca configmap not found, retrying")
					return false, nil
				}
				return false, err
			}
			if value, ok := cm.Data["ca.crt"]; !ok || value == "" {
				ginkgo.By("root ca configmap is not reconciled yet, retrying")
				return false, nil
			}
			return true, nil
		}))
		framework.Logf("Reconciled root ca configmap in namespace %q", f.Namespace.Name)
	})
})

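// reportLogsParser extracts trailing "key=value" pairs (letters, digits, dashes,
// and underscores only) from inclusterclient log lines, e.g. "authz_header=<token>"
// or "status=failed".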
var reportLogsParser = regexp.MustCompile("([a-zA-Z0-9-_]*)=([a-zA-Z0-9-_]*)$")

// ParseInClusterClientLogs parses logs of pods using inclusterclient and returns
// the number of distinct bearer tokens seen in the reported Authorization headers.
func ParseInClusterClientLogs(logs string) (int, error) {
	seenTokens := map[string]struct{}{}

	lines := strings.Split(logs, "\n")
	for _, line := range lines {
		parts := reportLogsParser.FindStringSubmatch(line)
		if len(parts) != 3 {
			continue
		}

		key, value := parts[1], parts[2]
		switch key {
		case "authz_header":
			if value == "<empty>" {
				return 0, fmt.Errorf("saw empty Authorization header")
			}
			seenTokens[value] = struct{}{}
		case "status":
			if value == "failed" {
				return 0, fmt.Errorf("saw status=failed")
			}
		}
	}

	return len(seenTokens), nil
}