1/*
2Copyright 2014 The Kubernetes Authors.
3
4Licensed under the Apache License, Version 2.0 (the "License");
5you may not use this file except in compliance with the License.
6You may obtain a copy of the License at
7
8    http://www.apache.org/licenses/LICENSE-2.0
9
10Unless required by applicable law or agreed to in writing, software
11distributed under the License is distributed on an "AS IS" BASIS,
12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13See the License for the specific language governing permissions and
14limitations under the License.
15*/
16
17package validation
18
19import (
20	"encoding/json"
21	"fmt"
22	"math"
23	"net"
24	"path"
25	"path/filepath"
26	"reflect"
27	"regexp"
28	"strings"
29	"unicode"
30	"unicode/utf8"
31
32	v1 "k8s.io/api/core/v1"
33	apiequality "k8s.io/apimachinery/pkg/api/equality"
34	"k8s.io/apimachinery/pkg/api/resource"
35	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
36	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
37	unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
38	"k8s.io/apimachinery/pkg/labels"
39	"k8s.io/apimachinery/pkg/util/diff"
40	"k8s.io/apimachinery/pkg/util/intstr"
41	"k8s.io/apimachinery/pkg/util/sets"
42	"k8s.io/apimachinery/pkg/util/validation"
43	"k8s.io/apimachinery/pkg/util/validation/field"
44	utilfeature "k8s.io/apiserver/pkg/util/feature"
45	schedulinghelper "k8s.io/component-helpers/scheduling/corev1"
46	apiservice "k8s.io/kubernetes/pkg/api/service"
47	"k8s.io/kubernetes/pkg/apis/core"
48	"k8s.io/kubernetes/pkg/apis/core/helper"
49	podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
50	corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
51	"k8s.io/kubernetes/pkg/capabilities"
52	"k8s.io/kubernetes/pkg/cluster/ports"
53	"k8s.io/kubernetes/pkg/features"
54	"k8s.io/kubernetes/pkg/fieldpath"
55	"k8s.io/kubernetes/pkg/security/apparmor"
56	netutils "k8s.io/utils/net"
57)
58
// Common error-message fragments shared by the validations in this file.
const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg
const isInvalidQuotaResource string = `must be a standard resource for quota`
const fieldImmutableErrorMsg string = apimachineryvalidation.FieldImmutableErrorMsg
const isNotIntegerErrorMsg string = `must be an integer`
const isNotPositiveErrorMsg string = `must be greater than zero`

// pdPartitionErrorMsg is the message for an out-of-range GCE PD partition number.
var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255)

// fileModeErrorMsg is the message for an out-of-range volume file mode.
var fileModeErrorMsg = "must be a number between 0 and 0777 (octal), both inclusive"

// BannedOwners is a black list of object that are not allowed to be owners.
var BannedOwners = apimachineryvalidation.BannedOwners

// Validators for iSCSI initiator/target naming formats (IQN, EUI-64, NAA).
// NOTE(review): the IQN pattern has no leading '^' anchor, and the EUI/NAA
// patterns use an unescaped '.' after the prefix, so all three accept slightly
// looser input than the formal formats. Tightening them would make validation
// stricter for already-persisted objects — confirm compatibility before changing.
var iscsiInitiatorIqnRegex = regexp.MustCompile(`iqn\.\d{4}-\d{2}\.([[:alnum:]-.]+)(:[^,;*&$|\s]+)$`)
var iscsiInitiatorEuiRegex = regexp.MustCompile(`^eui.[[:alnum:]]{16}$`)
var iscsiInitiatorNaaRegex = regexp.MustCompile(`^naa.[[:alnum:]]{32}$`)

// allowedEphemeralContainerFields whitelists the Container fields that may be
// set on an ephemeral container; fields not listed here must remain unset.
var allowedEphemeralContainerFields = map[string]bool{
	"Name":                     true,
	"Image":                    true,
	"Command":                  true,
	"Args":                     true,
	"WorkingDir":               true,
	"EnvFrom":                  true,
	"Env":                      true,
	"VolumeMounts":             true,
	"TerminationMessagePath":   true,
	"TerminationMessagePolicy": true,
	"ImagePullPolicy":          true,
	"SecurityContext":          true,
	"Stdin":                    true,
	"StdinOnce":                true,
	"TTY":                      true,
}
92
93// ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue
94func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList {
95	allErrs := field.ErrorList{}
96	actualValue, found := meta.Labels[key]
97	if !found {
98		allErrs = append(allErrs, field.Required(fldPath.Child("labels").Key(key),
99			fmt.Sprintf("must be '%s'", expectedValue)))
100		return allErrs
101	}
102	if actualValue != expectedValue {
103		allErrs = append(allErrs, field.Invalid(fldPath.Child("labels").Key(key), meta.Labels,
104			fmt.Sprintf("must be '%s'", expectedValue)))
105	}
106	return allErrs
107}
108
109// ValidateAnnotations validates that a set of annotations are correctly defined.
110func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
111	return apimachineryvalidation.ValidateAnnotations(annotations, fldPath)
112}
113
114func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList {
115	allErrs := field.ErrorList{}
116	for _, msg := range validation.IsDNS1123Label(value) {
117		allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
118	}
119	return allErrs
120}
121
122// ValidateQualifiedName validates if name is what Kubernetes calls a "qualified name".
123func ValidateQualifiedName(value string, fldPath *field.Path) field.ErrorList {
124	allErrs := field.ErrorList{}
125	for _, msg := range validation.IsQualifiedName(value) {
126		allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
127	}
128	return allErrs
129}
130
131// ValidateDNS1123Subdomain validates that a name is a proper DNS subdomain.
132func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList {
133	allErrs := field.ErrorList{}
134	for _, msg := range validation.IsDNS1123Subdomain(value) {
135		allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
136	}
137	return allErrs
138}
139
140func ValidatePodSpecificAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
141	allErrs := field.ErrorList{}
142
143	if value, isMirror := annotations[core.MirrorPodAnnotationKey]; isMirror {
144		if len(spec.NodeName) == 0 {
145			allErrs = append(allErrs, field.Invalid(fldPath.Key(core.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set"))
146		}
147	}
148
149	if annotations[core.TolerationsAnnotationKey] != "" {
150		allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...)
151	}
152
153	if !opts.AllowInvalidPodDeletionCost {
154		if _, err := helper.GetDeletionCostFromPodAnnotations(annotations); err != nil {
155			allErrs = append(allErrs, field.Invalid(fldPath.Key(core.PodDeletionCost), annotations[core.PodDeletionCost], "must be a 32bit integer"))
156		}
157	}
158
159	allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...)
160	allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...)
161
162	return allErrs
163}
164
165// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations has valid data
166func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
167	allErrs := field.ErrorList{}
168
169	tolerations, err := helper.GetTolerationsFromPodAnnotations(annotations)
170	if err != nil {
171		allErrs = append(allErrs, field.Invalid(fldPath, core.TolerationsAnnotationKey, err.Error()))
172		return allErrs
173	}
174
175	if len(tolerations) > 0 {
176		allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(core.TolerationsAnnotationKey))...)
177	}
178
179	return allErrs
180}
181
182func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *core.Pod, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
183	allErrs := field.ErrorList{}
184	newAnnotations := newPod.Annotations
185	oldAnnotations := oldPod.Annotations
186	for k, oldVal := range oldAnnotations {
187		if newVal, exists := newAnnotations[k]; exists && newVal == oldVal {
188			continue // No change.
189		}
190		if strings.HasPrefix(k, v1.AppArmorBetaContainerAnnotationKeyPrefix) {
191			allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update AppArmor annotations"))
192		}
193		if k == core.MirrorPodAnnotationKey {
194			allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update mirror pod annotation"))
195		}
196	}
197	// Check for additions
198	for k := range newAnnotations {
199		if _, ok := oldAnnotations[k]; ok {
200			continue // No change.
201		}
202		if strings.HasPrefix(k, v1.AppArmorBetaContainerAnnotationKeyPrefix) {
203			allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add AppArmor annotations"))
204		}
205		if k == core.MirrorPodAnnotationKey {
206			allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add mirror pod annotation"))
207		}
208	}
209	allErrs = append(allErrs, ValidatePodSpecificAnnotations(newAnnotations, &newPod.Spec, fldPath, opts)...)
210	return allErrs
211}
212
213func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
214	allErrs := field.ErrorList{}
215	return allErrs
216}
217
// ValidateNameFunc validates that the provided name is valid for a given resource type.
// Not all resources have the same validation rules for names. Prefix is true
// if the name will have a value appended to it.  If the name is not valid,
// this returns a list of descriptions of individual characteristics of the
// value that were not valid.  Otherwise this returns an empty list or nil.
// This is a local alias of the apimachinery type so callers of this package
// do not need to import apimachinery validation directly.
type ValidateNameFunc apimachineryvalidation.ValidateNameFunc
224
// Name validators for core API resources. Most are thin aliases of the shared
// apimachinery name validators (DNS subdomain / DNS-1035 label rules).

// ValidatePodName can be used to check whether the given pod name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidatePodName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateReplicationControllerName can be used to check whether the given replication
// controller name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateReplicationControllerName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateServiceName can be used to check whether the given service name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateServiceName = apimachineryvalidation.NameIsDNS1035Label

// ValidateNodeName can be used to check whether the given node name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateNodeName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateNamespaceName can be used to check whether the given namespace name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateNamespaceName = apimachineryvalidation.ValidateNamespaceName

// ValidateLimitRangeName can be used to check whether the given limit range name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateLimitRangeName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateResourceQuotaName can be used to check whether the given
// resource quota name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateResourceQuotaName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateSecretName can be used to check whether the given secret name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateSecretName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateServiceAccountName can be used to check whether the given service account name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateServiceAccountName = apimachineryvalidation.ValidateServiceAccountName

// ValidateEndpointsName can be used to check whether the given endpoints name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateEndpointsName = apimachineryvalidation.NameIsDNSSubdomain

// ValidateClusterName can be used to check whether the given cluster name is valid.
var ValidateClusterName = apimachineryvalidation.ValidateClusterName

// ValidateClassName can be used to check whether the given class name is valid.
// It is defined here to avoid import cycle between pkg/apis/storage/validation
// (where it should be) and this file.
var ValidateClassName = apimachineryvalidation.NameIsDNSSubdomain

// ValidatePriorityClassName can be used to check whether the given priority
// class name is valid.
var ValidatePriorityClassName = apimachineryvalidation.NameIsDNSSubdomain
288
289// ValidateRuntimeClassName can be used to check whether the given RuntimeClass name is valid.
290// Prefix indicates this name will be used as part of generation, in which case
291// trailing dashes are allowed.
292func ValidateRuntimeClassName(name string, fldPath *field.Path) field.ErrorList {
293	var allErrs field.ErrorList
294	for _, msg := range apimachineryvalidation.NameIsDNSSubdomain(name, false) {
295		allErrs = append(allErrs, field.Invalid(fldPath, name, msg))
296	}
297	return allErrs
298}
299
300// validateOverhead can be used to check whether the given Overhead is valid.
301func validateOverhead(overhead core.ResourceList, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
302	// reuse the ResourceRequirements validation logic
303	return ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead}, fldPath, opts)
304}
305
306// Validates that given value is not negative.
307func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList {
308	return apimachineryvalidation.ValidateNonnegativeField(value, fldPath)
309}
310
311// Validates that a Quantity is not negative
312func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList {
313	allErrs := field.ErrorList{}
314	if value.Cmp(resource.Quantity{}) < 0 {
315		allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg))
316	}
317	return allErrs
318}
319
320// Validates that a Quantity is positive
321func ValidatePositiveQuantityValue(value resource.Quantity, fldPath *field.Path) field.ErrorList {
322	allErrs := field.ErrorList{}
323	if value.Cmp(resource.Quantity{}) <= 0 {
324		allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNotPositiveErrorMsg))
325	}
326	return allErrs
327}
328
329func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {
330	return apimachineryvalidation.ValidateImmutableField(newVal, oldVal, fldPath)
331}
332
333func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string, fldPath *field.Path) field.ErrorList {
334	allErrs := field.ErrorList{}
335
336	if oldVal != newVal {
337		allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations", annotation), newVal, fieldImmutableErrorMsg))
338	}
339	return allErrs
340}
341
342// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
343// been performed.
344// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
345// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate.
346func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
347	allErrs := apimachineryvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath)
348	// run additional checks for the finalizer name
349	for i := range meta.Finalizers {
350		allErrs = append(allErrs, validateKubeFinalizerName(string(meta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...)
351	}
352	return allErrs
353}
354
355// ValidateObjectMetaUpdate validates an object's metadata when updated
356func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
357	allErrs := apimachineryvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath)
358	// run additional checks for the finalizer name
359	for i := range newMeta.Finalizers {
360		allErrs = append(allErrs, validateKubeFinalizerName(string(newMeta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...)
361	}
362
363	return allErrs
364}
365
// ValidateVolumes validates the volumes of a pod spec. It returns the map of
// volumes that passed validation (name -> source) together with all errors.
// podMeta may be nil; the ephemeral-PVC cross-check is skipped in that case.
func ValidateVolumes(volumes []core.Volume, podMeta *metav1.ObjectMeta, fldPath *field.Path, opts PodValidationOptions) (map[string]core.VolumeSource, field.ErrorList) {
	allErrs := field.ErrorList{}

	allNames := sets.String{}
	allCreatedPVCs := sets.String{}
	// Determine which PVCs will be created for this pod. We need
	// the exact name of the pod for this. Without it, this sanity
	// check has to be skipped.
	if podMeta != nil && podMeta.Name != "" {
		for _, vol := range volumes {
			if vol.VolumeSource.Ephemeral != nil {
				// An ephemeral volume creates a PVC named "<pod>-<volume>".
				allCreatedPVCs.Insert(podMeta.Name + "-" + vol.Name)
			}
		}
	}
	vols := make(map[string]core.VolumeSource)
	for i, vol := range volumes {
		idxPath := fldPath.Index(i)
		namePath := idxPath.Child("name")
		el := validateVolumeSource(&vol.VolumeSource, idxPath, vol.Name, podMeta, opts)
		if len(vol.Name) == 0 {
			el = append(el, field.Required(namePath, ""))
		} else {
			el = append(el, ValidateDNS1123Label(vol.Name, namePath)...)
		}
		if allNames.Has(vol.Name) {
			el = append(el, field.Duplicate(namePath, vol.Name))
		}
		// Only fully-valid volumes are recorded in the result map and the
		// duplicate-name set; invalid ones just contribute their errors.
		if len(el) == 0 {
			allNames.Insert(vol.Name)
			vols[vol.Name] = vol.VolumeSource
		} else {
			allErrs = append(allErrs, el...)
		}
		// A PersistentVolumeClaimSource should not reference a created PVC. That doesn't
		// make sense.
		if vol.PersistentVolumeClaim != nil && allCreatedPVCs.Has(vol.PersistentVolumeClaim.ClaimName) {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("persistentVolumeClaim").Child("claimName"), vol.PersistentVolumeClaim.ClaimName,
				"must not reference a PVC that gets created for an ephemeral volume"))
		}
	}

	return vols, allErrs
}
410
411func IsMatchedVolume(name string, volumes map[string]core.VolumeSource) bool {
412	if _, ok := volumes[name]; ok {
413		return true
414	}
415	return false
416}
417
418func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (bool, bool) {
419	if source, ok := volumes[name]; ok {
420		if source.PersistentVolumeClaim != nil {
421			return true, true
422		}
423		return true, false
424	}
425	return false, false
426}
427
// mountNameAlreadyExists reports whether a device with the given name exists in devices.
func mountNameAlreadyExists(name string, devices map[string]string) bool {
	// Idiomatic comma-ok lookup replaces the previous if/return-true/return-false form.
	_, ok := devices[name]
	return ok
}
434
// mountPathAlreadyExists reports whether mountPath is already used as a device
// path by any entry in devices.
func mountPathAlreadyExists(mountPath string, devices map[string]string) bool {
	for _, existingPath := range devices {
		if existingPath == mountPath {
			return true
		}
	}
	return false
}
444
// deviceNameAlreadyExists reports whether a mount with the given name exists in mounts.
func deviceNameAlreadyExists(name string, mounts map[string]string) bool {
	// Idiomatic comma-ok lookup replaces the previous if/return-true/return-false form.
	_, ok := mounts[name]
	return ok
}
451
// devicePathAlreadyExists reports whether devicePath is already used as a
// mount path by any entry in mounts.
func devicePathAlreadyExists(devicePath string, mounts map[string]string) bool {
	for _, existingPath := range mounts {
		if existingPath == devicePath {
			return true
		}
	}
	return false
}
461
462func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string, podMeta *metav1.ObjectMeta, opts PodValidationOptions) field.ErrorList {
463	numVolumes := 0
464	allErrs := field.ErrorList{}
465	if source.EmptyDir != nil {
466		numVolumes++
467		if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) < 0 {
468			allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field must be a valid resource quantity"))
469		}
470	}
471	if source.HostPath != nil {
472		if numVolumes > 0 {
473			allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type"))
474		} else {
475			numVolumes++
476			allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...)
477		}
478	}
479	if source.GitRepo != nil {
480		if numVolumes > 0 {
481			allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type"))
482		} else {
483			numVolumes++
484			allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...)
485		}
486	}
487	if source.GCEPersistentDisk != nil {
488		if numVolumes > 0 {
489			allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type"))
490		} else {
491			numVolumes++
492			allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("persistentDisk"))...)
493		}
494	}
495	if source.AWSElasticBlockStore != nil {
496		if numVolumes > 0 {
497			allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
498		} else {
499			numVolumes++
500			allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...)
501		}
502	}
503	if source.Secret != nil {
504		if numVolumes > 0 {
505			allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type"))
506		} else {
507			numVolumes++
508			allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...)
509		}
510	}
511	if source.NFS != nil {
512		if numVolumes > 0 {
513			allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type"))
514		} else {
515			numVolumes++
516			allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...)
517		}
518	}
519	if source.ISCSI != nil {
520		if numVolumes > 0 {
521			allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type"))
522		} else {
523			numVolumes++
524			allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...)
525		}
526		if source.ISCSI.InitiatorName != nil && len(volName+":"+source.ISCSI.TargetPortal) > 64 {
527			tooLongErr := "Total length of <volume name>:<iscsi.targetPortal> must be under 64 characters if iscsi.initiatorName is specified."
528			allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), volName, tooLongErr))
529		}
530	}
531	if source.Glusterfs != nil {
532		if numVolumes > 0 {
533			allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type"))
534		} else {
535			numVolumes++
536			allErrs = append(allErrs, validateGlusterfsVolumeSource(source.Glusterfs, fldPath.Child("glusterfs"))...)
537		}
538	}
539	if source.Flocker != nil {
540		if numVolumes > 0 {
541			allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type"))
542		} else {
543			numVolumes++
544			allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...)
545		}
546	}
547	if source.PersistentVolumeClaim != nil {
548		if numVolumes > 0 {
549			allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type"))
550		} else {
551			numVolumes++
552			allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...)
553		}
554	}
555	if source.RBD != nil {
556		if numVolumes > 0 {
557			allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type"))
558		} else {
559			numVolumes++
560			allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...)
561		}
562	}
563	if source.Cinder != nil {
564		if numVolumes > 0 {
565			allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type"))
566		} else {
567			numVolumes++
568			allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...)
569		}
570	}
571	if source.CephFS != nil {
572		if numVolumes > 0 {
573			allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type"))
574		} else {
575			numVolumes++
576			allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...)
577		}
578	}
579	if source.Quobyte != nil {
580		if numVolumes > 0 {
581			allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type"))
582		} else {
583			numVolumes++
584			allErrs = append(allErrs, validateQuobyteVolumeSource(source.Quobyte, fldPath.Child("quobyte"))...)
585		}
586	}
587	if source.DownwardAPI != nil {
588		if numVolumes > 0 {
589			allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwarAPI"), "may not specify more than 1 volume type"))
590		} else {
591			numVolumes++
592			allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"), opts)...)
593		}
594	}
595	if source.FC != nil {
596		if numVolumes > 0 {
597			allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type"))
598		} else {
599			numVolumes++
600			allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...)
601		}
602	}
603	if source.FlexVolume != nil {
604		if numVolumes > 0 {
605			allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specify more than 1 volume type"))
606		} else {
607			numVolumes++
608			allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...)
609		}
610	}
611	if source.ConfigMap != nil {
612		if numVolumes > 0 {
613			allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type"))
614		} else {
615			numVolumes++
616			allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...)
617		}
618	}
619
620	if source.AzureFile != nil {
621		if numVolumes > 0 {
622			allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type"))
623		} else {
624			numVolumes++
625			allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...)
626		}
627	}
628
629	if source.VsphereVolume != nil {
630		if numVolumes > 0 {
631			allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
632		} else {
633			numVolumes++
634			allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...)
635		}
636	}
637	if source.PhotonPersistentDisk != nil {
638		if numVolumes > 0 {
639			allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type"))
640		} else {
641			numVolumes++
642			allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(source.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...)
643		}
644	}
645	if source.PortworxVolume != nil {
646		if numVolumes > 0 {
647			allErrs = append(allErrs, field.Forbidden(fldPath.Child("portworxVolume"), "may not specify more than 1 volume type"))
648		} else {
649			numVolumes++
650			allErrs = append(allErrs, validatePortworxVolumeSource(source.PortworxVolume, fldPath.Child("portworxVolume"))...)
651		}
652	}
653	if source.AzureDisk != nil {
654		if numVolumes > 0 {
655			allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type"))
656		} else {
657			numVolumes++
658			allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...)
659		}
660	}
661	if source.StorageOS != nil {
662		if numVolumes > 0 {
663			allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageos"), "may not specify more than 1 volume type"))
664		} else {
665			numVolumes++
666			allErrs = append(allErrs, validateStorageOSVolumeSource(source.StorageOS, fldPath.Child("storageos"))...)
667		}
668	}
669	if source.Projected != nil {
670		if numVolumes > 0 {
671			allErrs = append(allErrs, field.Forbidden(fldPath.Child("projected"), "may not specify more than 1 volume type"))
672		} else {
673			numVolumes++
674			allErrs = append(allErrs, validateProjectedVolumeSource(source.Projected, fldPath.Child("projected"), opts)...)
675		}
676	}
677	if source.ScaleIO != nil {
678		if numVolumes > 0 {
679			allErrs = append(allErrs, field.Forbidden(fldPath.Child("scaleIO"), "may not specify more than 1 volume type"))
680		} else {
681			numVolumes++
682			allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...)
683		}
684	}
685	if source.CSI != nil {
686		if numVolumes > 0 {
687			allErrs = append(allErrs, field.Forbidden(fldPath.Child("csi"), "may not specify more than 1 volume type"))
688		} else {
689			numVolumes++
690			allErrs = append(allErrs, validateCSIVolumeSource(source.CSI, fldPath.Child("csi"))...)
691		}
692	}
693	if source.Ephemeral != nil {
694		if numVolumes > 0 {
695			allErrs = append(allErrs, field.Forbidden(fldPath.Child("ephemeral"), "may not specify more than 1 volume type"))
696		} else {
697			numVolumes++
698			allErrs = append(allErrs, validateEphemeralVolumeSource(source.Ephemeral, fldPath.Child("ephemeral"))...)
699			// Check the expected name for the PVC. This gets skipped if information is missing,
700			// because that already gets flagged as a problem elsewhere. For example,
701			// ValidateObjectMeta as called by validatePodMetadataAndSpec checks that the name is set.
702			if podMeta != nil && podMeta.Name != "" && volName != "" {
703				pvcName := podMeta.Name + "-" + volName
704				for _, msg := range ValidatePersistentVolumeName(pvcName, false) {
705					allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), volName, fmt.Sprintf("PVC name %q: %v", pvcName, msg)))
706				}
707			}
708		}
709	}
710
711	if numVolumes == 0 {
712		allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
713	}
714
715	return allErrs
716}
717
718func validateHostPathVolumeSource(hostPath *core.HostPathVolumeSource, fldPath *field.Path) field.ErrorList {
719	allErrs := field.ErrorList{}
720	if len(hostPath.Path) == 0 {
721		allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
722		return allErrs
723	}
724
725	allErrs = append(allErrs, validatePathNoBacksteps(hostPath.Path, fldPath.Child("path"))...)
726	allErrs = append(allErrs, validateHostPathType(hostPath.Type, fldPath.Child("type"))...)
727	return allErrs
728}
729
730func validateGitRepoVolumeSource(gitRepo *core.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList {
731	allErrs := field.ErrorList{}
732	if len(gitRepo.Repository) == 0 {
733		allErrs = append(allErrs, field.Required(fldPath.Child("repository"), ""))
734	}
735
736	pathErrs := validateLocalDescendingPath(gitRepo.Directory, fldPath.Child("directory"))
737	allErrs = append(allErrs, pathErrs...)
738	return allErrs
739}
740
741func validateISCSIVolumeSource(iscsi *core.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList {
742	allErrs := field.ErrorList{}
743	if len(iscsi.TargetPortal) == 0 {
744		allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), ""))
745	}
746	if len(iscsi.IQN) == 0 {
747		allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), ""))
748	} else {
749		if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") {
750			allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format starting with iqn, eui, or naa"))
751		} else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) {
752			allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format"))
753		} else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) {
754			allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format"))
755		} else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) {
756			allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format"))
757		}
758	}
759	if iscsi.Lun < 0 || iscsi.Lun > 255 {
760		allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255)))
761	}
762	if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil {
763		allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), ""))
764	}
765	if iscsi.InitiatorName != nil {
766		initiator := *iscsi.InitiatorName
767		if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") {
768			allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format starting with iqn, eui, or naa"))
769		}
770		if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) {
771			allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format"))
772		} else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) {
773			allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format"))
774		} else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) {
775			allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format"))
776		}
777	}
778	return allErrs
779}
780
781func validateISCSIPersistentVolumeSource(iscsi *core.ISCSIPersistentVolumeSource, pvName string, fldPath *field.Path) field.ErrorList {
782	allErrs := field.ErrorList{}
783	if len(iscsi.TargetPortal) == 0 {
784		allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), ""))
785	}
786	if iscsi.InitiatorName != nil && len(pvName+":"+iscsi.TargetPortal) > 64 {
787		tooLongErr := "Total length of <volume name>:<iscsi.targetPortal> must be under 64 characters if iscsi.initiatorName is specified."
788		allErrs = append(allErrs, field.Invalid(fldPath.Child("targetportal"), iscsi.TargetPortal, tooLongErr))
789	}
790	if len(iscsi.IQN) == 0 {
791		allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), ""))
792	} else {
793		if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") {
794			allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format"))
795		} else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) {
796			allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format"))
797		} else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) {
798			allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format"))
799		} else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) {
800			allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format"))
801		}
802	}
803	if iscsi.Lun < 0 || iscsi.Lun > 255 {
804		allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255)))
805	}
806	if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil {
807		allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), ""))
808	}
809	if iscsi.SecretRef != nil {
810		if len(iscsi.SecretRef.Name) == 0 {
811			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
812		}
813	}
814	if iscsi.InitiatorName != nil {
815		initiator := *iscsi.InitiatorName
816		if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") {
817			allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format"))
818		}
819		if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) {
820			allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format"))
821		} else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) {
822			allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format"))
823		} else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) {
824			allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format"))
825		}
826	}
827	return allErrs
828}
829
830func validateFCVolumeSource(fc *core.FCVolumeSource, fldPath *field.Path) field.ErrorList {
831	allErrs := field.ErrorList{}
832	if len(fc.TargetWWNs) < 1 && len(fc.WWIDs) < 1 {
833		allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "must specify either targetWWNs or wwids, but not both"))
834	}
835
836	if len(fc.TargetWWNs) != 0 && len(fc.WWIDs) != 0 {
837		allErrs = append(allErrs, field.Invalid(fldPath.Child("targetWWNs"), fc.TargetWWNs, "targetWWNs and wwids can not be specified simultaneously"))
838	}
839
840	if len(fc.TargetWWNs) != 0 {
841		if fc.Lun == nil {
842			allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "lun is required if targetWWNs is specified"))
843		} else {
844			if *fc.Lun < 0 || *fc.Lun > 255 {
845				allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, validation.InclusiveRangeError(0, 255)))
846			}
847		}
848	}
849	return allErrs
850}
851
852func validateGCEPersistentDiskVolumeSource(pd *core.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList {
853	allErrs := field.ErrorList{}
854	if len(pd.PDName) == 0 {
855		allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), ""))
856	}
857	if pd.Partition < 0 || pd.Partition > 255 {
858		allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg))
859	}
860	return allErrs
861}
862
863func validateAWSElasticBlockStoreVolumeSource(PD *core.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList {
864	allErrs := field.ErrorList{}
865	if len(PD.VolumeID) == 0 {
866		allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
867	}
868	if PD.Partition < 0 || PD.Partition > 255 {
869		allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg))
870	}
871	return allErrs
872}
873
874func validateSecretVolumeSource(secretSource *core.SecretVolumeSource, fldPath *field.Path) field.ErrorList {
875	allErrs := field.ErrorList{}
876	if len(secretSource.SecretName) == 0 {
877		allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
878	}
879
880	secretMode := secretSource.DefaultMode
881	if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) {
882		allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, fileModeErrorMsg))
883	}
884
885	itemsPath := fldPath.Child("items")
886	for i, kp := range secretSource.Items {
887		itemPath := itemsPath.Index(i)
888		allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
889	}
890	return allErrs
891}
892
893func validateConfigMapVolumeSource(configMapSource *core.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList {
894	allErrs := field.ErrorList{}
895	if len(configMapSource.Name) == 0 {
896		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
897	}
898
899	configMapMode := configMapSource.DefaultMode
900	if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) {
901		allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, fileModeErrorMsg))
902	}
903
904	itemsPath := fldPath.Child("items")
905	for i, kp := range configMapSource.Items {
906		itemPath := itemsPath.Index(i)
907		allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
908	}
909	return allErrs
910}
911
912func validateKeyToPath(kp *core.KeyToPath, fldPath *field.Path) field.ErrorList {
913	allErrs := field.ErrorList{}
914	if len(kp.Key) == 0 {
915		allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
916	}
917	if len(kp.Path) == 0 {
918		allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
919	}
920	allErrs = append(allErrs, validateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...)
921	if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) {
922		allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, fileModeErrorMsg))
923	}
924
925	return allErrs
926}
927
928func validatePersistentClaimVolumeSource(claim *core.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList {
929	allErrs := field.ErrorList{}
930	if len(claim.ClaimName) == 0 {
931		allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), ""))
932	}
933	return allErrs
934}
935
936func validateNFSVolumeSource(nfs *core.NFSVolumeSource, fldPath *field.Path) field.ErrorList {
937	allErrs := field.ErrorList{}
938	if len(nfs.Server) == 0 {
939		allErrs = append(allErrs, field.Required(fldPath.Child("server"), ""))
940	}
941	if len(nfs.Path) == 0 {
942		allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
943	}
944	if !path.IsAbs(nfs.Path) {
945		allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path"))
946	}
947	return allErrs
948}
949
950func validateQuobyteVolumeSource(quobyte *core.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList {
951	allErrs := field.ErrorList{}
952	if len(quobyte.Registry) == 0 {
953		allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas"))
954	} else if len(quobyte.Tenant) >= 65 {
955		allErrs = append(allErrs, field.Required(fldPath.Child("tenant"), "must be a UUID and may not exceed a length of 64 characters"))
956	} else {
957		for _, hostPortPair := range strings.Split(quobyte.Registry, ",") {
958			if _, _, err := net.SplitHostPort(hostPortPair); err != nil {
959				allErrs = append(allErrs, field.Invalid(fldPath.Child("registry"), quobyte.Registry, "must be a host:port pair or multiple pairs separated by commas"))
960			}
961		}
962	}
963
964	if len(quobyte.Volume) == 0 {
965		allErrs = append(allErrs, field.Required(fldPath.Child("volume"), ""))
966	}
967	return allErrs
968}
969
970func validateGlusterfsVolumeSource(glusterfs *core.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList {
971	allErrs := field.ErrorList{}
972	if len(glusterfs.EndpointsName) == 0 {
973		allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), ""))
974	}
975	if len(glusterfs.Path) == 0 {
976		allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
977	}
978	return allErrs
979}
980func validateGlusterfsPersistentVolumeSource(glusterfs *core.GlusterfsPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
981	allErrs := field.ErrorList{}
982	if len(glusterfs.EndpointsName) == 0 {
983		allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), ""))
984	}
985	if len(glusterfs.Path) == 0 {
986		allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
987	}
988	if glusterfs.EndpointsNamespace != nil {
989		endpointNs := glusterfs.EndpointsNamespace
990		if *endpointNs == "" {
991			allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, "if the endpointnamespace is set, it must be a valid namespace name"))
992		} else {
993			for _, msg := range ValidateNamespaceName(*endpointNs, false) {
994				allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, msg))
995			}
996		}
997	}
998	return allErrs
999}
1000
1001func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList {
1002	allErrs := field.ErrorList{}
1003	if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 {
1004		//TODO: consider adding a RequiredOneOf() error for this and similar cases
1005		allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required"))
1006	}
1007	if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 {
1008		allErrs = append(allErrs, field.Invalid(fldPath, "resource", "datasetName and datasetUUID can not be specified simultaneously"))
1009	}
1010	if strings.Contains(flocker.DatasetName, "/") {
1011		allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'"))
1012	}
1013	return allErrs
1014}
1015
// validVolumeDownwardAPIFieldPathExpressions lists the pod metadata fieldRef
// paths that may be projected into a downwardAPI volume file.
var validVolumeDownwardAPIFieldPathExpressions = sets.NewString(
	"metadata.name",
	"metadata.namespace",
	"metadata.labels",
	"metadata.annotations",
	"metadata.uid")
1022
1023func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
1024	allErrs := field.ErrorList{}
1025	if len(file.Path) == 0 {
1026		allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
1027	}
1028	allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...)
1029	if file.FieldRef != nil {
1030		allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validVolumeDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...)
1031		if file.ResourceFieldRef != nil {
1032			allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously"))
1033		}
1034	} else if file.ResourceFieldRef != nil {
1035		localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixes
1036		if opts.AllowDownwardAPIHugePages {
1037			localValidContainerResourceFieldPathPrefixes = validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
1038		}
1039		allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &localValidContainerResourceFieldPathPrefixes, fldPath.Child("resourceFieldRef"), true)...)
1040	} else {
1041		allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required"))
1042	}
1043	if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) {
1044		allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, fileModeErrorMsg))
1045	}
1046
1047	return allErrs
1048}
1049
1050func validateDownwardAPIVolumeSource(downwardAPIVolume *core.DownwardAPIVolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
1051	allErrs := field.ErrorList{}
1052
1053	downwardAPIMode := downwardAPIVolume.DefaultMode
1054	if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) {
1055		allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, fileModeErrorMsg))
1056	}
1057
1058	for _, file := range downwardAPIVolume.Items {
1059		allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath, opts)...)
1060	}
1061	return allErrs
1062}
1063
1064func validateProjectionSources(projection *core.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
1065	allErrs := field.ErrorList{}
1066	allPaths := sets.String{}
1067
1068	for i, source := range projection.Sources {
1069		numSources := 0
1070		srcPath := fldPath.Child("sources").Index(i)
1071		if projPath := srcPath.Child("secret"); source.Secret != nil {
1072			numSources++
1073			if len(source.Secret.Name) == 0 {
1074				allErrs = append(allErrs, field.Required(projPath.Child("name"), ""))
1075			}
1076			itemsPath := projPath.Child("items")
1077			for i, kp := range source.Secret.Items {
1078				itemPath := itemsPath.Index(i)
1079				allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
1080				if len(kp.Path) > 0 {
1081					curPath := kp.Path
1082					if !allPaths.Has(curPath) {
1083						allPaths.Insert(curPath)
1084					} else {
1085						allErrs = append(allErrs, field.Invalid(fldPath, source.Secret.Name, "conflicting duplicate paths"))
1086					}
1087				}
1088			}
1089		}
1090		if projPath := srcPath.Child("configMap"); source.ConfigMap != nil {
1091			numSources++
1092			if len(source.ConfigMap.Name) == 0 {
1093				allErrs = append(allErrs, field.Required(projPath.Child("name"), ""))
1094			}
1095			itemsPath := projPath.Child("items")
1096			for i, kp := range source.ConfigMap.Items {
1097				itemPath := itemsPath.Index(i)
1098				allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
1099				if len(kp.Path) > 0 {
1100					curPath := kp.Path
1101					if !allPaths.Has(curPath) {
1102						allPaths.Insert(curPath)
1103					} else {
1104						allErrs = append(allErrs, field.Invalid(fldPath, source.ConfigMap.Name, "conflicting duplicate paths"))
1105					}
1106				}
1107			}
1108		}
1109		if projPath := srcPath.Child("downwardAPI"); source.DownwardAPI != nil {
1110			numSources++
1111			for _, file := range source.DownwardAPI.Items {
1112				allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, projPath, opts)...)
1113				if len(file.Path) > 0 {
1114					curPath := file.Path
1115					if !allPaths.Has(curPath) {
1116						allPaths.Insert(curPath)
1117					} else {
1118						allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths"))
1119					}
1120				}
1121			}
1122		}
1123		if projPath := srcPath.Child("serviceAccountToken"); source.ServiceAccountToken != nil {
1124			numSources++
1125			if source.ServiceAccountToken.ExpirationSeconds < 10*60 {
1126				allErrs = append(allErrs, field.Invalid(projPath.Child("expirationSeconds"), source.ServiceAccountToken.ExpirationSeconds, "may not specify a duration less than 10 minutes"))
1127			}
1128			if source.ServiceAccountToken.ExpirationSeconds > 1<<32 {
1129				allErrs = append(allErrs, field.Invalid(projPath.Child("expirationSeconds"), source.ServiceAccountToken.ExpirationSeconds, "may not specify a duration larger than 2^32 seconds"))
1130			}
1131			if source.ServiceAccountToken.Path == "" {
1132				allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
1133			}
1134		}
1135		if numSources > 1 {
1136			allErrs = append(allErrs, field.Forbidden(srcPath, "may not specify more than 1 volume type"))
1137		}
1138	}
1139	return allErrs
1140}
1141
1142func validateProjectedVolumeSource(projection *core.ProjectedVolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
1143	allErrs := field.ErrorList{}
1144
1145	projectionMode := projection.DefaultMode
1146	if projectionMode != nil && (*projectionMode > 0777 || *projectionMode < 0) {
1147		allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, fileModeErrorMsg))
1148	}
1149
1150	allErrs = append(allErrs, validateProjectionSources(projection, projectionMode, fldPath, opts)...)
1151	return allErrs
1152}
1153
// supportedHostPathTypes lists the valid values for HostPathVolumeSource.Type,
// including the empty ("unset") value.
var supportedHostPathTypes = sets.NewString(
	string(core.HostPathUnset),
	string(core.HostPathDirectoryOrCreate),
	string(core.HostPathDirectory),
	string(core.HostPathFileOrCreate),
	string(core.HostPathFile),
	string(core.HostPathSocket),
	string(core.HostPathCharDev),
	string(core.HostPathBlockDev))
1163
1164func validateHostPathType(hostPathType *core.HostPathType, fldPath *field.Path) field.ErrorList {
1165	allErrs := field.ErrorList{}
1166
1167	if hostPathType != nil && !supportedHostPathTypes.Has(string(*hostPathType)) {
1168		allErrs = append(allErrs, field.NotSupported(fldPath, hostPathType, supportedHostPathTypes.List()))
1169	}
1170
1171	return allErrs
1172}
1173
1174// This validate will make sure targetPath:
1175// 1. is not abs path
1176// 2. does not have any element which is ".."
1177func validateLocalDescendingPath(targetPath string, fldPath *field.Path) field.ErrorList {
1178	allErrs := field.ErrorList{}
1179	if path.IsAbs(targetPath) {
1180		allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path"))
1181	}
1182
1183	allErrs = append(allErrs, validatePathNoBacksteps(targetPath, fldPath)...)
1184
1185	return allErrs
1186}
1187
1188// validatePathNoBacksteps makes sure the targetPath does not have any `..` path elements when split
1189//
1190// This assumes the OS of the apiserver and the nodes are the same. The same check should be done
1191// on the node to ensure there are no backsteps.
1192func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.ErrorList {
1193	allErrs := field.ErrorList{}
1194	parts := strings.Split(filepath.ToSlash(targetPath), "/")
1195	for _, item := range parts {
1196		if item == ".." {
1197			allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'"))
1198			break // even for `../../..`, one error is sufficient to make the point
1199		}
1200	}
1201	return allErrs
1202}
1203
1204// validateMountPropagation verifies that MountPropagation field is valid and
1205// allowed for given container.
1206func validateMountPropagation(mountPropagation *core.MountPropagationMode, container *core.Container, fldPath *field.Path) field.ErrorList {
1207	allErrs := field.ErrorList{}
1208
1209	if mountPropagation == nil {
1210		return allErrs
1211	}
1212
1213	supportedMountPropagations := sets.NewString(string(core.MountPropagationBidirectional), string(core.MountPropagationHostToContainer), string(core.MountPropagationNone))
1214	if !supportedMountPropagations.Has(string(*mountPropagation)) {
1215		allErrs = append(allErrs, field.NotSupported(fldPath, *mountPropagation, supportedMountPropagations.List()))
1216	}
1217
1218	if container == nil {
1219		// The container is not available yet.
1220		// Stop validation now, Pod validation will refuse final
1221		// Pods with Bidirectional propagation in non-privileged containers.
1222		return allErrs
1223	}
1224
1225	privileged := container.SecurityContext != nil && container.SecurityContext.Privileged != nil && *container.SecurityContext.Privileged
1226	if *mountPropagation == core.MountPropagationBidirectional && !privileged {
1227		allErrs = append(allErrs, field.Forbidden(fldPath, "Bidirectional mount propagation is available only to privileged containers"))
1228	}
1229	return allErrs
1230}
1231
1232// This validate will make sure targetPath:
1233// 1. is not abs path
1234// 2. does not contain any '..' elements
1235// 3. does not start with '..'
1236func validateLocalNonReservedPath(targetPath string, fldPath *field.Path) field.ErrorList {
1237	allErrs := field.ErrorList{}
1238	allErrs = append(allErrs, validateLocalDescendingPath(targetPath, fldPath)...)
1239	// Don't report this error if the check for .. elements already caught it.
1240	if strings.HasPrefix(targetPath, "..") && !strings.HasPrefix(targetPath, "../") {
1241		allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'"))
1242	}
1243	return allErrs
1244}
1245
1246func validateRBDVolumeSource(rbd *core.RBDVolumeSource, fldPath *field.Path) field.ErrorList {
1247	allErrs := field.ErrorList{}
1248	if len(rbd.CephMonitors) == 0 {
1249		allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
1250	}
1251	if len(rbd.RBDImage) == 0 {
1252		allErrs = append(allErrs, field.Required(fldPath.Child("image"), ""))
1253	}
1254	return allErrs
1255}
1256
1257func validateRBDPersistentVolumeSource(rbd *core.RBDPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
1258	allErrs := field.ErrorList{}
1259	if len(rbd.CephMonitors) == 0 {
1260		allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
1261	}
1262	if len(rbd.RBDImage) == 0 {
1263		allErrs = append(allErrs, field.Required(fldPath.Child("image"), ""))
1264	}
1265	return allErrs
1266}
1267
1268func validateCinderVolumeSource(cd *core.CinderVolumeSource, fldPath *field.Path) field.ErrorList {
1269	allErrs := field.ErrorList{}
1270	if len(cd.VolumeID) == 0 {
1271		allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
1272	}
1273	if cd.SecretRef != nil {
1274		if len(cd.SecretRef.Name) == 0 {
1275			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
1276		}
1277	}
1278	return allErrs
1279}
1280
1281func validateCinderPersistentVolumeSource(cd *core.CinderPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
1282	allErrs := field.ErrorList{}
1283	if len(cd.VolumeID) == 0 {
1284		allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
1285	}
1286	if cd.SecretRef != nil {
1287		if len(cd.SecretRef.Name) == 0 {
1288			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
1289		}
1290		if len(cd.SecretRef.Namespace) == 0 {
1291			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), ""))
1292		}
1293	}
1294	return allErrs
1295}
1296
1297func validateCephFSVolumeSource(cephfs *core.CephFSVolumeSource, fldPath *field.Path) field.ErrorList {
1298	allErrs := field.ErrorList{}
1299	if len(cephfs.Monitors) == 0 {
1300		allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
1301	}
1302	return allErrs
1303}
1304
1305func validateCephFSPersistentVolumeSource(cephfs *core.CephFSPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
1306	allErrs := field.ErrorList{}
1307	if len(cephfs.Monitors) == 0 {
1308		allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
1309	}
1310	return allErrs
1311}
1312
1313func validateFlexVolumeSource(fv *core.FlexVolumeSource, fldPath *field.Path) field.ErrorList {
1314	allErrs := field.ErrorList{}
1315	if len(fv.Driver) == 0 {
1316		allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
1317	}
1318
1319	// Make sure user-specified options don't use kubernetes namespaces
1320	for k := range fv.Options {
1321		namespace := k
1322		if parts := strings.SplitN(k, "/", 2); len(parts) == 2 {
1323			namespace = parts[0]
1324		}
1325		normalized := "." + strings.ToLower(namespace)
1326		if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") {
1327			allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved"))
1328		}
1329	}
1330
1331	return allErrs
1332}
1333
1334func validateFlexPersistentVolumeSource(fv *core.FlexPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
1335	allErrs := field.ErrorList{}
1336	if len(fv.Driver) == 0 {
1337		allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
1338	}
1339
1340	// Make sure user-specified options don't use kubernetes namespaces
1341	for k := range fv.Options {
1342		namespace := k
1343		if parts := strings.SplitN(k, "/", 2); len(parts) == 2 {
1344			namespace = parts[0]
1345		}
1346		normalized := "." + strings.ToLower(namespace)
1347		if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") {
1348			allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved"))
1349		}
1350	}
1351
1352	return allErrs
1353}
1354
1355func validateAzureFile(azure *core.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList {
1356	allErrs := field.ErrorList{}
1357	if azure.SecretName == "" {
1358		allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
1359	}
1360	if azure.ShareName == "" {
1361		allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), ""))
1362	}
1363	return allErrs
1364}
1365
1366func validateAzureFilePV(azure *core.AzureFilePersistentVolumeSource, fldPath *field.Path) field.ErrorList {
1367	allErrs := field.ErrorList{}
1368	if azure.SecretName == "" {
1369		allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
1370	}
1371	if azure.ShareName == "" {
1372		allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), ""))
1373	}
1374	if azure.SecretNamespace != nil {
1375		if len(*azure.SecretNamespace) == 0 {
1376			allErrs = append(allErrs, field.Required(fldPath.Child("secretNamespace"), ""))
1377		}
1378	}
1379	return allErrs
1380}
1381
1382func validateAzureDisk(azure *core.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList {
1383	var supportedCachingModes = sets.NewString(string(core.AzureDataDiskCachingNone), string(core.AzureDataDiskCachingReadOnly), string(core.AzureDataDiskCachingReadWrite))
1384	var supportedDiskKinds = sets.NewString(string(core.AzureSharedBlobDisk), string(core.AzureDedicatedBlobDisk), string(core.AzureManagedDisk))
1385
1386	diskURISupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"}
1387	diskURISupportedblob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"}
1388
1389	allErrs := field.ErrorList{}
1390	if azure.DiskName == "" {
1391		allErrs = append(allErrs, field.Required(fldPath.Child("diskName"), ""))
1392	}
1393
1394	if azure.DataDiskURI == "" {
1395		allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), ""))
1396	}
1397
1398	if azure.CachingMode != nil && !supportedCachingModes.Has(string(*azure.CachingMode)) {
1399		allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, supportedCachingModes.List()))
1400	}
1401
1402	if azure.Kind != nil && !supportedDiskKinds.Has(string(*azure.Kind)) {
1403		allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), *azure.Kind, supportedDiskKinds.List()))
1404	}
1405
1406	// validate that DiskUri is the correct format
1407	if azure.Kind != nil && *azure.Kind == core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 {
1408		allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskURISupportedManaged))
1409	}
1410
1411	if azure.Kind != nil && *azure.Kind != core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 {
1412		allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskURISupportedblob))
1413	}
1414
1415	return allErrs
1416}
1417
1418func validateVsphereVolumeSource(cd *core.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList {
1419	allErrs := field.ErrorList{}
1420	if len(cd.VolumePath) == 0 {
1421		allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), ""))
1422	}
1423	return allErrs
1424}
1425
1426func validatePhotonPersistentDiskVolumeSource(cd *core.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList {
1427	allErrs := field.ErrorList{}
1428	if len(cd.PdID) == 0 {
1429		allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), ""))
1430	}
1431	return allErrs
1432}
1433
1434func validatePortworxVolumeSource(pwx *core.PortworxVolumeSource, fldPath *field.Path) field.ErrorList {
1435	allErrs := field.ErrorList{}
1436	if len(pwx.VolumeID) == 0 {
1437		allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
1438	}
1439	return allErrs
1440}
1441
1442func validateScaleIOVolumeSource(sio *core.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList {
1443	allErrs := field.ErrorList{}
1444	if sio.Gateway == "" {
1445		allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), ""))
1446	}
1447	if sio.System == "" {
1448		allErrs = append(allErrs, field.Required(fldPath.Child("system"), ""))
1449	}
1450	if sio.VolumeName == "" {
1451		allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
1452	}
1453	return allErrs
1454}
1455
1456func validateScaleIOPersistentVolumeSource(sio *core.ScaleIOPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
1457	allErrs := field.ErrorList{}
1458	if sio.Gateway == "" {
1459		allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), ""))
1460	}
1461	if sio.System == "" {
1462		allErrs = append(allErrs, field.Required(fldPath.Child("system"), ""))
1463	}
1464	if sio.VolumeName == "" {
1465		allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
1466	}
1467	return allErrs
1468}
1469
1470func validateLocalVolumeSource(ls *core.LocalVolumeSource, fldPath *field.Path) field.ErrorList {
1471	allErrs := field.ErrorList{}
1472	if ls.Path == "" {
1473		allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
1474		return allErrs
1475	}
1476
1477	allErrs = append(allErrs, validatePathNoBacksteps(ls.Path, fldPath.Child("path"))...)
1478	return allErrs
1479}
1480
1481func validateStorageOSVolumeSource(storageos *core.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList {
1482	allErrs := field.ErrorList{}
1483	if len(storageos.VolumeName) == 0 {
1484		allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
1485	} else {
1486		allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...)
1487	}
1488	if len(storageos.VolumeNamespace) > 0 {
1489		allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...)
1490	}
1491	if storageos.SecretRef != nil {
1492		if len(storageos.SecretRef.Name) == 0 {
1493			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
1494		}
1495	}
1496	return allErrs
1497}
1498
1499func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
1500	allErrs := field.ErrorList{}
1501	if len(storageos.VolumeName) == 0 {
1502		allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
1503	} else {
1504		allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...)
1505	}
1506	if len(storageos.VolumeNamespace) > 0 {
1507		allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...)
1508	}
1509	if storageos.SecretRef != nil {
1510		if len(storageos.SecretRef.Name) == 0 {
1511			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
1512		}
1513		if len(storageos.SecretRef.Namespace) == 0 {
1514			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), ""))
1515		}
1516	}
1517	return allErrs
1518}
1519
1520func ValidateCSIDriverName(driverName string, fldPath *field.Path) field.ErrorList {
1521	allErrs := field.ErrorList{}
1522
1523	if len(driverName) == 0 {
1524		allErrs = append(allErrs, field.Required(fldPath, ""))
1525	}
1526
1527	if len(driverName) > 63 {
1528		allErrs = append(allErrs, field.TooLong(fldPath, driverName, 63))
1529	}
1530
1531	for _, msg := range validation.IsDNS1123Subdomain(strings.ToLower(driverName)) {
1532		allErrs = append(allErrs, field.Invalid(fldPath, driverName, msg))
1533	}
1534
1535	return allErrs
1536}
1537
1538func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
1539	allErrs := field.ErrorList{}
1540
1541	allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...)
1542
1543	if len(csi.VolumeHandle) == 0 {
1544		allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), ""))
1545	}
1546
1547	if csi.ControllerPublishSecretRef != nil {
1548		if len(csi.ControllerPublishSecretRef.Name) == 0 {
1549			allErrs = append(allErrs, field.Required(fldPath.Child("controllerPublishSecretRef", "name"), ""))
1550		} else {
1551			allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerPublishSecretRef.Name, fldPath.Child("name"))...)
1552		}
1553		if len(csi.ControllerPublishSecretRef.Namespace) == 0 {
1554			allErrs = append(allErrs, field.Required(fldPath.Child("controllerPublishSecretRef", "namespace"), ""))
1555		} else {
1556			allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerPublishSecretRef.Namespace, fldPath.Child("namespace"))...)
1557		}
1558	}
1559
1560	if csi.ControllerExpandSecretRef != nil {
1561		if len(csi.ControllerExpandSecretRef.Name) == 0 {
1562			allErrs = append(allErrs, field.Required(fldPath.Child("controllerExpandSecretRef", "name"), ""))
1563		} else {
1564			allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerExpandSecretRef.Name, fldPath.Child("name"))...)
1565		}
1566		if len(csi.ControllerExpandSecretRef.Namespace) == 0 {
1567			allErrs = append(allErrs, field.Required(fldPath.Child("controllerExpandSecretRef", "namespace"), ""))
1568		} else {
1569			allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerExpandSecretRef.Namespace, fldPath.Child("namespace"))...)
1570		}
1571	}
1572
1573	if csi.NodePublishSecretRef != nil {
1574		if len(csi.NodePublishSecretRef.Name) == 0 {
1575			allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef ", "name"), ""))
1576		} else {
1577			allErrs = append(allErrs, ValidateDNS1123Label(csi.NodePublishSecretRef.Name, fldPath.Child("name"))...)
1578		}
1579		if len(csi.NodePublishSecretRef.Namespace) == 0 {
1580			allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef ", "namespace"), ""))
1581		} else {
1582			allErrs = append(allErrs, ValidateDNS1123Label(csi.NodePublishSecretRef.Namespace, fldPath.Child("namespace"))...)
1583		}
1584	}
1585
1586	return allErrs
1587}
1588
1589func validateCSIVolumeSource(csi *core.CSIVolumeSource, fldPath *field.Path) field.ErrorList {
1590	allErrs := field.ErrorList{}
1591	allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...)
1592
1593	if csi.NodePublishSecretRef != nil {
1594		if len(csi.NodePublishSecretRef.Name) == 0 {
1595			allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef ", "name"), ""))
1596		} else {
1597			for _, msg := range ValidateSecretName(csi.NodePublishSecretRef.Name, false) {
1598				allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), csi.NodePublishSecretRef.Name, msg))
1599			}
1600		}
1601	}
1602
1603	return allErrs
1604}
1605
1606func validateEphemeralVolumeSource(ephemeral *core.EphemeralVolumeSource, fldPath *field.Path) field.ErrorList {
1607	allErrs := field.ErrorList{}
1608	if ephemeral.VolumeClaimTemplate == nil {
1609		allErrs = append(allErrs, field.Required(fldPath.Child("volumeClaimTemplate"), ""))
1610	} else {
1611		opts := ValidationOptionsForPersistentVolumeClaimTemplate(ephemeral.VolumeClaimTemplate, nil)
1612		allErrs = append(allErrs, ValidatePersistentVolumeClaimTemplate(ephemeral.VolumeClaimTemplate, fldPath.Child("volumeClaimTemplate"), opts)...)
1613	}
1614	return allErrs
1615}
1616
1617// ValidatePersistentVolumeClaimTemplate verifies that the embedded object meta and spec are valid.
1618// Checking of the object data is very minimal because only labels and annotations are used.
1619func ValidatePersistentVolumeClaimTemplate(claimTemplate *core.PersistentVolumeClaimTemplate, fldPath *field.Path, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
1620	allErrs := validatePersistentVolumeClaimTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
1621	allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&claimTemplate.Spec, fldPath.Child("spec"), opts)...)
1622	return allErrs
1623}
1624
1625func validatePersistentVolumeClaimTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
1626	allErrs := apimachineryvalidation.ValidateAnnotations(objMeta.Annotations, fldPath.Child("annotations"))
1627	allErrs = append(allErrs, unversionedvalidation.ValidateLabels(objMeta.Labels, fldPath.Child("labels"))...)
1628	// All other fields are not supported and thus must not be set
1629	// to avoid confusion.  We could reject individual fields,
1630	// but then adding a new one to ObjectMeta wouldn't be checked
1631	// unless this code gets updated. Instead, we ensure that
1632	// only allowed fields are set via reflection.
1633	allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedPVCTemplateObjectMetaFields, "cannot be set for an ephemeral volume", fldPath)...)
1634	return allErrs
1635}
1636
// allowedPVCTemplateObjectMetaFields lists the ObjectMeta fields that may be
// set on an ephemeral volume's PVC template; all other fields are rejected by
// validatePersistentVolumeClaimTemplateObjectMeta's allow-list check.
var allowedPVCTemplateObjectMetaFields = map[string]bool{
	"Annotations": true,
	"Labels":      true,
}
1641
// PersistentVolumeSpecValidationOptions contains the different settings for PersistentVolume validation
type PersistentVolumeSpecValidationOptions struct {
	// Allow spec to contain the "ReadWriteOncePod" access mode
	AllowReadWriteOncePod bool
}
1647
// ValidatePersistentVolumeName checks that a name is appropriate for a
// PersistentVolume object (a DNS subdomain).
var ValidatePersistentVolumeName = apimachineryvalidation.NameIsDNSSubdomain
1651
// supportedAccessModes lists the access modes accepted unconditionally;
// ReadWriteOncePod is added separately when the validation options allow it.
var supportedAccessModes = sets.NewString(string(core.ReadWriteOnce), string(core.ReadOnlyMany), string(core.ReadWriteMany))

// supportedReclaimPolicy lists the valid persistentVolumeReclaimPolicy values.
var supportedReclaimPolicy = sets.NewString(string(core.PersistentVolumeReclaimDelete), string(core.PersistentVolumeReclaimRecycle), string(core.PersistentVolumeReclaimRetain))

// supportedVolumeModes lists the valid volumeMode values.
var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), string(core.PersistentVolumeFilesystem))
1657
1658func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) PersistentVolumeSpecValidationOptions {
1659	opts := PersistentVolumeSpecValidationOptions{
1660		AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
1661	}
1662	if oldPv == nil {
1663		// If there's no old PV, use the options based solely on feature enablement
1664		return opts
1665	}
1666	if helper.ContainsAccessMode(oldPv.Spec.AccessModes, core.ReadWriteOncePod) {
1667		// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
1668		opts.AllowReadWriteOncePod = true
1669	}
1670	return opts
1671}
1672
// ValidatePersistentVolumeSpec validates a PersistentVolumeSpec. pvName is
// passed through to iSCSI validation. When validateInlinePersistentVolumeSpec
// is true the spec is used in an inline-volume context and stricter rules
// apply: only the CSI source is allowed and claimRef, capacity, node affinity,
// storageClassName, non-Retain reclaim policy and non-Filesystem volume mode
// are forbidden.
func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName string, validateInlinePersistentVolumeSpec bool, fldPath *field.Path, opts PersistentVolumeSpecValidationOptions) field.ErrorList {
	allErrs := field.ErrorList{}

	// Inline-volume restrictions: CSI only, no claimRef, no capacity.
	if validateInlinePersistentVolumeSpec {
		if pvSpec.ClaimRef != nil {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("claimRef"), "may not be specified in the context of inline volumes"))
		}
		if len(pvSpec.Capacity) != 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("capacity"), "may not be specified in the context of inline volumes"))
		}
		if pvSpec.CSI == nil {
			allErrs = append(allErrs, field.Required(fldPath.Child("csi"), "has to be specified in the context of inline volumes"))
		}
	}

	if len(pvSpec.AccessModes) == 0 {
		allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), ""))
	}

	// ReadWriteOncePod is only accepted when the options allow it (feature
	// gate enabled, or the old object already used it).
	expandedSupportedAccessModes := sets.StringKeySet(supportedAccessModes)
	if opts.AllowReadWriteOncePod {
		expandedSupportedAccessModes.Insert(string(core.ReadWriteOncePod))
	}

	// ReadWriteOncePod is mutually exclusive with all other access modes.
	foundReadWriteOncePod, foundNonReadWriteOncePod := false, false
	for _, mode := range pvSpec.AccessModes {
		if !expandedSupportedAccessModes.Has(string(mode)) {
			allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, expandedSupportedAccessModes.List()))
		}

		if mode == core.ReadWriteOncePod {
			foundReadWriteOncePod = true
		} else if supportedAccessModes.Has(string(mode)) {
			foundNonReadWriteOncePod = true
		}
	}
	if foundReadWriteOncePod && foundNonReadWriteOncePod {
		allErrs = append(allErrs, field.Forbidden(fldPath.Child("accessModes"), "may not use ReadWriteOncePod with other access modes"))
	}

	// Non-inline PVs must declare exactly the "storage" capacity, and it must
	// be a positive quantity.
	if !validateInlinePersistentVolumeSpec {
		if len(pvSpec.Capacity) == 0 {
			allErrs = append(allErrs, field.Required(fldPath.Child("capacity"), ""))
		}

		if _, ok := pvSpec.Capacity[core.ResourceStorage]; !ok || len(pvSpec.Capacity) > 1 {
			allErrs = append(allErrs, field.NotSupported(fldPath.Child("capacity"), pvSpec.Capacity, []string{string(core.ResourceStorage)}))
		}
		capPath := fldPath.Child("capacity")
		for r, qty := range pvSpec.Capacity {
			allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
			allErrs = append(allErrs, ValidatePositiveQuantityValue(qty, capPath.Key(string(r)))...)
		}
	}

	// Reclaim policy: inline volumes may only use Retain; otherwise any of the
	// supported policies is accepted.
	if len(string(pvSpec.PersistentVolumeReclaimPolicy)) > 0 {
		if validateInlinePersistentVolumeSpec {
			if pvSpec.PersistentVolumeReclaimPolicy != core.PersistentVolumeReclaimRetain {
				allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeReclaimPolicy"), "may only be "+string(core.PersistentVolumeReclaimRetain)+" in the context of inline volumes"))
			}
		} else {
			if !supportedReclaimPolicy.Has(string(pvSpec.PersistentVolumeReclaimPolicy)) {
				allErrs = append(allErrs, field.NotSupported(fldPath.Child("persistentVolumeReclaimPolicy"), pvSpec.PersistentVolumeReclaimPolicy, supportedReclaimPolicy.List()))
			}
		}
	}

	// nodeAffinitySpecified is consumed below by the local-volume check, which
	// requires node affinity to be present.
	var nodeAffinitySpecified bool
	var errs field.ErrorList
	if pvSpec.NodeAffinity != nil {
		if validateInlinePersistentVolumeSpec {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeAffinity"), "may not be specified in the context of inline volumes"))
		} else {
			nodeAffinitySpecified, errs = validateVolumeNodeAffinity(pvSpec.NodeAffinity, fldPath.Child("nodeAffinity"))
			allErrs = append(allErrs, errs...)
		}
	}
	// Exactly one volume source must be set; each branch below rejects a
	// second source once numVolumes is already positive.
	numVolumes := 0
	if pvSpec.HostPath != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateHostPathVolumeSource(pvSpec.HostPath, fldPath.Child("hostPath"))...)
		}
	}
	if pvSpec.GCEPersistentDisk != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			// NOTE(review): the validator reports under "persistentDisk" while
			// the Forbidden error above uses "gcePersistentDisk" — confirm the
			// path mismatch is intentional.
			allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pvSpec.GCEPersistentDisk, fldPath.Child("persistentDisk"))...)
		}
	}
	if pvSpec.AWSElasticBlockStore != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pvSpec.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...)
		}
	}
	if pvSpec.Glusterfs != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateGlusterfsPersistentVolumeSource(pvSpec.Glusterfs, fldPath.Child("glusterfs"))...)
		}
	}
	if pvSpec.Flocker != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateFlockerVolumeSource(pvSpec.Flocker, fldPath.Child("flocker"))...)
		}
	}
	if pvSpec.NFS != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateNFSVolumeSource(pvSpec.NFS, fldPath.Child("nfs"))...)
		}
	}
	if pvSpec.RBD != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateRBDPersistentVolumeSource(pvSpec.RBD, fldPath.Child("rbd"))...)
		}
	}
	if pvSpec.Quobyte != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateQuobyteVolumeSource(pvSpec.Quobyte, fldPath.Child("quobyte"))...)
		}
	}
	if pvSpec.CephFS != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateCephFSPersistentVolumeSource(pvSpec.CephFS, fldPath.Child("cephfs"))...)
		}
	}
	if pvSpec.ISCSI != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateISCSIPersistentVolumeSource(pvSpec.ISCSI, pvName, fldPath.Child("iscsi"))...)
		}
	}
	if pvSpec.Cinder != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateCinderPersistentVolumeSource(pvSpec.Cinder, fldPath.Child("cinder"))...)
		}
	}
	if pvSpec.FC != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateFCVolumeSource(pvSpec.FC, fldPath.Child("fc"))...)
		}
	}
	// NOTE(review): unlike every other source, flexVolume does not reject being
	// combined with a previously-counted volume type — confirm this asymmetry
	// is intentional.
	if pvSpec.FlexVolume != nil {
		numVolumes++
		allErrs = append(allErrs, validateFlexPersistentVolumeSource(pvSpec.FlexVolume, fldPath.Child("flexVolume"))...)
	}
	if pvSpec.AzureFile != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type"))

		} else {
			numVolumes++
			allErrs = append(allErrs, validateAzureFilePV(pvSpec.AzureFile, fldPath.Child("azureFile"))...)
		}
	}

	if pvSpec.VsphereVolume != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateVsphereVolumeSource(pvSpec.VsphereVolume, fldPath.Child("vsphereVolume"))...)
		}
	}
	if pvSpec.PhotonPersistentDisk != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(pvSpec.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...)
		}
	}
	if pvSpec.PortworxVolume != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("portworxVolume"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validatePortworxVolumeSource(pvSpec.PortworxVolume, fldPath.Child("portworxVolume"))...)
		}
	}
	if pvSpec.AzureDisk != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateAzureDisk(pvSpec.AzureDisk, fldPath.Child("azureDisk"))...)
		}
	}
	if pvSpec.ScaleIO != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("scaleIO"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateScaleIOPersistentVolumeSource(pvSpec.ScaleIO, fldPath.Child("scaleIO"))...)
		}
	}
	if pvSpec.Local != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("local"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateLocalVolumeSource(pvSpec.Local, fldPath.Child("local"))...)
			// NodeAffinity is required
			if !nodeAffinitySpecified {
				allErrs = append(allErrs, field.Required(fldPath.Child("nodeAffinity"), "Local volume requires node affinity"))
			}
		}
	}
	if pvSpec.StorageOS != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageos"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateStorageOSPersistentVolumeSource(pvSpec.StorageOS, fldPath.Child("storageos"))...)
		}
	}

	if pvSpec.CSI != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("csi"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateCSIPersistentVolumeSource(pvSpec.CSI, fldPath.Child("csi"))...)
		}
	}

	if numVolumes == 0 {
		allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
	}

	// do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy
	if pvSpec.HostPath != nil && path.Clean(pvSpec.HostPath.Path) == "/" && pvSpec.PersistentVolumeReclaimPolicy == core.PersistentVolumeReclaimRecycle {
		allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'"))
	}

	if len(pvSpec.StorageClassName) > 0 {
		if validateInlinePersistentVolumeSpec {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageClassName"), "may not be specified in the context of inline volumes"))
		} else {
			for _, msg := range ValidateClassName(pvSpec.StorageClassName, false) {
				allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), pvSpec.StorageClassName, msg))
			}
		}
	}
	// Volume mode: inline volumes may only use Filesystem; otherwise any of
	// the supported modes is accepted.
	if pvSpec.VolumeMode != nil {
		if validateInlinePersistentVolumeSpec {
			if *pvSpec.VolumeMode != core.PersistentVolumeFilesystem {
				allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeMode"), "may not specify volumeMode other than "+string(core.PersistentVolumeFilesystem)+" in the context of inline volumes"))
			}
		} else {
			if !supportedVolumeModes.Has(string(*pvSpec.VolumeMode)) {
				allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *pvSpec.VolumeMode, supportedVolumeModes.List()))
			}
		}
	}
	return allErrs
}
1962
1963func ValidatePersistentVolume(pv *core.PersistentVolume, opts PersistentVolumeSpecValidationOptions) field.ErrorList {
1964	metaPath := field.NewPath("metadata")
1965	allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, metaPath)
1966	allErrs = append(allErrs, ValidatePersistentVolumeSpec(&pv.Spec, pv.ObjectMeta.Name, false, field.NewPath("spec"), opts)...)
1967	return allErrs
1968}
1969
1970// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make.
1971// newPv is updated with fields that cannot be changed.
1972func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume, opts PersistentVolumeSpecValidationOptions) field.ErrorList {
1973	allErrs := ValidatePersistentVolume(newPv, opts)
1974
1975	// if oldPV does not have ControllerExpandSecretRef then allow it to be set
1976	if (oldPv.Spec.CSI != nil && oldPv.Spec.CSI.ControllerExpandSecretRef == nil) &&
1977		(newPv.Spec.CSI != nil && newPv.Spec.CSI.ControllerExpandSecretRef != nil) {
1978		newPv = newPv.DeepCopy()
1979		newPv.Spec.CSI.ControllerExpandSecretRef = nil
1980	}
1981
1982	// PersistentVolumeSource should be immutable after creation.
1983	if !apiequality.Semantic.DeepEqual(newPv.Spec.PersistentVolumeSource, oldPv.Spec.PersistentVolumeSource) {
1984		pvcSourceDiff := diff.ObjectDiff(newPv.Spec.PersistentVolumeSource, oldPv.Spec.PersistentVolumeSource)
1985		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "persistentvolumesource"), fmt.Sprintf("spec.persistentvolumesource is immutable after creation\n%v", pvcSourceDiff)))
1986	}
1987	allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.VolumeMode, oldPv.Spec.VolumeMode, field.NewPath("volumeMode"))...)
1988
1989	// Allow setting NodeAffinity if oldPv NodeAffinity was not set
1990	if oldPv.Spec.NodeAffinity != nil {
1991		allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...)
1992	}
1993
1994	return allErrs
1995}
1996
1997// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make.
1998func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList {
1999	allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata"))
2000	if len(newPv.ResourceVersion) == 0 {
2001		allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
2002	}
2003	return allErrs
2004}
2005
// PersistentVolumeClaimSpecValidationOptions contains the different settings for PersistentVolumeClaim validation
type PersistentVolumeClaimSpecValidationOptions struct {
	// Allow spec to contain the "ReadWriteOncePod" access mode
	AllowReadWriteOncePod bool
}
2011
2012func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolumeClaim) PersistentVolumeClaimSpecValidationOptions {
2013	opts := PersistentVolumeClaimSpecValidationOptions{
2014		AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
2015	}
2016	if oldPvc == nil {
2017		// If there's no old PVC, use the options based solely on feature enablement
2018		return opts
2019	}
2020	if helper.ContainsAccessMode(oldPvc.Spec.AccessModes, core.ReadWriteOncePod) {
2021		// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
2022		opts.AllowReadWriteOncePod = true
2023	}
2024	return opts
2025}
2026
2027func ValidationOptionsForPersistentVolumeClaimTemplate(claimTemplate, oldClaimTemplate *core.PersistentVolumeClaimTemplate) PersistentVolumeClaimSpecValidationOptions {
2028	opts := PersistentVolumeClaimSpecValidationOptions{
2029		AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
2030	}
2031	if oldClaimTemplate == nil {
2032		// If there's no old PVC template, use the options based solely on feature enablement
2033		return opts
2034	}
2035	if helper.ContainsAccessMode(oldClaimTemplate.Spec.AccessModes, core.ReadWriteOncePod) {
2036		// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
2037		opts.AllowReadWriteOncePod = true
2038	}
2039	return opts
2040}
2041
2042// ValidatePersistentVolumeClaim validates a PersistentVolumeClaim
2043func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
2044	allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata"))
2045	allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"), opts)...)
2046	return allErrs
2047}
2048
2049// validateDataSource validates a DataSource/DataSourceRef in a PersistentVolumeClaimSpec
2050func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *field.Path) field.ErrorList {
2051	allErrs := field.ErrorList{}
2052
2053	if len(dataSource.Name) == 0 {
2054		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
2055	}
2056	if len(dataSource.Kind) == 0 {
2057		allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
2058	}
2059	apiGroup := ""
2060	if dataSource.APIGroup != nil {
2061		apiGroup = *dataSource.APIGroup
2062	}
2063	if len(apiGroup) == 0 && dataSource.Kind != "PersistentVolumeClaim" {
2064		allErrs = append(allErrs, field.Invalid(fldPath, dataSource.Kind, ""))
2065	}
2066
2067	return allErrs
2068}
2069
// ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec
func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fldPath *field.Path, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
	allErrs := field.ErrorList{}
	if len(spec.AccessModes) == 0 {
		allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required"))
	}
	if spec.Selector != nil {
		allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
	}

	// ReadWriteOncePod is only accepted when the options allow it (feature
	// gate enabled, or the old object already used it).
	expandedSupportedAccessModes := sets.StringKeySet(supportedAccessModes)
	if opts.AllowReadWriteOncePod {
		expandedSupportedAccessModes.Insert(string(core.ReadWriteOncePod))
	}

	// ReadWriteOncePod is mutually exclusive with all other access modes.
	foundReadWriteOncePod, foundNonReadWriteOncePod := false, false
	for _, mode := range spec.AccessModes {
		if !expandedSupportedAccessModes.Has(string(mode)) {
			allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, expandedSupportedAccessModes.List()))
		}

		if mode == core.ReadWriteOncePod {
			foundReadWriteOncePod = true
		} else if supportedAccessModes.Has(string(mode)) {
			foundNonReadWriteOncePod = true
		}
	}
	if foundReadWriteOncePod && foundNonReadWriteOncePod {
		allErrs = append(allErrs, field.Forbidden(fldPath.Child("accessModes"), "may not use ReadWriteOncePod with other access modes"))
	}

	// A storage request is mandatory and must be a positive quantity.
	storageValue, ok := spec.Resources.Requests[core.ResourceStorage]
	if !ok {
		allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(core.ResourceStorage)), ""))
	} else if errs := ValidatePositiveQuantityValue(storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage))); len(errs) > 0 {
		allErrs = append(allErrs, errs...)
	} else {
		allErrs = append(allErrs, ValidateResourceQuantityValue(string(core.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...)
	}

	if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 {
		for _, msg := range ValidateClassName(*spec.StorageClassName, false) {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), *spec.StorageClassName, msg))
		}
	}
	if spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*spec.VolumeMode)) {
		allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *spec.VolumeMode, supportedVolumeModes.List()))
	}

	// dataSource and dataSourceRef are validated identically; when both are
	// set they must agree.
	if spec.DataSource != nil {
		allErrs = append(allErrs, validateDataSource(spec.DataSource, fldPath.Child("dataSource"))...)
	}
	if spec.DataSourceRef != nil {
		allErrs = append(allErrs, validateDataSource(spec.DataSourceRef, fldPath.Child("dataSourceRef"))...)
	}
	if spec.DataSource != nil && spec.DataSourceRef != nil {
		if !apiequality.Semantic.DeepEqual(spec.DataSource, spec.DataSourceRef) {
			// NOTE(review): the second argument to field.Invalid here is a
			// *field.Path rather than the offending value — confirm whether
			// spec.DataSource was intended.
			allErrs = append(allErrs, field.Invalid(fldPath, fldPath.Child("dataSource"),
				"must match dataSourceRef"))
		}
	}

	return allErrs
}
2134
// ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim
func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
	allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
	allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc, opts)...)
	// Work on deep copies: the clones are selectively mutated below to mask
	// fields whose change is allowed, then compared; the inputs themselves
	// must never be modified.
	newPvcClone := newPvc.DeepCopy()
	oldPvcClone := oldPvc.DeepCopy()

	// PVController needs to update PVC.Spec w/ VolumeName.
	// Claims are immutable in order to enforce quota, range limits, etc. without gaming the system.
	if len(oldPvc.Spec.VolumeName) == 0 {
		// volumeName changes are allowed once.
		oldPvcClone.Spec.VolumeName = newPvcClone.Spec.VolumeName // +k8s:verify-mutation:reason=clone
	}

	if validateStorageClassUpgrade(oldPvcClone.Annotations, newPvcClone.Annotations,
		oldPvcClone.Spec.StorageClassName, newPvcClone.Spec.StorageClassName) {
		// Moving the class from the beta annotation to spec.storageClassName is
		// permitted; normalize the clones so the immutability diff below does
		// not flag it.
		newPvcClone.Spec.StorageClassName = nil
		metav1.SetMetaDataAnnotation(&newPvcClone.ObjectMeta, core.BetaStorageClassAnnotation, oldPvcClone.Annotations[core.BetaStorageClassAnnotation])
	} else {
		// storageclass annotation should be immutable after creation
		// TODO: remove Beta when no longer needed
		allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...)
	}

	if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
		// lets make sure storage values are same.
		// Mask the storage request on the clone for bound claims so that a
		// (legal) expansion does not trip the spec-immutability diff below.
		if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.Resources.Requests != nil {
			newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] // +k8s:verify-mutation:reason=clone
		}

		oldSize := oldPvc.Spec.Resources.Requests["storage"]
		newSize := newPvc.Spec.Resources.Requests["storage"]

		// With expansion enabled, the spec may differ only in the storage
		// request (masked above), and only to grow it — never to shrink.
		if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
			specDiff := diff.ObjectDiff(newPvcClone.Spec, oldPvcClone.Spec)
			allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), fmt.Sprintf("spec is immutable after creation except resources.requests for bound claims\n%v", specDiff)))
		}
		if newSize.Cmp(oldSize) < 0 {
			allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "field can not be less than previous value"))
		}

	} else {
		// changes to Spec are not allowed, but updates to label/and some annotations are OK.
		// no-op updates pass validation.
		if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
			specDiff := diff.ObjectDiff(newPvcClone.Spec, oldPvcClone.Spec)
			allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), fmt.Sprintf("field is immutable after creation\n%v", specDiff)))
		}
	}

	// volumeMode may never change after creation.
	allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...)

	return allErrs
}
2189
2190// Provide an upgrade path from PVC with storage class specified in beta
2191// annotation to storage class specified in attribute. We allow update of
2192// StorageClassName only if following four conditions are met at the same time:
2193// 1. The old pvc's StorageClassAnnotation is set
2194// 2. The old pvc's StorageClassName is not set
2195// 3. The new pvc's StorageClassName is set and equal to the old value in annotation
2196// 4. If the new pvc's StorageClassAnnotation is set,it must be equal to the old pv/pvc's StorageClassAnnotation
2197func validateStorageClassUpgrade(oldAnnotations, newAnnotations map[string]string, oldScName, newScName *string) bool {
2198	oldSc, oldAnnotationExist := oldAnnotations[core.BetaStorageClassAnnotation]
2199	newScInAnnotation, newAnnotationExist := newAnnotations[core.BetaStorageClassAnnotation]
2200	return oldAnnotationExist /* condition 1 */ &&
2201		oldScName == nil /* condition 2*/ &&
2202		(newScName != nil && *newScName == oldSc) /* condition 3 */ &&
2203		(!newAnnotationExist || newScInAnnotation == oldSc) /* condition 4 */
2204}
2205
2206// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
2207func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList {
2208	allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
2209	if len(newPvc.ResourceVersion) == 0 {
2210		allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
2211	}
2212	if len(newPvc.Spec.AccessModes) == 0 {
2213		allErrs = append(allErrs, field.Required(field.NewPath("Spec", "accessModes"), ""))
2214	}
2215	capPath := field.NewPath("status", "capacity")
2216	for r, qty := range newPvc.Status.Capacity {
2217		allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
2218	}
2219	return allErrs
2220}
2221
// supportedPortProtocols is the set of values allowed in ContainerPort.Protocol.
var supportedPortProtocols = sets.NewString(string(core.ProtocolTCP), string(core.ProtocolUDP), string(core.ProtocolSCTP))
2223
2224func validateContainerPorts(ports []core.ContainerPort, fldPath *field.Path) field.ErrorList {
2225	allErrs := field.ErrorList{}
2226
2227	allNames := sets.String{}
2228	for i, port := range ports {
2229		idxPath := fldPath.Index(i)
2230		if len(port.Name) > 0 {
2231			if msgs := validation.IsValidPortName(port.Name); len(msgs) != 0 {
2232				for i = range msgs {
2233					allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, msgs[i]))
2234				}
2235			} else if allNames.Has(port.Name) {
2236				allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name))
2237			} else {
2238				allNames.Insert(port.Name)
2239			}
2240		}
2241		if port.ContainerPort == 0 {
2242			allErrs = append(allErrs, field.Required(idxPath.Child("containerPort"), ""))
2243		} else {
2244			for _, msg := range validation.IsValidPortNum(int(port.ContainerPort)) {
2245				allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, msg))
2246			}
2247		}
2248		if port.HostPort != 0 {
2249			for _, msg := range validation.IsValidPortNum(int(port.HostPort)) {
2250				allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, msg))
2251			}
2252		}
2253		if len(port.Protocol) == 0 {
2254			allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), ""))
2255		} else if !supportedPortProtocols.Has(string(port.Protocol)) {
2256			allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List()))
2257		}
2258	}
2259	return allErrs
2260}
2261
2262// ValidateEnv validates env vars
2263func ValidateEnv(vars []core.EnvVar, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
2264	allErrs := field.ErrorList{}
2265
2266	for i, ev := range vars {
2267		idxPath := fldPath.Index(i)
2268		if len(ev.Name) == 0 {
2269			allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
2270		} else {
2271			for _, msg := range validation.IsEnvVarName(ev.Name) {
2272				allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg))
2273			}
2274		}
2275		allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"), opts)...)
2276	}
2277	return allErrs
2278}
2279
// validEnvDownwardAPIFieldPathExpressions lists the exact pod fieldPath values
// that may be exposed to a container env var via a downward-API fieldRef.
var validEnvDownwardAPIFieldPathExpressions = sets.NewString(
	"metadata.name",
	"metadata.namespace",
	"metadata.uid",
	"spec.nodeName",
	"spec.serviceAccountName",
	"status.hostIP",
	"status.podIP",
	// status.podIPs is populated even if IPv6DualStack feature gate
	// is not enabled. This will work for single stack and dual stack.
	"status.podIPs")

// validContainerResourceFieldPathExpressions lists the exact resource names
// accepted in a resourceFieldRef.
var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage")

// NOTE: this is only valid with DownwardAPIHugePages enabled
// validContainerResourceFieldPathPrefixes is deliberately empty: no resource
// prefixes are accepted unless the hugepages variant below is selected.
var validContainerResourceFieldPathPrefixes = sets.NewString()
var validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages = sets.NewString(hugepagesRequestsPrefixDownwardAPI, hugepagesLimitsPrefixDownwardAPI)

// Prefixes identifying hugepages-<size> resources in resourceFieldRef paths.
const hugepagesRequestsPrefixDownwardAPI string = `requests.hugepages-`
const hugepagesLimitsPrefixDownwardAPI string = `limits.hugepages-`
2300
2301func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
2302	allErrs := field.ErrorList{}
2303
2304	if ev.ValueFrom == nil {
2305		return allErrs
2306	}
2307
2308	numSources := 0
2309
2310	if ev.ValueFrom.FieldRef != nil {
2311		numSources++
2312		allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validEnvDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...)
2313	}
2314	if ev.ValueFrom.ResourceFieldRef != nil {
2315		numSources++
2316		localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixes
2317		if opts.AllowDownwardAPIHugePages {
2318			localValidContainerResourceFieldPathPrefixes = validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
2319		}
2320		allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &localValidContainerResourceFieldPathPrefixes, fldPath.Child("resourceFieldRef"), false)...)
2321	}
2322	if ev.ValueFrom.ConfigMapKeyRef != nil {
2323		numSources++
2324		allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...)
2325	}
2326	if ev.ValueFrom.SecretKeyRef != nil {
2327		numSources++
2328		allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...)
2329	}
2330
2331	if numSources == 0 {
2332		allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `fieldRef`, `resourceFieldRef`, `configMapKeyRef` or `secretKeyRef`"))
2333	} else if len(ev.Value) != 0 {
2334		if numSources != 0 {
2335			allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty"))
2336		}
2337	} else if numSources > 1 {
2338		allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time"))
2339	}
2340
2341	return allErrs
2342}
2343
2344func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList {
2345	allErrs := field.ErrorList{}
2346
2347	if len(fs.APIVersion) == 0 {
2348		allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), ""))
2349		return allErrs
2350	}
2351	if len(fs.FieldPath) == 0 {
2352		allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), ""))
2353		return allErrs
2354	}
2355
2356	internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "")
2357	if err != nil {
2358		allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err)))
2359		return allErrs
2360	}
2361
2362	if path, subscript, ok := fieldpath.SplitMaybeSubscriptedPath(internalFieldPath); ok {
2363		switch path {
2364		case "metadata.annotations":
2365			for _, msg := range validation.IsQualifiedName(strings.ToLower(subscript)) {
2366				allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg))
2367			}
2368		case "metadata.labels":
2369			for _, msg := range validation.IsQualifiedName(subscript) {
2370				allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg))
2371			}
2372		default:
2373			allErrs = append(allErrs, field.Invalid(fldPath, path, "does not support subscript"))
2374		}
2375	} else if !expressions.Has(path) {
2376		allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), path, expressions.List()))
2377		return allErrs
2378	}
2379
2380	return allErrs
2381}
2382
2383func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.String, prefixes *sets.String, fldPath *field.Path, volume bool) field.ErrorList {
2384	allErrs := field.ErrorList{}
2385
2386	if volume && len(fs.ContainerName) == 0 {
2387		allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), ""))
2388	} else if len(fs.Resource) == 0 {
2389		allErrs = append(allErrs, field.Required(fldPath.Child("resource"), ""))
2390	} else if !expressions.Has(fs.Resource) {
2391		// check if the prefix is present
2392		foundPrefix := false
2393		if prefixes != nil {
2394			for _, prefix := range prefixes.List() {
2395				if strings.HasPrefix(fs.Resource, prefix) {
2396					foundPrefix = true
2397				}
2398			}
2399		}
2400		if !foundPrefix {
2401			allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List()))
2402		}
2403	}
2404	allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...)
2405	return allErrs
2406}
2407
2408func ValidateEnvFrom(vars []core.EnvFromSource, fldPath *field.Path) field.ErrorList {
2409	allErrs := field.ErrorList{}
2410	for i, ev := range vars {
2411		idxPath := fldPath.Index(i)
2412		if len(ev.Prefix) > 0 {
2413			for _, msg := range validation.IsEnvVarName(ev.Prefix) {
2414				allErrs = append(allErrs, field.Invalid(idxPath.Child("prefix"), ev.Prefix, msg))
2415			}
2416		}
2417
2418		numSources := 0
2419		if ev.ConfigMapRef != nil {
2420			numSources++
2421			allErrs = append(allErrs, validateConfigMapEnvSource(ev.ConfigMapRef, idxPath.Child("configMapRef"))...)
2422		}
2423		if ev.SecretRef != nil {
2424			numSources++
2425			allErrs = append(allErrs, validateSecretEnvSource(ev.SecretRef, idxPath.Child("secretRef"))...)
2426		}
2427
2428		if numSources == 0 {
2429			allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `configMapRef` or `secretRef`"))
2430		} else if numSources > 1 {
2431			allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time"))
2432		}
2433	}
2434	return allErrs
2435}
2436
2437func validateConfigMapEnvSource(configMapSource *core.ConfigMapEnvSource, fldPath *field.Path) field.ErrorList {
2438	allErrs := field.ErrorList{}
2439	if len(configMapSource.Name) == 0 {
2440		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
2441	} else {
2442		for _, msg := range ValidateConfigMapName(configMapSource.Name, true) {
2443			allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), configMapSource.Name, msg))
2444		}
2445	}
2446	return allErrs
2447}
2448
2449func validateSecretEnvSource(secretSource *core.SecretEnvSource, fldPath *field.Path) field.ErrorList {
2450	allErrs := field.ErrorList{}
2451	if len(secretSource.Name) == 0 {
2452		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
2453	} else {
2454		for _, msg := range ValidateSecretName(secretSource.Name, true) {
2455			allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), secretSource.Name, msg))
2456		}
2457	}
2458	return allErrs
2459}
2460
// Allowed divisor values for resourceFieldRef, per resource family. CPU
// accepts only 1m and 1; memory, hugepages, and ephemeral-storage accept 1
// plus the decimal (k/M/G/...) and binary (Ki/Mi/Gi/...) suffixed units.
var validContainerResourceDivisorForCPU = sets.NewString("1m", "1")
var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
var validContainerResourceDivisorForHugePages = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
var validContainerResourceDivisorForEphemeralStorage = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
2465
2466func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList {
2467	allErrs := field.ErrorList{}
2468	unsetDivisor := resource.Quantity{}
2469	if unsetDivisor.Cmp(divisor) == 0 {
2470		return allErrs
2471	}
2472	switch rName {
2473	case "limits.cpu", "requests.cpu":
2474		if !validContainerResourceDivisorForCPU.Has(divisor.String()) {
2475			allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1m and 1 are supported with the cpu resource"))
2476		}
2477	case "limits.memory", "requests.memory":
2478		if !validContainerResourceDivisorForMemory.Has(divisor.String()) {
2479			allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource"))
2480		}
2481	case "limits.ephemeral-storage", "requests.ephemeral-storage":
2482		if !validContainerResourceDivisorForEphemeralStorage.Has(divisor.String()) {
2483			allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the local ephemeral storage resource"))
2484		}
2485	}
2486	if strings.HasPrefix(rName, hugepagesRequestsPrefixDownwardAPI) || strings.HasPrefix(rName, hugepagesLimitsPrefixDownwardAPI) {
2487		if !validContainerResourceDivisorForHugePages.Has(divisor.String()) {
2488			allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the hugepages resource"))
2489		}
2490	}
2491	return allErrs
2492}
2493
2494func validateConfigMapKeySelector(s *core.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList {
2495	allErrs := field.ErrorList{}
2496
2497	nameFn := ValidateNameFunc(ValidateSecretName)
2498	for _, msg := range nameFn(s.Name, false) {
2499		allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg))
2500	}
2501	if len(s.Key) == 0 {
2502		allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
2503	} else {
2504		for _, msg := range validation.IsConfigMapKey(s.Key) {
2505			allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
2506		}
2507	}
2508
2509	return allErrs
2510}
2511
2512func validateSecretKeySelector(s *core.SecretKeySelector, fldPath *field.Path) field.ErrorList {
2513	allErrs := field.ErrorList{}
2514
2515	nameFn := ValidateNameFunc(ValidateSecretName)
2516	for _, msg := range nameFn(s.Name, false) {
2517		allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg))
2518	}
2519	if len(s.Key) == 0 {
2520		allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
2521	} else {
2522		for _, msg := range validation.IsConfigMapKey(s.Key) {
2523			allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
2524		}
2525	}
2526
2527	return allErrs
2528}
2529
2530func GetVolumeMountMap(mounts []core.VolumeMount) map[string]string {
2531	volmounts := make(map[string]string)
2532
2533	for _, mnt := range mounts {
2534		volmounts[mnt.Name] = mnt.MountPath
2535	}
2536
2537	return volmounts
2538}
2539
2540func GetVolumeDeviceMap(devices []core.VolumeDevice) map[string]string {
2541	volDevices := make(map[string]string)
2542
2543	for _, dev := range devices {
2544		volDevices[dev.Name] = dev.DevicePath
2545	}
2546
2547	return volDevices
2548}
2549
2550func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]string, volumes map[string]core.VolumeSource, container *core.Container, fldPath *field.Path) field.ErrorList {
2551	allErrs := field.ErrorList{}
2552	mountpoints := sets.NewString()
2553
2554	for i, mnt := range mounts {
2555		idxPath := fldPath.Index(i)
2556		if len(mnt.Name) == 0 {
2557			allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
2558		}
2559		if !IsMatchedVolume(mnt.Name, volumes) {
2560			allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name))
2561		}
2562		if len(mnt.MountPath) == 0 {
2563			allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), ""))
2564		}
2565		if mountpoints.Has(mnt.MountPath) {
2566			allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique"))
2567		}
2568		mountpoints.Insert(mnt.MountPath)
2569
2570		// check for overlap with VolumeDevice
2571		if mountNameAlreadyExists(mnt.Name, voldevices) {
2572			allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), mnt.Name, "must not already exist in volumeDevices"))
2573		}
2574		if mountPathAlreadyExists(mnt.MountPath, voldevices) {
2575			allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not already exist as a path in volumeDevices"))
2576		}
2577
2578		if len(mnt.SubPath) > 0 {
2579			allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...)
2580		}
2581
2582		if len(mnt.SubPathExpr) > 0 {
2583			if len(mnt.SubPath) > 0 {
2584				allErrs = append(allErrs, field.Invalid(idxPath.Child("subPathExpr"), mnt.SubPathExpr, "subPathExpr and subPath are mutually exclusive"))
2585			}
2586
2587			allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPathExpr, fldPath.Child("subPathExpr"))...)
2588		}
2589
2590		if mnt.MountPropagation != nil {
2591			allErrs = append(allErrs, validateMountPropagation(mnt.MountPropagation, container, fldPath.Child("mountPropagation"))...)
2592		}
2593	}
2594	return allErrs
2595}
2596
// ValidateVolumeDevices validates a container's volumeDevices: names must be
// unique, must refer to a declared volume backed by a PersistentVolumeClaim,
// and device paths must be unique, backstep-free, and must not collide with
// the container's volumeMounts.
func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]string, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	devicepath := sets.NewString()
	devicename := sets.NewString()

	for i, dev := range devices {
		idxPath := fldPath.Index(i)
		devName := dev.Name
		devPath := dev.DevicePath
		// didMatch: a declared volume with this name exists;
		// isPVC: that volume is a PersistentVolumeClaim source.
		didMatch, isPVC := isMatchedDevice(devName, volumes)
		if len(devName) == 0 {
			allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
		}
		if devicename.Has(devName) {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique"))
		}
		// Must be PersistentVolumeClaim volume source
		if didMatch && !isPVC {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim for block mode"))
		}
		if !didMatch {
			allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName))
		}
		if len(devPath) == 0 {
			allErrs = append(allErrs, field.Required(idxPath.Child("devicePath"), ""))
		}
		if devicepath.Has(devPath) {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must be unique"))
		}
		// NOTE(review): a path containing backsteps is reported but NOT
		// inserted into devicepath, so an identical later path is re-reported
		// as a backstep error rather than as a duplicate.
		if len(devPath) > 0 && len(validatePathNoBacksteps(devPath, fldPath.Child("devicePath"))) > 0 {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "can not contain backsteps ('..')"))
		} else {
			devicepath.Insert(devPath)
		}
		// check for overlap with VolumeMount
		if deviceNameAlreadyExists(devName, volmounts) {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must not already exist in volumeMounts"))
		}
		if devicePathAlreadyExists(devPath, volmounts) {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must not already exist as a path in volumeMounts"))
		}
		// Record the name only when non-empty; empty names already errored above.
		if len(devName) > 0 {
			devicename.Insert(devName)
		}
	}
	return allErrs
}
2644
2645func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
2646	allErrs := field.ErrorList{}
2647
2648	if probe == nil {
2649		return allErrs
2650	}
2651	allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...)
2652
2653	allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...)
2654	allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...)
2655	allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...)
2656	allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...)
2657	allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...)
2658	if probe.TerminationGracePeriodSeconds != nil && *probe.TerminationGracePeriodSeconds <= 0 {
2659		allErrs = append(allErrs, field.Invalid(fldPath.Child("terminationGracePeriodSeconds"), *probe.TerminationGracePeriodSeconds, "must be greater than 0"))
2660	}
2661	return allErrs
2662}
2663
2664func validateClientIPAffinityConfig(config *core.SessionAffinityConfig, fldPath *field.Path) field.ErrorList {
2665	allErrs := field.ErrorList{}
2666	if config == nil {
2667		allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP)))
2668		return allErrs
2669	}
2670	if config.ClientIP == nil {
2671		allErrs = append(allErrs, field.Required(fldPath.Child("clientIP"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP)))
2672		return allErrs
2673	}
2674	if config.ClientIP.TimeoutSeconds == nil {
2675		allErrs = append(allErrs, field.Required(fldPath.Child("clientIP").Child("timeoutSeconds"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP)))
2676		return allErrs
2677	}
2678	allErrs = append(allErrs, validateAffinityTimeout(config.ClientIP.TimeoutSeconds, fldPath.Child("clientIP").Child("timeoutSeconds"))...)
2679
2680	return allErrs
2681}
2682
2683func validateAffinityTimeout(timeout *int32, fldPath *field.Path) field.ErrorList {
2684	allErrs := field.ErrorList{}
2685	if *timeout <= 0 || *timeout > core.MaxClientIPServiceAffinitySeconds {
2686		allErrs = append(allErrs, field.Invalid(fldPath, timeout, fmt.Sprintf("must be greater than 0 and less than %d", core.MaxClientIPServiceAffinitySeconds)))
2687	}
2688	return allErrs
2689}
2690
2691// AccumulateUniqueHostPorts extracts each HostPort of each Container,
2692// accumulating the results and returning an error if any ports conflict.
2693func AccumulateUniqueHostPorts(containers []core.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList {
2694	allErrs := field.ErrorList{}
2695
2696	for ci, ctr := range containers {
2697		idxPath := fldPath.Index(ci)
2698		portsPath := idxPath.Child("ports")
2699		for pi := range ctr.Ports {
2700			idxPath := portsPath.Index(pi)
2701			port := ctr.Ports[pi].HostPort
2702			if port == 0 {
2703				continue
2704			}
2705			str := fmt.Sprintf("%s/%s/%d", ctr.Ports[pi].Protocol, ctr.Ports[pi].HostIP, port)
2706			if accumulator.Has(str) {
2707				allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str))
2708			} else {
2709				accumulator.Insert(str)
2710			}
2711		}
2712	}
2713	return allErrs
2714}
2715
2716// checkHostPortConflicts checks for colliding Port.HostPort values across
2717// a slice of containers.
2718func checkHostPortConflicts(containers []core.Container, fldPath *field.Path) field.ErrorList {
2719	allPorts := sets.String{}
2720	return AccumulateUniqueHostPorts(containers, &allPorts, fldPath)
2721}
2722
2723func validateExecAction(exec *core.ExecAction, fldPath *field.Path) field.ErrorList {
2724	allErrors := field.ErrorList{}
2725	if len(exec.Command) == 0 {
2726		allErrors = append(allErrors, field.Required(fldPath.Child("command"), ""))
2727	}
2728	return allErrors
2729}
2730
// supportedHTTPSchemes is the set of URI schemes allowed in an HTTPGetAction.
var supportedHTTPSchemes = sets.NewString(string(core.URISchemeHTTP), string(core.URISchemeHTTPS))
2732
2733func validateHTTPGetAction(http *core.HTTPGetAction, fldPath *field.Path) field.ErrorList {
2734	allErrors := field.ErrorList{}
2735	if len(http.Path) == 0 {
2736		allErrors = append(allErrors, field.Required(fldPath.Child("path"), ""))
2737	}
2738	allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...)
2739	if !supportedHTTPSchemes.Has(string(http.Scheme)) {
2740		allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, supportedHTTPSchemes.List()))
2741	}
2742	for _, header := range http.HTTPHeaders {
2743		for _, msg := range validation.IsHTTPHeaderName(header.Name) {
2744			allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, msg))
2745		}
2746	}
2747	return allErrors
2748}
2749
2750func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.ErrorList {
2751	allErrs := field.ErrorList{}
2752	if port.Type == intstr.Int {
2753		for _, msg := range validation.IsValidPortNum(port.IntValue()) {
2754			allErrs = append(allErrs, field.Invalid(fldPath, port.IntValue(), msg))
2755		}
2756	} else if port.Type == intstr.String {
2757		for _, msg := range validation.IsValidPortName(port.StrVal) {
2758			allErrs = append(allErrs, field.Invalid(fldPath, port.StrVal, msg))
2759		}
2760	} else {
2761		allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("unknown type: %v", port.Type)))
2762	}
2763	return allErrs
2764}
2765
// validateTCPSocketAction validates a TCP handler; only the port needs checking.
func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) field.ErrorList {
	return ValidatePortNumOrName(tcp.Port, fldPath.Child("port"))
}
2769
2770func validateHandler(handler *core.Handler, fldPath *field.Path) field.ErrorList {
2771	numHandlers := 0
2772	allErrors := field.ErrorList{}
2773	if handler.Exec != nil {
2774		if numHandlers > 0 {
2775			allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type"))
2776		} else {
2777			numHandlers++
2778			allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...)
2779		}
2780	}
2781	if handler.HTTPGet != nil {
2782		if numHandlers > 0 {
2783			allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type"))
2784		} else {
2785			numHandlers++
2786			allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...)
2787		}
2788	}
2789	if handler.TCPSocket != nil {
2790		if numHandlers > 0 {
2791			allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type"))
2792		} else {
2793			numHandlers++
2794			allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...)
2795		}
2796	}
2797	if numHandlers == 0 {
2798		allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type"))
2799	}
2800	return allErrors
2801}
2802
2803func validateLifecycle(lifecycle *core.Lifecycle, fldPath *field.Path) field.ErrorList {
2804	allErrs := field.ErrorList{}
2805	if lifecycle.PostStart != nil {
2806		allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...)
2807	}
2808	if lifecycle.PreStop != nil {
2809		allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...)
2810	}
2811	return allErrs
2812}
2813
// supportedPullPolicies is the set of allowed image pull policy values.
var supportedPullPolicies = sets.NewString(string(core.PullAlways), string(core.PullIfNotPresent), string(core.PullNever))
2815
2816func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.ErrorList {
2817	allErrors := field.ErrorList{}
2818
2819	switch policy {
2820	case core.PullAlways, core.PullIfNotPresent, core.PullNever:
2821		break
2822	case "":
2823		allErrors = append(allErrors, field.Required(fldPath, ""))
2824	default:
2825		allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List()))
2826	}
2827
2828	return allErrors
2829}
2830
// validateEphemeralContainers validates the pod's ephemeral containers:
// names must be unique across all containers in the pod, targetContainerName
// (when set) must refer to an existing container, and only an allow-listed
// subset of container fields may be used.
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
	allErrs := field.ErrorList{}

	// Nothing to validate when no ephemeral containers are present.
	if len(ephemeralContainers) == 0 {
		return allErrs
	}

	// Collect all regular and init container names for the uniqueness and
	// targetContainerName checks below.
	allNames := sets.String{}
	for _, c := range containers {
		allNames.Insert(c.Name)
	}
	for _, c := range initContainers {
		allNames.Insert(c.Name)
	}

	for i, ec := range ephemeralContainers {
		idxPath := fldPath.Index(i)

		// targetContainerName is optional, but when set it must name a
		// regular or init container (ephemeral containers are not targets).
		if ec.TargetContainerName != "" && !allNames.Has(ec.TargetContainerName) {
			allErrs = append(allErrs, field.NotFound(idxPath.Child("targetContainerName"), ec.TargetContainerName))
		}

		if ec.Name == "" {
			allErrs = append(allErrs, field.Required(idxPath, "ephemeralContainer requires a name"))
			continue
		}

		// Using validateContainers() here isn't ideal because it adds an index to the error message that
		// doesn't really exist for EphemeralContainers (i.e. ephemeralContainers[0].spec[0].name instead
		// of ephemeralContainers[0].spec.name)
		// TODO(verb): factor a validateContainer() out of validateContainers() to be used here
		c := core.Container(ec.EphemeralContainerCommon)
		allErrs = append(allErrs, validateContainers([]core.Container{c}, false, volumes, idxPath, opts)...)
		// EphemeralContainers don't require the backwards-compatibility distinction between pod/podTemplate validation
		allErrs = append(allErrs, validateContainersOnlyForPod([]core.Container{c}, idxPath)...)

		// Ephemeral container names must not collide with each other or with
		// any regular/init container name.
		if allNames.Has(ec.Name) {
			allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ec.Name))
		} else {
			allNames.Insert(ec.Name)
		}

		// Ephemeral Containers should not be relied upon for fundamental pod services, so fields such as
		// Lifecycle, probes, resources and ports should be disallowed. This is implemented as a list
		// of allowed fields so that new fields will be given consideration prior to inclusion in Ephemeral Containers.
		allErrs = append(allErrs, validateFieldAllowList(ec.EphemeralContainerCommon, allowedEphemeralContainerFields, "cannot be set for an Ephemeral Container", idxPath)...)
	}

	return allErrs
}
2881
// validateFieldAllowList checks that only allowed fields are set.
// The value must be a struct (not a pointer to a struct!).
// Any field not named in allowedFields that differs from its zero value is
// reported as Forbidden with errorText, using the field's lowerCamelCase
// (JSON-style) name in the error path.
func validateFieldAllowList(value interface{}, allowedFields map[string]bool, errorText string, fldPath *field.Path) field.ErrorList {
	var allErrs field.ErrorList

	reflectType, reflectValue := reflect.TypeOf(value), reflect.ValueOf(value)
	for i := 0; i < reflectType.NumField(); i++ {
		f := reflectType.Field(i)
		if allowedFields[f.Name] {
			continue
		}

		// Compare the value of this field to its zero value to determine if it has been set
		if !reflect.DeepEqual(reflectValue.Field(i).Interface(), reflect.Zero(f.Type).Interface()) {
			// Lowercase the first rune of the Go field name to approximate its JSON name.
			r, n := utf8.DecodeRuneInString(f.Name)
			lcName := string(unicode.ToLower(r)) + f.Name[n:]
			allErrs = append(allErrs, field.Forbidden(fldPath.Child(lcName), errorText))
		}
	}

	return allErrs
}
2904
2905func validateInitContainers(containers, otherContainers []core.Container, deviceVolumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
2906	var allErrs field.ErrorList
2907	if len(containers) > 0 {
2908		allErrs = append(allErrs, validateContainers(containers, true, deviceVolumes, fldPath, opts)...)
2909	}
2910
2911	allNames := sets.String{}
2912	for _, ctr := range otherContainers {
2913		allNames.Insert(ctr.Name)
2914	}
2915	for i, ctr := range containers {
2916		idxPath := fldPath.Index(i)
2917		if allNames.Has(ctr.Name) {
2918			allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name))
2919		}
2920		if len(ctr.Name) > 0 {
2921			allNames.Insert(ctr.Name)
2922		}
2923		if ctr.Lifecycle != nil {
2924			allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers"))
2925		}
2926		if ctr.LivenessProbe != nil {
2927			allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers"))
2928		}
2929		if ctr.ReadinessProbe != nil {
2930			allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers"))
2931		}
2932		if ctr.StartupProbe != nil {
2933			allErrs = append(allErrs, field.Invalid(idxPath.Child("startupProbe"), ctr.StartupProbe, "must not be set for init containers"))
2934		}
2935	}
2936	return allErrs
2937}
2938
2939func validateContainers(containers []core.Container, isInitContainers bool, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
2940	allErrs := field.ErrorList{}
2941
2942	if len(containers) == 0 {
2943		return append(allErrs, field.Required(fldPath, ""))
2944	}
2945
2946	allNames := sets.String{}
2947	for i, ctr := range containers {
2948		idxPath := fldPath.Index(i)
2949		namePath := idxPath.Child("name")
2950		volMounts := GetVolumeMountMap(ctr.VolumeMounts)
2951		volDevices := GetVolumeDeviceMap(ctr.VolumeDevices)
2952
2953		if len(ctr.Name) == 0 {
2954			allErrs = append(allErrs, field.Required(namePath, ""))
2955		} else {
2956			allErrs = append(allErrs, ValidateDNS1123Label(ctr.Name, namePath)...)
2957		}
2958		if allNames.Has(ctr.Name) {
2959			allErrs = append(allErrs, field.Duplicate(namePath, ctr.Name))
2960		} else {
2961			allNames.Insert(ctr.Name)
2962		}
2963		// TODO: do not validate leading and trailing whitespace to preserve backward compatibility.
2964		// for example: https://github.com/openshift/origin/issues/14659 image = " " is special token in pod template
2965		// others may have done similar
2966		if len(ctr.Image) == 0 {
2967			allErrs = append(allErrs, field.Required(idxPath.Child("image"), ""))
2968		}
2969		if ctr.Lifecycle != nil {
2970			allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...)
2971		}
2972		allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...)
2973		// Readiness-specific validation
2974		if ctr.ReadinessProbe != nil && ctr.ReadinessProbe.TerminationGracePeriodSeconds != nil {
2975			allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe", "terminationGracePeriodSeconds"), ctr.ReadinessProbe.TerminationGracePeriodSeconds, "must not be set for readinessProbes"))
2976		}
2977		allErrs = append(allErrs, validateProbe(ctr.StartupProbe, idxPath.Child("startupProbe"))...)
2978		// Liveness-specific validation
2979		if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 {
2980			allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1"))
2981		}
2982		allErrs = append(allErrs, validateProbe(ctr.StartupProbe, idxPath.Child("startupProbe"))...)
2983		// Startup-specific validation
2984		if ctr.StartupProbe != nil && ctr.StartupProbe.SuccessThreshold != 1 {
2985			allErrs = append(allErrs, field.Invalid(idxPath.Child("startupProbe", "successThreshold"), ctr.StartupProbe.SuccessThreshold, "must be 1"))
2986		}
2987
2988		switch ctr.TerminationMessagePolicy {
2989		case core.TerminationMessageReadFile, core.TerminationMessageFallbackToLogsOnError:
2990		case "":
2991			allErrs = append(allErrs, field.Required(idxPath.Child("terminationMessagePolicy"), "must be 'File' or 'FallbackToLogsOnError'"))
2992		default:
2993			allErrs = append(allErrs, field.Invalid(idxPath.Child("terminationMessagePolicy"), ctr.TerminationMessagePolicy, "must be 'File' or 'FallbackToLogsOnError'"))
2994		}
2995
2996		allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...)
2997		allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...)
2998		allErrs = append(allErrs, ValidateEnv(ctr.Env, idxPath.Child("env"), opts)...)
2999		allErrs = append(allErrs, ValidateEnvFrom(ctr.EnvFrom, idxPath.Child("envFrom"))...)
3000		allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, &ctr, idxPath.Child("volumeMounts"))...)
3001		allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, idxPath.Child("volumeDevices"))...)
3002		allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...)
3003		allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"), opts)...)
3004		allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...)
3005	}
3006
3007	if isInitContainers {
3008		// check initContainers one by one since they are running in sequential order.
3009		for _, initContainer := range containers {
3010			allErrs = append(allErrs, checkHostPortConflicts([]core.Container{initContainer}, fldPath)...)
3011		}
3012	} else {
3013		// Check for colliding ports across all containers.
3014		allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...)
3015	}
3016
3017	return allErrs
3018}
3019
3020func validateRestartPolicy(restartPolicy *core.RestartPolicy, fldPath *field.Path) field.ErrorList {
3021	allErrors := field.ErrorList{}
3022	switch *restartPolicy {
3023	case core.RestartPolicyAlways, core.RestartPolicyOnFailure, core.RestartPolicyNever:
3024		break
3025	case "":
3026		allErrors = append(allErrors, field.Required(fldPath, ""))
3027	default:
3028		validValues := []string{string(core.RestartPolicyAlways), string(core.RestartPolicyOnFailure), string(core.RestartPolicyNever)}
3029		allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues))
3030	}
3031
3032	return allErrors
3033}
3034
3035func ValidatePreemptionPolicy(preemptionPolicy *core.PreemptionPolicy, fldPath *field.Path) field.ErrorList {
3036	allErrors := field.ErrorList{}
3037	switch *preemptionPolicy {
3038	case core.PreemptLowerPriority, core.PreemptNever:
3039	case "":
3040		allErrors = append(allErrors, field.Required(fldPath, ""))
3041	default:
3042		validValues := []string{string(core.PreemptLowerPriority), string(core.PreemptNever)}
3043		allErrors = append(allErrors, field.NotSupported(fldPath, preemptionPolicy, validValues))
3044	}
3045	return allErrors
3046}
3047
3048func validateDNSPolicy(dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList {
3049	allErrors := field.ErrorList{}
3050	switch *dnsPolicy {
3051	case core.DNSClusterFirstWithHostNet, core.DNSClusterFirst, core.DNSDefault, core.DNSNone:
3052	case "":
3053		allErrors = append(allErrors, field.Required(fldPath, ""))
3054	default:
3055		validValues := []string{string(core.DNSClusterFirstWithHostNet), string(core.DNSClusterFirst), string(core.DNSDefault), string(core.DNSNone)}
3056		allErrors = append(allErrors, field.NotSupported(fldPath, dnsPolicy, validValues))
3057	}
3058	return allErrors
3059}
3060
3061var validFSGroupChangePolicies = sets.NewString(string(core.FSGroupChangeOnRootMismatch), string(core.FSGroupChangeAlways))
3062
3063func validateFSGroupChangePolicy(fsGroupPolicy *core.PodFSGroupChangePolicy, fldPath *field.Path) field.ErrorList {
3064	allErrors := field.ErrorList{}
3065	if !validFSGroupChangePolicies.Has(string(*fsGroupPolicy)) {
3066		allErrors = append(allErrors, field.NotSupported(fldPath, fsGroupPolicy, validFSGroupChangePolicies.List()))
3067	}
3068	return allErrors
3069}
3070
const (
	// Limits on various DNS parameters. These are derived from
	// restrictions in Linux libc name resolution handling.
	// Max number of DNS name servers.
	MaxDNSNameservers = 3
	// Expanded max number of domains in the search path list, applied when
	// PodValidationOptions.AllowExpandedDNSConfig is enabled.
	MaxDNSSearchPathsExpanded = 32
	// Expanded max number of characters in the search path, applied when
	// PodValidationOptions.AllowExpandedDNSConfig is enabled.
	MaxDNSSearchListCharsExpanded = 2048
	// Max number of domains in the search path list (legacy limit).
	MaxDNSSearchPathsLegacy = 6
	// Max number of characters in the search path list (legacy limit).
	MaxDNSSearchListCharsLegacy = 256
)
3085
3086func validateReadinessGates(readinessGates []core.PodReadinessGate, fldPath *field.Path) field.ErrorList {
3087	allErrs := field.ErrorList{}
3088	for i, value := range readinessGates {
3089		for _, msg := range validation.IsQualifiedName(string(value.ConditionType)) {
3090			allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("conditionType"), string(value.ConditionType), msg))
3091		}
3092	}
3093	return allErrs
3094}
3095
// validatePodDNSConfig validates the pod's DNSConfig against its DNSPolicy.
// When the policy is DNSNone, a dnsConfig with at least one nameserver is
// mandatory; otherwise dnsConfig is optional, but its nameservers, searches,
// and options are bounds- and format-checked when present.
func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolicy, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
	allErrs := field.ErrorList{}

	// Validate DNSNone case. Must provide at least one DNS name server.
	if dnsPolicy != nil && *dnsPolicy == core.DNSNone {
		if dnsConfig == nil {
			return append(allErrs, field.Required(fldPath, fmt.Sprintf("must provide `dnsConfig` when `dnsPolicy` is %s", core.DNSNone)))
		}
		if len(dnsConfig.Nameservers) == 0 {
			return append(allErrs, field.Required(fldPath.Child("nameservers"), fmt.Sprintf("must provide at least one DNS nameserver when `dnsPolicy` is %s", core.DNSNone)))
		}
	}

	if dnsConfig != nil {
		// Validate nameservers: bounded count, each must parse as an IP.
		if len(dnsConfig.Nameservers) > MaxDNSNameservers {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers)))
		}
		for i, ns := range dnsConfig.Nameservers {
			if ip := net.ParseIP(ns); ip == nil {
				allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers").Index(i), ns, "must be valid IP address"))
			}
		}
		// Validate searches. Legacy limits apply unless expanded DNS config is allowed.
		maxDNSSearchPaths, maxDNSSearchListChars := MaxDNSSearchPathsLegacy, MaxDNSSearchListCharsLegacy
		if opts.AllowExpandedDNSConfig {
			maxDNSSearchPaths, maxDNSSearchListChars = MaxDNSSearchPathsExpanded, MaxDNSSearchListCharsExpanded
		}
		if len(dnsConfig.Searches) > maxDNSSearchPaths {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", maxDNSSearchPaths)))
		}
		// Include the space between search paths.
		if len(strings.Join(dnsConfig.Searches, " ")) > maxDNSSearchListChars {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v characters (including spaces) in the search list", maxDNSSearchListChars)))
		}
		for i, search := range dnsConfig.Searches {
			// it is fine to have a trailing dot
			if strings.HasSuffix(search, ".") {
				search = search[0 : len(search)-1]
			}
			allErrs = append(allErrs, ValidateDNS1123Subdomain(search, fldPath.Child("searches").Index(i))...)
		}
		// Validate options: each option must at least have a name.
		for i, option := range dnsConfig.Options {
			if len(option.Name) == 0 {
				allErrs = append(allErrs, field.Required(fldPath.Child("options").Index(i), "must not be empty"))
			}
		}
	}
	return allErrs
}
3147
3148func validateHostNetwork(hostNetwork bool, containers []core.Container, fldPath *field.Path) field.ErrorList {
3149	allErrors := field.ErrorList{}
3150	if hostNetwork {
3151		for i, container := range containers {
3152			portsPath := fldPath.Index(i).Child("ports")
3153			for i, port := range container.Ports {
3154				idxPath := portsPath.Index(i)
3155				if port.HostPort != port.ContainerPort {
3156					allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true"))
3157				}
3158			}
3159		}
3160	}
3161	return allErrors
3162}
3163
3164// validateImagePullSecrets checks to make sure the pull secrets are well
3165// formed.  Right now, we only expect name to be set (it's the only field).  If
3166// this ever changes and someone decides to set those fields, we'd like to
3167// know.
3168func validateImagePullSecrets(imagePullSecrets []core.LocalObjectReference, fldPath *field.Path) field.ErrorList {
3169	allErrors := field.ErrorList{}
3170	for i, currPullSecret := range imagePullSecrets {
3171		idxPath := fldPath.Index(i)
3172		strippedRef := core.LocalObjectReference{Name: currPullSecret.Name}
3173		if !reflect.DeepEqual(strippedRef, currPullSecret) {
3174			allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set"))
3175		}
3176	}
3177	return allErrors
3178}
3179
3180// validateAffinity checks if given affinities are valid
3181func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorList {
3182	allErrs := field.ErrorList{}
3183
3184	if affinity != nil {
3185		if affinity.NodeAffinity != nil {
3186			allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, fldPath.Child("nodeAffinity"))...)
3187		}
3188		if affinity.PodAffinity != nil {
3189			allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...)
3190		}
3191		if affinity.PodAntiAffinity != nil {
3192			allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, fldPath.Child("podAntiAffinity"))...)
3193		}
3194	}
3195
3196	return allErrs
3197}
3198
3199func validateTaintEffect(effect *core.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList {
3200	if !allowEmpty && len(*effect) == 0 {
3201		return field.ErrorList{field.Required(fldPath, "")}
3202	}
3203
3204	allErrors := field.ErrorList{}
3205	switch *effect {
3206	// TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit.
3207	case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoExecute:
3208		// case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit, core.TaintEffectNoExecute:
3209	default:
3210		validValues := []string{
3211			string(core.TaintEffectNoSchedule),
3212			string(core.TaintEffectPreferNoSchedule),
3213			string(core.TaintEffectNoExecute),
3214			// TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit.
3215			// string(core.TaintEffectNoScheduleNoAdmit),
3216		}
3217		allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues))
3218	}
3219	return allErrors
3220}
3221
// validateOnlyAddedTolerations validates updated pod tolerations.
// Every old toleration must still be present in the new list (matching on all
// fields except tolerationSeconds, which is allowed to change); new
// tolerations may be appended freely and are then validated as a whole.
func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldTolerations []core.Toleration, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	for _, old := range oldTolerations {
		found := false
		oldTolerationClone := old.DeepCopy()
		for _, newToleration := range newTolerations {
			// assign to our clone before doing a deep equal so we can allow tolerationseconds to change.
			oldTolerationClone.TolerationSeconds = newToleration.TolerationSeconds // +k8s:verify-mutation:reason=clone
			if reflect.DeepEqual(*oldTolerationClone, newToleration) {
				found = true
				break
			}
		}
		// A missing old toleration means it was modified or removed; report once and stop.
		if !found {
			allErrs = append(allErrs, field.Forbidden(fldPath, "existing toleration can not be modified except its tolerationSeconds"))
			return allErrs
		}
	}

	// The surviving (and any newly added) tolerations must themselves be valid.
	allErrs = append(allErrs, ValidateTolerations(newTolerations, fldPath)...)
	return allErrs
}
3245
3246func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList {
3247	allErrs := field.ErrorList{}
3248	for _, hostAlias := range hostAliases {
3249		if ip := net.ParseIP(hostAlias.IP); ip == nil {
3250			allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), hostAlias.IP, "must be valid IP address"))
3251		}
3252		for _, hostname := range hostAlias.Hostnames {
3253			allErrs = append(allErrs, ValidateDNS1123Subdomain(hostname, fldPath.Child("hostnames"))...)
3254		}
3255	}
3256	return allErrs
3257}
3258
// ValidateTolerations tests if given tolerations have valid data.
// For each toleration: the key (if set) must be a valid label name; an empty
// key requires the Exists operator; tolerationSeconds requires the NoExecute
// effect; the operator constrains the value; and a non-empty effect must be a
// supported taint effect.
func ValidateTolerations(tolerations []core.Toleration, fldPath *field.Path) field.ErrorList {
	allErrors := field.ErrorList{}
	for i, toleration := range tolerations {
		idxPath := fldPath.Index(i)
		// validate the toleration key
		if len(toleration.Key) > 0 {
			allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...)
		}

		// empty toleration key with Exists operator and empty value means match all taints
		if len(toleration.Key) == 0 && toleration.Operator != core.TolerationOpExists {
			allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator,
				"operator must be Exists when `key` is empty, which means \"match all values and all keys\""))
		}

		if toleration.TolerationSeconds != nil && toleration.Effect != core.TaintEffectNoExecute {
			allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect,
				"effect must be 'NoExecute' when `tolerationSeconds` is set"))
		}

		// validate toleration operator and value
		// NOTE(review): value errors below are reported under the "operator"
		// child path rather than "value"; kept as-is for compatibility with
		// existing error consumers.
		switch toleration.Operator {
		// empty operator means Equal
		case core.TolerationOpEqual, "":
			if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 {
				allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";")))
			}
		case core.TolerationOpExists:
			if len(toleration.Value) > 0 {
				allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'"))
			}
		default:
			validValues := []string{string(core.TolerationOpEqual), string(core.TolerationOpExists)}
			allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues))
		}

		// validate toleration effect, empty toleration effect means match all taint effects
		if len(toleration.Effect) > 0 {
			allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...)
		}
	}
	return allErrors
}
3303
3304// validateContainersOnlyForPod does additional validation for containers on a pod versus a pod template
3305// it only does additive validation of fields not covered in validateContainers
3306func validateContainersOnlyForPod(containers []core.Container, fldPath *field.Path) field.ErrorList {
3307	allErrs := field.ErrorList{}
3308	for i, ctr := range containers {
3309		idxPath := fldPath.Index(i)
3310		if len(ctr.Image) != len(strings.TrimSpace(ctr.Image)) {
3311			allErrs = append(allErrs, field.Invalid(idxPath.Child("image"), ctr.Image, "must not have leading or trailing whitespace"))
3312		}
3313	}
3314	return allErrs
3315}
3316
// PodValidationOptions contains the different settings for pod validation
type PodValidationOptions struct {
	// Allow pod spec to use hugepages in downward API
	AllowDownwardAPIHugePages bool
	// Allow invalid pod-deletion-cost annotation value for backward compatibility.
	AllowInvalidPodDeletionCost bool
	// Allow pod spec to use non-integer multiple of huge page unit size
	AllowIndivisibleHugePagesValues bool
	// Allow hostProcess field to be set in windows security context
	AllowWindowsHostProcessField bool
	// Allow more DNSSearchPaths and longer DNSSearchListChars
	// (see the MaxDNSSearch*Expanded limits in validatePodDNSConfig).
	AllowExpandedDNSConfig bool
}
3330
3331// validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set,
3332// and is called by ValidatePodCreate and ValidatePodUpdate.
3333func validatePodMetadataAndSpec(pod *core.Pod, opts PodValidationOptions) field.ErrorList {
3334	fldPath := field.NewPath("metadata")
3335	allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath)
3336	allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"), opts)...)
3337	allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, &pod.ObjectMeta, field.NewPath("spec"), opts)...)
3338
3339	// we do additional validation only pertinent for pods and not pod templates
3340	// this was done to preserve backwards compatibility
3341	specPath := field.NewPath("spec")
3342
3343	if pod.Spec.ServiceAccountName == "" {
3344		for vi, volume := range pod.Spec.Volumes {
3345			path := specPath.Child("volumes").Index(vi).Child("projected")
3346			if volume.Projected != nil {
3347				for si, source := range volume.Projected.Sources {
3348					saPath := path.Child("sources").Index(si).Child("serviceAccountToken")
3349					if source.ServiceAccountToken != nil {
3350						allErrs = append(allErrs, field.Forbidden(saPath, "must not be specified when serviceAccountName is not set"))
3351					}
3352				}
3353			}
3354		}
3355	}
3356
3357	allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.Containers, specPath.Child("containers"))...)
3358	allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.InitContainers, specPath.Child("initContainers"))...)
3359
3360	return allErrs
3361}
3362
3363// validatePodIPs validates IPs in pod status
3364func validatePodIPs(pod *core.Pod) field.ErrorList {
3365	allErrs := field.ErrorList{}
3366
3367	podIPsField := field.NewPath("status", "podIPs")
3368
3369	// all PodIPs must be valid IPs
3370	for i, podIP := range pod.Status.PodIPs {
3371		for _, msg := range validation.IsValidIP(podIP.IP) {
3372			allErrs = append(allErrs, field.Invalid(podIPsField.Index(i), podIP.IP, msg))
3373		}
3374	}
3375
3376	// if we have more than one Pod.PodIP then
3377	// - validate for dual stack
3378	// - validate for duplication
3379	if len(pod.Status.PodIPs) > 1 {
3380		podIPs := make([]string, 0, len(pod.Status.PodIPs))
3381		for _, podIP := range pod.Status.PodIPs {
3382			podIPs = append(podIPs, podIP.IP)
3383		}
3384
3385		dualStack, err := netutils.IsDualStackIPStrings(podIPs)
3386		if err != nil {
3387			allErrs = append(allErrs, field.InternalError(podIPsField, fmt.Errorf("failed to check for dual stack with error:%v", err)))
3388		}
3389
3390		// We only support one from each IP family (i.e. max two IPs in this list).
3391		if !dualStack || len(podIPs) > 2 {
3392			allErrs = append(allErrs, field.Invalid(podIPsField, pod.Status.PodIPs, "may specify no more than one IP for each IP family"))
3393		}
3394
3395		// There should be no duplicates in list of Pod.PodIPs
3396		seen := sets.String{} //:= make(map[string]int)
3397		for i, podIP := range pod.Status.PodIPs {
3398			if seen.Has(podIP.IP) {
3399				allErrs = append(allErrs, field.Duplicate(podIPsField.Index(i), podIP))
3400			}
3401			seen.Insert(podIP.IP)
3402		}
3403	}
3404
3405	return allErrs
3406}
3407
// ValidatePodSpec tests that the specified PodSpec has valid data.
// This includes checking formatting and uniqueness.  It also canonicalizes the
// structure by setting default values and implementing any backwards-compatibility
// tricks.
// The pod metadata is needed to validate generic ephemeral volumes. It is optional
// and should be left empty unless the spec is from a real pod object.
func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
	allErrs := field.ErrorList{}

	// Volumes are validated first so the resulting volume map can be checked
	// against each container's mounts and devices.
	vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"), opts)
	allErrs = append(allErrs, vErrs...)
	allErrs = append(allErrs, validateContainers(spec.Containers, false, vols, fldPath.Child("containers"), opts)...)
	allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"), opts)...)
	allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, fldPath.Child("ephemeralContainers"), opts)...)
	allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
	allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
	allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
	allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...)
	allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
	allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...)
	allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...)
	allErrs = append(allErrs, validateReadinessGates(spec.ReadinessGates, fldPath.Child("readinessGates"))...)
	allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"))...)
	allErrs = append(allErrs, validateWindowsHostProcessPod(spec, fldPath, opts)...)
	// The remaining fields are optional; each is validated only when set.
	if len(spec.ServiceAccountName) > 0 {
		for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg))
		}
	}

	if len(spec.NodeName) > 0 {
		for _, msg := range ValidateNodeName(spec.NodeName, false) {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg))
		}
	}

	// activeDeadlineSeconds must fit in an int32 and be at least 1.
	if spec.ActiveDeadlineSeconds != nil {
		value := *spec.ActiveDeadlineSeconds
		if value < 1 || value > math.MaxInt32 {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), value, validation.InclusiveRangeError(1, math.MaxInt32)))
		}
	}

	if len(spec.Hostname) > 0 {
		allErrs = append(allErrs, ValidateDNS1123Label(spec.Hostname, fldPath.Child("hostname"))...)
	}

	if len(spec.Subdomain) > 0 {
		allErrs = append(allErrs, ValidateDNS1123Label(spec.Subdomain, fldPath.Child("subdomain"))...)
	}

	if len(spec.Tolerations) > 0 {
		allErrs = append(allErrs, ValidateTolerations(spec.Tolerations, fldPath.Child("tolerations"))...)
	}

	if len(spec.HostAliases) > 0 {
		allErrs = append(allErrs, ValidateHostAliases(spec.HostAliases, fldPath.Child("hostAliases"))...)
	}

	if len(spec.PriorityClassName) > 0 {
		for _, msg := range ValidatePriorityClassName(spec.PriorityClassName, false) {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("priorityClassName"), spec.PriorityClassName, msg))
		}
	}

	if spec.RuntimeClassName != nil {
		allErrs = append(allErrs, ValidateRuntimeClassName(*spec.RuntimeClassName, fldPath.Child("runtimeClassName"))...)
	}

	if spec.PreemptionPolicy != nil {
		allErrs = append(allErrs, ValidatePreemptionPolicy(spec.PreemptionPolicy, fldPath.Child("preemptionPolicy"))...)
	}

	if spec.Overhead != nil {
		allErrs = append(allErrs, validateOverhead(spec.Overhead, fldPath.Child("overhead"), opts)...)
	}

	return allErrs
}
3487
3488// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data
3489func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {
3490	allErrs := field.ErrorList{}
3491	switch rq.Operator {
3492	case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn:
3493		if len(rq.Values) == 0 {
3494			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
3495		}
3496	case core.NodeSelectorOpExists, core.NodeSelectorOpDoesNotExist:
3497		if len(rq.Values) > 0 {
3498			allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
3499		}
3500
3501	case core.NodeSelectorOpGt, core.NodeSelectorOpLt:
3502		if len(rq.Values) != 1 {
3503			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'"))
3504		}
3505	default:
3506		allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator"))
3507	}
3508
3509	allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...)
3510
3511	return allErrs
3512}
3513
// nodeFieldSelectorValidators maps each supported node field selector key to
// the function used to validate its values; keys absent from this map are
// rejected by ValidateNodeFieldSelectorRequirement.
var nodeFieldSelectorValidators = map[string]func(string, bool) []string{
	metav1.ObjectNameField: ValidateNodeName,
}
3517
3518// ValidateNodeFieldSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data
3519func ValidateNodeFieldSelectorRequirement(req core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {
3520	allErrs := field.ErrorList{}
3521
3522	switch req.Operator {
3523	case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn:
3524		if len(req.Values) != 1 {
3525			allErrs = append(allErrs, field.Required(fldPath.Child("values"),
3526				"must be only one value when `operator` is 'In' or 'NotIn' for node field selector"))
3527		}
3528	default:
3529		allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator, "not a valid selector operator"))
3530	}
3531
3532	if vf, found := nodeFieldSelectorValidators[req.Key]; !found {
3533		allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), req.Key, "not a valid field selector key"))
3534	} else {
3535		for i, v := range req.Values {
3536			for _, msg := range vf(v, false) {
3537				allErrs = append(allErrs, field.Invalid(fldPath.Child("values").Index(i), v, msg))
3538			}
3539		}
3540	}
3541
3542	return allErrs
3543}
3544
3545// ValidateNodeSelectorTerm tests that the specified node selector term has valid data
3546func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, fldPath *field.Path) field.ErrorList {
3547	allErrs := field.ErrorList{}
3548
3549	for j, req := range term.MatchExpressions {
3550		allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...)
3551	}
3552
3553	for j, req := range term.MatchFields {
3554		allErrs = append(allErrs, ValidateNodeFieldSelectorRequirement(req, fldPath.Child("matchFields").Index(j))...)
3555	}
3556
3557	return allErrs
3558}
3559
3560// ValidateNodeSelector tests that the specified nodeSelector fields has valid data
3561func ValidateNodeSelector(nodeSelector *core.NodeSelector, fldPath *field.Path) field.ErrorList {
3562	allErrs := field.ErrorList{}
3563
3564	termFldPath := fldPath.Child("nodeSelectorTerms")
3565	if len(nodeSelector.NodeSelectorTerms) == 0 {
3566		return append(allErrs, field.Required(termFldPath, "must have at least one node selector term"))
3567	}
3568
3569	for i, term := range nodeSelector.NodeSelectorTerms {
3570		allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...)
3571	}
3572
3573	return allErrs
3574}
3575
3576// validateTopologySelectorLabelRequirement tests that the specified TopologySelectorLabelRequirement fields has valid data,
3577// and constructs a set containing all of its Values.
3578func validateTopologySelectorLabelRequirement(rq core.TopologySelectorLabelRequirement, fldPath *field.Path) (sets.String, field.ErrorList) {
3579	allErrs := field.ErrorList{}
3580	valueSet := make(sets.String)
3581	valuesPath := fldPath.Child("values")
3582	if len(rq.Values) == 0 {
3583		allErrs = append(allErrs, field.Required(valuesPath, ""))
3584	}
3585
3586	// Validate set property of Values field
3587	for i, value := range rq.Values {
3588		if valueSet.Has(value) {
3589			allErrs = append(allErrs, field.Duplicate(valuesPath.Index(i), value))
3590		}
3591		valueSet.Insert(value)
3592	}
3593
3594	allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...)
3595
3596	return valueSet, allErrs
3597}
3598
3599// ValidateTopologySelectorTerm tests that the specified topology selector term has valid data,
3600// and constructs a map representing the term in raw form.
3601func ValidateTopologySelectorTerm(term core.TopologySelectorTerm, fldPath *field.Path) (map[string]sets.String, field.ErrorList) {
3602	allErrs := field.ErrorList{}
3603	exprMap := make(map[string]sets.String)
3604	exprPath := fldPath.Child("matchLabelExpressions")
3605
3606	// Allow empty MatchLabelExpressions, in case this field becomes optional in the future.
3607	for i, req := range term.MatchLabelExpressions {
3608		idxPath := exprPath.Index(i)
3609		valueSet, exprErrs := validateTopologySelectorLabelRequirement(req, idxPath)
3610		allErrs = append(allErrs, exprErrs...)
3611
3612		// Validate no duplicate keys exist.
3613		if _, exists := exprMap[req.Key]; exists {
3614			allErrs = append(allErrs, field.Duplicate(idxPath.Child("key"), req.Key))
3615		}
3616		exprMap[req.Key] = valueSet
3617	}
3618
3619	return exprMap, allErrs
3620}
3621
3622// ValidateAvoidPodsInNodeAnnotations tests that the serialized AvoidPods in Node.Annotations has valid data
3623func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
3624	allErrs := field.ErrorList{}
3625
3626	v1Avoids, err := schedulinghelper.GetAvoidPodsFromNodeAnnotations(annotations)
3627	if err != nil {
3628		allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error()))
3629		return allErrs
3630	}
3631	var avoids core.AvoidPods
3632	if err := corev1.Convert_v1_AvoidPods_To_core_AvoidPods(&v1Avoids, &avoids, nil); err != nil {
3633		allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error()))
3634		return allErrs
3635	}
3636
3637	if len(avoids.PreferAvoidPods) != 0 {
3638		for i, pa := range avoids.PreferAvoidPods {
3639			idxPath := fldPath.Child(core.PreferAvoidPodsAnnotationKey).Index(i)
3640			allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...)
3641		}
3642	}
3643
3644	return allErrs
3645}
3646
3647// validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data.
3648func validatePreferAvoidPodsEntry(avoidPodEntry core.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList {
3649	allErrors := field.ErrorList{}
3650	if avoidPodEntry.PodSignature.PodController == nil {
3651		allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), ""))
3652	} else {
3653		if !*(avoidPodEntry.PodSignature.PodController.Controller) {
3654			allErrors = append(allErrors,
3655				field.Invalid(fldPath.Child("PodSignature").Child("PodController").Child("Controller"),
3656					*(avoidPodEntry.PodSignature.PodController.Controller), "must point to a controller"))
3657		}
3658	}
3659	return allErrors
3660}
3661
3662// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data
3663func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList {
3664	allErrs := field.ErrorList{}
3665
3666	for i, term := range terms {
3667		if term.Weight <= 0 || term.Weight > 100 {
3668			allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100"))
3669		}
3670
3671		allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...)
3672	}
3673	return allErrs
3674}
3675
3676// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data
3677func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *field.Path) field.ErrorList {
3678	allErrs := field.ErrorList{}
3679
3680	allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("labelSelector"))...)
3681	allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.NamespaceSelector, fldPath.Child("namespaceSelector"))...)
3682
3683	for _, name := range podAffinityTerm.Namespaces {
3684		for _, msg := range ValidateNamespaceName(name, false) {
3685			allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg))
3686		}
3687	}
3688	if len(podAffinityTerm.TopologyKey) == 0 {
3689		allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty"))
3690	}
3691	return append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...)
3692}
3693
3694// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data
3695func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, fldPath *field.Path) field.ErrorList {
3696	allErrs := field.ErrorList{}
3697	for i, podAffinityTerm := range podAffinityTerms {
3698		allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, fldPath.Index(i))...)
3699	}
3700	return allErrs
3701}
3702
3703// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data
3704func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList {
3705	allErrs := field.ErrorList{}
3706	for j, weightedTerm := range weightedPodAffinityTerms {
3707		if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 {
3708			allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100"))
3709		}
3710		allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, fldPath.Index(j).Child("podAffinityTerm"))...)
3711	}
3712	return allErrs
3713}
3714
3715// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data
3716func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *field.Path) field.ErrorList {
3717	allErrs := field.ErrorList{}
3718	// TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented.
3719	// if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
3720	//	allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
3721	//		fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
3722	//}
3723	if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
3724		allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
3725			fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
3726	}
3727	if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
3728		allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
3729			fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
3730	}
3731	return allErrs
3732}
3733
3734// validateNodeAffinity tests that the specified nodeAffinity fields have valid data
3735func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.ErrorList {
3736	allErrs := field.ErrorList{}
3737	// TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
3738	// if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
3739	//	allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
3740	// }
3741	if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
3742		allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
3743	}
3744	if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
3745		allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
3746	}
3747	return allErrs
3748}
3749
3750// validatePodAffinity tests that the specified podAffinity fields have valid data
3751func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList {
3752	allErrs := field.ErrorList{}
3753	// TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented.
3754	// if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
3755	//	allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
3756	//		fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
3757	//}
3758	if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
3759		allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
3760			fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
3761	}
3762	if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
3763		allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
3764			fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
3765	}
3766	return allErrs
3767}
3768
3769func validateSeccompProfileField(sp *core.SeccompProfile, fldPath *field.Path) field.ErrorList {
3770	allErrs := field.ErrorList{}
3771	if sp == nil {
3772		return allErrs
3773	}
3774
3775	if err := validateSeccompProfileType(fldPath.Child("type"), sp.Type); err != nil {
3776		allErrs = append(allErrs, err)
3777	}
3778
3779	if sp.Type == core.SeccompProfileTypeLocalhost {
3780		if sp.LocalhostProfile == nil {
3781			allErrs = append(allErrs, field.Required(fldPath.Child("localhostProfile"), "must be set when seccomp type is Localhost"))
3782		} else {
3783			allErrs = append(allErrs, validateLocalDescendingPath(*sp.LocalhostProfile, fldPath.Child("localhostProfile"))...)
3784		}
3785	} else {
3786		if sp.LocalhostProfile != nil {
3787			allErrs = append(allErrs, field.Invalid(fldPath.Child("localhostProfile"), sp, "can only be set when seccomp type is Localhost"))
3788		}
3789	}
3790
3791	return allErrs
3792}
3793
3794func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList {
3795	if p == core.SeccompProfileRuntimeDefault || p == core.DeprecatedSeccompProfileDockerDefault {
3796		return nil
3797	}
3798	if p == v1.SeccompProfileNameUnconfined {
3799		return nil
3800	}
3801	if strings.HasPrefix(p, v1.SeccompLocalhostProfileNamePrefix) {
3802		return validateLocalDescendingPath(strings.TrimPrefix(p, v1.SeccompLocalhostProfileNamePrefix), fldPath)
3803	}
3804	return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")}
3805}
3806
3807func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
3808	allErrs := field.ErrorList{}
3809	if p, exists := annotations[core.SeccompPodAnnotationKey]; exists {
3810		allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(core.SeccompPodAnnotationKey))...)
3811	}
3812	for k, p := range annotations {
3813		if strings.HasPrefix(k, core.SeccompContainerAnnotationKeyPrefix) {
3814			allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...)
3815		}
3816	}
3817
3818	return allErrs
3819}
3820
// validateSeccompProfileType tests that the argument is a valid SeccompProfileType.
// It returns nil for Localhost/RuntimeDefault/Unconfined, a Required error for
// the empty string, and a NotSupported error for anything else.
func validateSeccompProfileType(fldPath *field.Path, seccompProfileType core.SeccompProfileType) *field.Error {
	switch seccompProfileType {
	case core.SeccompProfileTypeLocalhost, core.SeccompProfileTypeRuntimeDefault, core.SeccompProfileTypeUnconfined:
		return nil
	case "":
		return field.Required(fldPath, "type is required when seccompProfile is set")
	default:
		return field.NotSupported(fldPath, seccompProfileType, []string{string(core.SeccompProfileTypeLocalhost), string(core.SeccompProfileTypeRuntimeDefault), string(core.SeccompProfileTypeUnconfined)})
	}
}
3832
3833func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
3834	allErrs := field.ErrorList{}
3835	for k, p := range annotations {
3836		if !strings.HasPrefix(k, v1.AppArmorBetaContainerAnnotationKeyPrefix) {
3837			continue
3838		}
3839		containerName := strings.TrimPrefix(k, v1.AppArmorBetaContainerAnnotationKeyPrefix)
3840		if !podSpecHasContainer(spec, containerName) {
3841			allErrs = append(allErrs, field.Invalid(fldPath.Key(k), containerName, "container not found"))
3842		}
3843
3844		if err := apparmor.ValidateProfileFormat(p); err != nil {
3845			allErrs = append(allErrs, field.Invalid(fldPath.Key(k), p, err.Error()))
3846		}
3847	}
3848
3849	return allErrs
3850}
3851
3852func podSpecHasContainer(spec *core.PodSpec, containerName string) bool {
3853	var hasContainer bool
3854	podshelper.VisitContainersWithPath(spec, field.NewPath("spec"), func(c *core.Container, _ *field.Path) bool {
3855		if c.Name == containerName {
3856			hasContainer = true
3857			return false
3858		}
3859		return true
3860	})
3861	return hasContainer
3862}
3863
const (
	// SysctlSegmentFmt is a sysctl segment regex, concatenated with dots to form a sysctl name
	SysctlSegmentFmt string = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?"

	// SysctlFmt is a sysctl name regex: one or more segments joined by dots
	SysctlFmt string = "(" + SysctlSegmentFmt + "\\.)*" + SysctlSegmentFmt

	// SysctlMaxLength is the maximal length of a sysctl name
	SysctlMaxLength int = 253
)

// sysctlRegexp matches a complete, well-formed sysctl name.
var sysctlRegexp = regexp.MustCompile("^" + SysctlFmt + "$")

// IsValidSysctlName checks that the given string is a valid sysctl name,
// i.e. matches SysctlFmt and is no longer than SysctlMaxLength characters.
func IsValidSysctlName(name string) bool {
	return len(name) <= SysctlMaxLength && sysctlRegexp.MatchString(name)
}
3885
3886func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList {
3887	allErrs := field.ErrorList{}
3888	names := make(map[string]struct{})
3889	for i, s := range sysctls {
3890		if len(s.Name) == 0 {
3891			allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), ""))
3892		} else if !IsValidSysctlName(s.Name) {
3893			allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, SysctlFmt)))
3894		} else if _, ok := names[s.Name]; ok {
3895			allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("name"), s.Name))
3896		}
3897		names[s.Name] = struct{}{}
3898	}
3899	return allErrs
3900}
3901
// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data.
// It validates the pod-level IDs (fsGroup, runAsUser, runAsGroup, supplementalGroups),
// the hostNetwork/container relationship, the shareProcessNamespace/hostPID conflict,
// sysctls, fsGroupChangePolicy, seccompProfile, and windowsOptions.
// spec is the enclosing PodSpec (its containers are needed for the hostNetwork check);
// specPath and fldPath are the field paths of the spec and of the securityContext.
// A nil securityContext yields no errors.
func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	if securityContext != nil {
		// Checks containers against the hostNetwork setting — see validateHostNetwork.
		allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...)
		if securityContext.FSGroup != nil {
			for _, msg := range validation.IsValidGroupID(*securityContext.FSGroup) {
				allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg))
			}
		}
		if securityContext.RunAsUser != nil {
			for _, msg := range validation.IsValidUserID(*securityContext.RunAsUser) {
				allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), msg))
			}
		}
		if securityContext.RunAsGroup != nil {
			for _, msg := range validation.IsValidGroupID(*securityContext.RunAsGroup) {
				allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsGroup"), *(securityContext.RunAsGroup), msg))
			}
		}
		// Each supplemental group must be a valid group ID.
		for g, gid := range securityContext.SupplementalGroups {
			for _, msg := range validation.IsValidGroupID(gid) {
				allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg))
			}
		}
		// Sharing the process namespace is incompatible with using the host PID namespace.
		if securityContext.ShareProcessNamespace != nil && securityContext.HostPID && *securityContext.ShareProcessNamespace {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("shareProcessNamespace"), *securityContext.ShareProcessNamespace, "ShareProcessNamespace and HostPID cannot both be enabled"))
		}

		if len(securityContext.Sysctls) != 0 {
			allErrs = append(allErrs, validateSysctls(securityContext.Sysctls, fldPath.Child("sysctls"))...)
		}

		if securityContext.FSGroupChangePolicy != nil {
			allErrs = append(allErrs, validateFSGroupChangePolicy(securityContext.FSGroupChangePolicy, fldPath.Child("fsGroupChangePolicy"))...)
		}

		allErrs = append(allErrs, validateSeccompProfileField(securityContext.SeccompProfile, fldPath.Child("seccompProfile"))...)
		allErrs = append(allErrs, validateWindowsSecurityContextOptions(securityContext.WindowsOptions, fldPath.Child("windowsOptions"))...)
	}

	return allErrs
}
3946
3947func ValidateContainerUpdates(newContainers, oldContainers []core.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) {
3948	allErrs = field.ErrorList{}
3949	if len(newContainers) != len(oldContainers) {
3950		//TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff
3951		allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers"))
3952		return allErrs, true
3953	}
3954
3955	// validate updated container images
3956	for i, ctr := range newContainers {
3957		if len(ctr.Image) == 0 {
3958			allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("image"), ""))
3959		}
3960		// this is only called from ValidatePodUpdate so its safe to check leading/trailing whitespace.
3961		if len(strings.TrimSpace(ctr.Image)) != len(ctr.Image) {
3962			allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("image"), ctr.Image, "must not have leading or trailing whitespace"))
3963		}
3964	}
3965	return allErrs, false
3966}
3967
3968// ValidatePodCreate validates a pod in the context of its initial create
3969func ValidatePodCreate(pod *core.Pod, opts PodValidationOptions) field.ErrorList {
3970	allErrs := validatePodMetadataAndSpec(pod, opts)
3971
3972	fldPath := field.NewPath("spec")
3973	// EphemeralContainers can only be set on update using the ephemeralcontainers subresource
3974	if len(pod.Spec.EphemeralContainers) > 0 {
3975		allErrs = append(allErrs, field.Forbidden(fldPath.Child("ephemeralContainers"), "cannot be set on create"))
3976	}
3977	allErrs = append(allErrs, validateSeccompAnnotationsAndFields(pod.ObjectMeta, &pod.Spec, fldPath)...)
3978
3979	return allErrs
3980}
3981
3982// ValidateSeccompAnnotationsAndFields iterates through all containers and ensure that when both seccompProfile and seccomp annotations exist they match.
3983func validateSeccompAnnotationsAndFields(objectMeta metav1.ObjectMeta, podSpec *core.PodSpec, specPath *field.Path) field.ErrorList {
3984	allErrs := field.ErrorList{}
3985
3986	if podSpec.SecurityContext != nil && podSpec.SecurityContext.SeccompProfile != nil {
3987		// If both seccomp annotations and fields are specified, the values must match.
3988		if annotation, found := objectMeta.Annotations[v1.SeccompPodAnnotationKey]; found {
3989			seccompPath := specPath.Child("securityContext").Child("seccompProfile")
3990			err := validateSeccompAnnotationsAndFieldsMatch(annotation, podSpec.SecurityContext.SeccompProfile, seccompPath)
3991			if err != nil {
3992				allErrs = append(allErrs, err)
3993			}
3994		}
3995	}
3996
3997	podshelper.VisitContainersWithPath(podSpec, specPath, func(c *core.Container, cFldPath *field.Path) bool {
3998		var field *core.SeccompProfile
3999		if c.SecurityContext != nil {
4000			field = c.SecurityContext.SeccompProfile
4001		}
4002
4003		if field == nil {
4004			return true
4005		}
4006
4007		key := v1.SeccompContainerAnnotationKeyPrefix + c.Name
4008		if annotation, found := objectMeta.Annotations[key]; found {
4009			seccompPath := cFldPath.Child("securityContext").Child("seccompProfile")
4010			err := validateSeccompAnnotationsAndFieldsMatch(annotation, field, seccompPath)
4011			if err != nil {
4012				allErrs = append(allErrs, err)
4013			}
4014		}
4015		return true
4016	})
4017
4018	return allErrs
4019}
4020
4021func validateSeccompAnnotationsAndFieldsMatch(annotationValue string, seccompField *core.SeccompProfile, fldPath *field.Path) *field.Error {
4022	if seccompField == nil {
4023		return nil
4024	}
4025
4026	switch seccompField.Type {
4027	case core.SeccompProfileTypeUnconfined:
4028		if annotationValue != v1.SeccompProfileNameUnconfined {
4029			return field.Forbidden(fldPath.Child("type"), "seccomp type in annotation and field must match")
4030		}
4031
4032	case core.SeccompProfileTypeRuntimeDefault:
4033		if annotationValue != v1.SeccompProfileRuntimeDefault && annotationValue != v1.DeprecatedSeccompProfileDockerDefault {
4034			return field.Forbidden(fldPath.Child("type"), "seccomp type in annotation and field must match")
4035		}
4036
4037	case core.SeccompProfileTypeLocalhost:
4038		if !strings.HasPrefix(annotationValue, v1.SeccompLocalhostProfileNamePrefix) {
4039			return field.Forbidden(fldPath.Child("type"), "seccomp type in annotation and field must match")
4040		} else if seccompField.LocalhostProfile == nil || strings.TrimPrefix(annotationValue, v1.SeccompLocalhostProfileNamePrefix) != *seccompField.LocalhostProfile {
4041			return field.Forbidden(fldPath.Child("localhostProfile"), "seccomp profile in annotation and field must match")
4042		}
4043	}
4044
4045	return nil
4046}
4047
// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
// that cannot be changed.
//
// Mutable spec fields are limited to:
//  1. spec.containers[*].image
//  2. spec.initContainers[*].image
//  3. spec.activeDeadlineSeconds (nil -> positive, or positive -> lesser non-negative)
//  4. spec.tolerations (additions only)
//  5. spec.terminationGracePeriodSeconds (negative -> 1 only)
// Any other spec change produces a Forbidden error on "spec".
func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
	fldPath := field.NewPath("metadata")
	allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
	allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
	allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...)
	specPath := field.NewPath("spec")

	// validate updateable fields:
	// 1.  spec.containers[*].image
	// 2.  spec.initContainers[*].image
	// 3.  spec.activeDeadlineSeconds
	// 4.  spec.terminationGracePeriodSeconds

	// If the container lists are not comparable (length changed), further
	// element-wise checks are meaningless, so stop early.
	containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers"))
	allErrs = append(allErrs, containerErrs...)
	if stop {
		return allErrs
	}
	containerErrs, stop = ValidateContainerUpdates(newPod.Spec.InitContainers, oldPod.Spec.InitContainers, specPath.Child("initContainers"))
	allErrs = append(allErrs, containerErrs...)
	if stop {
		return allErrs
	}

	// validate updated spec.activeDeadlineSeconds.  two types of updates are allowed:
	// 1.  from nil to a positive value
	// 2.  from a positive value to a lesser, non-negative value
	if newPod.Spec.ActiveDeadlineSeconds != nil {
		newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds
		if newActiveDeadlineSeconds < 0 || newActiveDeadlineSeconds > math.MaxInt32 {
			allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, validation.InclusiveRangeError(0, math.MaxInt32)))
			return allErrs
		}
		if oldPod.Spec.ActiveDeadlineSeconds != nil {
			oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
			if oldActiveDeadlineSeconds < newActiveDeadlineSeconds {
				allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value"))
				return allErrs
			}
		}
	} else if oldPod.Spec.ActiveDeadlineSeconds != nil {
		allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to nil value"))
	}

	// Allow only additions to tolerations updates.
	allErrs = append(allErrs, validateOnlyAddedTolerations(newPod.Spec.Tolerations, oldPod.Spec.Tolerations, specPath.Child("tolerations"))...)

	// the last thing to check is pod spec equality.  If the pod specs are equal, then we can simply return the errors we have
	// so far and save the cost of a deep copy.
	if apiequality.Semantic.DeepEqual(newPod.Spec, oldPod.Spec) {
		return allErrs
	}

	// handle updateable fields by munging those fields prior to deep equal comparison.
	// The munged copy takes the mutable fields from the old spec; if it still
	// differs from the old spec afterwards, an immutable field was changed.
	mungedPodSpec := *newPod.Spec.DeepCopy()
	// munge spec.containers[*].image
	var newContainers []core.Container
	for ix, container := range mungedPodSpec.Containers {
		container.Image = oldPod.Spec.Containers[ix].Image // +k8s:verify-mutation:reason=clone
		newContainers = append(newContainers, container)
	}
	mungedPodSpec.Containers = newContainers
	// munge spec.initContainers[*].image
	var newInitContainers []core.Container
	for ix, container := range mungedPodSpec.InitContainers {
		container.Image = oldPod.Spec.InitContainers[ix].Image // +k8s:verify-mutation:reason=clone
		newInitContainers = append(newInitContainers, container)
	}
	mungedPodSpec.InitContainers = newInitContainers
	// munge spec.activeDeadlineSeconds
	mungedPodSpec.ActiveDeadlineSeconds = nil
	if oldPod.Spec.ActiveDeadlineSeconds != nil {
		activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
		mungedPodSpec.ActiveDeadlineSeconds = &activeDeadlineSeconds
	}
	// tolerations are checked before the deep copy, so munge those too
	mungedPodSpec.Tolerations = oldPod.Spec.Tolerations // +k8s:verify-mutation:reason=clone

	// Relax validation of immutable fields to allow it to be set to 1 if it was previously negative.
	if oldPod.Spec.TerminationGracePeriodSeconds != nil && *oldPod.Spec.TerminationGracePeriodSeconds < 0 &&
		mungedPodSpec.TerminationGracePeriodSeconds != nil && *mungedPodSpec.TerminationGracePeriodSeconds == 1 {
		mungedPodSpec.TerminationGracePeriodSeconds = oldPod.Spec.TerminationGracePeriodSeconds // +k8s:verify-mutation:reason=clone
	}

	if !apiequality.Semantic.DeepEqual(mungedPodSpec, oldPod.Spec) {
		// This diff isn't perfect, but it's a helluva lot better an "I'm not going to tell you what the difference is".
		//TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
		specDiff := diff.ObjectDiff(mungedPodSpec, oldPod.Spec)
		allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds`, `spec.tolerations` (only additions to existing tolerations) or `spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)\n%v", specDiff)))
	}

	return allErrs
}
4143
4144// ValidateContainerStateTransition test to if any illegal container state transitions are being attempted
4145func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, restartPolicy core.RestartPolicy) field.ErrorList {
4146	allErrs := field.ErrorList{}
4147	// If we should always restart, containers are allowed to leave the terminated state
4148	if restartPolicy == core.RestartPolicyAlways {
4149		return allErrs
4150	}
4151	for i, oldStatus := range oldStatuses {
4152		// Skip any container that is not terminated
4153		if oldStatus.State.Terminated == nil {
4154			continue
4155		}
4156		// Skip any container that failed but is allowed to restart
4157		if oldStatus.State.Terminated.ExitCode != 0 && restartPolicy == core.RestartPolicyOnFailure {
4158			continue
4159		}
4160		for _, newStatus := range newStatuses {
4161			if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
4162				allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state"))
4163			}
4164		}
4165	}
4166	return allErrs
4167}
4168
4169// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make.
4170func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
4171	fldPath := field.NewPath("metadata")
4172	allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
4173	allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...)
4174	allErrs = append(allErrs, validatePodConditions(newPod.Status.Conditions, fldPath.Child("conditions"))...)
4175
4176	fldPath = field.NewPath("status")
4177	if newPod.Spec.NodeName != oldPod.Spec.NodeName {
4178		allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeName"), "may not be changed directly"))
4179	}
4180
4181	if newPod.Status.NominatedNodeName != oldPod.Status.NominatedNodeName && len(newPod.Status.NominatedNodeName) > 0 {
4182		for _, msg := range ValidateNodeName(newPod.Status.NominatedNodeName, false) {
4183			allErrs = append(allErrs, field.Invalid(fldPath.Child("nominatedNodeName"), newPod.Status.NominatedNodeName, msg))
4184		}
4185	}
4186
4187	// If pod should not restart, make sure the status update does not transition
4188	// any terminated containers to a non-terminated state.
4189	allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...)
4190	allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...)
4191
4192	if newIPErrs := validatePodIPs(newPod); len(newIPErrs) > 0 {
4193		allErrs = append(allErrs, newIPErrs...)
4194	}
4195
4196	return allErrs
4197}
4198
4199// validatePodConditions tests if the custom pod conditions are valid.
4200func validatePodConditions(conditions []core.PodCondition, fldPath *field.Path) field.ErrorList {
4201	allErrs := field.ErrorList{}
4202	systemConditions := sets.NewString(string(core.PodScheduled), string(core.PodReady), string(core.PodInitialized))
4203	for i, condition := range conditions {
4204		if systemConditions.Has(string(condition.Type)) {
4205			continue
4206		}
4207		for _, msg := range validation.IsQualifiedName(string(condition.Type)) {
4208			allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("Type"), string(condition.Type), msg))
4209		}
4210	}
4211	return allErrs
4212}
4213
4214// ValidatePodEphemeralContainersUpdate tests that a user update to EphemeralContainers is valid.
4215// newPod and oldPod must only differ in their EphemeralContainers.
4216func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
4217	spec := newPod.Spec
4218	specPath := field.NewPath("spec").Child("ephemeralContainers")
4219
4220	vols := make(map[string]core.VolumeSource)
4221	for _, vol := range spec.Volumes {
4222		vols[vol.Name] = vol.VolumeSource
4223	}
4224	allErrs := validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, specPath, opts)
4225
4226	// Existing EphemeralContainers may not be changed. Order isn't preserved by patch, so check each individually.
4227	newContainerIndex := make(map[string]*core.EphemeralContainer)
4228	for i := range newPod.Spec.EphemeralContainers {
4229		newContainerIndex[newPod.Spec.EphemeralContainers[i].Name] = &newPod.Spec.EphemeralContainers[i]
4230	}
4231	for _, old := range oldPod.Spec.EphemeralContainers {
4232		if new, ok := newContainerIndex[old.Name]; !ok {
4233			allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("existing ephemeral containers %q may not be removed\n", old.Name)))
4234		} else if !apiequality.Semantic.DeepEqual(old, *new) {
4235			specDiff := diff.ObjectDiff(old, *new)
4236			allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("existing ephemeral containers %q may not be changed\n%v", old.Name, specDiff)))
4237		}
4238	}
4239
4240	return allErrs
4241}
4242
4243// ValidatePodBinding tests if required fields in the pod binding are legal.
4244func ValidatePodBinding(binding *core.Binding) field.ErrorList {
4245	allErrs := field.ErrorList{}
4246
4247	if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" {
4248		// TODO: When validation becomes versioned, this gets more complicated.
4249		allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", "<empty>"}))
4250	}
4251	if len(binding.Target.Name) == 0 {
4252		// TODO: When validation becomes versioned, this gets more complicated.
4253		allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), ""))
4254	}
4255
4256	return allErrs
4257}
4258
4259// ValidatePodTemplate tests if required fields in the pod template are set.
4260func ValidatePodTemplate(pod *core.PodTemplate, opts PodValidationOptions) field.ErrorList {
4261	allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata"))
4262	allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"), opts)...)
4263	return allErrs
4264}
4265
4266// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
4267// that cannot be changed.
4268func ValidatePodTemplateUpdate(newPod, oldPod *core.PodTemplate, opts PodValidationOptions) field.ErrorList {
4269	allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, field.NewPath("metadata"))
4270	allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"), opts)...)
4271	return allErrs
4272}
4273
4274var supportedSessionAffinityType = sets.NewString(string(core.ServiceAffinityClientIP), string(core.ServiceAffinityNone))
4275var supportedServiceType = sets.NewString(string(core.ServiceTypeClusterIP), string(core.ServiceTypeNodePort),
4276	string(core.ServiceTypeLoadBalancer), string(core.ServiceTypeExternalName))
4277
4278var supportedServiceInternalTrafficPolicy = sets.NewString(string(core.ServiceInternalTrafficPolicyCluster), string(core.ServiceExternalTrafficPolicyTypeLocal))
4279
4280var supportedServiceIPFamily = sets.NewString(string(core.IPv4Protocol), string(core.IPv6Protocol))
4281var supportedServiceIPFamilyPolicy = sets.NewString(string(core.IPFamilyPolicySingleStack), string(core.IPFamilyPolicyPreferDualStack), string(core.IPFamilyPolicyRequireDualStack))
4282
// ValidateService tests if required fields/annotations of a Service are valid.
func ValidateService(service *core.Service) field.ErrorList {
	allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata"))

	specPath := field.NewPath("spec")

	// Every service must declare at least one port, except headless services
	// and ExternalName services (which have no proxying at all).
	if len(service.Spec.Ports) == 0 && !isHeadlessService(service) && service.Spec.Type != core.ServiceTypeExternalName {
		allErrs = append(allErrs, field.Required(specPath.Child("ports"), ""))
	}
	// Type-specific rules.
	switch service.Spec.Type {
	case core.ServiceTypeLoadBalancer:
		for ix := range service.Spec.Ports {
			port := &service.Spec.Ports[ix]
			// This is a workaround for broken cloud environments that
			// over-open firewalls.  Hopefully it can go away when more clouds
			// understand containers better.
			if port.Port == ports.KubeletPort {
				portPath := specPath.Child("ports").Index(ix)
				allErrs = append(allErrs, field.Invalid(portPath, port.Port, fmt.Sprintf("may not expose port %v externally since it is used by kubelet", ports.KubeletPort)))
			}
		}
		// LoadBalancer services need a real cluster IP to forward to.
		if isHeadlessService(service) {
			allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIPs").Index(0), service.Spec.ClusterIPs[0], "may not be set to 'None' for LoadBalancer services"))
		}
	case core.ServiceTypeNodePort:
		// NodePort services likewise require a real cluster IP.
		if isHeadlessService(service) {
			allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIPs").Index(0), service.Spec.ClusterIPs[0], "may not be set to 'None' for NodePort services"))
		}
	case core.ServiceTypeExternalName:
		// must have  len(.spec.ClusterIPs) == 0 // note: strategy sets ClusterIPs based on ClusterIP
		if len(service.Spec.ClusterIPs) > 0 {
			allErrs = append(allErrs, field.Forbidden(specPath.Child("clusterIPs"), "may not be set for ExternalName services"))
		}

		// must have nil families and nil policy
		if len(service.Spec.IPFamilies) > 0 {
			allErrs = append(allErrs, field.Forbidden(specPath.Child("ipFamilies"), "may not be set for ExternalName services"))
		}
		if service.Spec.IPFamilyPolicy != nil {
			allErrs = append(allErrs, field.Forbidden(specPath.Child("ipFamilyPolicy"), "may not be set for ExternalName services"))
		}

		// The value (a CNAME) may have a trailing dot to denote it as fully qualified
		cname := strings.TrimSuffix(service.Spec.ExternalName, ".")
		if len(cname) > 0 {
			allErrs = append(allErrs, ValidateDNS1123Subdomain(cname, specPath.Child("externalName"))...)
		} else {
			allErrs = append(allErrs, field.Required(specPath.Child("externalName"), ""))
		}
	}

	// Per-port validation: name uniqueness, protocol, port numbers.
	allPortNames := sets.String{}
	portsPath := specPath.Child("ports")
	for i := range service.Spec.Ports {
		portPath := portsPath.Index(i)
		allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService(service), &allPortNames, portPath)...)
	}

	if service.Spec.Selector != nil {
		allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...)
	}

	// sessionAffinity is required and must be a supported value.
	if len(service.Spec.SessionAffinity) == 0 {
		allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), ""))
	} else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) {
		allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List()))
	}

	// sessionAffinityConfig is only meaningful for ClientIP affinity.
	if service.Spec.SessionAffinity == core.ServiceAffinityClientIP {
		allErrs = append(allErrs, validateClientIPAffinityConfig(service.Spec.SessionAffinityConfig, specPath.Child("sessionAffinityConfig"))...)
	} else if service.Spec.SessionAffinity == core.ServiceAffinityNone {
		if service.Spec.SessionAffinityConfig != nil {
			allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", string(core.ServiceAffinityNone))))
		}
	}

	// dualstack <-> ClusterIPs <-> ipfamilies
	allErrs = append(allErrs, validateServiceClusterIPsRelatedFields(service)...)

	// External IPs must parse and must not be "special" addresses
	// (loopback, link-local, etc.).
	ipPath := specPath.Child("externalIPs")
	for i, ip := range service.Spec.ExternalIPs {
		idxPath := ipPath.Index(i)
		if msgs := validation.IsValidIP(ip); len(msgs) != 0 {
			for i := range msgs {
				allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i]))
			}
		} else {
			allErrs = append(allErrs, ValidateNonSpecialIP(ip, idxPath)...)
		}
	}

	// type is required and must be a supported value.
	if len(service.Spec.Type) == 0 {
		allErrs = append(allErrs, field.Required(specPath.Child("type"), ""))
	} else if !supportedServiceType.Has(string(service.Spec.Type)) {
		allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List()))
	}

	// nodePort is only meaningful for NodePort/LoadBalancer services.
	if service.Spec.Type == core.ServiceTypeClusterIP {
		portsPath := specPath.Child("ports")
		for i := range service.Spec.Ports {
			portPath := portsPath.Index(i)
			if service.Spec.Ports[i].NodePort != 0 {
				allErrs = append(allErrs, field.Forbidden(portPath.Child("nodePort"), "may not be used when `type` is 'ClusterIP'"))
			}
		}
	}

	// Check for duplicate NodePorts, considering (protocol,port) pairs
	portsPath = specPath.Child("ports")
	nodePorts := make(map[core.ServicePort]bool)
	for i := range service.Spec.Ports {
		port := &service.Spec.Ports[i]
		if port.NodePort == 0 {
			continue
		}
		portPath := portsPath.Index(i)
		// Use a ServicePort value with only Protocol/NodePort set as the map key.
		var key core.ServicePort
		key.Protocol = port.Protocol
		key.NodePort = port.NodePort
		_, found := nodePorts[key]
		if found {
			allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort))
		}
		nodePorts[key] = true
	}

	// Check for duplicate Ports, considering (protocol,port) pairs
	portsPath = specPath.Child("ports")
	ports := make(map[core.ServicePort]bool)
	for i, port := range service.Spec.Ports {
		portPath := portsPath.Index(i)
		key := core.ServicePort{Protocol: port.Protocol, Port: port.Port}
		_, found := ports[key]
		if found {
			allErrs = append(allErrs, field.Duplicate(portPath, key))
		}
		ports[key] = true
	}

	// Validate SourceRange field and annotation
	_, ok := service.Annotations[core.AnnotationLoadBalancerSourceRangesKey]
	if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok {
		var fieldPath *field.Path
		var val string
		// The spec field takes precedence over the legacy annotation when
		// both are present, for error-reporting purposes.
		if len(service.Spec.LoadBalancerSourceRanges) > 0 {
			fieldPath = specPath.Child("LoadBalancerSourceRanges")
			val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges)
		} else {
			fieldPath = field.NewPath("metadata", "annotations").Key(core.AnnotationLoadBalancerSourceRangesKey)
			val = service.Annotations[core.AnnotationLoadBalancerSourceRangesKey]
		}
		if service.Spec.Type != core.ServiceTypeLoadBalancer {
			allErrs = append(allErrs, field.Forbidden(fieldPath, "may only be used when `type` is 'LoadBalancer'"))
		}
		_, err := apiservice.GetLoadBalancerSourceRanges(service)
		if err != nil {
			allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24 "))
		}
	}

	if service.Spec.AllocateLoadBalancerNodePorts != nil && service.Spec.Type != core.ServiceTypeLoadBalancer {
		allErrs = append(allErrs, field.Forbidden(specPath.Child("allocateLoadBalancerNodePorts"), "may only be used when `type` is 'LoadBalancer'"))
	}

	// When the ServiceLBNodePortControl gate is on, the field must be set for
	// LoadBalancer services (defaulting normally fills it in).
	if utilfeature.DefaultFeatureGate.Enabled(features.ServiceLBNodePortControl) {
		if service.Spec.Type == core.ServiceTypeLoadBalancer && service.Spec.AllocateLoadBalancerNodePorts == nil {
			allErrs = append(allErrs, field.Required(field.NewPath("allocateLoadBalancerNodePorts"), ""))
		}
	}

	// validate LoadBalancerClass field
	allErrs = append(allErrs, validateLoadBalancerClassField(nil, service)...)

	// external traffic fields
	allErrs = append(allErrs, validateServiceExternalTrafficFieldsValue(service)...)

	// internal traffic policy field
	allErrs = append(allErrs, validateServiceInternalTrafficFieldsValue(service)...)

	return allErrs
}
4464
4465func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList {
4466	allErrs := field.ErrorList{}
4467
4468	if requireName && len(sp.Name) == 0 {
4469		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
4470	} else if len(sp.Name) != 0 {
4471		allErrs = append(allErrs, ValidateDNS1123Label(sp.Name, fldPath.Child("name"))...)
4472		if allNames.Has(sp.Name) {
4473			allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name))
4474		} else {
4475			allNames.Insert(sp.Name)
4476		}
4477	}
4478
4479	for _, msg := range validation.IsValidPortNum(int(sp.Port)) {
4480		allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, msg))
4481	}
4482
4483	if len(sp.Protocol) == 0 {
4484		allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), ""))
4485	} else if !supportedPortProtocols.Has(string(sp.Protocol)) {
4486		allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List()))
4487	}
4488
4489	allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...)
4490
4491	if sp.AppProtocol != nil {
4492		for _, msg := range validation.IsQualifiedName(*sp.AppProtocol) {
4493			allErrs = append(allErrs, field.Invalid(fldPath.Child("appProtocol"), sp.AppProtocol, msg))
4494		}
4495	}
4496
4497	// in the v1 API, targetPorts on headless services were tolerated.
4498	// once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility.
4499	//
4500	// if isHeadlessService {
4501	// 	if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) {
4502	// 		allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None"))
4503	// 	}
4504	// }
4505
4506	return allErrs
4507}
4508
4509// validateServiceExternalTrafficFieldsValue validates ExternalTraffic related annotations
4510// have legal value.
4511func validateServiceExternalTrafficFieldsValue(service *core.Service) field.ErrorList {
4512	allErrs := field.ErrorList{}
4513
4514	// Check first class fields.
4515	if service.Spec.ExternalTrafficPolicy != "" &&
4516		service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeCluster &&
4517		service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeLocal {
4518		allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy,
4519			fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", core.ServiceExternalTrafficPolicyTypeCluster, core.ServiceExternalTrafficPolicyTypeLocal)))
4520	}
4521
4522	if service.Spec.HealthCheckNodePort < 0 {
4523		allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort,
4524			"HealthCheckNodePort must be not less than 0"))
4525	}
4526
4527	return allErrs
4528}
4529
4530// validateServiceInternalTrafficFieldsValue validates InternalTraffic related
4531// spec have legal value.
4532func validateServiceInternalTrafficFieldsValue(service *core.Service) field.ErrorList {
4533	allErrs := field.ErrorList{}
4534
4535	if utilfeature.DefaultFeatureGate.Enabled(features.ServiceInternalTrafficPolicy) {
4536		if service.Spec.InternalTrafficPolicy == nil {
4537			allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("internalTrafficPolicy"), ""))
4538		}
4539	}
4540
4541	if service.Spec.InternalTrafficPolicy != nil && !supportedServiceInternalTrafficPolicy.Has(string(*service.Spec.InternalTrafficPolicy)) {
4542		allErrs = append(allErrs, field.NotSupported(field.NewPath("spec").Child("internalTrafficPolicy"), *service.Spec.InternalTrafficPolicy, supportedServiceInternalTrafficPolicy.List()))
4543	}
4544
4545	return allErrs
4546}
4547
4548// ValidateServiceExternalTrafficFieldsCombination validates if ExternalTrafficPolicy,
4549// HealthCheckNodePort and Type combination are legal. For update, it should be called
4550// after clearing externalTraffic related fields for the ease of transitioning between
4551// different service types.
4552func ValidateServiceExternalTrafficFieldsCombination(service *core.Service) field.ErrorList {
4553	allErrs := field.ErrorList{}
4554
4555	if service.Spec.Type != core.ServiceTypeLoadBalancer &&
4556		service.Spec.Type != core.ServiceTypeNodePort &&
4557		service.Spec.ExternalTrafficPolicy != "" {
4558		allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy,
4559			"ExternalTrafficPolicy can only be set on NodePort and LoadBalancer service"))
4560	}
4561
4562	if !apiservice.NeedsHealthCheck(service) &&
4563		service.Spec.HealthCheckNodePort != 0 {
4564		allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "healthCheckNodePort"), service.Spec.HealthCheckNodePort,
4565			"HealthCheckNodePort can only be set on LoadBalancer service with ExternalTrafficPolicy=Local"))
4566	}
4567
4568	return allErrs
4569}
4570
// ValidateServiceCreate validates Services as they are created.
func ValidateServiceCreate(service *core.Service) field.ErrorList {
	// Creation currently applies no extra rules beyond general service validation.
	return ValidateService(service)
}
4575
4576// ValidateServiceUpdate tests if required fields in the service are set during an update
4577func ValidateServiceUpdate(service, oldService *core.Service) field.ErrorList {
4578	allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
4579
4580	// User can upgrade (add another clusterIP or ipFamily)
4581	//      can downgrade (remove secondary clusterIP or ipFamily)
4582	// but *CAN NOT* change primary/secondary clusterIP || ipFamily *UNLESS*
4583	// they are changing from/to/ON ExternalName
4584
4585	upgradeDowngradeClusterIPsErrs := validateUpgradeDowngradeClusterIPs(oldService, service)
4586	allErrs = append(allErrs, upgradeDowngradeClusterIPsErrs...)
4587
4588	upgradeDowngradeIPFamiliesErrs := validateUpgradeDowngradeIPFamilies(oldService, service)
4589	allErrs = append(allErrs, upgradeDowngradeIPFamiliesErrs...)
4590
4591	upgradeDowngradeLoadBalancerClassErrs := validateLoadBalancerClassField(oldService, service)
4592	allErrs = append(allErrs, upgradeDowngradeLoadBalancerClassErrs...)
4593
4594	return append(allErrs, ValidateService(service)...)
4595}
4596
4597// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status.
4598func ValidateServiceStatusUpdate(service, oldService *core.Service) field.ErrorList {
4599	allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
4600	allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...)
4601	return allErrs
4602}
4603
4604// ValidateReplicationController tests if required fields in the replication controller are set.
4605func ValidateReplicationController(controller *core.ReplicationController, opts PodValidationOptions) field.ErrorList {
4606	allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata"))
4607	allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"), opts)...)
4608	return allErrs
4609}
4610
4611// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set.
4612func ValidateReplicationControllerUpdate(controller, oldController *core.ReplicationController, opts PodValidationOptions) field.ErrorList {
4613	allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
4614	allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"), opts)...)
4615	return allErrs
4616}
4617
4618// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set.
4619func ValidateReplicationControllerStatusUpdate(controller, oldController *core.ReplicationController) field.ErrorList {
4620	allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
4621	allErrs = append(allErrs, ValidateReplicationControllerStatus(controller.Status, field.NewPath("status"))...)
4622	return allErrs
4623}
4624
4625func ValidateReplicationControllerStatus(status core.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList {
4626	allErrs := field.ErrorList{}
4627	allErrs = append(allErrs, ValidateNonnegativeField(int64(status.Replicas), statusPath.Child("replicas"))...)
4628	allErrs = append(allErrs, ValidateNonnegativeField(int64(status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...)
4629	allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ReadyReplicas), statusPath.Child("readyReplicas"))...)
4630	allErrs = append(allErrs, ValidateNonnegativeField(int64(status.AvailableReplicas), statusPath.Child("availableReplicas"))...)
4631	allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ObservedGeneration), statusPath.Child("observedGeneration"))...)
4632	msg := "cannot be greater than status.replicas"
4633	if status.FullyLabeledReplicas > status.Replicas {
4634		allErrs = append(allErrs, field.Invalid(statusPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg))
4635	}
4636	if status.ReadyReplicas > status.Replicas {
4637		allErrs = append(allErrs, field.Invalid(statusPath.Child("readyReplicas"), status.ReadyReplicas, msg))
4638	}
4639	if status.AvailableReplicas > status.Replicas {
4640		allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, msg))
4641	}
4642	if status.AvailableReplicas > status.ReadyReplicas {
4643		allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
4644	}
4645	return allErrs
4646}
4647
4648// Validates that the given selector is non-empty.
4649func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList {
4650	allErrs := field.ErrorList{}
4651	selector := labels.Set(selectorMap).AsSelector()
4652	if selector.Empty() {
4653		allErrs = append(allErrs, field.Required(fldPath, ""))
4654	}
4655	return allErrs
4656}
4657
4658// Validates the given template and ensures that it is in accordance with the desired selector and replicas.
4659func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
4660	allErrs := field.ErrorList{}
4661	if template == nil {
4662		allErrs = append(allErrs, field.Required(fldPath, ""))
4663	} else {
4664		selector := labels.Set(selectorMap).AsSelector()
4665		if !selector.Empty() {
4666			// Verify that the RC selector matches the labels in template.
4667			labels := labels.Set(template.Labels)
4668			if !selector.Matches(labels) {
4669				allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
4670			}
4671		}
4672		allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath, opts)...)
4673		if replicas > 1 {
4674			allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
4675		}
4676		// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
4677		if template.Spec.RestartPolicy != core.RestartPolicyAlways {
4678			allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(core.RestartPolicyAlways)}))
4679		}
4680		if template.Spec.ActiveDeadlineSeconds != nil {
4681			allErrs = append(allErrs, field.Forbidden(fldPath.Child("spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in ReplicationController is not Supported"))
4682		}
4683	}
4684	return allErrs
4685}
4686
4687// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set.
4688func ValidateReplicationControllerSpec(spec *core.ReplicationControllerSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
4689	allErrs := field.ErrorList{}
4690	allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
4691	allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...)
4692	allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
4693	allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"), opts)...)
4694	return allErrs
4695}
4696
4697// ValidatePodTemplateSpec validates the spec of a pod template
4698func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
4699	allErrs := field.ErrorList{}
4700	allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
4701	allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
4702	allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"), opts)...)
4703	allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, nil, fldPath.Child("spec"), opts)...)
4704	allErrs = append(allErrs, validateSeccompAnnotationsAndFields(spec.ObjectMeta, &spec.Spec, fldPath.Child("spec"))...)
4705
4706	if len(spec.Spec.EphemeralContainers) > 0 {
4707		allErrs = append(allErrs, field.Forbidden(fldPath.Child("spec", "ephemeralContainers"), "ephemeral containers not allowed in pod template"))
4708	}
4709
4710	return allErrs
4711}
4712
4713func ValidateReadOnlyPersistentDisks(volumes []core.Volume, fldPath *field.Path) field.ErrorList {
4714	allErrs := field.ErrorList{}
4715	for i := range volumes {
4716		vol := &volumes[i]
4717		idxPath := fldPath.Index(i)
4718		if vol.GCEPersistentDisk != nil {
4719			if !vol.GCEPersistentDisk.ReadOnly {
4720				allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only"))
4721			}
4722		}
4723		// TODO: What to do for AWS?  It doesn't support replicas
4724	}
4725	return allErrs
4726}
4727
4728// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations has valid data
4729func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
4730	allErrs := field.ErrorList{}
4731
4732	taints, err := helper.GetTaintsFromNodeAnnotations(annotations)
4733	if err != nil {
4734		allErrs = append(allErrs, field.Invalid(fldPath, core.TaintsAnnotationKey, err.Error()))
4735		return allErrs
4736	}
4737
4738	if len(taints) > 0 {
4739		allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(core.TaintsAnnotationKey))...)
4740	}
4741
4742	return allErrs
4743}
4744
4745// validateNodeTaints tests if given taints have valid data.
4746func validateNodeTaints(taints []core.Taint, fldPath *field.Path) field.ErrorList {
4747	allErrors := field.ErrorList{}
4748
4749	uniqueTaints := map[core.TaintEffect]sets.String{}
4750
4751	for i, currTaint := range taints {
4752		idxPath := fldPath.Index(i)
4753		// validate the taint key
4754		allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...)
4755		// validate the taint value
4756		if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 {
4757			allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";")))
4758		}
4759		// validate the taint effect
4760		allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...)
4761
4762		// validate if taint is unique by <key, effect>
4763		if len(uniqueTaints[currTaint.Effect]) > 0 && uniqueTaints[currTaint.Effect].Has(currTaint.Key) {
4764			duplicatedError := field.Duplicate(idxPath, currTaint)
4765			duplicatedError.Detail = "taints must be unique by key and effect pair"
4766			allErrors = append(allErrors, duplicatedError)
4767			continue
4768		}
4769
4770		// add taint to existingTaints for uniqueness check
4771		if len(uniqueTaints[currTaint.Effect]) == 0 {
4772			uniqueTaints[currTaint.Effect] = sets.String{}
4773		}
4774		uniqueTaints[currTaint.Effect].Insert(currTaint.Key)
4775	}
4776	return allErrors
4777}
4778
4779func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
4780	allErrs := field.ErrorList{}
4781
4782	if annotations[core.TaintsAnnotationKey] != "" {
4783		allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...)
4784	}
4785
4786	if annotations[core.PreferAvoidPodsAnnotationKey] != "" {
4787		allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...)
4788	}
4789	return allErrs
4790}
4791
// ValidateNode tests if required fields in the node are set.
// It validates metadata, node-specific annotations, taints, resource
// quantities in status, and the PodCIDRs in spec.
func ValidateNode(node *core.Node) field.ErrorList {
	fldPath := field.NewPath("metadata")
	allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath)
	allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
	if len(node.Spec.Taints) > 0 {
		allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...)
	}

	// Only validate spec.
	// All status fields are optional and can be updated later.
	// That said, if specified, we need to ensure they are valid.
	allErrs = append(allErrs, ValidateNodeResources(node)...)

	// validate PodCIDRS only if we need to
	if len(node.Spec.PodCIDRs) > 0 {
		podCIDRsField := field.NewPath("spec", "podCIDRs")

		// all PodCIDRs should be valid ones
		for idx, value := range node.Spec.PodCIDRs {
			if _, err := ValidateCIDR(value); err != nil {
				// NOTE(review): the error reports the whole PodCIDRs slice as
				// the offending value rather than the single invalid entry.
				allErrs = append(allErrs, field.Invalid(podCIDRsField.Index(idx), node.Spec.PodCIDRs, "must be valid CIDR"))
			}
		}

		// if more than PodCIDR then
		// - validate for dual stack
		// - validate for duplication
		if len(node.Spec.PodCIDRs) > 1 {
			// With multiple entries, there must be exactly one CIDR per IP
			// family (one IPv4 + one IPv6), hence at most two entries total.
			dualStack, err := netutils.IsDualStackCIDRStrings(node.Spec.PodCIDRs)
			if err != nil {
				allErrs = append(allErrs, field.InternalError(podCIDRsField, fmt.Errorf("invalid PodCIDRs. failed to check with dual stack with error:%v", err)))
			}
			if !dualStack || len(node.Spec.PodCIDRs) > 2 {
				allErrs = append(allErrs, field.Invalid(podCIDRsField, node.Spec.PodCIDRs, "may specify no more than one CIDR for each IP family"))
			}

			// PodCIDRs must not contain duplicates
			seen := sets.String{}
			for i, value := range node.Spec.PodCIDRs {
				if seen.Has(value) {
					allErrs = append(allErrs, field.Duplicate(podCIDRsField.Index(i), value))
				}
				seen.Insert(value)
			}
		}
	}

	return allErrs
}
4842
4843// ValidateNodeResources is used to make sure a node has valid capacity and allocatable values.
4844func ValidateNodeResources(node *core.Node) field.ErrorList {
4845	allErrs := field.ErrorList{}
4846
4847	// Validate resource quantities in capacity.
4848	for k, v := range node.Status.Capacity {
4849		resPath := field.NewPath("status", "capacity", string(k))
4850		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
4851	}
4852
4853	// Validate resource quantities in allocatable.
4854	for k, v := range node.Status.Allocatable {
4855		resPath := field.NewPath("status", "allocatable", string(k))
4856		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
4857	}
4858	return allErrs
4859}
4860
// ValidateNodeUpdate tests to make sure a node update can be applied.  Modifies oldNode.
// Mutable fields (status, metadata, unschedulable, taints, configSource) are
// validated; effectively-immutable spec fields (podCIDRs, providerID,
// doNotUseExternalID) may only transition from empty to set.
func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
	fldPath := field.NewPath("metadata")
	allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath)
	allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)

	// TODO: Enable the code once we have better core object.status update model. Currently,
	// anyone can update node status.
	// if !apiequality.Semantic.DeepEqual(node.Status, core.NodeStatus{}) {
	// 	allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty"))
	// }

	allErrs = append(allErrs, ValidateNodeResources(node)...)

	// Validate no duplicate addresses in node status.
	addresses := make(map[core.NodeAddress]bool)
	for i, address := range node.Status.Addresses {
		if _, ok := addresses[address]; ok {
			allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address))
		}
		addresses[address] = true
	}

	// Allow the controller manager to assign a CIDR to a node if it doesn't have one.
	if len(oldNode.Spec.PodCIDRs) > 0 {
		// compare the entire slice
		if len(oldNode.Spec.PodCIDRs) != len(node.Spec.PodCIDRs) {
			allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDRs"), "node updates may not change podCIDR except from \"\" to valid"))
		} else {
			// NOTE: one Forbidden error is appended per mismatched index.
			for idx, value := range oldNode.Spec.PodCIDRs {
				if value != node.Spec.PodCIDRs[idx] {
					allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDRs"), "node updates may not change podCIDR except from \"\" to valid"))
				}
			}
		}
	}

	// Allow controller manager updating provider ID when not set
	if len(oldNode.Spec.ProviderID) > 0 && oldNode.Spec.ProviderID != node.Spec.ProviderID {
		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "providerID"), "node updates may not change providerID except from \"\" to valid"))
	}

	if node.Spec.ConfigSource != nil {
		allErrs = append(allErrs, validateNodeConfigSourceSpec(node.Spec.ConfigSource, field.NewPath("spec", "configSource"))...)
	}
	if node.Status.Config != nil {
		allErrs = append(allErrs, validateNodeConfigStatus(node.Status.Config, field.NewPath("status", "config"))...)
	}

	// update taints
	if len(node.Spec.Taints) > 0 {
		allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...)
	}

	if node.Spec.DoNotUseExternalID != oldNode.Spec.DoNotUseExternalID {
		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "externalID"), "may not be updated"))
	}

	// status and metadata are allowed change (barring restrictions above), so separately test spec field.
	// spec only has a few fields, so check the ones we don't allow changing
	//  1. PodCIDRs - immutable after first set - checked above
	//  2. ProviderID - immutable after first set - checked above
	//  3. Unschedulable - allowed to change
	//  4. Taints - allowed to change
	//  5. ConfigSource - allowed to change (and checked above)
	//  6. DoNotUseExternalID - immutable - checked above

	return allErrs
}
4930
4931// validation specific to Node.Spec.ConfigSource
4932func validateNodeConfigSourceSpec(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
4933	allErrs := field.ErrorList{}
4934	count := int(0)
4935	if source.ConfigMap != nil {
4936		count++
4937		allErrs = append(allErrs, validateConfigMapNodeConfigSourceSpec(source.ConfigMap, fldPath.Child("configMap"))...)
4938	}
4939	// add more subfields here in the future as they are added to NodeConfigSource
4940
4941	// exactly one reference subfield must be non-nil
4942	if count != 1 {
4943		allErrs = append(allErrs, field.Invalid(fldPath, source, "exactly one reference subfield must be non-nil"))
4944	}
4945	return allErrs
4946}
4947
4948// validation specific to Node.Spec.ConfigSource.ConfigMap
4949func validateConfigMapNodeConfigSourceSpec(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
4950	allErrs := field.ErrorList{}
4951	// uid and resourceVersion must not be set in spec
4952	if string(source.UID) != "" {
4953		allErrs = append(allErrs, field.Forbidden(fldPath.Child("uid"), "uid must not be set in spec"))
4954	}
4955	if source.ResourceVersion != "" {
4956		allErrs = append(allErrs, field.Forbidden(fldPath.Child("resourceVersion"), "resourceVersion must not be set in spec"))
4957	}
4958	return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...)
4959}
4960
4961// validation specififc to Node.Status.Config
4962func validateNodeConfigStatus(status *core.NodeConfigStatus, fldPath *field.Path) field.ErrorList {
4963	allErrs := field.ErrorList{}
4964	if status.Assigned != nil {
4965		allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Assigned, fldPath.Child("assigned"))...)
4966	}
4967	if status.Active != nil {
4968		allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Active, fldPath.Child("active"))...)
4969	}
4970	if status.LastKnownGood != nil {
4971		allErrs = append(allErrs, validateNodeConfigSourceStatus(status.LastKnownGood, fldPath.Child("lastKnownGood"))...)
4972	}
4973	return allErrs
4974}
4975
4976// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood)
4977func validateNodeConfigSourceStatus(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
4978	allErrs := field.ErrorList{}
4979	count := int(0)
4980	if source.ConfigMap != nil {
4981		count++
4982		allErrs = append(allErrs, validateConfigMapNodeConfigSourceStatus(source.ConfigMap, fldPath.Child("configMap"))...)
4983	}
4984	// add more subfields here in the future as they are added to NodeConfigSource
4985
4986	// exactly one reference subfield must be non-nil
4987	if count != 1 {
4988		allErrs = append(allErrs, field.Invalid(fldPath, source, "exactly one reference subfield must be non-nil"))
4989	}
4990	return allErrs
4991}
4992
4993// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood).ConfigMap
4994func validateConfigMapNodeConfigSourceStatus(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
4995	allErrs := field.ErrorList{}
4996	// uid and resourceVersion must be set in status
4997	if string(source.UID) == "" {
4998		allErrs = append(allErrs, field.Required(fldPath.Child("uid"), "uid must be set in status"))
4999	}
5000	if source.ResourceVersion == "" {
5001		allErrs = append(allErrs, field.Required(fldPath.Child("resourceVersion"), "resourceVersion must be set in status"))
5002	}
5003	return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...)
5004}
5005
5006// common validation
5007func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
5008	allErrs := field.ErrorList{}
5009	// validate target configmap namespace
5010	if source.Namespace == "" {
5011		allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "namespace must be set"))
5012	} else {
5013		for _, msg := range ValidateNameFunc(ValidateNamespaceName)(source.Namespace, false) {
5014			allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), source.Namespace, msg))
5015		}
5016	}
5017	// validate target configmap name
5018	if source.Name == "" {
5019		allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name must be set"))
5020	} else {
5021		for _, msg := range ValidateNameFunc(ValidateConfigMapName)(source.Name, false) {
5022			allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), source.Name, msg))
5023		}
5024	}
5025	// validate kubeletConfigKey against rules for configMap key names
5026	if source.KubeletConfigKey == "" {
5027		allErrs = append(allErrs, field.Required(fldPath.Child("kubeletConfigKey"), "kubeletConfigKey must be set"))
5028	} else {
5029		for _, msg := range validation.IsConfigMapKey(source.KubeletConfigKey) {
5030			allErrs = append(allErrs, field.Invalid(fldPath.Child("kubeletConfigKey"), source.KubeletConfigKey, msg))
5031		}
5032	}
5033	return allErrs
5034}
5035
5036// Validate compute resource typename.
5037// Refer to docs/design/resources.md for more details.
5038func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
5039	allErrs := field.ErrorList{}
5040	for _, msg := range validation.IsQualifiedName(value) {
5041		allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
5042	}
5043	if len(allErrs) != 0 {
5044		return allErrs
5045	}
5046
5047	if len(strings.Split(value, "/")) == 1 {
5048		if !helper.IsStandardResourceName(value) {
5049			return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified"))
5050		}
5051	}
5052
5053	return allErrs
5054}
5055
5056// Validate container resource name
5057// Refer to docs/design/resources.md for more details.
5058func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList {
5059	allErrs := validateResourceName(value, fldPath)
5060
5061	if len(strings.Split(value, "/")) == 1 {
5062		if !helper.IsStandardContainerResourceName(value) {
5063			return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
5064		}
5065	} else if !helper.IsNativeResource(core.ResourceName(value)) {
5066		if !helper.IsExtendedResourceName(core.ResourceName(value)) {
5067			return append(allErrs, field.Invalid(fldPath, value, "doesn't follow extended resource name standard"))
5068		}
5069	}
5070	return allErrs
5071}
5072
5073// Validate resource names that can go in a resource quota
5074// Refer to docs/design/resources.md for more details.
5075func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList {
5076	allErrs := validateResourceName(value, fldPath)
5077
5078	if len(strings.Split(value, "/")) == 1 {
5079		if !helper.IsStandardQuotaResourceName(value) {
5080			return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource))
5081		}
5082	}
5083	return allErrs
5084}
5085
5086// Validate limit range types
5087func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList {
5088	allErrs := field.ErrorList{}
5089	for _, msg := range validation.IsQualifiedName(value) {
5090		allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
5091	}
5092	if len(allErrs) != 0 {
5093		return allErrs
5094	}
5095
5096	if len(strings.Split(value, "/")) == 1 {
5097		if !helper.IsStandardLimitRangeType(value) {
5098			return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified"))
5099		}
5100	}
5101
5102	return allErrs
5103}
5104
5105// Validate limit range resource name
5106// limit types (other than Pod/Container) could contain storage not just cpu or memory
5107func validateLimitRangeResourceName(limitType core.LimitType, value string, fldPath *field.Path) field.ErrorList {
5108	switch limitType {
5109	case core.LimitTypePod, core.LimitTypeContainer:
5110		return validateContainerResourceName(value, fldPath)
5111	default:
5112		return validateResourceName(value, fldPath)
5113	}
5114}
5115
// ValidateLimitRange tests if required fields in the LimitRange are set.
// Beyond per-field checks, it enforces cross-field consistency for each
// resource name: min <= defaultRequest <= default <= max, and
// maxLimitRequestRatio within [1, max/min].
func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList {
	allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata"))

	// ensure resource names are properly qualified per docs/design/resources.md
	limitTypeSet := map[core.LimitType]bool{}
	fldPath := field.NewPath("spec", "limits")
	for i := range limitRange.Spec.Limits {
		idxPath := fldPath.Index(i)
		limit := &limitRange.Spec.Limits[i]
		allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...)

		// each limit type may appear at most once in the spec
		_, found := limitTypeSet[limit.Type]
		if found {
			allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type))
		}
		limitTypeSet[limit.Type] = true

		// Collect every quantity by resource name so the cross-field
		// comparisons below can be performed per resource.
		keys := sets.String{}
		min := map[string]resource.Quantity{}
		max := map[string]resource.Quantity{}
		defaults := map[string]resource.Quantity{}
		defaultRequests := map[string]resource.Quantity{}
		maxLimitRequestRatios := map[string]resource.Quantity{}

		for k, q := range limit.Max {
			allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...)
			keys.Insert(string(k))
			max[string(k)] = q
		}
		for k, q := range limit.Min {
			allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...)
			keys.Insert(string(k))
			min[string(k)] = q
		}

		if limit.Type == core.LimitTypePod {
			// Defaults apply to containers, not pods, so they are forbidden here.
			if len(limit.Default) > 0 {
				allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'"))
			}
			if len(limit.DefaultRequest) > 0 {
				allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'"))
			}
		} else {
			for k, q := range limit.Default {
				allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...)
				keys.Insert(string(k))
				defaults[string(k)] = q
			}
			for k, q := range limit.DefaultRequest {
				allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...)
				keys.Insert(string(k))
				defaultRequests[string(k)] = q
			}
		}

		// PVC limits must bound storage on at least one side.
		if limit.Type == core.LimitTypePersistentVolumeClaim {
			_, minQuantityFound := limit.Min[core.ResourceStorage]
			_, maxQuantityFound := limit.Max[core.ResourceStorage]
			if !minQuantityFound && !maxQuantityFound {
				allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided"))
			}
		}

		for k, q := range limit.MaxLimitRequestRatio {
			allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...)
			keys.Insert(string(k))
			maxLimitRequestRatios[string(k)] = q
		}

		// Cross-field consistency checks, per resource name.
		for k := range keys {
			minQuantity, minQuantityFound := min[k]
			maxQuantity, maxQuantityFound := max[k]
			defaultQuantity, defaultQuantityFound := defaults[k]
			defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k]
			maxRatio, maxRatioFound := maxLimitRequestRatios[k]

			if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 {
				allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String())))
			}

			if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 {
				allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String())))
			}

			if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 {
				allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String())))
			}

			if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 {
				allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String())))
			}

			if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 {
				allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String())))
			}

			if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 {
				allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String())))
			}
			// a ratio below 1 would force limit < request, which is meaningless
			if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 {
				allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String())))
			}
			if maxRatioFound && minQuantityFound && maxQuantityFound {
				maxRatioValue := float64(maxRatio.Value())
				minQuantityValue := minQuantity.Value()
				maxQuantityValue := maxQuantity.Value()
				// Use milli-units for better precision when all values are
				// small enough to avoid overflow.
				if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue {
					maxRatioValue = float64(maxRatio.MilliValue()) / 1000
					minQuantityValue = minQuantity.MilliValue()
					maxQuantityValue = maxQuantity.MilliValue()
				}
				maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue)
				if maxRatioValue > maxRatioLimit {
					allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit)))
				}
			}

			// for GPU, hugepages and other resources that are not allowed to overcommit,
			// the default value and defaultRequest value must match if both are specified
			if !helper.IsOvercommitAllowed(core.ResourceName(k)) && defaultQuantityFound && defaultRequestQuantityFound && defaultQuantity.Cmp(defaultRequestQuantity) != 0 {
				allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default value %s must equal to defaultRequest value %s in %s", defaultQuantity.String(), defaultRequestQuantity.String(), k)))
			}
		}
	}

	return allErrs
}
5244
5245// ValidateServiceAccount tests if required fields in the ServiceAccount are set.
5246func ValidateServiceAccount(serviceAccount *core.ServiceAccount) field.ErrorList {
5247	allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata"))
5248	return allErrs
5249}
5250
5251// ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set.
5252func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *core.ServiceAccount) field.ErrorList {
5253	allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata"))
5254	allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...)
5255	return allErrs
5256}
5257
5258// ValidateSecret tests if required fields in the Secret are set.
5259func ValidateSecret(secret *core.Secret) field.ErrorList {
5260	allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata"))
5261
5262	dataPath := field.NewPath("data")
5263	totalSize := 0
5264	for key, value := range secret.Data {
5265		for _, msg := range validation.IsConfigMapKey(key) {
5266			allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, msg))
5267		}
5268		totalSize += len(value)
5269	}
5270	if totalSize > core.MaxSecretSize {
5271		allErrs = append(allErrs, field.TooLong(dataPath, "", core.MaxSecretSize))
5272	}
5273
5274	switch secret.Type {
5275	case core.SecretTypeServiceAccountToken:
5276		// Only require Annotations[kubernetes.io/service-account.name]
5277		// Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop
5278		if value := secret.Annotations[core.ServiceAccountNameKey]; len(value) == 0 {
5279			allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(core.ServiceAccountNameKey), ""))
5280		}
5281	case core.SecretTypeOpaque, "":
5282	// no-op
5283	case core.SecretTypeDockercfg:
5284		dockercfgBytes, exists := secret.Data[core.DockerConfigKey]
5285		if !exists {
5286			allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigKey), ""))
5287			break
5288		}
5289
5290		// make sure that the content is well-formed json.
5291		if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil {
5292			allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigKey), "<secret contents redacted>", err.Error()))
5293		}
5294	case core.SecretTypeDockerConfigJSON:
5295		dockerConfigJSONBytes, exists := secret.Data[core.DockerConfigJSONKey]
5296		if !exists {
5297			allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigJSONKey), ""))
5298			break
5299		}
5300
5301		// make sure that the content is well-formed json.
5302		if err := json.Unmarshal(dockerConfigJSONBytes, &map[string]interface{}{}); err != nil {
5303			allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigJSONKey), "<secret contents redacted>", err.Error()))
5304		}
5305	case core.SecretTypeBasicAuth:
5306		_, usernameFieldExists := secret.Data[core.BasicAuthUsernameKey]
5307		_, passwordFieldExists := secret.Data[core.BasicAuthPasswordKey]
5308
5309		// username or password might be empty, but the field must be present
5310		if !usernameFieldExists && !passwordFieldExists {
5311			allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.BasicAuthUsernameKey), ""))
5312			allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.BasicAuthPasswordKey), ""))
5313			break
5314		}
5315	case core.SecretTypeSSHAuth:
5316		if len(secret.Data[core.SSHAuthPrivateKey]) == 0 {
5317			allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.SSHAuthPrivateKey), ""))
5318			break
5319		}
5320
5321	case core.SecretTypeTLS:
5322		if _, exists := secret.Data[core.TLSCertKey]; !exists {
5323			allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSCertKey), ""))
5324		}
5325		if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists {
5326			allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), ""))
5327		}
5328	// TODO: Verify that the key matches the cert.
5329	default:
5330		// no-op
5331	}
5332
5333	return allErrs
5334}
5335
5336// ValidateSecretUpdate tests if required fields in the Secret are set.
5337func ValidateSecretUpdate(newSecret, oldSecret *core.Secret) field.ErrorList {
5338	allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata"))
5339
5340	allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...)
5341	if oldSecret.Immutable != nil && *oldSecret.Immutable {
5342		if newSecret.Immutable == nil || !*newSecret.Immutable {
5343			allErrs = append(allErrs, field.Forbidden(field.NewPath("immutable"), "field is immutable when `immutable` is set"))
5344		}
5345		if !reflect.DeepEqual(newSecret.Data, oldSecret.Data) {
5346			allErrs = append(allErrs, field.Forbidden(field.NewPath("data"), "field is immutable when `immutable` is set"))
5347		}
5348		// We don't validate StringData, as it was already converted back to Data
5349		// before validation is happening.
5350	}
5351
5352	allErrs = append(allErrs, ValidateSecret(newSecret)...)
5353	return allErrs
5354}
5355
// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid
// (it must be a DNS subdomain).
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateConfigMapName = apimachineryvalidation.NameIsDNSSubdomain
5360
5361// ValidateConfigMap tests whether required fields in the ConfigMap are set.
5362func ValidateConfigMap(cfg *core.ConfigMap) field.ErrorList {
5363	allErrs := field.ErrorList{}
5364	allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...)
5365
5366	totalSize := 0
5367
5368	for key, value := range cfg.Data {
5369		for _, msg := range validation.IsConfigMapKey(key) {
5370			allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
5371		}
5372		// check if we have a duplicate key in the other bag
5373		if _, isValue := cfg.BinaryData[key]; isValue {
5374			msg := "duplicate of key present in binaryData"
5375			allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
5376		}
5377		totalSize += len(value)
5378	}
5379	for key, value := range cfg.BinaryData {
5380		for _, msg := range validation.IsConfigMapKey(key) {
5381			allErrs = append(allErrs, field.Invalid(field.NewPath("binaryData").Key(key), key, msg))
5382		}
5383		totalSize += len(value)
5384	}
5385	if totalSize > core.MaxSecretSize {
5386		// pass back "" to indicate that the error refers to the whole object.
5387		allErrs = append(allErrs, field.TooLong(field.NewPath(""), cfg, core.MaxSecretSize))
5388	}
5389
5390	return allErrs
5391}
5392
5393// ValidateConfigMapUpdate tests if required fields in the ConfigMap are set.
5394func ValidateConfigMapUpdate(newCfg, oldCfg *core.ConfigMap) field.ErrorList {
5395	allErrs := field.ErrorList{}
5396	allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...)
5397
5398	if oldCfg.Immutable != nil && *oldCfg.Immutable {
5399		if newCfg.Immutable == nil || !*newCfg.Immutable {
5400			allErrs = append(allErrs, field.Forbidden(field.NewPath("immutable"), "field is immutable when `immutable` is set"))
5401		}
5402		if !reflect.DeepEqual(newCfg.Data, oldCfg.Data) {
5403			allErrs = append(allErrs, field.Forbidden(field.NewPath("data"), "field is immutable when `immutable` is set"))
5404		}
5405		if !reflect.DeepEqual(newCfg.BinaryData, oldCfg.BinaryData) {
5406			allErrs = append(allErrs, field.Forbidden(field.NewPath("binaryData"), "field is immutable when `immutable` is set"))
5407		}
5408	}
5409
5410	allErrs = append(allErrs, ValidateConfigMap(newCfg)...)
5411	return allErrs
5412}
5413
5414func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList {
5415	if quantity.Value() < 0 {
5416		return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")}
5417	}
5418	return field.ErrorList{}
5419}
5420
// ValidateResourceRequirements validates a resource requirement spec: limits
// and requests must use valid resource names and non-negative quantities,
// requests may not exceed limits (and must equal them for non-overcommitable
// resources), and hugepage resources may only appear alongside cpu or memory.
func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
	allErrs := field.ErrorList{}
	limPath := fldPath.Child("limits")
	reqPath := fldPath.Child("requests")
	limContainsCPUOrMemory := false
	reqContainsCPUOrMemory := false
	limContainsHugePages := false
	reqContainsHugePages := false
	supportedQoSComputeResources := sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
	for resourceName, quantity := range requirements.Limits {

		// Shadow fldPath with the per-resource keyed path for this entry.
		fldPath := limPath.Key(string(resourceName))
		// Validate resource name.
		allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)

		// Validate resource quantity.
		allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)

		if helper.IsHugePageResourceName(resourceName) {
			limContainsHugePages = true
			// Hugepage quantities must be an integer multiple of the page
			// size unless opts.AllowIndivisibleHugePagesValues permits it.
			if err := validateResourceQuantityHugePageValue(resourceName, quantity, opts); err != nil {
				allErrs = append(allErrs, field.Invalid(fldPath, quantity.String(), err.Error()))
			}
		}

		if supportedQoSComputeResources.Has(string(resourceName)) {
			limContainsCPUOrMemory = true
		}
	}
	for resourceName, quantity := range requirements.Requests {
		fldPath := reqPath.Key(string(resourceName))
		// Validate resource name.
		allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
		// Validate resource quantity.
		allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)

		// Check that request <= limit.
		limitQuantity, exists := requirements.Limits[resourceName]
		if exists {
			// For non overcommitable resources, not only requests can't exceed limits, they also can't be lower, i.e. must be equal.
			if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) {
				allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName)))
			} else if quantity.Cmp(limitQuantity) > 0 {
				allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName)))
			}
		} else if !helper.IsOvercommitAllowed(resourceName) {
			// Non-overcommitable resources must have a matching limit for
			// every request.
			allErrs = append(allErrs, field.Required(limPath, "Limit must be set for non overcommitable resources"))
		}
		if helper.IsHugePageResourceName(resourceName) {
			reqContainsHugePages = true
			if err := validateResourceQuantityHugePageValue(resourceName, quantity, opts); err != nil {
				allErrs = append(allErrs, field.Invalid(fldPath, quantity.String(), err.Error()))
			}
		}
		if supportedQoSComputeResources.Has(string(resourceName)) {
			reqContainsCPUOrMemory = true
		}

	}
	// Hugepages may not be the only compute resource: cpu or memory must be
	// present in either limits or requests.
	if !limContainsCPUOrMemory && !reqContainsCPUOrMemory && (reqContainsHugePages || limContainsHugePages) {
		allErrs = append(allErrs, field.Forbidden(fldPath, "HugePages require cpu or memory"))
	}

	return allErrs
}
5487
5488func validateResourceQuantityHugePageValue(name core.ResourceName, quantity resource.Quantity, opts PodValidationOptions) error {
5489	if !helper.IsHugePageResourceName(name) {
5490		return nil
5491	}
5492
5493	if !opts.AllowIndivisibleHugePagesValues && !helper.IsHugePageResourceValueDivisible(name, quantity) {
5494		return fmt.Errorf("%s is not positive integer multiple of %s", quantity.String(), name)
5495	}
5496
5497	return nil
5498}
5499
5500// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for set of scopes
5501func validateResourceQuotaScopes(resourceQuotaSpec *core.ResourceQuotaSpec, opts ResourceQuotaValidationOptions, fld *field.Path) field.ErrorList {
5502	allErrs := field.ErrorList{}
5503	if len(resourceQuotaSpec.Scopes) == 0 {
5504		return allErrs
5505	}
5506	hardLimits := sets.NewString()
5507	for k := range resourceQuotaSpec.Hard {
5508		hardLimits.Insert(string(k))
5509	}
5510	fldPath := fld.Child("scopes")
5511	scopeSet := sets.NewString()
5512	for _, scope := range resourceQuotaSpec.Scopes {
5513		if !helper.IsStandardResourceQuotaScope(string(scope), opts.AllowPodAffinityNamespaceSelector) {
5514			allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope"))
5515		}
5516		for _, k := range hardLimits.List() {
5517			if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(scope, k) {
5518				allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource"))
5519			}
5520		}
5521		scopeSet.Insert(string(scope))
5522	}
5523	invalidScopePairs := []sets.String{
5524		sets.NewString(string(core.ResourceQuotaScopeBestEffort), string(core.ResourceQuotaScopeNotBestEffort)),
5525		sets.NewString(string(core.ResourceQuotaScopeTerminating), string(core.ResourceQuotaScopeNotTerminating)),
5526	}
5527	for _, invalidScopePair := range invalidScopePairs {
5528		if scopeSet.HasAll(invalidScopePair.List()...) {
5529			allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes"))
5530		}
5531	}
5532	return allErrs
5533}
5534
5535// validateScopedResourceSelectorRequirement tests that the match expressions has valid data
5536func validateScopedResourceSelectorRequirement(resourceQuotaSpec *core.ResourceQuotaSpec, opts ResourceQuotaValidationOptions, fld *field.Path) field.ErrorList {
5537	allErrs := field.ErrorList{}
5538	hardLimits := sets.NewString()
5539	for k := range resourceQuotaSpec.Hard {
5540		hardLimits.Insert(string(k))
5541	}
5542	fldPath := fld.Child("matchExpressions")
5543	scopeSet := sets.NewString()
5544	for _, req := range resourceQuotaSpec.ScopeSelector.MatchExpressions {
5545		if !helper.IsStandardResourceQuotaScope(string(req.ScopeName), opts.AllowPodAffinityNamespaceSelector) {
5546			allErrs = append(allErrs, field.Invalid(fldPath.Child("scopeName"), req.ScopeName, "unsupported scope"))
5547		}
5548		for _, k := range hardLimits.List() {
5549			if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(req.ScopeName, k) {
5550				allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.ScopeSelector, "unsupported scope applied to resource"))
5551			}
5552		}
5553		switch req.ScopeName {
5554		case core.ResourceQuotaScopeBestEffort, core.ResourceQuotaScopeNotBestEffort, core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeCrossNamespacePodAffinity:
5555			if req.Operator != core.ScopeSelectorOpExists {
5556				allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator,
5557					"must be 'Exist' when scope is any of ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeBestEffort, ResourceQuotaScopeNotBestEffort or ResourceQuotaScopeCrossNamespacePodAffinity"))
5558			}
5559		}
5560
5561		switch req.Operator {
5562		case core.ScopeSelectorOpIn, core.ScopeSelectorOpNotIn:
5563			if len(req.Values) == 0 {
5564				allErrs = append(allErrs, field.Required(fldPath.Child("values"),
5565					"must be at least one value when `operator` is 'In' or 'NotIn' for scope selector"))
5566			}
5567		case core.ScopeSelectorOpExists, core.ScopeSelectorOpDoesNotExist:
5568			if len(req.Values) != 0 {
5569				allErrs = append(allErrs, field.Invalid(fldPath.Child("values"), req.Values,
5570					"must be no value when `operator` is 'Exist' or 'DoesNotExist' for scope selector"))
5571			}
5572		default:
5573			allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator, "not a valid selector operator"))
5574		}
5575		scopeSet.Insert(string(req.ScopeName))
5576	}
5577	invalidScopePairs := []sets.String{
5578		sets.NewString(string(core.ResourceQuotaScopeBestEffort), string(core.ResourceQuotaScopeNotBestEffort)),
5579		sets.NewString(string(core.ResourceQuotaScopeTerminating), string(core.ResourceQuotaScopeNotTerminating)),
5580	}
5581	for _, invalidScopePair := range invalidScopePairs {
5582		if scopeSet.HasAll(invalidScopePair.List()...) {
5583			allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes"))
5584		}
5585	}
5586
5587	return allErrs
5588}
5589
5590// validateScopeSelector tests that the specified scope selector has valid data
5591func validateScopeSelector(resourceQuotaSpec *core.ResourceQuotaSpec, opts ResourceQuotaValidationOptions, fld *field.Path) field.ErrorList {
5592	allErrs := field.ErrorList{}
5593	if resourceQuotaSpec.ScopeSelector == nil {
5594		return allErrs
5595	}
5596	allErrs = append(allErrs, validateScopedResourceSelectorRequirement(resourceQuotaSpec, opts, fld.Child("scopeSelector"))...)
5597	return allErrs
5598}
5599
// ResourceQuotaValidationOptions contains the different settings for ResourceQuota validation
type ResourceQuotaValidationOptions struct {
	// Allow pod-affinity namespace selector validation, i.e. permit the
	// CrossNamespacePodAffinity quota scope.
	AllowPodAffinityNamespaceSelector bool
}
5605
5606// ValidateResourceQuota tests if required fields in the ResourceQuota are set.
5607func ValidateResourceQuota(resourceQuota *core.ResourceQuota, opts ResourceQuotaValidationOptions) field.ErrorList {
5608	allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata"))
5609
5610	allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, opts, field.NewPath("spec"))...)
5611	allErrs = append(allErrs, ValidateResourceQuotaStatus(&resourceQuota.Status, field.NewPath("status"))...)
5612
5613	return allErrs
5614}
5615
5616func ValidateResourceQuotaStatus(status *core.ResourceQuotaStatus, fld *field.Path) field.ErrorList {
5617	allErrs := field.ErrorList{}
5618
5619	fldPath := fld.Child("hard")
5620	for k, v := range status.Hard {
5621		resPath := fldPath.Key(string(k))
5622		allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
5623		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
5624	}
5625	fldPath = fld.Child("used")
5626	for k, v := range status.Used {
5627		resPath := fldPath.Key(string(k))
5628		allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
5629		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
5630	}
5631
5632	return allErrs
5633}
5634
5635func ValidateResourceQuotaSpec(resourceQuotaSpec *core.ResourceQuotaSpec, opts ResourceQuotaValidationOptions, fld *field.Path) field.ErrorList {
5636	allErrs := field.ErrorList{}
5637
5638	fldPath := fld.Child("hard")
5639	for k, v := range resourceQuotaSpec.Hard {
5640		resPath := fldPath.Key(string(k))
5641		allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
5642		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
5643	}
5644
5645	allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, opts, fld)...)
5646	allErrs = append(allErrs, validateScopeSelector(resourceQuotaSpec, opts, fld)...)
5647
5648	return allErrs
5649}
5650
5651// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource
5652func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList {
5653	allErrs := field.ErrorList{}
5654	allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...)
5655	if helper.IsIntegerResourceName(resource) {
5656		if value.MilliValue()%int64(1000) != int64(0) {
5657			allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg))
5658		}
5659	}
5660	return allErrs
5661}
5662
5663// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make.
5664func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota, opts ResourceQuotaValidationOptions) field.ErrorList {
5665	allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata"))
5666	allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, opts, field.NewPath("spec"))...)
5667
5668	// ensure scopes cannot change, and that resources are still valid for scope
5669	fldPath := field.NewPath("spec", "scopes")
5670	oldScopes := sets.NewString()
5671	newScopes := sets.NewString()
5672	for _, scope := range newResourceQuota.Spec.Scopes {
5673		newScopes.Insert(string(scope))
5674	}
5675	for _, scope := range oldResourceQuota.Spec.Scopes {
5676		oldScopes.Insert(string(scope))
5677	}
5678	if !oldScopes.Equal(newScopes) {
5679		allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, fieldImmutableErrorMsg))
5680	}
5681
5682	return allErrs
5683}
5684
5685// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make.
5686func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList {
5687	allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata"))
5688	if len(newResourceQuota.ResourceVersion) == 0 {
5689		allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
5690	}
5691	fldPath := field.NewPath("status", "hard")
5692	for k, v := range newResourceQuota.Status.Hard {
5693		resPath := fldPath.Key(string(k))
5694		allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
5695		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
5696	}
5697	fldPath = field.NewPath("status", "used")
5698	for k, v := range newResourceQuota.Status.Used {
5699		resPath := fldPath.Key(string(k))
5700		allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
5701		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
5702	}
5703	return allErrs
5704}
5705
5706// ValidateNamespace tests if required fields are set.
5707func ValidateNamespace(namespace *core.Namespace) field.ErrorList {
5708	allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata"))
5709	for i := range namespace.Spec.Finalizers {
5710		allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...)
5711	}
5712	return allErrs
5713}
5714
5715// Validate finalizer names
5716func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
5717	allErrs := apimachineryvalidation.ValidateFinalizerName(stringValue, fldPath)
5718	allErrs = append(allErrs, validateKubeFinalizerName(stringValue, fldPath)...)
5719	return allErrs
5720}
5721
5722// validateKubeFinalizerName checks for "standard" names of legacy finalizer
5723func validateKubeFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
5724	allErrs := field.ErrorList{}
5725	if len(strings.Split(stringValue, "/")) == 1 {
5726		if !helper.IsStandardFinalizerName(stringValue) {
5727			return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified"))
5728		}
5729	}
5730
5731	return allErrs
5732}
5733
5734// ValidateNamespaceUpdate tests to make sure a namespace update can be applied.
5735func ValidateNamespaceUpdate(newNamespace *core.Namespace, oldNamespace *core.Namespace) field.ErrorList {
5736	allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
5737	return allErrs
5738}
5739
5740// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make.
5741func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList {
5742	allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
5743	if newNamespace.DeletionTimestamp.IsZero() {
5744		if newNamespace.Status.Phase != core.NamespaceActive {
5745			allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty"))
5746		}
5747	} else {
5748		if newNamespace.Status.Phase != core.NamespaceTerminating {
5749			allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty"))
5750		}
5751	}
5752	return allErrs
5753}
5754
5755// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make.
5756func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList {
5757	allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
5758
5759	fldPath := field.NewPath("spec", "finalizers")
5760	for i := range newNamespace.Spec.Finalizers {
5761		idxPath := fldPath.Index(i)
5762		allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...)
5763	}
5764	return allErrs
5765}
5766
5767// ValidateEndpoints validates Endpoints on create and update.
5768func ValidateEndpoints(endpoints *core.Endpoints) field.ErrorList {
5769	allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata"))
5770	allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...)
5771	allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, field.NewPath("subsets"))...)
5772	return allErrs
5773}
5774
5775// ValidateEndpointsCreate validates Endpoints on create.
5776func ValidateEndpointsCreate(endpoints *core.Endpoints) field.ErrorList {
5777	return ValidateEndpoints(endpoints)
5778}
5779
5780// ValidateEndpointsUpdate validates Endpoints on update. NodeName changes are
5781// allowed during update to accommodate the case where nodeIP or PodCIDR is
5782// reused. An existing endpoint ip will have a different nodeName if this
5783// happens.
5784func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *core.Endpoints) field.ErrorList {
5785	allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata"))
5786	allErrs = append(allErrs, ValidateEndpoints(newEndpoints)...)
5787	return allErrs
5788}
5789
5790func validateEndpointSubsets(subsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList {
5791	allErrs := field.ErrorList{}
5792	for i := range subsets {
5793		ss := &subsets[i]
5794		idxPath := fldPath.Index(i)
5795
5796		// EndpointSubsets must include endpoint address. For headless service, we allow its endpoints not to have ports.
5797		if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 {
5798			//TODO: consider adding a RequiredOneOf() error for this and similar cases
5799			allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`"))
5800		}
5801		for addr := range ss.Addresses {
5802			allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr))...)
5803		}
5804		for addr := range ss.NotReadyAddresses {
5805			allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr))...)
5806		}
5807		for port := range ss.Ports {
5808			allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...)
5809		}
5810	}
5811
5812	return allErrs
5813}
5814
5815func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path) field.ErrorList {
5816	allErrs := field.ErrorList{}
5817	for _, msg := range validation.IsValidIP(address.IP) {
5818		allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg))
5819	}
5820	if len(address.Hostname) > 0 {
5821		allErrs = append(allErrs, ValidateDNS1123Label(address.Hostname, fldPath.Child("hostname"))...)
5822	}
5823	// During endpoint update, verify that NodeName is a DNS subdomain and transition rules allow the update
5824	if address.NodeName != nil {
5825		for _, msg := range ValidateNodeName(*address.NodeName, false) {
5826			allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg))
5827		}
5828	}
5829	allErrs = append(allErrs, ValidateNonSpecialIP(address.IP, fldPath.Child("ip"))...)
5830	return allErrs
5831}
5832
5833// ValidateNonSpecialIP is used to validate Endpoints, EndpointSlices, and
5834// external IPs. Specifically, this disallows unspecified and loopback addresses
5835// are nonsensical and link-local addresses tend to be used for node-centric
5836// purposes (e.g. metadata service).
5837//
5838// IPv6 references
5839// - https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
5840// - https://www.iana.org/assignments/ipv6-multicast-addresses/ipv6-multicast-addresses.xhtml
5841func ValidateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
5842	allErrs := field.ErrorList{}
5843	ip := net.ParseIP(ipAddress)
5844	if ip == nil {
5845		allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address"))
5846		return allErrs
5847	}
5848	if ip.IsUnspecified() {
5849		allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, fmt.Sprintf("may not be unspecified (%v)", ipAddress)))
5850	}
5851	if ip.IsLoopback() {
5852		allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8, ::1/128)"))
5853	}
5854	if ip.IsLinkLocalUnicast() {
5855		allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16, fe80::/10)"))
5856	}
5857	if ip.IsLinkLocalMulticast() {
5858		allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24, ff02::/10)"))
5859	}
5860	return allErrs
5861}
5862
5863func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList {
5864	allErrs := field.ErrorList{}
5865	if requireName && len(port.Name) == 0 {
5866		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
5867	} else if len(port.Name) != 0 {
5868		allErrs = append(allErrs, ValidateDNS1123Label(port.Name, fldPath.Child("name"))...)
5869	}
5870	for _, msg := range validation.IsValidPortNum(int(port.Port)) {
5871		allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, msg))
5872	}
5873	if len(port.Protocol) == 0 {
5874		allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), ""))
5875	} else if !supportedPortProtocols.Has(string(port.Protocol)) {
5876		allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List()))
5877	}
5878	if port.AppProtocol != nil {
5879		for _, msg := range validation.IsQualifiedName(*port.AppProtocol) {
5880			allErrs = append(allErrs, field.Invalid(fldPath.Child("appProtocol"), port.AppProtocol, msg))
5881		}
5882	}
5883	return allErrs
5884}
5885
// ValidateSecurityContext ensures the security context contains valid settings:
// privileged mode must be allowed by cluster policy, runAsUser/runAsGroup must
// be valid IDs, procMount and seccompProfile must be recognized, Windows
// options must be well-formed, and allowPrivilegeEscalation=false is
// incompatible with privileged mode and with adding CAP_SYS_ADMIN.
func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	// A nil context should only occur in tests, since SecurityContext is
	// defaulted by the core before validation runs.
	if sc == nil {
		return allErrs
	}

	if sc.Privileged != nil {
		// Privileged containers require the cluster-wide AllowPrivileged setting.
		if *sc.Privileged && !capabilities.Get().AllowPrivileged {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by cluster policy"))
		}
	}

	if sc.RunAsUser != nil {
		for _, msg := range validation.IsValidUserID(*sc.RunAsUser) {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, msg))
		}
	}

	if sc.RunAsGroup != nil {
		for _, msg := range validation.IsValidGroupID(*sc.RunAsGroup) {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsGroup"), *sc.RunAsGroup, msg))
		}
	}

	if sc.ProcMount != nil {
		if err := ValidateProcMountType(fldPath.Child("procMount"), *sc.ProcMount); err != nil {
			allErrs = append(allErrs, err)
		}

	}
	allErrs = append(allErrs, validateSeccompProfileField(sc.SeccompProfile, fldPath.Child("seccompProfile"))...)
	// allowPrivilegeEscalation=false conflicts with settings that grant
	// escalation: privileged mode and the CAP_SYS_ADMIN capability.
	if sc.AllowPrivilegeEscalation != nil && !*sc.AllowPrivilegeEscalation {
		if sc.Privileged != nil && *sc.Privileged {
			allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `privileged` to true"))
		}

		if sc.Capabilities != nil {
			for _, cap := range sc.Capabilities.Add {
				if string(cap) == "CAP_SYS_ADMIN" {
					allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `capabilities.Add` CAP_SYS_ADMIN"))
				}
			}
		}
	}

	allErrs = append(allErrs, validateWindowsSecurityContextOptions(sc.WindowsOptions, fldPath.Child("windowsOptions"))...)

	return allErrs
}
5937
// maxGMSACredentialSpecLength is the max length, in bytes, for the actual contents
// of a GMSA cred spec. In general, those shouldn't be more than a few hundred bytes,
// so we want to give plenty of room here while still providing an upper bound.
// The runAsUserName field will be used to execute the given container's entrypoint, and
// it can be formatted as "DOMAIN/USER", where the DOMAIN is optional, maxRunAsUserNameDomainLength
// is the max character length for the user's DOMAIN, and maxRunAsUserNameUserLength
// is the max character length for the USER itself. Both the DOMAIN and USER have their
// own restrictions, and more information about them can be found here:
// https://support.microsoft.com/en-us/help/909264/naming-conventions-in-active-directory-for-computers-domains-sites-and
// https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-2000-server/bb726984(v=technet.10)
//
// These limits are enforced by validateWindowsSecurityContextOptions below.
const (
	maxGMSACredentialSpecLengthInKiB = 64
	maxGMSACredentialSpecLength      = maxGMSACredentialSpecLengthInKiB * 1024
	maxRunAsUserNameDomainLength     = 256
	maxRunAsUserNameUserLength       = 104
)
5954
// Regular expressions used by validateWindowsSecurityContextOptions to vet
// the runAsUserName field. All are compiled once at package init.
var (
	// control characters are not permitted in the runAsUserName field.
	ctrlRegex = regexp.MustCompile(`[[:cntrl:]]+`)

	// a valid NetBios Domain name cannot start with a dot, has at least 1 character,
	// at most 15 characters, and it cannot contain the characters: \ / : * ? " < > |
	validNetBiosRegex = regexp.MustCompile(`^[^\\/:\*\?"<>|\.][^\\/:\*\?"<>|]{0,14}$`)

	// a valid DNS name contains only alphanumeric characters, dots, and dashes.
	dnsLabelFormat                 = `[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?`
	dnsSubdomainFormat             = fmt.Sprintf(`^%s(?:\.%s)*$`, dnsLabelFormat, dnsLabelFormat)
	validWindowsUserDomainDNSRegex = regexp.MustCompile(dnsSubdomainFormat)

	// a username is invalid if it contains the characters: " / \ [ ] : ; | = , + * ? < > @
	// or it contains only dots or spaces.
	invalidUserNameCharsRegex      = regexp.MustCompile(`["/\\:;|=,\+\*\?<>@\[\]]`)
	invalidUserNameDotsSpacesRegex = regexp.MustCompile(`^[\. ]+$`)
)
5973
// validateWindowsSecurityContextOptions validates the Windows-specific
// security options: gmsaCredentialSpecName must be a DNS-1123 subdomain,
// gmsaCredentialSpec must be non-empty and under the size cap, and
// runAsUserName must be a well-formed "DOMAIN\USER" or "USER" string.
func validateWindowsSecurityContextOptions(windowsOptions *core.WindowsSecurityContextOptions, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	if windowsOptions == nil {
		return allErrs
	}

	if windowsOptions.GMSACredentialSpecName != nil {
		// gmsaCredentialSpecName must be the name of a custom resource
		for _, msg := range validation.IsDNS1123Subdomain(*windowsOptions.GMSACredentialSpecName) {
			allErrs = append(allErrs, field.Invalid(fieldPath.Child("gmsaCredentialSpecName"), windowsOptions.GMSACredentialSpecName, msg))
		}
	}

	if windowsOptions.GMSACredentialSpec != nil {
		if l := len(*windowsOptions.GMSACredentialSpec); l == 0 {
			allErrs = append(allErrs, field.Invalid(fieldPath.Child("gmsaCredentialSpec"), windowsOptions.GMSACredentialSpec, "gmsaCredentialSpec cannot be an empty string"))
		} else if l > maxGMSACredentialSpecLength {
			errMsg := fmt.Sprintf("gmsaCredentialSpec size must be under %d KiB", maxGMSACredentialSpecLengthInKiB)
			allErrs = append(allErrs, field.Invalid(fieldPath.Child("gmsaCredentialSpec"), windowsOptions.GMSACredentialSpec, errMsg))
		}
	}

	if windowsOptions.RunAsUserName != nil {
		// Reject empty strings, control characters, and more than one
		// backslash before attempting to split into DOMAIN\USER.
		if l := len(*windowsOptions.RunAsUserName); l == 0 {
			allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, "runAsUserName cannot be an empty string"))
		} else if ctrlRegex.MatchString(*windowsOptions.RunAsUserName) {
			errMsg := "runAsUserName cannot contain control characters"
			allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
		} else if parts := strings.Split(*windowsOptions.RunAsUserName, "\\"); len(parts) > 2 {
			errMsg := "runAsUserName cannot contain more than one backslash"
			allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
		} else {
			var (
				hasDomain = false
				domain    = ""
				user      string
			)
			// One part means a bare user name; two parts means DOMAIN\USER.
			if len(parts) == 1 {
				user = parts[0]
			} else {
				hasDomain = true
				domain = parts[0]
				user = parts[1]
			}

			if len(domain) >= maxRunAsUserNameDomainLength {
				errMsg := fmt.Sprintf("runAsUserName's Domain length must be under %d characters", maxRunAsUserNameDomainLength)
				allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
			}

			// The domain may be either a NetBios name or a DNS subdomain.
			if hasDomain && !(validNetBiosRegex.MatchString(domain) || validWindowsUserDomainDNSRegex.MatchString(domain)) {
				errMsg := "runAsUserName's Domain doesn't match the NetBios nor the DNS format"
				allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
			}

			if l := len(user); l == 0 {
				errMsg := "runAsUserName's User cannot be empty"
				allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
			} else if l > maxRunAsUserNameUserLength {
				errMsg := fmt.Sprintf("runAsUserName's User length must not be longer than %d characters", maxRunAsUserNameUserLength)
				allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
			}

			if invalidUserNameDotsSpacesRegex.MatchString(user) {
				errMsg := `runAsUserName's User cannot contain only periods or spaces`
				allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
			}

			if invalidUserNameCharsRegex.MatchString(user) {
				errMsg := `runAsUserName's User cannot contain the following characters: "/\:;|=,+*?<>@[]`
				allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
			}
		}
	}

	return allErrs
}
6052
6053func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path, opts PodValidationOptions) field.ErrorList {
6054	allErrs := field.ErrorList{}
6055
6056	// Keep track of container and hostProcess container count for validate
6057	containerCount := 0
6058	hostProcessContainerCount := 0
6059
6060	var podHostProcess *bool
6061	if podSpec.SecurityContext != nil && podSpec.SecurityContext.WindowsOptions != nil {
6062		podHostProcess = podSpec.SecurityContext.WindowsOptions.HostProcess
6063	}
6064
6065	if !opts.AllowWindowsHostProcessField && podHostProcess != nil {
6066		// Do not allow pods to persist data that sets hostProcess (true or false)
6067		errMsg := "not allowed when feature gate 'WindowsHostProcessContainers' is not enabled"
6068		allErrs = append(allErrs, field.Forbidden(fieldPath.Child("securityContext", "windowsOptions", "hostProcess"), errMsg))
6069		return allErrs
6070	}
6071
6072	hostNetwork := false
6073	if podSpec.SecurityContext != nil {
6074		hostNetwork = podSpec.SecurityContext.HostNetwork
6075	}
6076
6077	podshelper.VisitContainersWithPath(podSpec, fieldPath, func(c *core.Container, cFieldPath *field.Path) bool {
6078		containerCount++
6079
6080		var containerHostProcess *bool = nil
6081		if c.SecurityContext != nil && c.SecurityContext.WindowsOptions != nil {
6082			containerHostProcess = c.SecurityContext.WindowsOptions.HostProcess
6083		}
6084
6085		if !opts.AllowWindowsHostProcessField && containerHostProcess != nil {
6086			// Do not allow pods to persist data that sets hostProcess (true or false)
6087			errMsg := "not allowed when feature gate 'WindowsHostProcessContainers' is not enabled"
6088			allErrs = append(allErrs, field.Forbidden(cFieldPath.Child("securityContext", "windowsOptions", "hostProcess"), errMsg))
6089		}
6090
6091		if podHostProcess != nil && containerHostProcess != nil && *podHostProcess != *containerHostProcess {
6092			errMsg := fmt.Sprintf("pod hostProcess value must be identical if both are specified, was %v", *podHostProcess)
6093			allErrs = append(allErrs, field.Invalid(cFieldPath.Child("securityContext", "windowsOptions", "hostProcess"), *containerHostProcess, errMsg))
6094		}
6095
6096		switch {
6097		case containerHostProcess != nil && *containerHostProcess:
6098			// Container explitly sets hostProcess=true
6099			hostProcessContainerCount++
6100		case containerHostProcess == nil && podHostProcess != nil && *podHostProcess:
6101			// Container inherits hostProcess=true from pod settings
6102			hostProcessContainerCount++
6103		}
6104
6105		return true
6106	})
6107
6108	if hostProcessContainerCount > 0 {
6109		// Fail Pod validation if feature is not enabled (unless podspec already exists and contains HostProcess fields) instead of dropping fields based on PRR reivew.
6110		if !opts.AllowWindowsHostProcessField {
6111			errMsg := "pod must not contain Windows hostProcess containers when feature gate 'WindowsHostProcessContainers' is not enabled"
6112			allErrs = append(allErrs, field.Forbidden(fieldPath, errMsg))
6113			return allErrs
6114		}
6115
6116		// At present, if a Windows Pods contains any HostProcess containers than all containers must be
6117		// HostProcess containers (explicitly set or inherited).
6118		if hostProcessContainerCount != containerCount {
6119			errMsg := "If pod contains any hostProcess containers then all containers must be HostProcess containers"
6120			allErrs = append(allErrs, field.Invalid(fieldPath, "", errMsg))
6121		}
6122
6123		// At present Windows Pods which contain HostProcess containers must also set HostNetwork.
6124		if hostNetwork != true {
6125			errMsg := "hostNetwork must be true if pod contains any hostProcess containers"
6126			allErrs = append(allErrs, field.Invalid(fieldPath.Child("hostNetwork"), hostNetwork, errMsg))
6127		}
6128
6129		if !capabilities.Get().AllowPrivileged {
6130			errMsg := "hostProcess containers are disallowed by cluster policy"
6131			allErrs = append(allErrs, field.Forbidden(fieldPath, errMsg))
6132		}
6133	}
6134
6135	return allErrs
6136}
6137
6138func ValidatePodLogOptions(opts *core.PodLogOptions) field.ErrorList {
6139	allErrs := field.ErrorList{}
6140	if opts.TailLines != nil && *opts.TailLines < 0 {
6141		allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg))
6142	}
6143	if opts.LimitBytes != nil && *opts.LimitBytes < 1 {
6144		allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0"))
6145	}
6146	switch {
6147	case opts.SinceSeconds != nil && opts.SinceTime != nil:
6148		allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified"))
6149	case opts.SinceSeconds != nil:
6150		if *opts.SinceSeconds < 1 {
6151			allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0"))
6152		}
6153	}
6154	return allErrs
6155}
6156
6157// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus
6158func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.Path) field.ErrorList {
6159	allErrs := field.ErrorList{}
6160	for i, ingress := range status.Ingress {
6161		idxPath := fldPath.Child("ingress").Index(i)
6162		if len(ingress.IP) > 0 {
6163			if isIP := (net.ParseIP(ingress.IP) != nil); !isIP {
6164				allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address"))
6165			}
6166		}
6167		if len(ingress.Hostname) > 0 {
6168			for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) {
6169				allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg))
6170			}
6171			if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP {
6172				allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address"))
6173			}
6174		}
6175	}
6176	return allErrs
6177}
6178
6179// validateVolumeNodeAffinity tests that the PersistentVolume.NodeAffinity has valid data
6180// returns:
6181// - true if volumeNodeAffinity is set
6182// - errorList if there are validation errors
6183func validateVolumeNodeAffinity(nodeAffinity *core.VolumeNodeAffinity, fldPath *field.Path) (bool, field.ErrorList) {
6184	allErrs := field.ErrorList{}
6185
6186	if nodeAffinity == nil {
6187		return false, allErrs
6188	}
6189
6190	if nodeAffinity.Required != nil {
6191		allErrs = append(allErrs, ValidateNodeSelector(nodeAffinity.Required, fldPath.Child("required"))...)
6192	} else {
6193		allErrs = append(allErrs, field.Required(fldPath.Child("required"), "must specify required node constraints"))
6194	}
6195
6196	return true, allErrs
6197}
6198
// ValidateCIDR validates whether a CIDR matches the conventions expected by
// net.ParseCIDR, returning the parsed network on success.
func ValidateCIDR(cidr string) (*net.IPNet, error) {
	// Name the result ipNet rather than net to avoid shadowing the net package.
	_, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, err
	}
	return ipNet, nil
}
6207
// IsDecremented reports whether update represents a decrease relative to old:
// either old was set and update clears it, or both are set and the new value
// is strictly smaller. If old is unset there is nothing to decrement from.
func IsDecremented(update, old *int32) bool {
	switch {
	case old == nil:
		return false
	case update == nil:
		return true
	default:
		return *update < *old
	}
}
6217
6218// ValidateProcMountType tests that the argument is a valid ProcMountType.
6219func ValidateProcMountType(fldPath *field.Path, procMountType core.ProcMountType) *field.Error {
6220	switch procMountType {
6221	case core.DefaultProcMount, core.UnmaskedProcMount:
6222		return nil
6223	default:
6224		return field.NotSupported(fldPath, procMountType, []string{string(core.DefaultProcMount), string(core.UnmaskedProcMount)})
6225	}
6226}
6227
var (
	// supportedScheduleActions is the set of valid values for
	// TopologySpreadConstraint.WhenUnsatisfiable.
	supportedScheduleActions = sets.NewString(string(core.DoNotSchedule), string(core.ScheduleAnyway))
)
6231
6232// validateTopologySpreadConstraints validates given TopologySpreadConstraints.
6233func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstraint, fldPath *field.Path) field.ErrorList {
6234	allErrs := field.ErrorList{}
6235
6236	for i, constraint := range constraints {
6237		subFldPath := fldPath.Index(i)
6238		if err := ValidateMaxSkew(subFldPath.Child("maxSkew"), constraint.MaxSkew); err != nil {
6239			allErrs = append(allErrs, err)
6240		}
6241		if err := ValidateTopologyKey(subFldPath.Child("topologyKey"), constraint.TopologyKey); err != nil {
6242			allErrs = append(allErrs, err)
6243		}
6244		if err := ValidateWhenUnsatisfiable(subFldPath.Child("whenUnsatisfiable"), constraint.WhenUnsatisfiable); err != nil {
6245			allErrs = append(allErrs, err)
6246		}
6247		// tuple {topologyKey, whenUnsatisfiable} denotes one kind of spread constraint
6248		if err := ValidateSpreadConstraintNotRepeat(subFldPath.Child("{topologyKey, whenUnsatisfiable}"), constraint, constraints[i+1:]); err != nil {
6249			allErrs = append(allErrs, err)
6250		}
6251	}
6252
6253	return allErrs
6254}
6255
6256// ValidateMaxSkew tests that the argument is a valid MaxSkew.
6257func ValidateMaxSkew(fldPath *field.Path, maxSkew int32) *field.Error {
6258	if maxSkew <= 0 {
6259		return field.Invalid(fldPath, maxSkew, isNotPositiveErrorMsg)
6260	}
6261	return nil
6262}
6263
6264// ValidateTopologyKey tests that the argument is a valid TopologyKey.
6265func ValidateTopologyKey(fldPath *field.Path, topologyKey string) *field.Error {
6266	if len(topologyKey) == 0 {
6267		return field.Required(fldPath, "can not be empty")
6268	}
6269	return nil
6270}
6271
6272// ValidateWhenUnsatisfiable tests that the argument is a valid UnsatisfiableConstraintAction.
6273func ValidateWhenUnsatisfiable(fldPath *field.Path, action core.UnsatisfiableConstraintAction) *field.Error {
6274	if !supportedScheduleActions.Has(string(action)) {
6275		return field.NotSupported(fldPath, action, supportedScheduleActions.List())
6276	}
6277	return nil
6278}
6279
6280// ValidateSpreadConstraintNotRepeat tests that if `constraint` duplicates with `existingConstraintPairs`
6281// on TopologyKey and WhenUnsatisfiable fields.
6282func ValidateSpreadConstraintNotRepeat(fldPath *field.Path, constraint core.TopologySpreadConstraint, restingConstraints []core.TopologySpreadConstraint) *field.Error {
6283	for _, restingConstraint := range restingConstraints {
6284		if constraint.TopologyKey == restingConstraint.TopologyKey &&
6285			constraint.WhenUnsatisfiable == restingConstraint.WhenUnsatisfiable {
6286			return field.Duplicate(fldPath, fmt.Sprintf("{%v, %v}", constraint.TopologyKey, constraint.WhenUnsatisfiable))
6287		}
6288	}
6289	return nil
6290}
6291
// validateServiceClusterIPsRelatedFields validates .spec.ClusterIPs, .spec.IPFamilies,
// and .spec.ipFamilyPolicy as a coherent group: values must be individually valid and
// mutually consistent in length, ordering, and IP family.
func validateServiceClusterIPsRelatedFields(service *core.Service) field.ErrorList {
	// ClusterIP, ClusterIPs, IPFamilyPolicy and IPFamilies are validated prior (all must be unset) for ExternalName service
	if service.Spec.Type == core.ServiceTypeExternalName {
		return field.ErrorList{}
	}

	allErrs := field.ErrorList{}
	hasInvalidIPs := false

	specPath := field.NewPath("spec")
	clusterIPsField := specPath.Child("clusterIPs")
	ipFamiliesField := specPath.Child("ipFamilies")
	ipFamilyPolicyField := specPath.Child("ipFamilyPolicy")

	// Make sure ClusterIP and ClusterIPs are synced.  For most cases users can
	// just manage one or the other and we'll handle the rest (see PrepareFor*
	// in strategy).
	if len(service.Spec.ClusterIP) != 0 {
		// If ClusterIP is set, ClusterIPs[0] must match.
		if len(service.Spec.ClusterIPs) == 0 {
			allErrs = append(allErrs, field.Required(clusterIPsField, ""))
		} else if service.Spec.ClusterIPs[0] != service.Spec.ClusterIP {
			allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "element [0] must match clusterIP"))
		}
	} else { // ClusterIP == ""
		// If ClusterIP is not set, ClusterIPs must also be unset.
		if len(service.Spec.ClusterIPs) != 0 {
			allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "must be empty when clusterIP is empty"))
		}
	}

	// ipFamilies stand-alone validation:
	// each entry must be either IPv4 or IPv6
	seen := sets.String{}
	for i, ipFamily := range service.Spec.IPFamilies {
		if !supportedServiceIPFamily.Has(string(ipFamily)) {
			allErrs = append(allErrs, field.NotSupported(ipFamiliesField.Index(i), ipFamily, supportedServiceIPFamily.List()))
		}
		// the no-duplicate check also ensures that ipFamilies is dual-stacked, in any order
		if seen.Has(string(ipFamily)) {
			allErrs = append(allErrs, field.Duplicate(ipFamiliesField.Index(i), ipFamily))
		}
		seen.Insert(string(ipFamily))
	}

	// IPFamilyPolicy stand-alone validation.
	// note: nil is ok, defaulted in alloc check registry/core/service/*
	if service.Spec.IPFamilyPolicy != nil {
		// must have a supported value
		if !supportedServiceIPFamilyPolicy.Has(string(*(service.Spec.IPFamilyPolicy))) {
			allErrs = append(allErrs, field.NotSupported(ipFamilyPolicyField, service.Spec.IPFamilyPolicy, supportedServiceIPFamilyPolicy.List()))
		}
	}

	// clusterIPs stand-alone validation:
	// valid IPs, with special handling of "None" (headless) and empty string;
	// the duplication check is done as part of the dual-stack validation below
	for i, clusterIP := range service.Spec.ClusterIPs {
		// "None" is valid at the first location only, and only if it is the sole entry
		if i == 0 && clusterIP == core.ClusterIPNone {
			if len(service.Spec.ClusterIPs) > 1 {
				hasInvalidIPs = true
				allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "'None' must be the first and only value"))
			}
			continue
		}

		// is it a valid ip?
		errorMessages := validation.IsValidIP(clusterIP)
		hasInvalidIPs = (len(errorMessages) != 0) || hasInvalidIPs
		for _, msg := range errorMessages {
			allErrs = append(allErrs, field.Invalid(clusterIPsField.Index(i), clusterIP, msg))
		}
	}

	// at most two entries (one per IP family)
	if len(service.Spec.ClusterIPs) > 2 {
		allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "may only hold up to 2 values"))
	}

	// at this stage if there is an invalid ip or a misplaced None/empty string
	// it would skew the error messages below (bad index || dual-stackness of
	// already-bad ips), so stop here if clusterIPs validation produced errors
	if hasInvalidIPs {
		return allErrs
	}

	// entries must be dual-stack if there is more than one ip
	if len(service.Spec.ClusterIPs) > 1 /* meaning: it does not have a None or empty string */ {
		dualStack, err := netutils.IsDualStackIPStrings(service.Spec.ClusterIPs)
		if err != nil { // though we check for that earlier. safe > sorry
			allErrs = append(allErrs, field.InternalError(clusterIPsField, fmt.Errorf("failed to check for dual stack with error:%v", err)))
		}

		// We only support one from each IP family (i.e. max two IPs in this list).
		if !dualStack {
			allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "may specify no more than one IP for each IP family"))
		}
	}

	// match clusterIPs to their families, if they were provided
	if !isHeadlessService(service) && len(service.Spec.ClusterIPs) > 0 && len(service.Spec.IPFamilies) > 0 {
		for i, ip := range service.Spec.ClusterIPs {
			if i > (len(service.Spec.IPFamilies) - 1) {
				break // no more families to check
			}

			// family declares IPv4 but the IP is IPv6
			if service.Spec.IPFamilies[i] == core.IPv4Protocol && netutils.IsIPv6String(ip) {
				allErrs = append(allErrs, field.Invalid(clusterIPsField.Index(i), ip, fmt.Sprintf("expected an IPv4 value as indicated by `ipFamilies[%v]`", i)))
			}
			// family declares IPv6 but the IP is IPv4
			if service.Spec.IPFamilies[i] == core.IPv6Protocol && !netutils.IsIPv6String(ip) {
				allErrs = append(allErrs, field.Invalid(clusterIPsField.Index(i), ip, fmt.Sprintf("expected an IPv6 value as indicated by `ipFamilies[%v]`", i)))
			}
		}
	}

	return allErrs
}
6413
// validateUpgradeDowngradeClusterIPs performs validation of .spec.clusterIPs
// specific to updates where the user upgrades the Service to, or downgrades it
// from, dual-stack: the primary clusterIP is immutable and must never be unset,
// and releasing the secondary clusterIP requires ipFamilyPolicy=SingleStack.
func validateUpgradeDowngradeClusterIPs(oldService, service *core.Service) field.ErrorList {
	allErrs := make(field.ErrorList, 0)

	// bail out early for ExternalName
	if service.Spec.Type == core.ServiceTypeExternalName || oldService.Spec.Type == core.ServiceTypeExternalName {
		return allErrs
	}
	newIsHeadless := isHeadlessService(service)
	oldIsHeadless := isHeadlessService(oldService)

	// headless before and after: clusterIPs is ["None"] on both sides, nothing to compare
	if oldIsHeadless && newIsHeadless {
		return allErrs
	}

	switch {
	// no change in ClusterIP lengths
	// compare each entry in order; none may change
	case len(oldService.Spec.ClusterIPs) == len(service.Spec.ClusterIPs):
		for i, ip := range oldService.Spec.ClusterIPs {
			if ip != service.Spec.ClusterIPs[i] {
				allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(i), service.Spec.ClusterIPs, "may not change once set"))
			}
		}

	// something has been released (downgraded)
	case len(oldService.Spec.ClusterIPs) > len(service.Spec.ClusterIPs):
		// the primary ClusterIP may never be released
		if len(service.Spec.ClusterIPs) == 0 {
			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "primary clusterIP can not be unset"))
		}

		// test if primary clusterIP has changed
		if len(oldService.Spec.ClusterIPs) > 0 &&
			len(service.Spec.ClusterIPs) > 0 &&
			service.Spec.ClusterIPs[0] != oldService.Spec.ClusterIPs[0] {
			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "may not change once set"))
		}

		// test if secondary ClusterIP has been released. has this service been downgraded correctly?
		// user *must* set IPFamilyPolicy == SingleStack
		if len(service.Spec.ClusterIPs) == 1 {
			if service.Spec.IPFamilyPolicy == nil || *(service.Spec.IPFamilyPolicy) != core.IPFamilyPolicySingleStack {
				allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "`ipFamilyPolicy` must be set to 'SingleStack' when releasing the secondary clusterIP"))
			}
		}
	case len(oldService.Spec.ClusterIPs) < len(service.Spec.ClusterIPs):
		// something has been added (upgraded)
		// test if primary clusterIP has changed
		if len(oldService.Spec.ClusterIPs) > 0 &&
			service.Spec.ClusterIPs[0] != oldService.Spec.ClusterIPs[0] {
			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "may not change once set"))
		}
		// we don't check for Policy == RequireDualStack here since the Validation/Creation func takes care of it
	}
	return allErrs
}
6471
6472// specific validation for ipFamilies in cases of user upgrading or downgrading to/from dualstack
6473func validateUpgradeDowngradeIPFamilies(oldService, service *core.Service) field.ErrorList {
6474	allErrs := make(field.ErrorList, 0)
6475	// bail out early for ExternalName
6476	if service.Spec.Type == core.ServiceTypeExternalName || oldService.Spec.Type == core.ServiceTypeExternalName {
6477		return allErrs
6478	}
6479
6480	oldIsHeadless := isHeadlessService(oldService)
6481	newIsHeadless := isHeadlessService(service)
6482
6483	// if changed to/from headless, then bail out
6484	if newIsHeadless != oldIsHeadless {
6485		return allErrs
6486	}
6487	// headless can change families
6488	if newIsHeadless {
6489		return allErrs
6490	}
6491
6492	switch {
6493	case len(oldService.Spec.IPFamilies) == len(service.Spec.IPFamilies):
6494		// no change in ClusterIP lengths
6495		// compare each
6496
6497		for i, ip := range oldService.Spec.IPFamilies {
6498			if ip != service.Spec.IPFamilies[i] {
6499				allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilies").Index(0), service.Spec.IPFamilies, "may not change once set"))
6500			}
6501		}
6502
6503	case len(oldService.Spec.IPFamilies) > len(service.Spec.IPFamilies):
6504		// something has been released (downgraded)
6505
6506		// test if primary ipfamily has been released
6507		if len(service.Spec.ClusterIPs) == 0 {
6508			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilies").Index(0), service.Spec.IPFamilies, "primary ipFamily can not be unset"))
6509		}
6510
6511		// test if primary ipFamily has changed
6512		if len(service.Spec.IPFamilies) > 0 &&
6513			service.Spec.IPFamilies[0] != oldService.Spec.IPFamilies[0] {
6514			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilies").Index(0), service.Spec.ClusterIPs, "may not change once set"))
6515		}
6516
6517		// test if secondary IPFamily has been released. has this service been downgraded correctly?
6518		// user *must* set IPFamilyPolicy == SingleStack
6519		if len(service.Spec.IPFamilies) == 1 {
6520			if service.Spec.IPFamilyPolicy == nil || *(service.Spec.IPFamilyPolicy) != core.IPFamilyPolicySingleStack {
6521				allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "`ipFamilyPolicy` must be set to 'SingleStack' when releasing the secondary ipFamily"))
6522			}
6523		}
6524	case len(oldService.Spec.IPFamilies) < len(service.Spec.IPFamilies):
6525		// something has been added (upgraded)
6526
6527		// test if primary ipFamily has changed
6528		if len(oldService.Spec.IPFamilies) > 0 &&
6529			len(service.Spec.IPFamilies) > 0 &&
6530			service.Spec.IPFamilies[0] != oldService.Spec.IPFamilies[0] {
6531			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilies").Index(0), service.Spec.ClusterIPs, "may not change once set"))
6532		}
6533		// we don't check for Policy == RequireDualStack here since, Validation/Creation func takes care of it
6534	}
6535	return allErrs
6536}
6537
6538func isHeadlessService(service *core.Service) bool {
6539	return service != nil &&
6540		len(service.Spec.ClusterIPs) == 1 &&
6541		service.Spec.ClusterIPs[0] == core.ClusterIPNone
6542}
6543
6544// validateLoadBalancerClassField validation for loadBalancerClass
6545func validateLoadBalancerClassField(oldService, service *core.Service) field.ErrorList {
6546	allErrs := make(field.ErrorList, 0)
6547	if oldService != nil {
6548		// validate update op
6549		if isTypeLoadBalancer(oldService) && isTypeLoadBalancer(service) {
6550			// old and new are both LoadBalancer
6551			if !sameLoadBalancerClass(oldService, service) {
6552				// can't change loadBalancerClass
6553				allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "loadBalancerClass"), service.Spec.LoadBalancerClass, "may not change once set"))
6554			}
6555		}
6556	}
6557
6558	if isTypeLoadBalancer(service) {
6559		// check LoadBalancerClass format
6560		if service.Spec.LoadBalancerClass != nil {
6561			allErrs = append(allErrs, ValidateQualifiedName(*service.Spec.LoadBalancerClass, field.NewPath("spec", "loadBalancerClass"))...)
6562		}
6563	} else {
6564		// check if LoadBalancerClass set for non LoadBalancer type of service
6565		if service.Spec.LoadBalancerClass != nil {
6566			allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "loadBalancerClass"), "may only be used when `type` is 'LoadBalancer'"))
6567		}
6568	}
6569	return allErrs
6570}
6571
// isTypeLoadBalancer reports whether the service's type is LoadBalancer.
func isTypeLoadBalancer(service *core.Service) bool {
	return service.Spec.Type == core.ServiceTypeLoadBalancer
}
6576
6577// sameLoadBalancerClass check two services have the same loadBalancerClass or not
6578func sameLoadBalancerClass(oldService, service *core.Service) bool {
6579	if oldService.Spec.LoadBalancerClass == nil && service.Spec.LoadBalancerClass == nil {
6580		return true
6581	}
6582	if oldService.Spec.LoadBalancerClass == nil || service.Spec.LoadBalancerClass == nil {
6583		return false
6584	}
6585	return *oldService.Spec.LoadBalancerClass == *service.Spec.LoadBalancerClass
6586}
6587