1# -*- coding: utf-8 -*- #
2# Copyright 2015 Google LLC. All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8#    http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15"""Api client adapter containers commands."""
16
17from __future__ import absolute_import
18from __future__ import division
19from __future__ import unicode_literals
20
21import os
22
23import time
24
25from apitools.base.py import exceptions as apitools_exceptions
26from apitools.base.py import http_wrapper
27
28from googlecloudsdk.api_lib.compute import constants
29from googlecloudsdk.api_lib.container import constants as gke_constants
30from googlecloudsdk.api_lib.container import util
31from googlecloudsdk.api_lib.util import apis as core_apis
32from googlecloudsdk.calliope import exceptions
33from googlecloudsdk.command_lib.util.apis import arg_utils
34from googlecloudsdk.core import log
35from googlecloudsdk.core import properties
36from googlecloudsdk.core import resources as cloud_resources
37from googlecloudsdk.core import yaml
38from googlecloudsdk.core.console import progress_tracker
39from googlecloudsdk.core.util import times
40import six
41from six.moves import range  # pylint: disable=redefined-builtin
42import six.moves.http_client
43
44WRONG_ZONE_ERROR_MSG = """\
45{error}
46Could not find [{name}] in [{wrong_zone}].
47Did you mean [{name}] in [{zone}]?"""
48
49NO_SUCH_CLUSTER_ERROR_MSG = """\
50{error}
51No cluster named '{name}' in {project}."""
52
53NO_SUCH_NODE_POOL_ERROR_MSG = """\
54No node pool named '{name}' in {cluster}."""
55
56NO_NODE_POOL_SELECTED_ERROR_MSG = """\
57Please specify one of the following node pools:
58"""
59
60MISMATCH_AUTHORIZED_NETWORKS_ERROR_MSG = """\
61Cannot use --master-authorized-networks \
62if --enable-master-authorized-networks is not \
63specified."""
64
65NO_AUTOPROVISIONING_MSG = """\
66Node autoprovisioning is currently in beta.
67"""
68
69NO_AUTOPROVISIONING_LIMITS_ERROR_MSG = """\
70Must specify both --max-cpu and --max-memory to enable autoprovisioning.
71"""
72
73LIMITS_WITHOUT_AUTOPROVISIONING_MSG = """\
74Must enable node autoprovisioning to specify resource limits for autoscaling.
75"""
76
77DEFAULTS_WITHOUT_AUTOPROVISIONING_MSG = """\
78Must enable node autoprovisioning to specify defaults for node autoprovisioning.
79"""
80
81BOTH_AUTOPROVISIONING_UPGRADE_SETTINGS_ERROR_MSG = """\
82Must specify both 'maxSurgeUpgrade' and 'maxUnavailableUpgrade' in \
83'upgradeSettings' in --autoprovisioning-config-file to set upgrade settings.
84"""
85
86BOTH_AUTOPROVISIONING_MANAGEMENT_SETTINGS_ERROR_MSG = """\
87Must specify both 'autoUpgrade' and 'autoRepair' in 'management' in \
88--autoprovisioning-config-file to set management settings.
89"""
90
91BOTH_AUTOPROVISIONING_SHIELDED_INSTANCE_SETTINGS_ERROR_MSG = """\
92Must specify both 'enableSecureBoot' and 'enableIntegrityMonitoring' \
93in 'shieldedInstanceConfig' in --autoprovisioning-config-file to set \
94management settings.
95"""
96
97LIMITS_WITHOUT_AUTOPROVISIONING_FLAG_MSG = """\
98Must specify --enable-autoprovisioning to specify resource limits for autoscaling.
99"""
100
101MISMATCH_ACCELERATOR_TYPE_LIMITS_ERROR_MSG = """\
102Maximum and minimum accelerator limits must be set on the same accelerator type.
103"""
104
105NO_SUCH_LABEL_ERROR_MSG = """\
106No label named '{name}' found on cluster '{cluster}'."""
107
108NO_LABELS_ON_CLUSTER_ERROR_MSG = """\
109Cluster '{cluster}' has no labels to remove."""
110
111CREATE_SUBNETWORK_INVALID_KEY_ERROR_MSG = """\
112Invalid key '{key}' for --create-subnetwork (must be one of 'name', 'range').
113"""
114
115CREATE_SUBNETWORK_WITH_SUBNETWORK_ERROR_MSG = """\
116Cannot specify both --subnetwork and --create-subnetwork at the same time.
117"""
118
119CREATE_POD_RANGE_INVALID_KEY_ERROR_MSG = """
120Invalid key '{key}' for --create-pod-ipv4-range (must be one of 'name', 'range').
121"""
122
123NODE_TAINT_INCORRECT_FORMAT_ERROR_MSG = """\
124Invalid value [{key}={value}] for argument --node-taints. Node taint is of format key=value:effect
125"""
126
127NODE_TAINT_INCORRECT_EFFECT_ERROR_MSG = """\
128Invalid taint effect [{effect}] for argument --node-taints. Valid effect values are NoSchedule, PreferNoSchedule, NoExecute'
129"""
130
131LOCAL_SSD_INCORRECT_FORMAT_ERROR_MSG = """\
132Invalid local SSD format [{err_format}] for argument --local-ssd-volumes. Valid formats are fs, block
133"""
134
135UNKNOWN_WORKLOAD_METADATA_ERROR_MSG = """\
136Invalid option '{option}' for '--workload-metadata' (must be one of 'gce_metadata', 'gke_metadata').
137"""
138
139ALLOW_ROUTE_OVERLAP_WITHOUT_EXPLICIT_NETWORK_MODE = """\
140Flag --allow-route-overlap must be used with either --enable-ip-alias or --no-enable-ip-alias.
141"""
142
143ALLOW_ROUTE_OVERLAP_WITHOUT_CLUSTER_CIDR_ERROR_MSG = """\
144Flag --cluster-ipv4-cidr must be fully specified (e.g. `10.96.0.0/14`, but not `/14`) with --allow-route-overlap.
145"""
146
147ALLOW_ROUTE_OVERLAP_WITHOUT_SERVICES_CIDR_ERROR_MSG = """\
148Flag --services-ipv4-cidr must be fully specified (e.g. `10.96.0.0/14`, but not `/14`) with --allow-route-overlap and --enable-ip-alias.
149"""
150
151PREREQUISITE_OPTION_ERROR_MSG = """\
152Cannot specify --{opt} without --{prerequisite}.
153"""
154
155CLOUD_LOGGING_OR_MONITORING_DISABLED_ERROR_MSG = """\
156Flag --enable-stackdriver-kubernetes requires Cloud Logging and Cloud Monitoring enabled with --enable-cloud-logging and --enable-cloud-monitoring.
157"""
158
159CLOUDRUN_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG = """\
160The CloudRun-on-GKE addon (--addons=CloudRun) requires Cloud Logging and Cloud Monitoring to be enabled via the --enable-stackdriver-kubernetes flag.
161"""
162
163CLOUDRUN_INGRESS_KUBERNETES_DISABLED_ERROR_MSG = """\
164The CloudRun-on-GKE addon (--addons=CloudRun) requires HTTP Load Balancing to be enabled via the --addons=HttpLoadBalancing flag.
165"""
166
167CONFIGCONNECTOR_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG = """\
168The ConfigConnector-on-GKE addon (--addons=ConfigConnector) requires Cloud Logging and Cloud Monitoring to be enabled via the --enable-stackdriver-kubernetes flag.
169"""
170
171CONFIGCONNECTOR_WORKLOAD_IDENTITY_DISABLED_ERROR_MSG = """\
172The ConfigConnector-on-GKE addon (--addons=ConfigConnector) requires workload identity to be enabled via the --workload-pool=WORKLOAD_POOL flag.
173"""
174
175CLOUDBUILD_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG = """\
176Cloud Build for Anthos (--addons=CloudBuild) requires Cloud Logging and Cloud Monitoring to be enabled via the --enable-stackdriver-kubernetes flag.
177"""
178
179DEFAULT_MAX_PODS_PER_NODE_WITHOUT_IP_ALIAS_ERROR_MSG = """\
180Cannot use --default-max-pods-per-node without --enable-ip-alias.
181"""
182
183MAX_PODS_PER_NODE_WITHOUT_IP_ALIAS_ERROR_MSG = """\
184Cannot use --max-pods-per-node without --enable-ip-alias.
185"""
186
187NOTHING_TO_UPDATE_ERROR_MSG = """\
188Nothing to update.
189"""
190
191ENABLE_PRIVATE_NODES_WITH_PRIVATE_CLUSTER_ERROR_MSG = """\
192Cannot specify both --[no-]enable-private-nodes and --[no-]private-cluster at the same time.
193"""
194
195ENABLE_NETWORK_EGRESS_METERING_ERROR_MSG = """\
196Cannot use --[no-]enable-network-egress-metering without --resource-usage-bigquery-dataset.
197"""
198
199ENABLE_RESOURCE_CONSUMPTION_METERING_ERROR_MSG = """\
200Cannot use --[no-]enable-resource-consumption-metering without --resource-usage-bigquery-dataset.
201"""
202
203DISABLE_DEFAULT_SNAT_WITHOUT_IP_ALIAS_ERROR_MSG = """\
204Cannot use --disable-default-snat without --enable-ip-alias.
205"""
206
207DISABLE_DEFAULT_SNAT_WITHOUT_PRIVATE_NODES_ERROR_MSG = """\
208Cannot use --disable-default-snat without --enable-private-nodes.
209"""
210
211RESERVATION_AFFINITY_SPECIFIC_WITHOUT_RESERVATION_NAME_ERROR_MSG = """\
212Must specify --reservation for --reservation-affinity=specific.
213"""
214
215RESERVATION_AFFINITY_NON_SPECIFIC_WITH_RESERVATION_NAME_ERROR_MSG = """\
216Cannot specify --reservation for --reservation-affinity={affinity}.
217"""
218
219DATAPATH_PROVIDER_ILL_SPECIFIED_ERROR_MSG = """\
220Invalid provider '{provider}' for argument --datapath-provider. Valid providers are legacy, advanced.
221"""
222
223SANDBOX_TYPE_NOT_PROVIDED = """\
224Must specify sandbox type.
225"""
226
227SANDBOX_TYPE_NOT_SUPPORTED = """\
228Provided sandbox type '{type}' not supported.
229"""
230TPU_SERVING_MODE_ERROR = """\
231Cannot specify --tpu-ipv4-cidr with --enable-tpu-service-networking."""
232
233MAX_NODES_PER_POOL = 1000
234
235MAX_AUTHORIZED_NETWORKS_CIDRS_PRIVATE = 100
236MAX_AUTHORIZED_NETWORKS_CIDRS_PUBLIC = 50
237
238INGRESS = 'HttpLoadBalancing'
239HPA = 'HorizontalPodAutoscaling'
240DASHBOARD = 'KubernetesDashboard'
241CLOUDBUILD = 'CloudBuild'
242CONFIGCONNECTOR = 'ConfigConnector'
243GCEPDCSIDRIVER = 'GcePersistentDiskCsiDriver'
244ISTIO = 'Istio'
245NETWORK_POLICY = 'NetworkPolicy'
246NODELOCALDNS = 'NodeLocalDNS'
247APPLICATIONMANAGER = 'ApplicationManager'
248RESOURCE_LIMITS = 'resourceLimits'
249SERVICE_ACCOUNT = 'serviceAccount'
250MIN_CPU_PLATFORM = 'minCpuPlatform'
251UPGRADE_SETTINGS = 'upgradeSettings'
252MAX_SURGE_UPGRADE = 'maxSurgeUpgrade'
253MAX_UNAVAILABLE_UPGRADE = 'maxUnavailableUpgrade'
254NODE_MANAGEMENT = 'management'
255ENABLE_AUTO_UPGRADE = 'autoUpgrade'
256ENABLE_AUTO_REPAIR = 'autoRepair'
257SCOPES = 'scopes'
258AUTOPROVISIONING_LOCATIONS = 'autoprovisioningLocations'
259BOOT_DISK_KMS_KEY = 'bootDiskKmsKey'
260DISK_SIZE_GB = 'diskSizeGb'
261DISK_TYPE = 'diskType'
262SHIELDED_INSTANCE_CONFIG = 'shieldedInstanceConfig'
263ENABLE_SECURE_BOOT = 'enableSecureBoot'
264ENABLE_INTEGRITY_MONITORING = 'enableIntegrityMonitoring'
265DEFAULT_ADDONS = [INGRESS, HPA]
266CLOUDRUN_ADDONS = ['CloudRun', 'KubeRun']
267VISIBLE_CLOUDRUN_ADDONS = ['CloudRun']
268ADDONS_OPTIONS = DEFAULT_ADDONS + [
269    DASHBOARD,
270    NETWORK_POLICY,
271    NODELOCALDNS,
272    CONFIGCONNECTOR,
273    GCEPDCSIDRIVER,
274]
275BETA_ADDONS_OPTIONS = ADDONS_OPTIONS + [
276    ISTIO,
277    APPLICATIONMANAGER,
278]
279ALPHA_ADDONS_OPTIONS = BETA_ADDONS_OPTIONS + [CLOUDBUILD]
280
281APISERVER = 'APISERVER'
282SCHEDULER = 'SCHEDULER'
283CONTROLLER_MANAGER = 'CONTROLLER_MANAGER'
284ADDON_MANAGER = 'ADDON_MANAGER'
285PRIMARY_LOGS_OPTIONS = [
286    APISERVER,
287    SCHEDULER,
288    CONTROLLER_MANAGER,
289    ADDON_MANAGER,
290]
291
292
293def CheckResponse(response):
294  """Wrap http_wrapper.CheckResponse to skip retry on 503."""
295  if response.status_code == 503:
296    raise apitools_exceptions.HttpError.FromResponse(response)
297  return http_wrapper.CheckResponse(response)
298
299
300def NewAPIAdapter(api_version):
301  if api_version == 'v1alpha1':
302    return NewV1Alpha1APIAdapter()
303  elif api_version == 'v1beta1':
304    return NewV1Beta1APIAdapter()
305  else:
306    return NewV1APIAdapter()
307
308
309def NewV1APIAdapter():
310  return InitAPIAdapter('v1', V1Adapter)
311
312
313def NewV1Beta1APIAdapter():
314  return InitAPIAdapter('v1beta1', V1Beta1Adapter)
315
316
317def NewV1Alpha1APIAdapter():
318  return InitAPIAdapter('v1alpha1', V1Alpha1Adapter)
319
320
321def InitAPIAdapter(api_version, adapter):
322  """Initialize an api adapter.
323
324  Args:
325    api_version: the api version we want.
326    adapter: the api adapter constructor.
327
328  Returns:
329    APIAdapter object.
330  """
331
332  api_client = core_apis.GetClientInstance('container', api_version)
333  api_client.check_response_func = CheckResponse
334  messages = core_apis.GetMessagesModule('container', api_version)
335
336  registry = cloud_resources.REGISTRY.Clone()
337  registry.RegisterApiByName('container', api_version)
338  registry.RegisterApiByName('compute', 'v1')
339
340  return adapter(registry, api_client, messages)
341
342
343_SERVICE_ACCOUNT_SCOPES = ('https://www.googleapis.com/auth/cloud-platform',
344                           'https://www.googleapis.com/auth/userinfo.email')
345
346
347def NodeIdentityOptionsToNodeConfig(options, node_config):
348  """Convert node identity options into node config.
349
350  If scopes are specified with the `--scopes` flag, respect them.
351  If no scopes are presented, 'gke-default' will be passed here indicating that
352  we should use the default set:
353  - If no service account is specified, default set is GKE_DEFAULT_SCOPES which
354    is handled by ExpandScopeURIs:
355    - https://www.googleapis.com/auth/devstorage.read_only,
356    - https://www.googleapis.com/auth/logging.write',
357    - https://www.googleapis.com/auth/monitoring,
358    - https://www.googleapis.com/auth/servicecontrol,
359    - https://www.googleapis.com/auth/service.management.readonly,
360    - https://www.googleapis.com/auth/trace.append,
361  - If a service account is specified, default set is _SERVICE_ACCOUNT_SCOPES:
362    - https://www.googleapis.com/auth/cloud-platform
363    - https://www.googleapis.com/auth/userinfo.email
364  Args:
365    options: the CreateCluster or CreateNodePool options.
366    node_config: the messages.node_config object to be populated.
367  """
368  if options.service_account:
369    node_config.serviceAccount = options.service_account
370    replaced_scopes = []
371    for scope in options.scopes:
372      if scope == 'gke-default':
373        replaced_scopes.extend(_SERVICE_ACCOUNT_SCOPES)
374      else:
375        replaced_scopes.append(scope)
376    options.scopes = replaced_scopes
377
378  options.scopes = ExpandScopeURIs(options.scopes)
379  node_config.oauthScopes = sorted(set(options.scopes))
380
381
382def ExpandScopeURIs(scopes):
383  """Expand scope names to the fully qualified uris.
384
385  Args:
386    scopes: [str,] list of scope names. Can be short names ('compute-rw') or
387    full urls ('https://www.googleapis.com/auth/compute'). See SCOPES in
388      api_lib/container/constants.py & api_lib/compute/constants.py.
389
390  Returns:
391    list of str, full urls for recognized scopes.
392  """
393
394  scope_uris = []
395  for scope in scopes:
396    # Expand any scope aliases (like 'storage-rw') that the user provided
397    # to their official URL representation.
398    expanded = constants.SCOPES.get(scope, [scope])
399    scope_uris.extend(expanded)
400  return scope_uris
401
402
403class CreateClusterOptions(object):
404  """Options to pass to CreateCluster."""
405
406  def __init__(
407      self,
408      node_machine_type=None,
409      node_source_image=None,
410      node_disk_size_gb=None,
411      scopes=None,
412      num_nodes=None,
413      additional_zones=None,
414      node_locations=None,
415      user=None,
416      password=None,
417      cluster_version=None,
418      node_version=None,
419      network=None,
420      cluster_ipv4_cidr=None,
421      enable_cloud_logging=None,
422      enable_cloud_monitoring=None,
423      enable_stackdriver_kubernetes=None,
424      enable_logging_monitoring_system_only=None,
425      enable_workload_monitoring_eap=None,
426      subnetwork=None,
427      addons=None,
428      istio_config=None,
429      cloud_run_config=None,
430      local_ssd_count=None,
431      local_ssd_volume_configs=None,
432      ephemeral_storage=None,
433      boot_disk_kms_key=None,
434      node_pool_name=None,
435      tags=None,
436      node_labels=None,
437      node_taints=None,
438      enable_autoscaling=None,
439      min_nodes=None,
440      max_nodes=None,
441      image_type=None,
442      image=None,
443      image_project=None,
444      image_family=None,
445      issue_client_certificate=None,
446      max_nodes_per_pool=None,
447      enable_kubernetes_alpha=None,
448      enable_cloud_run_alpha=None,
449      preemptible=None,
450      enable_autorepair=None,
451      enable_autoupgrade=None,
452      service_account=None,
453      enable_master_authorized_networks=None,
454      master_authorized_networks=None,
455      enable_legacy_authorization=None,
456      labels=None,
457      disk_type=None,
458      enable_network_policy=None,
459      enable_l4_ilb_subsetting=None,
460      services_ipv4_cidr=None,
461      enable_ip_alias=None,
462      create_subnetwork=None,
463      cluster_secondary_range_name=None,
464      services_secondary_range_name=None,
465      accelerators=None,
466      enable_binauthz=None,
467      min_cpu_platform=None,
468      workload_metadata=None,
469      workload_metadata_from_node=None,
470      maintenance_window=None,
471      enable_pod_security_policy=None,
472      allow_route_overlap=None,
473      private_cluster=None,
474      enable_private_nodes=None,
475      enable_private_endpoint=None,
476      master_ipv4_cidr=None,
477      tpu_ipv4_cidr=None,
478      enable_tpu=None,
479      enable_tpu_service_networking=None,
480      default_max_pods_per_node=None,
481      max_pods_per_node=None,
482      resource_usage_bigquery_dataset=None,
483      security_group=None,
484      enable_private_ipv6_access=None,
485      enable_intra_node_visibility=None,
486      enable_vertical_pod_autoscaling=None,
487      security_profile=None,
488      security_profile_runtime_rules=None,
489      database_encryption_key=None,
490      metadata=None,
491      enable_network_egress_metering=None,
492      enable_resource_consumption_metering=None,
493      workload_pool=None,
494      identity_provider=None,
495      workload_identity_certificate_authority=None,
496      enable_gke_oidc=None,
497      enable_shielded_nodes=None,
498      linux_sysctls=None,
499      disable_default_snat=None,
500      shielded_secure_boot=None,
501      shielded_integrity_monitoring=None,
502      system_config_from_file=None,
503      maintenance_window_start=None,
504      maintenance_window_end=None,
505      maintenance_window_recurrence=None,
506      enable_cost_management=None,
507      max_surge_upgrade=None,
508      max_unavailable_upgrade=None,
509      enable_autoprovisioning=None,
510      autoprovisioning_config_file=None,
511      autoprovisioning_service_account=None,
512      autoprovisioning_scopes=None,
513      autoprovisioning_locations=None,
514      min_cpu=None,
515      max_cpu=None,
516      min_memory=None,
517      max_memory=None,
518      min_accelerator=None,
519      max_accelerator=None,
520      autoprovisioning_max_surge_upgrade=None,
521      autoprovisioning_max_unavailable_upgrade=None,
522      enable_autoprovisioning_autoupgrade=None,
523      enable_autoprovisioning_autorepair=None,
524      reservation_affinity=None,
525      reservation=None,
526      autoprovisioning_min_cpu_platform=None,
527      enable_master_global_access=None,
528      enable_gvnic=None,
529      enable_master_metrics=None,
530      master_logs=None,
531      release_channel=None,
532      notification_config=None,
533      autopilot=None,
534      private_ipv6_google_access_type=None,
535      enable_confidential_nodes=None,
536      cluster_dns=None,
537      cluster_dns_scope=None,
538      cluster_dns_domain=None,
539      kubernetes_objects_changes_target=None,
540      kubernetes_objects_snapshots_target=None,
541      enable_gcfs=None,
542      private_endpoint_subnetwork=None,
543      cross_connect_subnetworks=None,
544  ):
545    self.node_machine_type = node_machine_type
546    self.node_source_image = node_source_image
547    self.node_disk_size_gb = node_disk_size_gb
548    self.scopes = scopes
549    self.num_nodes = num_nodes
550    self.additional_zones = additional_zones
551    self.node_locations = node_locations
552    self.user = user
553    self.password = password
554    self.cluster_version = cluster_version
555    self.node_version = node_version
556    self.network = network
557    self.cluster_ipv4_cidr = cluster_ipv4_cidr
558    self.enable_cloud_logging = enable_cloud_logging
559    self.enable_cloud_monitoring = enable_cloud_monitoring
560    self.enable_stackdriver_kubernetes = enable_stackdriver_kubernetes
561    self.enable_logging_monitoring_system_only = enable_logging_monitoring_system_only
562    self.enable_workload_monitoring_eap = enable_workload_monitoring_eap,
563    self.subnetwork = subnetwork
564    self.addons = addons
565    self.istio_config = istio_config
566    self.cloud_run_config = cloud_run_config
567    self.local_ssd_count = local_ssd_count
568    self.local_ssd_volume_configs = local_ssd_volume_configs
569    self.ephemeral_storage = ephemeral_storage
570    self.boot_disk_kms_key = boot_disk_kms_key
571    self.node_pool_name = node_pool_name
572    self.tags = tags
573    self.node_labels = node_labels
574    self.node_taints = node_taints
575    self.enable_autoscaling = enable_autoscaling
576    self.min_nodes = min_nodes
577    self.max_nodes = max_nodes
578    self.image_type = image_type
579    self.image = image
580    self.image_project = image_project
581    self.image_family = image_family
582    self.max_nodes_per_pool = max_nodes_per_pool
583    self.enable_kubernetes_alpha = enable_kubernetes_alpha
584    self.enable_cloud_run_alpha = enable_cloud_run_alpha
585    self.preemptible = preemptible
586    self.enable_autorepair = enable_autorepair
587    self.enable_autoupgrade = enable_autoupgrade
588    self.service_account = service_account
589    self.enable_master_authorized_networks = enable_master_authorized_networks
590    self.master_authorized_networks = master_authorized_networks
591    self.enable_legacy_authorization = enable_legacy_authorization
592    self.enable_network_policy = enable_network_policy
593    self.enable_l4_ilb_subsetting = enable_l4_ilb_subsetting
594    self.labels = labels
595    self.disk_type = disk_type
596    self.services_ipv4_cidr = services_ipv4_cidr
597    self.enable_ip_alias = enable_ip_alias
598    self.create_subnetwork = create_subnetwork
599    self.cluster_secondary_range_name = cluster_secondary_range_name
600    self.services_secondary_range_name = services_secondary_range_name
601    self.accelerators = accelerators
602    self.enable_binauthz = enable_binauthz
603    self.min_cpu_platform = min_cpu_platform
604    self.workload_metadata = workload_metadata
605    self.workload_metadata_from_node = workload_metadata_from_node
606    self.maintenance_window = maintenance_window
607    self.enable_pod_security_policy = enable_pod_security_policy
608    self.allow_route_overlap = allow_route_overlap
609    self.private_cluster = private_cluster
610    self.enable_private_nodes = enable_private_nodes
611    self.enable_private_endpoint = enable_private_endpoint
612    self.master_ipv4_cidr = master_ipv4_cidr
613    self.tpu_ipv4_cidr = tpu_ipv4_cidr
614    self.enable_tpu_service_networking = enable_tpu_service_networking
615    self.enable_tpu = enable_tpu
616    self.issue_client_certificate = issue_client_certificate
617    self.default_max_pods_per_node = default_max_pods_per_node
618    self.max_pods_per_node = max_pods_per_node
619    self.resource_usage_bigquery_dataset = resource_usage_bigquery_dataset
620    self.security_group = security_group
621    self.enable_private_ipv6_access = enable_private_ipv6_access
622    self.enable_intra_node_visibility = enable_intra_node_visibility
623    self.enable_vertical_pod_autoscaling = enable_vertical_pod_autoscaling
624    self.security_profile = security_profile
625    self.security_profile_runtime_rules = security_profile_runtime_rules
626    self.database_encryption_key = database_encryption_key
627    self.metadata = metadata
628    self.enable_network_egress_metering = enable_network_egress_metering
629    self.enable_resource_consumption_metering = enable_resource_consumption_metering
630    self.workload_pool = workload_pool
631    self.identity_provider = identity_provider
632    self.workload_identity_certificate_authority = workload_identity_certificate_authority
633    self.enable_gke_oidc = enable_gke_oidc
634    self.enable_shielded_nodes = enable_shielded_nodes
635    self.linux_sysctls = linux_sysctls
636    self.disable_default_snat = disable_default_snat
637    self.shielded_secure_boot = shielded_secure_boot
638    self.shielded_integrity_monitoring = shielded_integrity_monitoring
639    self.system_config_from_file = system_config_from_file
640    self.maintenance_window_start = maintenance_window_start
641    self.maintenance_window_end = maintenance_window_end
642    self.maintenance_window_recurrence = maintenance_window_recurrence
643    self.enable_cost_management = enable_cost_management
644    self.max_surge_upgrade = max_surge_upgrade
645    self.max_unavailable_upgrade = max_unavailable_upgrade
646    self.enable_autoprovisioning = enable_autoprovisioning
647    self.autoprovisioning_config_file = autoprovisioning_config_file
648    self.autoprovisioning_service_account = autoprovisioning_service_account
649    self.autoprovisioning_scopes = autoprovisioning_scopes
650    self.autoprovisioning_locations = autoprovisioning_locations
651    self.min_cpu = min_cpu
652    self.max_cpu = max_cpu
653    self.min_memory = min_memory
654    self.max_memory = max_memory
655    self.min_accelerator = min_accelerator
656    self.max_accelerator = max_accelerator
657    self.autoprovisioning_max_surge_upgrade = autoprovisioning_max_surge_upgrade
658    self.autoprovisioning_max_unavailable_upgrade = autoprovisioning_max_unavailable_upgrade
659    self.enable_autoprovisioning_autoupgrade = enable_autoprovisioning_autoupgrade
660    self.enable_autoprovisioning_autorepair = enable_autoprovisioning_autorepair
661    self.reservation_affinity = reservation_affinity
662    self.reservation = reservation
663    self.autoprovisioning_min_cpu_platform = autoprovisioning_min_cpu_platform
664    self.enable_master_global_access = enable_master_global_access
665    self.enable_gvnic = enable_gvnic
666    self.enable_master_metrics = enable_master_metrics
667    self.master_logs = master_logs
668    self.release_channel = release_channel
669    self.notification_config = notification_config
670    self.autopilot = autopilot
671    self.private_ipv6_google_access_type = private_ipv6_google_access_type
672    self.enable_confidential_nodes = enable_confidential_nodes
673    self.cluster_dns = cluster_dns
674    self.cluster_dns_scope = cluster_dns_scope
675    self.cluster_dns_domain = cluster_dns_domain
676    self.kubernetes_objects_changes_target = kubernetes_objects_changes_target
677    self.kubernetes_objects_snapshots_target = kubernetes_objects_snapshots_target
678    self.enable_gcfs = enable_gcfs
679    self.private_endpoint_subnetwork = private_endpoint_subnetwork
680    self.cross_connect_subnetworks = cross_connect_subnetworks
681
682
683class UpdateClusterOptions(object):
684  """Options to pass to UpdateCluster."""
685
686  def __init__(self,
687               version=None,
688               update_master=None,
689               update_nodes=None,
690               node_pool=None,
691               monitoring_service=None,
692               logging_service=None,
693               enable_stackdriver_kubernetes=None,
694               enable_logging_monitoring_system_only=None,
695               enable_workload_monitoring_eap=None,
696               master_logs=None,
697               no_master_logs=None,
698               enable_master_metrics=None,
699               disable_addons=None,
700               istio_config=None,
701               cloud_run_config=None,
702               cluster_dns=None,
703               cluster_dns_scope=None,
704               cluster_dns_domain=None,
705               enable_autoscaling=None,
706               min_nodes=None,
707               max_nodes=None,
708               image_type=None,
709               image=None,
710               image_project=None,
711               locations=None,
712               enable_master_authorized_networks=None,
713               master_authorized_networks=None,
714               enable_pod_security_policy=None,
715               enable_binauthz=None,
716               enable_vertical_pod_autoscaling=None,
717               enable_intra_node_visibility=None,
718               enable_l4_ilb_subsetting=None,
719               security_profile=None,
720               security_profile_runtime_rules=None,
721               autoscaling_profile=None,
722               enable_peering_route_sharing=None,
723               workload_pool=None,
724               identity_provider=None,
725               disable_workload_identity=None,
726               workload_identity_certificate_authority=None,
727               disable_workload_identity_certificates=None,
728               enable_gke_oidc=None,
729               enable_shielded_nodes=None,
730               disable_default_snat=None,
731               resource_usage_bigquery_dataset=None,
732               enable_network_egress_metering=None,
733               enable_resource_consumption_metering=None,
734               database_encryption_key=None,
735               disable_database_encryption=None,
736               enable_cost_management=None,
737               enable_autoprovisioning=None,
738               autoprovisioning_config_file=None,
739               autoprovisioning_service_account=None,
740               autoprovisioning_scopes=None,
741               autoprovisioning_locations=None,
742               min_cpu=None,
743               max_cpu=None,
744               min_memory=None,
745               max_memory=None,
746               min_accelerator=None,
747               max_accelerator=None,
748               release_channel=None,
749               autoprovisioning_max_surge_upgrade=None,
750               autoprovisioning_max_unavailable_upgrade=None,
751               enable_autoprovisioning_autoupgrade=None,
752               enable_autoprovisioning_autorepair=None,
753               autoprovisioning_min_cpu_platform=None,
754               enable_tpu=None,
755               tpu_ipv4_cidr=None,
756               enable_master_global_access=None,
757               enable_tpu_service_networking=None,
758               enable_gvnic=None,
759               notification_config=None,
760               private_ipv6_google_access_type=None,
761               kubernetes_objects_changes_target=None,
762               kubernetes_objects_snapshots_target=None,
763               disable_autopilot=None,
764               add_cross_connect_subnetworks=None,
765               remove_cross_connect_subnetworks=None,
766               clear_cross_connect_subnetworks=None):
767    self.version = version
768    self.update_master = bool(update_master)
769    self.update_nodes = bool(update_nodes)
770    self.node_pool = node_pool
771    self.monitoring_service = monitoring_service
772    self.logging_service = logging_service
773    self.enable_stackdriver_kubernetes = enable_stackdriver_kubernetes
774    self.enable_logging_monitoring_system_only = enable_logging_monitoring_system_only
775    self.enable_workload_monitoring_eap = enable_workload_monitoring_eap
776    self.no_master_logs = no_master_logs
777    self.master_logs = master_logs
778    self.enable_master_metrics = enable_master_metrics
779    self.disable_addons = disable_addons
780    self.istio_config = istio_config
781    self.cloud_run_config = cloud_run_config
782    self.cluster_dns = cluster_dns
783    self.cluster_dns_scope = cluster_dns_scope
784    self.cluster_dns_domain = cluster_dns_domain
785    self.enable_autoscaling = enable_autoscaling
786    self.min_nodes = min_nodes
787    self.max_nodes = max_nodes
788    self.image_type = image_type
789    self.image = image
790    self.image_project = image_project
791    self.locations = locations
792    self.enable_master_authorized_networks = enable_master_authorized_networks
793    self.master_authorized_networks = master_authorized_networks
794    self.enable_pod_security_policy = enable_pod_security_policy
795    self.enable_binauthz = enable_binauthz
796    self.enable_vertical_pod_autoscaling = enable_vertical_pod_autoscaling
797    self.security_profile = security_profile
798    self.security_profile_runtime_rules = security_profile_runtime_rules
799    self.autoscaling_profile = autoscaling_profile
800    self.enable_intra_node_visibility = enable_intra_node_visibility
801    self.enable_l4_ilb_subsetting = enable_l4_ilb_subsetting
802    self.enable_peering_route_sharing = enable_peering_route_sharing
803    self.workload_pool = workload_pool
804    self.identity_provider = identity_provider
805    self.disable_workload_identity = disable_workload_identity
806    self.workload_identity_certificate_authority = workload_identity_certificate_authority
807    self.disable_workload_identity_certificates = disable_workload_identity_certificates
808    self.enable_gke_oidc = enable_gke_oidc
809    self.enable_shielded_nodes = enable_shielded_nodes
810    self.disable_default_snat = disable_default_snat
811    self.resource_usage_bigquery_dataset = resource_usage_bigquery_dataset
812    self.enable_network_egress_metering = enable_network_egress_metering
813    self.enable_resource_consumption_metering = (
814        enable_resource_consumption_metering)
815    self.database_encryption_key = database_encryption_key
816    self.disable_database_encryption = disable_database_encryption
817    self.enable_cost_management = enable_cost_management
818    self.enable_autoprovisioning = enable_autoprovisioning
819    self.autoprovisioning_config_file = autoprovisioning_config_file
820    self.autoprovisioning_service_account = autoprovisioning_service_account
821    self.autoprovisioning_scopes = autoprovisioning_scopes
822    self.autoprovisioning_locations = autoprovisioning_locations
823    self.min_cpu = min_cpu
824    self.max_cpu = max_cpu
825    self.min_memory = min_memory
826    self.max_memory = max_memory
827    self.min_accelerator = min_accelerator
828    self.max_accelerator = max_accelerator
829    self.release_channel = release_channel
830    self.autoprovisioning_max_surge_upgrade = autoprovisioning_max_surge_upgrade
831    self.autoprovisioning_max_unavailable_upgrade = autoprovisioning_max_unavailable_upgrade
832    self.enable_autoprovisioning_autoupgrade = enable_autoprovisioning_autoupgrade
833    self.enable_autoprovisioning_autorepair = enable_autoprovisioning_autorepair
834    self.autoprovisioning_min_cpu_platform = autoprovisioning_min_cpu_platform
835    self.enable_tpu = enable_tpu
836    self.tpu_ipv4_cidr = tpu_ipv4_cidr
837    self.enable_tpu_service_networking = enable_tpu_service_networking
838    self.enable_master_global_access = enable_master_global_access
839    self.enable_gvnic = enable_gvnic
840    self.notification_config = notification_config
841    self.private_ipv6_google_access_type = private_ipv6_google_access_type
842    self.kubernetes_objects_changes_target = kubernetes_objects_changes_target
843    self.kubernetes_objects_snapshots_target = kubernetes_objects_snapshots_target
844    self.disable_autopilot = disable_autopilot
845    self.add_cross_connect_subnetworks = add_cross_connect_subnetworks
846    self.remove_cross_connect_subnetworks = remove_cross_connect_subnetworks
847    self.clear_cross_connect_subnetworks = clear_cross_connect_subnetworks
848
849
850class SetMasterAuthOptions(object):
851  """Options to pass to SetMasterAuth."""
852
853  SET_PASSWORD = 'SetPassword'
854  GENERATE_PASSWORD = 'GeneratePassword'
855  SET_USERNAME = 'SetUsername'
856
857  def __init__(self, action=None, username=None, password=None):
858    self.action = action
859    self.username = username
860    self.password = password
861
862
863class SetNetworkPolicyOptions(object):
864
865  def __init__(self, enabled):
866    self.enabled = enabled
867
868
869class CreateNodePoolOptions(object):
870  """Options to pass to CreateNodePool."""
871
872  def __init__(self,
873               machine_type=None,
874               disk_size_gb=None,
875               scopes=None,
876               node_version=None,
877               num_nodes=None,
878               local_ssd_count=None,
879               local_ssd_volume_configs=None,
880               ephemeral_storage=None,
881               boot_disk_kms_key=None,
882               tags=None,
883               node_labels=None,
884               node_taints=None,
885               enable_autoscaling=None,
886               max_nodes=None,
887               min_nodes=None,
888               enable_autoprovisioning=None,
889               image_type=None,
890               image=None,
891               image_project=None,
892               image_family=None,
893               preemptible=None,
894               enable_autorepair=None,
895               enable_autoupgrade=None,
896               service_account=None,
897               disk_type=None,
898               accelerators=None,
899               min_cpu_platform=None,
900               workload_metadata=None,
901               workload_metadata_from_node=None,
902               max_pods_per_node=None,
903               sandbox=None,
904               metadata=None,
905               linux_sysctls=None,
906               max_surge_upgrade=None,
907               max_unavailable_upgrade=None,
908               node_locations=None,
909               shielded_secure_boot=None,
910               shielded_integrity_monitoring=None,
911               system_config_from_file=None,
912               reservation_affinity=None,
913               reservation=None,
914               node_group=None,
915               enable_gcfs=None,
916               pod_ipv4_range=None,
917               create_pod_ipv4_range=None):
918    self.machine_type = machine_type
919    self.disk_size_gb = disk_size_gb
920    self.scopes = scopes
921    self.node_version = node_version
922    self.num_nodes = num_nodes
923    self.local_ssd_count = local_ssd_count
924    self.local_ssd_volume_configs = local_ssd_volume_configs
925    self.ephemeral_storage = ephemeral_storage
926    self.boot_disk_kms_key = boot_disk_kms_key
927    self.tags = tags
928    self.node_labels = node_labels
929    self.node_taints = node_taints
930    self.enable_autoscaling = enable_autoscaling
931    self.max_nodes = max_nodes
932    self.min_nodes = min_nodes
933    self.enable_autoprovisioning = enable_autoprovisioning
934    self.image_type = image_type
935    self.image = image
936    self.image_project = image_project
937    self.image_family = image_family
938    self.preemptible = preemptible
939    self.enable_autorepair = enable_autorepair
940    self.enable_autoupgrade = enable_autoupgrade
941    self.service_account = service_account
942    self.disk_type = disk_type
943    self.accelerators = accelerators
944    self.min_cpu_platform = min_cpu_platform
945    self.workload_metadata = workload_metadata
946    self.workload_metadata_from_node = workload_metadata_from_node
947    self.max_pods_per_node = max_pods_per_node
948    self.sandbox = sandbox
949    self.metadata = metadata
950    self.linux_sysctls = linux_sysctls
951    self.max_surge_upgrade = max_surge_upgrade
952    self.max_unavailable_upgrade = max_unavailable_upgrade
953    self.node_locations = node_locations
954    self.shielded_secure_boot = shielded_secure_boot
955    self.shielded_integrity_monitoring = shielded_integrity_monitoring
956    self.system_config_from_file = system_config_from_file
957    self.reservation_affinity = reservation_affinity
958    self.reservation = reservation
959    self.node_group = node_group
960    self.enable_gcfs = enable_gcfs
961    self.pod_ipv4_range = pod_ipv4_range
962    self.create_pod_ipv4_range = create_pod_ipv4_range
963
964
965class UpdateNodePoolOptions(object):
966  """Options to pass to UpdateNodePool."""
967
968  def __init__(self,
969               enable_autorepair=None,
970               enable_autoupgrade=None,
971               enable_autoscaling=None,
972               max_nodes=None,
973               min_nodes=None,
974               enable_autoprovisioning=None,
975               workload_metadata=None,
976               workload_metadata_from_node=None,
977               node_locations=None,
978               max_surge_upgrade=None,
979               max_unavailable_upgrade=None,
980               system_config_from_file=None,
981               node_labels=None,
982               node_taints=None,
983               tags=None):
984    self.enable_autorepair = enable_autorepair
985    self.enable_autoupgrade = enable_autoupgrade
986    self.enable_autoscaling = enable_autoscaling
987    self.max_nodes = max_nodes
988    self.min_nodes = min_nodes
989    self.enable_autoprovisioning = enable_autoprovisioning
990    self.workload_metadata = workload_metadata
991    self.workload_metadata_from_node = workload_metadata_from_node
992    self.node_locations = node_locations
993    self.max_surge_upgrade = max_surge_upgrade
994    self.max_unavailable_upgrade = max_unavailable_upgrade
995    self.system_config_from_file = system_config_from_file
996    self.node_labels = node_labels
997    self.node_taints = node_taints
998    self.tags = tags
999
1000  def IsAutoscalingUpdate(self):
1001    return (self.enable_autoscaling is not None or self.max_nodes is not None or
1002            self.min_nodes is not None or
1003            self.enable_autoprovisioning is not None)
1004
1005  def IsNodePoolManagementUpdate(self):
1006    return (self.enable_autorepair is not None or
1007            self.enable_autoupgrade is not None)
1008
1009  def IsUpdateNodePoolRequest(self):
1010    return (self.workload_metadata is not None or
1011            self.workload_metadata_from_node is not None or
1012            self.node_locations is not None or
1013            self.max_surge_upgrade is not None or
1014            self.max_unavailable_upgrade is not None or
1015            self.system_config_from_file is not None or
1016            self.node_labels is not None or
1017            self.node_taints is not None or
1018            self.tags is not None)
1019
1020
1021class APIAdapter(object):
1022  """Handles making api requests in a version-agnostic way."""
1023
1024  def __init__(self, registry, client, messages):
1025    self.registry = registry
1026    self.client = client
1027    self.messages = messages
1028
1029  def ParseCluster(self, name, location, project=None):
1030    project = project or properties.VALUES.core.project.GetOrFail()
1031    # Note: we don't directly use container.projects.locations.clusters, etc,
1032    # because it has different fields and thus would change the rest of our
1033    # code heavily.
1034    return self.registry.Parse(
1035        util.LocationalResourceToZonal(name),
1036        params={
1037            'projectId': project,
1038            'zone': location,
1039        },
1040        collection='container.projects.zones.clusters')
1041
1042  def ParseOperation(self, operation_id, location, project=None):
1043    project = project or properties.VALUES.core.project.GetOrFail()
1044    return self.registry.Parse(
1045        util.LocationalResourceToZonal(operation_id),
1046        params={
1047            'projectId': project,
1048            'zone': location,
1049        },
1050        collection='container.projects.zones.operations')
1051
1052  def ParseNodePool(self, node_pool_id, location, project=None):
1053    project = project or properties.VALUES.core.project.GetOrFail()
1054    return self.registry.Parse(
1055        util.LocationalResourceToZonal(node_pool_id),
1056        params={
1057            'projectId': project,
1058            'clusterId': properties.VALUES.container.cluster.GetOrFail,
1059            'zone': location,
1060        },
1061        collection='container.projects.zones.clusters.nodePools')
1062
1063  def GetCluster(self, cluster_ref):
1064    """Get a running cluster.
1065
1066    Args:
1067      cluster_ref: cluster Resource to describe.
1068
1069    Returns:
1070      Cluster message.
1071    Raises:
1072      Error: if cluster cannot be found or caller is missing permissions. Will
1073        attempt to find similar clusters in other zones for a more useful error
1074        if the user has list permissions.
1075    """
1076    try:
1077      return self.client.projects_locations_clusters.Get(
1078          self.messages.ContainerProjectsLocationsClustersGetRequest(
1079              name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref
1080                                          .zone, cluster_ref.clusterId)))
1081    except apitools_exceptions.HttpNotFoundError as error:
1082      api_error = exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
1083      # Cluster couldn't be found, maybe user got the location wrong?
1084      self.CheckClusterOtherZones(cluster_ref, api_error)
1085    except apitools_exceptions.HttpError as error:
1086      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
1087
1088  def CheckClusterOtherZones(self, cluster_ref, api_error):
1089    """Searches for similar clusters in other locations and reports via error.
1090
1091    Args:
1092      cluster_ref: cluster Resource to look for others with the same ID in
1093        different locations.
1094      api_error: current error from original request.
1095
1096    Raises:
1097      Error: wrong zone error if another similar cluster found, otherwise not
1098      found error.
1099    """
1100    not_found_error = util.Error(
1101        NO_SUCH_CLUSTER_ERROR_MSG.format(
1102            error=api_error,
1103            name=cluster_ref.clusterId,
1104            project=cluster_ref.projectId))
1105    try:
1106      clusters = self.ListClusters(cluster_ref.projectId).clusters
1107    except apitools_exceptions.HttpForbiddenError as error:
1108      # Raise the default 404 Not Found error.
1109      # 403 Forbidden error shouldn't be raised for this unrequested list.
1110      raise not_found_error
1111    except apitools_exceptions.HttpError as error:
1112      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
1113    for cluster in clusters:
1114      if cluster.name == cluster_ref.clusterId:
1115        # Fall back to generic not found error if we *did* have the zone right.
1116        # Don't allow the case of a same-name cluster in a different zone to
1117        # be hinted (confusing!).
1118        if cluster.zone == cluster_ref.zone:
1119          raise api_error
1120
1121        # User likely got zone wrong.
1122        raise util.Error(
1123            WRONG_ZONE_ERROR_MSG.format(
1124                error=api_error,
1125                name=cluster_ref.clusterId,
1126                wrong_zone=self.Zone(cluster_ref),
1127                zone=cluster.zone))
1128    # Couldn't find a cluster with that name.
1129    raise not_found_error
1130
1131  def FindNodePool(self, cluster, pool_name=None):
1132    """Find the node pool with the given name in the cluster."""
1133    msg = ''
1134    if pool_name:
1135      for np in cluster.nodePools:
1136        if np.name == pool_name:
1137          return np
1138      msg = NO_SUCH_NODE_POOL_ERROR_MSG.format(
1139          cluster=cluster.name, name=pool_name) + os.linesep
1140    elif len(cluster.nodePools) == 1:
1141      return cluster.nodePools[0]
1142    # Couldn't find a node pool with that name or a node pool was not specified.
1143    msg += NO_NODE_POOL_SELECTED_ERROR_MSG + os.linesep.join(
1144        [np.name for np in cluster.nodePools])
1145    raise util.Error(msg)
1146
1147  def GetOperation(self, operation_ref):
1148    return self.client.projects_locations_operations.Get(
1149        self.messages.ContainerProjectsLocationsOperationsGetRequest(
1150            name=ProjectLocationOperation(operation_ref.projectId, operation_ref
1151                                          .zone, operation_ref.operationId)))
1152
1153  def WaitForOperation(self,
1154                       operation_ref,
1155                       message,
1156                       timeout_s=1200,
1157                       poll_period_s=5):
1158    """Poll container Operation until its status is done or timeout reached.
1159
1160    Args:
1161      operation_ref: operation resource.
1162      message: str, message to display to user while polling.
1163      timeout_s: number, seconds to poll with retries before timing out.
1164      poll_period_s: number, delay in seconds between requests.
1165
1166    Returns:
1167      Operation: the return value of the last successful operations.get
1168      request.
1169
1170    Raises:
1171      Error: if the operation times out or finishes with an error.
1172    """
1173    detail_message = None
1174    with progress_tracker.ProgressTracker(
1175        message, autotick=True, detail_message_callback=lambda: detail_message):
1176      start_time = time.time()
1177      while timeout_s > (time.time() - start_time):
1178        try:
1179          operation = self.GetOperation(operation_ref)
1180          if self.IsOperationFinished(operation):
1181            # Success!
1182            log.info('Operation %s succeeded after %.3f seconds', operation,
1183                     (time.time() - start_time))
1184            break
1185          detail_message = operation.detail
1186        except apitools_exceptions.HttpError as error:
1187          log.debug('GetOperation failed: %s', error)
1188          if error.status_code == six.moves.http_client.FORBIDDEN:
1189            raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
1190          # Keep trying until we timeout in case error is transient.
1191        time.sleep(poll_period_s)
1192    if not self.IsOperationFinished(operation):
1193      log.err.Print('Timed out waiting for operation {0}'.format(operation))
1194      raise util.Error('Operation [{0}] is still running'.format(operation))
1195    if self.GetOperationError(operation):
1196      raise util.Error('Operation [{0}] finished with error: {1}'.format(
1197          operation, self.GetOperationError(operation)))
1198
1199    return operation
1200
1201  def Zone(self, cluster_ref):
1202    # TODO(b/72146704): Remove this method.
1203    return cluster_ref.zone
1204
1205  def CreateClusterCommon(self, cluster_ref, options):
1206    """Returns a CreateCluster operation."""
1207    node_config = self.ParseNodeConfig(options)
1208    pools = self.ParseNodePools(options, node_config)
1209
1210    cluster = self.messages.Cluster(name=cluster_ref.clusterId, nodePools=pools)
1211    if options.additional_zones:
1212      cluster.locations = sorted([cluster_ref.zone] + options.additional_zones)
1213    if options.node_locations:
1214      cluster.locations = sorted(options.node_locations)
1215    if options.cluster_version:
1216      cluster.initialClusterVersion = options.cluster_version
1217    if options.network:
1218      cluster.network = options.network
1219    if options.cluster_ipv4_cidr:
1220      cluster.clusterIpv4Cidr = options.cluster_ipv4_cidr
1221    if options.enable_stackdriver_kubernetes is not None:
1222      # When "enable-stackdriver-kubernetes" is specified, either true or false.
1223      if options.enable_stackdriver_kubernetes:
1224        cluster.loggingService = 'logging.googleapis.com/kubernetes'
1225        cluster.monitoringService = 'monitoring.googleapis.com/kubernetes'
1226        # When "enable-stackdriver-kubernetes" is true, the
1227        # "enable-cloud-logging" and "enable-cloud-monitoring" flags can be
1228        # used to explicitly disable logging or monitoring
1229        if (options.enable_cloud_logging is not None and
1230            not options.enable_cloud_logging):
1231          cluster.loggingService = 'none'
1232        if (options.enable_cloud_monitoring is not None and
1233            not options.enable_cloud_monitoring):
1234          cluster.monitoringService = 'none'
1235      else:
1236        cluster.loggingService = 'none'
1237        cluster.monitoringService = 'none'
1238    # When "enable-stackdriver-kubernetes" is unspecified, checks whether
1239    # "enable-cloud-logging" or "enable-cloud-monitoring" options are specified.
1240    else:
1241      if options.enable_cloud_logging is not None:
1242        if options.enable_cloud_logging:
1243          cluster.loggingService = 'logging.googleapis.com'
1244        else:
1245          cluster.loggingService = 'none'
1246      if options.enable_cloud_monitoring is not None:
1247        if options.enable_cloud_monitoring:
1248          cluster.monitoringService = 'monitoring.googleapis.com'
1249        else:
1250          cluster.monitoringService = 'none'
1251    if options.subnetwork:
1252      cluster.subnetwork = options.subnetwork
1253    if options.addons:
1254      addons = self._AddonsConfig(
1255          disable_ingress=INGRESS not in options.addons,
1256          disable_hpa=HPA not in options.addons,
1257          disable_dashboard=DASHBOARD not in options.addons,
1258          disable_network_policy=(NETWORK_POLICY not in options.addons),
1259          enable_node_local_dns=(NODELOCALDNS in options.addons or None),
1260          enable_gcepd_csi_driver=(GCEPDCSIDRIVER in options.addons),
1261          enable_application_manager=(APPLICATIONMANAGER in options.addons),
1262          enable_cloud_build=(CLOUDBUILD in options.addons),
1263      )
1264      # CONFIGCONNECTOR is disabled by default.
1265      if CONFIGCONNECTOR in options.addons:
1266        if not options.enable_stackdriver_kubernetes:
1267          raise util.Error(
1268              CONFIGCONNECTOR_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG)
1269        if options.workload_pool is None:
1270          raise util.Error(CONFIGCONNECTOR_WORKLOAD_IDENTITY_DISABLED_ERROR_MSG)
1271        addons.configConnectorConfig = self.messages.ConfigConnectorConfig(
1272            enabled=True)
1273      cluster.addonsConfig = addons
1274    self.ParseMasterAuthorizedNetworkOptions(options, cluster)
1275
1276    if options.enable_kubernetes_alpha:
1277      cluster.enableKubernetesAlpha = options.enable_kubernetes_alpha
1278
1279    if options.default_max_pods_per_node is not None:
1280      if not options.enable_ip_alias:
1281        raise util.Error(DEFAULT_MAX_PODS_PER_NODE_WITHOUT_IP_ALIAS_ERROR_MSG)
1282      cluster.defaultMaxPodsConstraint = self.messages.MaxPodsConstraint(
1283          maxPodsPerNode=options.default_max_pods_per_node)
1284
1285    if options.disable_default_snat:
1286      if not options.enable_ip_alias:
1287        raise util.Error(DISABLE_DEFAULT_SNAT_WITHOUT_IP_ALIAS_ERROR_MSG)
1288      if not options.enable_private_nodes:
1289        raise util.Error(DISABLE_DEFAULT_SNAT_WITHOUT_PRIVATE_NODES_ERROR_MSG)
1290      default_snat_status = self.messages.DefaultSnatStatus(
1291          disabled=options.disable_default_snat)
1292      if cluster.networkConfig is None:
1293        cluster.networkConfig = self.messages.NetworkConfig(
1294            defaultSnatStatus=default_snat_status)
1295      else:
1296        cluster.networkConfig.defaultSnatStatus = default_snat_status
1297
1298    if options.enable_l4_ilb_subsetting:
1299      if cluster.networkConfig is None:
1300        cluster.networkConfig = self.messages.NetworkConfig(
1301            enableL4ilbSubsetting=options.enable_l4_ilb_subsetting)
1302      else:
1303        cluster.networkConfig.enableL4ilbSubsetting = options.enable_l4_ilb_subsetting
1304
1305    dns_config = self.ParseClusterDNSOptions(options)
1306    if dns_config is not None:
1307      if cluster.networkConfig is None:
1308        cluster.networkConfig = self.messages.NetworkConfig(
1309            dnsConfig=dns_config)
1310      else:
1311        cluster.networkConfig.dnsConfig = dns_config
1312
1313    if options.enable_legacy_authorization is not None:
1314      cluster.legacyAbac = self.messages.LegacyAbac(
1315          enabled=bool(options.enable_legacy_authorization))
1316
1317    # Only Calico is currently supported as a network policy provider.
1318    if options.enable_network_policy:
1319      cluster.networkPolicy = self.messages.NetworkPolicy(
1320          enabled=options.enable_network_policy,
1321          provider=self.messages.NetworkPolicy.ProviderValueValuesEnum.CALICO)
1322
1323    if options.enable_binauthz is not None:
1324      cluster.binaryAuthorization = self.messages.BinaryAuthorization(
1325          enabled=options.enable_binauthz)
1326
1327    if options.maintenance_window is not None:
1328      cluster.maintenancePolicy = self.messages.MaintenancePolicy(
1329          window=self.messages.MaintenanceWindow(
1330              dailyMaintenanceWindow=self.messages.DailyMaintenanceWindow(
1331                  startTime=options.maintenance_window)))
1332    elif options.maintenance_window_start is not None:
1333      window_start = options.maintenance_window_start.isoformat()
1334      window_end = options.maintenance_window_end.isoformat()
1335      cluster.maintenancePolicy = self.messages.MaintenancePolicy(
1336          window=self.messages.MaintenanceWindow(
1337              recurringWindow=self.messages.RecurringTimeWindow(
1338                  window=self.messages.TimeWindow(
1339                      startTime=window_start, endTime=window_end),
1340                  recurrence=options.maintenance_window_recurrence)))
1341
1342    self.ParseResourceLabels(options, cluster)
1343
1344    if options.enable_pod_security_policy is not None:
1345      cluster.podSecurityPolicyConfig = self.messages.PodSecurityPolicyConfig(
1346          enabled=options.enable_pod_security_policy)
1347
1348    if options.security_group is not None:
1349      # The presence of the --security_group="foo" flag implies enabled=True.
1350      cluster.authenticatorGroupsConfig = (
1351          self.messages.AuthenticatorGroupsConfig(
1352              enabled=True, securityGroup=options.security_group))
1353    if options.enable_shielded_nodes is not None:
1354      cluster.shieldedNodes = self.messages.ShieldedNodes(
1355          enabled=options.enable_shielded_nodes)
1356
1357    self.ParseIPAliasOptions(options, cluster)
1358    self.ParseAllowRouteOverlapOptions(options, cluster)
1359    self.ParsePrivateClusterOptions(options, cluster)
1360    self.ParseTpuOptions(options, cluster)
1361    if options.enable_vertical_pod_autoscaling is not None:
1362      cluster.verticalPodAutoscaling = self.messages.VerticalPodAutoscaling(
1363          enabled=options.enable_vertical_pod_autoscaling)
1364
1365    if options.resource_usage_bigquery_dataset:
1366      bigquery_destination = self.messages.BigQueryDestination(
1367          datasetId=options.resource_usage_bigquery_dataset)
1368      cluster.resourceUsageExportConfig = \
1369          self.messages.ResourceUsageExportConfig(
1370              bigqueryDestination=bigquery_destination)
1371      if options.enable_network_egress_metering:
1372        cluster.resourceUsageExportConfig.enableNetworkEgressMetering = True
1373      if options.enable_resource_consumption_metering is not None:
1374        cluster.resourceUsageExportConfig.consumptionMeteringConfig = \
1375            self.messages.ConsumptionMeteringConfig(
1376                enabled=options.enable_resource_consumption_metering)
1377    elif options.enable_network_egress_metering is not None:
1378      raise util.Error(ENABLE_NETWORK_EGRESS_METERING_ERROR_MSG)
1379    elif options.enable_resource_consumption_metering is not None:
1380      raise util.Error(ENABLE_RESOURCE_CONSUMPTION_METERING_ERROR_MSG)
1381
1382    # Only instantiate the masterAuth struct if one or both of `user` or
1383    # `issue_client_certificate` is configured. Server-side Basic auth default
1384    # behavior is dependent on the absence of the MasterAuth struct. For this
1385    # reason, if only `issue_client_certificate` is configured, Basic auth will
1386    # be disabled.
1387    if options.user is not None or options.issue_client_certificate is not None:
1388      cluster.masterAuth = self.messages.MasterAuth(
1389          username=options.user, password=options.password)
1390      if options.issue_client_certificate is not None:
1391        cluster.masterAuth.clientCertificateConfig = (
1392            self.messages.ClientCertificateConfig(
1393                issueClientCertificate=options.issue_client_certificate))
1394
1395    if options.enable_intra_node_visibility is not None:
1396      if cluster.networkConfig is None:
1397        cluster.networkConfig = self.messages.NetworkConfig(
1398            enableIntraNodeVisibility=options.enable_intra_node_visibility)
1399      else:
1400        cluster.networkConfig.enableIntraNodeVisibility = \
1401            options.enable_intra_node_visibility
1402
1403    if options.database_encryption_key:
1404      cluster.databaseEncryption = self.messages.DatabaseEncryption(
1405          keyName=options.database_encryption_key,
1406          state=self.messages.DatabaseEncryption.StateValueValuesEnum.ENCRYPTED)
1407
1408    if options.enable_gvnic:
1409      cluster.enableGvnic = options.enable_gvnic
1410
1411    if options.boot_disk_kms_key:
1412      for pool in cluster.nodePools:
1413        pool.config.bootDiskKmsKey = options.boot_disk_kms_key
1414
1415    cluster.releaseChannel = _GetReleaseChannel(options, self.messages)
1416
1417    if options.autopilot:
1418      cluster.autopilot = self.messages.Autopilot()
1419      cluster.autopilot.enabled = True
1420
1421    if options.enable_confidential_nodes:
1422      cluster.confidentialNodes = self.messages.ConfidentialNodes(
1423          enabled=options.enable_confidential_nodes)
1424
1425    if options.private_ipv6_google_access_type is not None:
1426      if cluster.networkConfig is None:
1427        cluster.networkConfig = self.messages.NetworkConfig()
1428      cluster.networkConfig.privateIpv6GoogleAccess = util.GetPrivateIpv6GoogleAccessTypeMapper(
1429          self.messages, hidden=False).GetEnumForChoice(
1430              options.private_ipv6_google_access_type)
1431
1432    _AddNotificationConfigToCluster(cluster, options, self.messages)
1433
1434    return cluster
1435
1436  def ParseNodeConfig(self, options):
1437    """Creates node config based on node config options."""
1438    node_config = self.messages.NodeConfig()
1439    if options.node_machine_type:
1440      node_config.machineType = options.node_machine_type
1441    if options.node_disk_size_gb:
1442      node_config.diskSizeGb = options.node_disk_size_gb
1443    if options.disk_type:
1444      node_config.diskType = options.disk_type
1445    if options.node_source_image:
1446      raise util.Error('cannot specify node source image in container v1 api')
1447
1448    NodeIdentityOptionsToNodeConfig(options, node_config)
1449
1450    if options.local_ssd_count:
1451      node_config.localSsdCount = options.local_ssd_count
1452    self._AddLocalSSDVolumeConfigsToNodeConfig(node_config, options)
1453    self._AddEphemeralStorageToNodeConfig(node_config, options)
1454
1455    if options.tags:
1456      node_config.tags = options.tags
1457    else:
1458      node_config.tags = []
1459
1460    if options.image_type:
1461      node_config.imageType = options.image_type
1462
1463    self.ParseCustomNodeConfig(options, node_config)
1464
1465    _AddNodeLabelsToNodeConfig(node_config, options)
1466    _AddMetadataToNodeConfig(node_config, options)
1467    self._AddNodeTaintsToNodeConfig(node_config, options)
1468
1469    if options.preemptible:
1470      node_config.preemptible = options.preemptible
1471
1472    self.ParseAcceleratorOptions(options, node_config)
1473
1474    if options.min_cpu_platform is not None:
1475      node_config.minCpuPlatform = options.min_cpu_platform
1476
1477    if options.enable_gcfs:
1478      gcfs_config = self.messages.GcfsConfig(enabled=options.enable_gcfs)
1479      node_config.gcfsConfig = gcfs_config
1480
1481    self._AddWorkloadMetadataToNodeConfig(node_config, options, self.messages)
1482    _AddLinuxNodeConfigToNodeConfig(node_config, options, self.messages)
1483    _AddShieldedInstanceConfigToNodeConfig(node_config, options, self.messages)
1484    _AddReservationAffinityToNodeConfig(node_config, options, self.messages)
1485
1486    if options.system_config_from_file is not None:
1487      util.LoadSystemConfigFromYAML(node_config,
1488                                    options.system_config_from_file,
1489                                    self.messages)
1490
1491    return node_config
1492
1493  def ParseCustomNodeConfig(self, options, node_config):
1494    """Parses custom node config options."""
1495    custom_config = self.messages.CustomImageConfig()
1496    if options.image:
1497      custom_config.image = options.image
1498    if options.image_project:
1499      custom_config.imageProject = options.image_project
1500    if options.image_family:
1501      custom_config.imageFamily = options.image_family
1502    if options.image or options.image_project or options.image_family:
1503      node_config.nodeImageConfig = custom_config
1504
1505  def ParseNodePools(self, options, node_config):
1506    """Creates a list of node pools for the cluster by parsing options.
1507
1508    Args:
1509      options: cluster creation options
1510      node_config: node configuration for nodes in the node pools
1511
1512    Returns:
1513      List of node pools.
1514    """
1515    max_nodes_per_pool = options.max_nodes_per_pool or MAX_NODES_PER_POOL
1516    pools = (options.num_nodes + max_nodes_per_pool - 1) // max_nodes_per_pool
1517    if pools == 1:
1518      pool_names = ['default-pool']  # pool consistency with server default
1519    else:
1520      # default-pool-0, -1, ...
1521      pool_names = ['default-pool-{0}'.format(i) for i in range(0, pools)]
1522
1523    pools = []
1524    per_pool = (options.num_nodes + len(pool_names) - 1) // len(pool_names)
1525    to_add = options.num_nodes
1526    for name in pool_names:
1527      nodes = per_pool if (to_add > per_pool) else to_add
1528      pool = self.messages.NodePool(
1529          name=name,
1530          initialNodeCount=nodes,
1531          config=node_config,
1532          version=options.node_version,
1533          management=self._GetNodeManagement(options))
1534      if options.enable_autoscaling:
1535        pool.autoscaling = self.messages.NodePoolAutoscaling(
1536            enabled=options.enable_autoscaling,
1537            minNodeCount=options.min_nodes,
1538            maxNodeCount=options.max_nodes)
1539      if options.max_pods_per_node:
1540        if not options.enable_ip_alias:
1541          raise util.Error(MAX_PODS_PER_NODE_WITHOUT_IP_ALIAS_ERROR_MSG)
1542        pool.maxPodsConstraint = self.messages.MaxPodsConstraint(
1543            maxPodsPerNode=options.max_pods_per_node)
1544      if (options.max_surge_upgrade is not None or
1545          options.max_unavailable_upgrade is not None):
1546        pool.upgradeSettings = self.messages.UpgradeSettings()
1547        pool.upgradeSettings.maxSurge = options.max_surge_upgrade
1548        pool.upgradeSettings.maxUnavailable = options.max_unavailable_upgrade
1549      pools.append(pool)
1550      to_add -= nodes
1551    return pools
1552
1553  def ParseAcceleratorOptions(self, options, node_config):
1554    """Parses accrelerator options for the nodes in the cluster."""
1555    if options.accelerators is not None:
1556      type_name = options.accelerators['type']
1557      # Accelerator count defaults to 1.
1558      count = int(options.accelerators.get('count', 1))
1559      node_config.accelerators = [
1560          self.messages.AcceleratorConfig(
1561              acceleratorType=type_name, acceleratorCount=count)
1562      ]
1563
1564  def ParseResourceLabels(self, options, cluster):
1565    """Parses resource labels options for the cluster."""
1566    if options.labels is not None:
1567      labels = self.messages.Cluster.ResourceLabelsValue()
1568      props = []
1569      for k, v in sorted(six.iteritems(options.labels)):
1570        props.append(labels.AdditionalProperty(key=k, value=v))
1571      labels.additionalProperties = props
1572      cluster.resourceLabels = labels
1573
1574  def ParseIPAliasOptions(self, options, cluster):
1575    """Parses the options for IP Alias."""
1576    ip_alias_only_options = [
1577        ('services-ipv4-cidr', options.services_ipv4_cidr),
1578        ('create-subnetwork', options.create_subnetwork),
1579        ('cluster-secondary-range-name', options.cluster_secondary_range_name),
1580        ('services-secondary-range-name', options.services_secondary_range_name)
1581    ]
1582    if not options.enable_ip_alias:
1583      for name, opt in ip_alias_only_options:
1584        if opt:
1585          raise util.Error(
1586              PREREQUISITE_OPTION_ERROR_MSG.format(
1587                  prerequisite='enable-ip-alias', opt=name))
1588
1589    if options.subnetwork and options.create_subnetwork is not None:
1590      raise util.Error(CREATE_SUBNETWORK_WITH_SUBNETWORK_ERROR_MSG)
1591
1592    if options.enable_ip_alias:
1593      subnetwork_name = None
1594      node_ipv4_cidr = None
1595
1596      if options.create_subnetwork is not None:
1597        for key in options.create_subnetwork:
1598          if key not in ['name', 'range']:
1599            raise util.Error(
1600                CREATE_SUBNETWORK_INVALID_KEY_ERROR_MSG.format(key=key))
1601        subnetwork_name = options.create_subnetwork.get('name', None)
1602        node_ipv4_cidr = options.create_subnetwork.get('range', None)
1603
1604      policy = self.messages.IPAllocationPolicy(
1605          useIpAliases=options.enable_ip_alias,
1606          createSubnetwork=options.create_subnetwork is not None,
1607          subnetworkName=subnetwork_name,
1608          clusterIpv4CidrBlock=options.cluster_ipv4_cidr,
1609          nodeIpv4CidrBlock=node_ipv4_cidr,
1610          servicesIpv4CidrBlock=options.services_ipv4_cidr,
1611          clusterSecondaryRangeName=options.cluster_secondary_range_name,
1612          servicesSecondaryRangeName=options.services_secondary_range_name)
1613      if options.tpu_ipv4_cidr:
1614        policy.tpuIpv4CidrBlock = options.tpu_ipv4_cidr
1615      cluster.clusterIpv4Cidr = None
1616      cluster.ipAllocationPolicy = policy
1617    elif options.enable_ip_alias is not None:
1618      cluster.ipAllocationPolicy = self.messages.IPAllocationPolicy(
1619          useRoutes=True)
1620
1621    return cluster
1622
1623  def ParseAllowRouteOverlapOptions(self, options, cluster):
1624    """Parse the options for allow route overlap."""
1625    if not options.allow_route_overlap:
1626      return
1627    if options.enable_ip_alias is None:
1628      raise util.Error(ALLOW_ROUTE_OVERLAP_WITHOUT_EXPLICIT_NETWORK_MODE)
1629    # Validate required flags are set.
1630    if options.cluster_ipv4_cidr is None:
1631      raise util.Error(ALLOW_ROUTE_OVERLAP_WITHOUT_CLUSTER_CIDR_ERROR_MSG)
1632    if options.enable_ip_alias and options.services_ipv4_cidr is None:
1633      raise util.Error(ALLOW_ROUTE_OVERLAP_WITHOUT_SERVICES_CIDR_ERROR_MSG)
1634
1635    # Fill in corresponding field.
1636    if cluster.ipAllocationPolicy is None:
1637      policy = self.messages.IPAllocationPolicy(allowRouteOverlap=True)
1638      cluster.ipAllocationPolicy = policy
1639    else:
1640      cluster.ipAllocationPolicy.allowRouteOverlap = True
1641
1642  def ParsePrivateClusterOptions(self, options, cluster):
1643    """Parses the options for Private Clusters."""
1644    if (options.enable_private_nodes is not None and
1645        options.private_cluster is not None):
1646      raise util.Error(ENABLE_PRIVATE_NODES_WITH_PRIVATE_CLUSTER_ERROR_MSG)
1647
1648    if options.enable_private_nodes is None:
1649      options.enable_private_nodes = options.private_cluster
1650
1651    if options.enable_private_nodes and not options.enable_ip_alias:
1652      raise util.Error(
1653          PREREQUISITE_OPTION_ERROR_MSG.format(
1654              prerequisite='enable-ip-alias', opt='enable-private-nodes'))
1655
1656    if options.enable_private_endpoint and not options.enable_private_nodes:
1657      raise util.Error(
1658          PREREQUISITE_OPTION_ERROR_MSG.format(
1659              prerequisite='enable-private-nodes',
1660              opt='enable-private-endpoint'))
1661
1662    if options.master_ipv4_cidr and not options.enable_private_nodes:
1663      raise util.Error(
1664          PREREQUISITE_OPTION_ERROR_MSG.format(
1665              prerequisite='enable-private-nodes', opt='master-ipv4-cidr'))
1666
1667    if options.enable_private_nodes:
1668      config = self.messages.PrivateClusterConfig(
1669          enablePrivateNodes=options.enable_private_nodes,
1670          enablePrivateEndpoint=options.enable_private_endpoint,
1671          masterIpv4CidrBlock=options.master_ipv4_cidr)
1672      cluster.privateClusterConfig = config
1673    return cluster
1674
1675  def ParseTpuOptions(self, options, cluster):
1676    """Parses the options for TPUs."""
1677    if options.enable_tpu and not options.enable_ip_alias:
1678      # Raises error if use --enable-tpu without --enable-ip-alias.
1679      raise util.Error(
1680          PREREQUISITE_OPTION_ERROR_MSG.format(
1681              prerequisite='enable-ip-alias', opt='enable-tpu'))
1682
1683    if not options.enable_tpu and options.tpu_ipv4_cidr:
1684      # Raises error if use --tpu-ipv4-cidr without --enable-tpu.
1685      raise util.Error(
1686          PREREQUISITE_OPTION_ERROR_MSG.format(
1687              prerequisite='enable-tpu', opt='tpu-ipv4-cidr'))
1688
1689    if not options.enable_tpu and options.enable_tpu_service_networking:
1690      # Raises error if use --enable-tpu-service-networking without
1691      # --enable-tpu.
1692      raise util.Error(
1693          PREREQUISITE_OPTION_ERROR_MSG.format(
1694              prerequisite='enable-tpu', opt='enable-tpu-service-networking'))
1695
1696    if options.enable_tpu:
1697      cluster.enableTpu = options.enable_tpu
1698      if options.enable_tpu_service_networking:
1699        tpu_config = self.messages.TpuConfig(
1700            enabled=options.enable_tpu,
1701            ipv4CidrBlock=options.tpu_ipv4_cidr,
1702            useServiceNetworking=options.enable_tpu_service_networking)
1703        cluster.tpuConfig = tpu_config
1704
1705  def ParseMasterAuthorizedNetworkOptions(self, options, cluster):
1706    """Parses the options for master authorized networks."""
1707    if (options.master_authorized_networks and
1708        not options.enable_master_authorized_networks):
1709      # Raise error if use --master-authorized-networks without
1710      # --enable-master-authorized-networks.
1711      raise util.Error(MISMATCH_AUTHORIZED_NETWORKS_ERROR_MSG)
1712    elif options.enable_master_authorized_networks is None:
1713      cluster.masterAuthorizedNetworksConfig = None
1714    elif not options.enable_master_authorized_networks:
1715      authorized_networks = self.messages.MasterAuthorizedNetworksConfig(
1716          enabled=False)
1717      cluster.masterAuthorizedNetworksConfig = authorized_networks
1718    else:
1719      authorized_networks = self.messages.MasterAuthorizedNetworksConfig(
1720          enabled=options.enable_master_authorized_networks)
1721      if options.master_authorized_networks:
1722        for network in options.master_authorized_networks:
1723          authorized_networks.cidrBlocks.append(
1724              self.messages.CidrBlock(cidrBlock=network))
1725      cluster.masterAuthorizedNetworksConfig = authorized_networks
1726
1727  def ParseClusterDNSOptions(self, options):
1728    """Parses the options for ClusterDNS."""
1729    if options.cluster_dns is None:
1730      if options.cluster_dns_scope:
1731        raise util.Error(
1732            PREREQUISITE_OPTION_ERROR_MSG.format(
1733                prerequisite='cluster-dns', opt='cluster-dns-scope'))
1734      if options.cluster_dns_domain:
1735        raise util.Error(
1736            PREREQUISITE_OPTION_ERROR_MSG.format(
1737                prerequisite='cluster-dns', opt='cluster-dns-domain'))
1738      return
1739
1740    dns_config = self.messages.DNSConfig()
1741    provider_enum = self.messages.DNSConfig.ClusterDnsValueValuesEnum
1742    if options.cluster_dns.lower() == 'clouddns':
1743      dns_config.clusterDns = provider_enum.CLOUD_DNS
1744    else:
1745      dns_config.clusterDns = provider_enum.PLATFORM_DEFAULT
1746
1747    if options.cluster_dns_scope is not None:
1748      scope_enum = self.messages.DNSConfig.ClusterDnsScopeValueValuesEnum
1749      if options.cluster_dns_scope.lower() == 'cluster':
1750        dns_config.clusterDnsScope = scope_enum.CLUSTER_SCOPE
1751      else:
1752        dns_config.clusterDnsScope = scope_enum.VPC_SCOPE
1753
1754    if options.cluster_dns_domain is not None:
1755      dns_config.clusterDnsDomain = options.cluster_dns_domain
1756    return dns_config
1757
1758  def CreateCluster(self, cluster_ref, options):
1759    """Handles CreateCluster options that are specific to a release track.
1760
1761    Overridden in each release track.
1762
1763    Args:
1764      cluster_ref: Name and location of the cluster.
1765      options: An UpdateClusterOptions containining the user-specified options.
1766
1767    Returns:
1768      The operation to be executed.
1769    """
1770    cluster = self.CreateClusterCommon(cluster_ref, options)
1771    if options.enable_autoprovisioning is not None:
1772      cluster.autoscaling = self.CreateClusterAutoscalingCommon(
1773          cluster_ref, options, False)
1774    if options.addons:
1775      # CloudRun is disabled by default.
1776      if any((v in options.addons) for v in CLOUDRUN_ADDONS):
1777        if not options.enable_stackdriver_kubernetes:
1778          raise util.Error(CLOUDRUN_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG)
1779        if INGRESS not in options.addons:
1780          raise util.Error(CLOUDRUN_INGRESS_KUBERNETES_DISABLED_ERROR_MSG)
1781        load_balancer_type = _GetCloudRunLoadBalancerType(
1782            options, self.messages)
1783        cluster.addonsConfig.cloudRunConfig = self.messages.CloudRunConfig(
1784            disabled=False, loadBalancerType=load_balancer_type)
1785
1786    if options.workload_pool:
1787      cluster.workloadIdentityConfig = self.messages.WorkloadIdentityConfig(
1788          workloadPool=options.workload_pool)
1789
1790    if options.enable_master_global_access is not None:
1791      if not options.enable_private_nodes:
1792        raise util.Error(
1793            PREREQUISITE_OPTION_ERROR_MSG.format(
1794                prerequisite='enable-private-nodes',
1795                opt='enable-master-global-access'))
1796      cluster.privateClusterConfig.masterGlobalAccessConfig = \
1797          self.messages.PrivateClusterMasterGlobalAccessConfig(
1798              enabled=options.enable_master_global_access)
1799
1800    req = self.messages.CreateClusterRequest(
1801        parent=ProjectLocation(cluster_ref.projectId, cluster_ref.zone),
1802        cluster=cluster)
1803    operation = self.client.projects_locations_clusters.Create(req)
1804    return self.ParseOperation(operation.name, cluster_ref.zone)
1805
1806  def CreateClusterAutoscalingCommon(self, cluster_ref, options, for_update):
1807    """Create cluster's autoscaling configuration.
1808
1809    Args:
1810      cluster_ref: Cluster reference.
1811      options: Either CreateClusterOptions or UpdateClusterOptions.
1812      for_update: Is function executed for update operation.
1813
1814    Returns:
1815      Cluster's autoscaling configuration.
1816    """
1817    del cluster_ref  # Unused in GA.
1818
1819    autoscaling = self.messages.ClusterAutoscaling()
1820    autoscaling.enableNodeAutoprovisioning = options.enable_autoprovisioning
1821
1822    resource_limits = []
1823    if options.autoprovisioning_config_file is not None:
1824      # Create using config file only.
1825      config = yaml.load(options.autoprovisioning_config_file)
1826      resource_limits = config.get(RESOURCE_LIMITS)
1827      service_account = config.get(SERVICE_ACCOUNT)
1828      scopes = config.get(SCOPES)
1829      max_surge_upgrade = None
1830      max_unavailable_upgrade = None
1831      upgrade_settings = config.get(UPGRADE_SETTINGS)
1832      if upgrade_settings:
1833        max_surge_upgrade = upgrade_settings.get(MAX_SURGE_UPGRADE)
1834        max_unavailable_upgrade = upgrade_settings.get(MAX_UNAVAILABLE_UPGRADE)
1835      management_settings = config.get(NODE_MANAGEMENT)
1836      enable_autoupgrade = None
1837      enable_autorepair = None
1838      if management_settings:
1839        enable_autoupgrade = management_settings.get(ENABLE_AUTO_UPGRADE)
1840        enable_autorepair = management_settings.get(ENABLE_AUTO_REPAIR)
1841      autoprovisioning_locations = \
1842          config.get(AUTOPROVISIONING_LOCATIONS)
1843      min_cpu_platform = config.get(MIN_CPU_PLATFORM)
1844      boot_disk_kms_key = config.get(BOOT_DISK_KMS_KEY)
1845      disk_type = config.get(DISK_TYPE)
1846      disk_size_gb = config.get(DISK_SIZE_GB)
1847      shielded_instance_config = config.get(SHIELDED_INSTANCE_CONFIG)
1848      enable_secure_boot = None
1849      enable_integrity_monitoring = None
1850      if shielded_instance_config:
1851        enable_secure_boot = shielded_instance_config.get(ENABLE_SECURE_BOOT)
1852        enable_integrity_monitoring = \
1853            shielded_instance_config.get(ENABLE_INTEGRITY_MONITORING)
1854    else:
1855      resource_limits = self.ResourceLimitsFromFlags(options)
1856      service_account = options.autoprovisioning_service_account
1857      scopes = options.autoprovisioning_scopes
1858      max_surge_upgrade = options.autoprovisioning_max_surge_upgrade
1859      max_unavailable_upgrade = options.autoprovisioning_max_unavailable_upgrade
1860      enable_autoupgrade = options.enable_autoprovisioning_autoupgrade
1861      enable_autorepair = options.enable_autoprovisioning_autorepair
1862      autoprovisioning_locations = options.autoprovisioning_locations
1863      min_cpu_platform = options.autoprovisioning_min_cpu_platform
1864      boot_disk_kms_key = None
1865      disk_type = None
1866      disk_size_gb = None
1867      enable_secure_boot = None
1868      enable_integrity_monitoring = None
1869
1870    if options.enable_autoprovisioning is not None:
1871      autoscaling.enableNodeAutoprovisioning = options.enable_autoprovisioning
1872      if resource_limits is None:
1873        resource_limits = []
1874      autoscaling.resourceLimits = resource_limits
1875      if scopes is None:
1876        scopes = []
1877      management = None
1878      upgrade_settings = None
1879      if max_surge_upgrade is not None or max_unavailable_upgrade is not None:
1880        upgrade_settings = self.messages.UpgradeSettings()
1881        upgrade_settings.maxUnavailable = max_unavailable_upgrade
1882        upgrade_settings.maxSurge = max_surge_upgrade
1883      if enable_autorepair is not None or enable_autoupgrade is not None:
1884        management = (
1885            self.messages.NodeManagement(
1886                autoUpgrade=enable_autoupgrade, autoRepair=enable_autorepair))
1887      shielded_instance_config = None
1888      if (enable_secure_boot is not None or
1889          enable_integrity_monitoring is not None):
1890        shielded_instance_config = self.messages.ShieldedInstanceConfig()
1891        shielded_instance_config.enableSecureBoot = enable_secure_boot
1892        shielded_instance_config.enableIntegrityMonitoring = \
1893            enable_integrity_monitoring
1894      autoscaling.autoprovisioningNodePoolDefaults = self.messages \
1895        .AutoprovisioningNodePoolDefaults(serviceAccount=service_account,
1896                                          oauthScopes=scopes,
1897                                          upgradeSettings=upgrade_settings,
1898                                          management=management,
1899                                          minCpuPlatform=min_cpu_platform,
1900                                          bootDiskKmsKey=boot_disk_kms_key,
1901                                          diskSizeGb=disk_size_gb,
1902                                          diskType=disk_type,
1903                                          shieldedInstanceConfig=
1904                                          shielded_instance_config)
1905      if autoprovisioning_locations:
1906        autoscaling.autoprovisioningLocations = \
1907            sorted(autoprovisioning_locations)
1908
1909    self.ValidateClusterAutoscaling(autoscaling, for_update)
1910    return autoscaling
1911
1912  def ValidateClusterAutoscaling(self, autoscaling, for_update):
1913    """Validate cluster autoscaling configuration.
1914
1915    Args:
1916      autoscaling: autoscaling configuration to be validated.
1917      for_update: Is function executed for update operation.
1918
1919    Raises:
1920      Error if the new configuration is invalid.
1921    """
1922    if autoscaling.enableNodeAutoprovisioning:
1923      if not for_update or autoscaling.resourceLimits:
1924        cpu_found = any(
1925            limit.resourceType == 'cpu' for limit in autoscaling.resourceLimits)
1926        mem_found = any(limit.resourceType == 'memory'
1927                        for limit in autoscaling.resourceLimits)
1928        if not cpu_found or not mem_found:
1929          raise util.Error(NO_AUTOPROVISIONING_LIMITS_ERROR_MSG)
1930        defaults = autoscaling.autoprovisioningNodePoolDefaults
1931        if defaults:
1932          if defaults.upgradeSettings:
1933            max_surge_found = defaults.upgradeSettings.maxSurge is not None
1934            max_unavailable_found = defaults.upgradeSettings.maxUnavailable is not None
1935            if max_unavailable_found != max_surge_found:
1936              raise util.Error(BOTH_AUTOPROVISIONING_UPGRADE_SETTINGS_ERROR_MSG)
1937          if defaults.management:
1938            auto_upgrade_found = defaults.management.autoUpgrade is not None
1939            auto_repair_found = defaults.management.autoRepair is not None
1940            if auto_repair_found != auto_upgrade_found:
1941              raise util.Error(
1942                  BOTH_AUTOPROVISIONING_MANAGEMENT_SETTINGS_ERROR_MSG)
1943          if defaults.shieldedInstanceConfig:
1944            secure_boot_found = defaults.shieldedInstanceConfig.enableSecureBoot is not None
1945            integrity_monitoring_found = defaults.shieldedInstanceConfig.enableIntegrityMonitoring is not None
1946            if secure_boot_found != integrity_monitoring_found:
1947              raise util.Error(
1948                  BOTH_AUTOPROVISIONING_SHIELDED_INSTANCE_SETTINGS_ERROR_MSG)
1949    elif autoscaling.resourceLimits:
1950      raise util.Error(LIMITS_WITHOUT_AUTOPROVISIONING_MSG)
1951    elif autoscaling.autoprovisioningNodePoolDefaults and \
1952        (autoscaling.autoprovisioningNodePoolDefaults.serviceAccount or
1953         autoscaling.autoprovisioningNodePoolDefaults.oauthScopes):
1954      raise util.Error(DEFAULTS_WITHOUT_AUTOPROVISIONING_MSG)
1955
1956  def _GetClusterTelemetryType(self, options, logging_service,
1957                               monitoring_service):
1958    """Gets the cluster telemetry from create options."""
1959    # If enable_stackdriver_kubernetes is set to false cluster telemetry
1960    # will be set to DISABLED.
1961    if (options.enable_stackdriver_kubernetes is not None and
1962        not options.enable_stackdriver_kubernetes):
1963      return self.messages.ClusterTelemetry.TypeValueValuesEnum.DISABLED
1964
1965    # If either logging service or monitoring service is explicitly disabled we
1966    # do not set the cluster telemtry.
1967    if (options.enable_stackdriver_kubernetes and
1968        (logging_service == 'none' or monitoring_service == 'none')):
1969      return None
1970
1971    # When enable_stackdriver_kubernetes is set to true and neither logging nor
1972    # monitoring service are explicitly disabled we set the Cluster Telemetry
1973    # to ENABLED.
1974    if (options.enable_stackdriver_kubernetes and logging_service != 'none' and
1975        monitoring_service != 'none'):
1976      return self.messages.ClusterTelemetry.TypeValueValuesEnum.ENABLED
1977
1978    # When enable_logging_monitoring_system_only is set to true we set the
1979    # telemetry to SYSTEM_ONLY. In case of SYSTEM_ONLY it's not possible to
1980    # disable either logging or monitoring.
1981    if options.enable_logging_monitoring_system_only:
1982      return self.messages.ClusterTelemetry.TypeValueValuesEnum.SYSTEM_ONLY
1983    return None
1984
1985  def ResourceLimitsFromFlags(self, options):
1986    """Create cluster's autoscaling resource limits from command line flags.
1987
1988    Args:
1989      options: Either CreateClusterOptions or UpdateClusterOptions.
1990
1991    Returns:
1992      Cluster's new autoscaling resource limits.
1993    """
1994    new_resource_limits = []
1995    if options.min_cpu is not None or options.max_cpu is not None:
1996      new_resource_limits.append(
1997          self.messages.ResourceLimit(
1998              resourceType='cpu',
1999              minimum=options.min_cpu,
2000              maximum=options.max_cpu))
2001    if options.min_memory is not None or options.max_memory is not None:
2002      new_resource_limits.append(
2003          self.messages.ResourceLimit(
2004              resourceType='memory',
2005              minimum=options.min_memory,
2006              maximum=options.max_memory))
2007    if options.max_accelerator is not None:
2008      accelerator_type = options.max_accelerator.get('type')
2009      min_count = 0
2010      if options.min_accelerator is not None:
2011        if options.min_accelerator.get('type') != accelerator_type:
2012          raise util.Error(MISMATCH_ACCELERATOR_TYPE_LIMITS_ERROR_MSG)
2013        min_count = options.min_accelerator.get('count', 0)
2014      new_resource_limits.append(
2015          self.messages.ResourceLimit(
2016              resourceType=options.max_accelerator.get('type'),
2017              minimum=min_count,
2018              maximum=options.max_accelerator.get('count', 0)))
2019    return new_resource_limits
2020
2021  def UpdateClusterCommon(self, cluster_ref, options):
2022    """Returns an UpdateCluster operation."""
2023
2024    update = None
2025    if not options.version:
2026      options.version = '-'
2027    if options.update_nodes:
2028      update = self.messages.ClusterUpdate(
2029          desiredNodeVersion=options.version,
2030          desiredNodePoolId=options.node_pool,
2031          desiredImageType=options.image_type,
2032          desiredImage=options.image,
2033          desiredImageProject=options.image_project)
2034      # security_profile may be set in upgrade command
2035      if options.security_profile is not None:
2036        update.securityProfile = self.messages.SecurityProfile(
2037            name=options.security_profile)
2038    elif options.update_master:
2039      update = self.messages.ClusterUpdate(desiredMasterVersion=options.version)
2040      # security_profile may be set in upgrade command
2041      if options.security_profile is not None:
2042        update.securityProfile = self.messages.SecurityProfile(
2043            name=options.security_profile)
2044    elif options.enable_stackdriver_kubernetes:
2045      update = self.messages.ClusterUpdate()
2046      update.desiredLoggingService = 'logging.googleapis.com/kubernetes'
2047      update.desiredMonitoringService = 'monitoring.googleapis.com/kubernetes'
2048    elif options.enable_stackdriver_kubernetes is not None:
2049      update = self.messages.ClusterUpdate()
2050      update.desiredLoggingService = 'none'
2051      update.desiredMonitoringService = 'none'
2052    elif options.monitoring_service or options.logging_service:
2053      update = self.messages.ClusterUpdate()
2054      if options.monitoring_service:
2055        update.desiredMonitoringService = options.monitoring_service
2056      if options.logging_service:
2057        update.desiredLoggingService = options.logging_service
2058    elif options.disable_addons:
2059      disable_node_local_dns = options.disable_addons.get(NODELOCALDNS)
2060      addons = self._AddonsConfig(
2061          disable_ingress=options.disable_addons.get(INGRESS),
2062          disable_hpa=options.disable_addons.get(HPA),
2063          disable_dashboard=options.disable_addons.get(DASHBOARD),
2064          disable_network_policy=options.disable_addons.get(NETWORK_POLICY),
2065          enable_node_local_dns=not disable_node_local_dns if \
2066             disable_node_local_dns is not None else None)
2067      if options.disable_addons.get(CONFIGCONNECTOR) is not None:
2068        addons.configConnectorConfig = (
2069            self.messages.ConfigConnectorConfig(
2070                enabled=(not options.disable_addons.get(CONFIGCONNECTOR))))
2071      if options.disable_addons.get(GCEPDCSIDRIVER) is not None:
2072        addons.gcePersistentDiskCsiDriverConfig = (
2073            self.messages.GcePersistentDiskCsiDriverConfig(
2074                enabled=not options.disable_addons.get(GCEPDCSIDRIVER)))
2075      update = self.messages.ClusterUpdate(desiredAddonsConfig=addons)
2076    elif options.enable_autoscaling is not None:
2077      # For update, we can either enable or disable.
2078      autoscaling = self.messages.NodePoolAutoscaling(
2079          enabled=options.enable_autoscaling)
2080      if options.enable_autoscaling:
2081        autoscaling.minNodeCount = options.min_nodes
2082        autoscaling.maxNodeCount = options.max_nodes
2083      update = self.messages.ClusterUpdate(
2084          desiredNodePoolId=options.node_pool,
2085          desiredNodePoolAutoscaling=autoscaling)
2086    elif options.locations:
2087      update = self.messages.ClusterUpdate(desiredLocations=options.locations)
2088    elif options.enable_master_authorized_networks is not None:
2089      # For update, we can either enable or disable.
2090      authorized_networks = self.messages.MasterAuthorizedNetworksConfig(
2091          enabled=options.enable_master_authorized_networks)
2092      if options.master_authorized_networks:
2093        for network in options.master_authorized_networks:
2094          authorized_networks.cidrBlocks.append(
2095              self.messages.CidrBlock(cidrBlock=network))
2096      update = self.messages.ClusterUpdate(
2097          desiredMasterAuthorizedNetworksConfig=authorized_networks)
2098    elif options.enable_autoprovisioning is not None or \
2099         options.autoscaling_profile is not None:
2100      autoscaling = self.CreateClusterAutoscalingCommon(cluster_ref, options,
2101                                                        True)
2102      update = self.messages.ClusterUpdate(
2103          desiredClusterAutoscaling=autoscaling)
2104    elif options.enable_pod_security_policy is not None:
2105      config = self.messages.PodSecurityPolicyConfig(
2106          enabled=options.enable_pod_security_policy)
2107      update = self.messages.ClusterUpdate(
2108          desiredPodSecurityPolicyConfig=config)
2109    elif options.enable_binauthz is not None:
2110      binary_authorization = self.messages.BinaryAuthorization(
2111          enabled=options.enable_binauthz)
2112      update = self.messages.ClusterUpdate(
2113          desiredBinaryAuthorization=binary_authorization)
2114    elif options.enable_vertical_pod_autoscaling is not None:
2115      vertical_pod_autoscaling = self.messages.VerticalPodAutoscaling(
2116          enabled=options.enable_vertical_pod_autoscaling)
2117      update = self.messages.ClusterUpdate(
2118          desiredVerticalPodAutoscaling=vertical_pod_autoscaling)
2119    elif options.resource_usage_bigquery_dataset is not None:
2120      export_config = self.messages.ResourceUsageExportConfig(
2121          bigqueryDestination=self.messages.BigQueryDestination(
2122              datasetId=options.resource_usage_bigquery_dataset))
2123      if options.enable_network_egress_metering:
2124        export_config.enableNetworkEgressMetering = True
2125      if options.enable_resource_consumption_metering is not None:
2126        export_config.consumptionMeteringConfig = \
2127            self.messages.ConsumptionMeteringConfig(
2128                enabled=options.enable_resource_consumption_metering)
2129      update = self.messages.ClusterUpdate(
2130          desiredResourceUsageExportConfig=export_config)
2131    elif options.enable_network_egress_metering is not None:
2132      raise util.Error(ENABLE_NETWORK_EGRESS_METERING_ERROR_MSG)
2133    elif options.enable_resource_consumption_metering is not None:
2134      raise util.Error(ENABLE_RESOURCE_CONSUMPTION_METERING_ERROR_MSG)
2135    elif options.clear_resource_usage_bigquery_dataset is not None:
2136      export_config = self.messages.ResourceUsageExportConfig()
2137      update = self.messages.ClusterUpdate(
2138          desiredResourceUsageExportConfig=export_config)
2139    elif options.security_profile is not None:
2140      # security_profile is set in update command
2141      security_profile = self.messages.SecurityProfile(
2142          name=options.security_profile)
2143      update = self.messages.ClusterUpdate(securityProfile=security_profile)
2144    elif options.enable_intra_node_visibility is not None:
2145      intra_node_visibility_config = self.messages.IntraNodeVisibilityConfig(
2146          enabled=options.enable_intra_node_visibility)
2147      update = self.messages.ClusterUpdate(
2148          desiredIntraNodeVisibilityConfig=intra_node_visibility_config)
2149    elif options.enable_master_global_access is not None:
2150      # For update, we can either enable or disable.
2151      master_global_access_config = self.messages.PrivateClusterMasterGlobalAccessConfig(
2152          enabled=options.enable_master_global_access)
2153      private_cluster_config = self.messages.PrivateClusterConfig(
2154          masterGlobalAccessConfig=master_global_access_config)
2155      update = self.messages.ClusterUpdate(
2156          desiredPrivateClusterConfig=private_cluster_config)
2157
2158    if (options.security_profile is not None and
2159        options.security_profile_runtime_rules is not None):
2160      update.securityProfile.disableRuntimeRules = \
2161          not options.security_profile_runtime_rules
2162    if (options.master_authorized_networks and
2163        not options.enable_master_authorized_networks):
2164      # Raise error if use --master-authorized-networks without
2165      # --enable-master-authorized-networks.
2166      raise util.Error(MISMATCH_AUTHORIZED_NETWORKS_ERROR_MSG)
2167
2168    if options.database_encryption_key:
2169      update = self.messages.ClusterUpdate(
2170          desiredDatabaseEncryption=self.messages.DatabaseEncryption(
2171              keyName=options.database_encryption_key,
2172              state=self.messages.DatabaseEncryption.StateValueValuesEnum
2173              .ENCRYPTED))
2174
2175    elif options.disable_database_encryption:
2176      update = self.messages.ClusterUpdate(
2177          desiredDatabaseEncryption=self.messages.DatabaseEncryption(
2178              state=self.messages.DatabaseEncryption.StateValueValuesEnum
2179              .DECRYPTED))
2180
2181    if options.enable_shielded_nodes is not None:
2182      update = self.messages.ClusterUpdate(
2183          desiredShieldedNodes=self.messages.ShieldedNodes(
2184              enabled=options.enable_shielded_nodes))
2185    if options.enable_tpu is not None:
2186      update = self.messages.ClusterUpdate(
2187          desiredTpuConfig=_GetTpuConfigForClusterUpdate(
2188              options, self.messages))
2189    if options.enable_gvnic is not None:
2190      update = self.messages.ClusterUpdate(
2191          desiredEnableGvnic=options.enable_gvnic)
2192
2193    if options.release_channel is not None:
2194      update = self.messages.ClusterUpdate(
2195          desiredReleaseChannel=_GetReleaseChannel(
2196              options, self.messages))
2197
2198    if options.disable_default_snat is not None:
2199      disable_default_snat = self.messages.DefaultSnatStatus(
2200          disabled=options.disable_default_snat)
2201      update = self.messages.ClusterUpdate(
2202          desiredDefaultSnatStatus=disable_default_snat)
2203    if options.enable_l4_ilb_subsetting is not None:
2204      ilb_subsettting_config = self.messages.ILBSubsettingConfig(
2205          enabled=options.enable_l4_ilb_subsetting)
2206      update = self.messages.ClusterUpdate(
2207          desiredL4ilbSubsettingConfig=ilb_subsettting_config)
2208    if options.private_ipv6_google_access_type is not None:
2209      update = self.messages.ClusterUpdate(
2210          desiredPrivateIpv6GoogleAccess=util
2211          .GetPrivateIpv6GoogleAccessTypeMapperForUpdate(
2212              self.messages, hidden=False).GetEnumForChoice(
2213                  options.private_ipv6_google_access_type))
2214
2215    dns_config = self.ParseClusterDNSOptions(options)
2216    if dns_config is not None:
2217      update = self.messages.ClusterUpdate(
2218          desiredDnsConfig=dns_config)
2219
2220    if options.notification_config is not None:
2221      update = self.messages.ClusterUpdate(
2222          desiredNotificationConfig=_GetNotificationConfigForClusterUpdate(
2223              options, self.messages))
2224
2225    return update
2226
2227  def UpdateCluster(self, cluster_ref, options):
2228    """Handles UpdateCluster options that are specific to a release track.
2229
2230    Overridden in each release track.
2231
2232    Args:
2233      cluster_ref: Name and location of the cluster.
2234      options: An UpdateClusterOptions containining the user-specified options.
2235
2236    Returns:
2237      The operation to be executed.
2238    """
2239
2240    update = self.UpdateClusterCommon(cluster_ref, options)
2241
2242    if options.workload_pool:
2243      update = self.messages.ClusterUpdate(
2244          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
2245              workloadPool=options.workload_pool))
2246    elif options.disable_workload_identity:
2247      update = self.messages.ClusterUpdate(
2248          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
2249              workloadPool=''))
2250
2251    if not update:
2252      # if reached here, it's possible:
2253      # - someone added update flags but not handled
2254      # - none of the update flags specified from command line
2255      # so raise an error with readable message like:
2256      #   Nothing to update
2257      # to catch this error.
2258      raise util.Error(NOTHING_TO_UPDATE_ERROR_MSG)
2259
2260    if options.disable_addons is not None:
2261      if any(
2262          (options.disable_addons.get(v) is not None) for v in CLOUDRUN_ADDONS):
2263        load_balancer_type = _GetCloudRunLoadBalancerType(
2264            options, self.messages)
2265        update.desiredAddonsConfig.cloudRunConfig = (
2266            self.messages.CloudRunConfig(
2267                disabled=any(
2268                    options.disable_addons.get(v) or False
2269                    for v in CLOUDRUN_ADDONS),
2270                loadBalancerType=load_balancer_type))
2271
2272    op = self.client.projects_locations_clusters.Update(
2273        self.messages.UpdateClusterRequest(
2274            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
2275                                        cluster_ref.clusterId),
2276            update=update))
2277
2278    return self.ParseOperation(op.name, cluster_ref.zone)
2279
2280  def SetLoggingService(self, cluster_ref, logging_service):
2281    op = self.client.projects_locations_clusters.SetLogging(
2282        self.messages.SetLoggingServiceRequest(
2283            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
2284                                        cluster_ref.clusterId),
2285            loggingService=logging_service))
2286    return self.ParseOperation(op.name, cluster_ref.zone)
2287
2288  def SetLegacyAuthorization(self, cluster_ref, enable_legacy_authorization):
2289    op = self.client.projects_locations_clusters.SetLegacyAbac(
2290        self.messages.SetLegacyAbacRequest(
2291            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
2292                                        cluster_ref.clusterId),
2293            enabled=bool(enable_legacy_authorization)))
2294    return self.ParseOperation(op.name, cluster_ref.zone)
2295
2296  def _AddonsConfig(self,
2297                    disable_ingress=None,
2298                    disable_hpa=None,
2299                    disable_dashboard=None,
2300                    disable_network_policy=None,
2301                    enable_node_local_dns=None,
2302                    enable_gcepd_csi_driver=None,
2303                    enable_application_manager=None,
2304                    enable_cloud_build=None):
2305    """Generates an AddonsConfig object given specific parameters.
2306
2307    Args:
2308      disable_ingress: whether to disable the GCLB ingress controller.
2309      disable_hpa: whether to disable the horizontal pod autoscaling controller.
2310      disable_dashboard: whether to disable the Kuberntes Dashboard.
2311      disable_network_policy: whether to disable NetworkPolicy enforcement.
2312      enable_node_local_dns: whether to enable NodeLocalDNS cache.
2313      enable_gcepd_csi_driver: whether to enable GcePersistentDiskCsiDriver.
2314      enable_application_manager: whether to enable ApplicationManager.
2315      enable_cloud_build: whether to enable CloudBuild.
2316
2317    Returns:
2318      An AddonsConfig object that contains the options defining what addons to
2319      run in the cluster.
2320    """
2321    addons = self.messages.AddonsConfig()
2322    if disable_ingress is not None:
2323      addons.httpLoadBalancing = self.messages.HttpLoadBalancing(
2324          disabled=disable_ingress)
2325    if disable_hpa is not None:
2326      addons.horizontalPodAutoscaling = self.messages.HorizontalPodAutoscaling(
2327          disabled=disable_hpa)
2328    if disable_dashboard is not None:
2329      addons.kubernetesDashboard = self.messages.KubernetesDashboard(
2330          disabled=disable_dashboard)
2331    # Network policy is disabled by default.
2332    if disable_network_policy is not None:
2333      addons.networkPolicyConfig = self.messages.NetworkPolicyConfig(
2334          disabled=disable_network_policy)
2335    if enable_node_local_dns is not None:
2336      addons.dnsCacheConfig = self.messages.DnsCacheConfig(
2337          enabled=enable_node_local_dns)
2338    if enable_gcepd_csi_driver:
2339      addons.gcePersistentDiskCsiDriverConfig = self.messages.GcePersistentDiskCsiDriverConfig(
2340          enabled=True)
2341    if enable_application_manager:
2342      addons.kalmConfig = self.messages.KalmConfig(enabled=True)
2343    if enable_cloud_build:
2344      addons.cloudBuildConfig = self.messages.CloudBuildConfig(enabled=True)
2345
2346    return addons
2347
2348  def _AddLocalSSDVolumeConfigsToNodeConfig(self, node_config, options):
2349    """Add LocalSSDVolumeConfigs to nodeConfig."""
2350    if not options.local_ssd_volume_configs:
2351      return
2352    format_enum = self.messages.LocalSsdVolumeConfig.FormatValueValuesEnum
2353    local_ssd_volume_configs_list = []
2354    for config in options.local_ssd_volume_configs:
2355      count = int(config['count'])
2356      ssd_type = config['type'].lower()
2357      if config['format'].lower() == 'fs':
2358        ssd_format = format_enum.FS
2359      elif config['format'].lower() == 'block':
2360        ssd_format = format_enum.BLOCK
2361      else:
2362        raise util.Error(
2363            LOCAL_SSD_INCORRECT_FORMAT_ERROR_MSG.format(
2364                err_format=config['format']))
2365      local_ssd_volume_configs_list.append(
2366          self.messages.LocalSsdVolumeConfig(
2367              count=count, type=ssd_type, format=ssd_format))
2368    node_config.localSsdVolumeConfigs = local_ssd_volume_configs_list
2369
2370  def _AddEphemeralStorageToNodeConfig(self, node_config, options):
2371    if not options.ephemeral_storage:
2372      return
2373    config = options.ephemeral_storage
2374    node_config.ephemeralStorageConfig = self.messages.EphemeralStorageConfig(
2375        localSsdCount=config['local-ssd-count'])
2376
2377  def _AddNodeTaintsToNodeConfig(self, node_config, options):
2378    """Add nodeTaints to nodeConfig."""
2379    if options.node_taints is None:
2380      return
2381    taints = []
2382    effect_enum = self.messages.NodeTaint.EffectValueValuesEnum
2383    for key, value in sorted(six.iteritems(options.node_taints)):
2384      strs = value.split(':')
2385      if len(strs) != 2:
2386        raise util.Error(
2387            NODE_TAINT_INCORRECT_FORMAT_ERROR_MSG.format(key=key, value=value))
2388      value = strs[0]
2389      taint_effect = strs[1]
2390      if taint_effect == 'NoSchedule':
2391        effect = effect_enum.NO_SCHEDULE
2392      elif taint_effect == 'PreferNoSchedule':
2393        effect = effect_enum.PREFER_NO_SCHEDULE
2394      elif taint_effect == 'NoExecute':
2395        effect = effect_enum.NO_EXECUTE
2396      else:
2397        raise util.Error(
2398            NODE_TAINT_INCORRECT_EFFECT_ERROR_MSG.format(effect=strs[1]))
2399      taints.append(
2400          self.messages.NodeTaint(key=key, value=value, effect=effect))
2401
2402    node_config.taints = taints
2403
2404  def _AddWorkloadMetadataToNodeConfig(self, node_config, options, messages):
2405    """Adds WorkLoadMetadata to NodeConfig."""
2406    if options.workload_metadata is not None:
2407      option = options.workload_metadata
2408      if option == 'GCE_METADATA':
2409        node_config.workloadMetadataConfig = messages.WorkloadMetadataConfig(
2410            mode=messages.WorkloadMetadataConfig.ModeValueValuesEnum
2411            .GCE_METADATA)
2412      elif option == 'GKE_METADATA':
2413        node_config.workloadMetadataConfig = messages.WorkloadMetadataConfig(
2414            mode=messages.WorkloadMetadataConfig.ModeValueValuesEnum
2415            .GKE_METADATA)
2416      else:
2417        raise util.Error(
2418            UNKNOWN_WORKLOAD_METADATA_ERROR_MSG.format(option=option))
2419    elif options.workload_metadata_from_node is not None:
2420      option = options.workload_metadata_from_node
2421      if option == 'GCE_METADATA':
2422        node_config.workloadMetadataConfig = messages.WorkloadMetadataConfig(
2423            mode=messages.WorkloadMetadataConfig.ModeValueValuesEnum
2424            .GCE_METADATA)
2425      elif option == 'GKE_METADATA':
2426        node_config.workloadMetadataConfig = messages.WorkloadMetadataConfig(
2427            mode=messages.WorkloadMetadataConfig.ModeValueValuesEnum
2428            .GKE_METADATA)
2429      # the following options are deprecated
2430      elif option == 'SECURE':
2431        node_config.workloadMetadataConfig = messages.WorkloadMetadataConfig(
2432            nodeMetadata=messages.WorkloadMetadataConfig
2433            .NodeMetadataValueValuesEnum.SECURE)
2434      elif option == 'EXPOSED':
2435        node_config.workloadMetadataConfig = messages.WorkloadMetadataConfig(
2436            nodeMetadata=messages.WorkloadMetadataConfig
2437            .NodeMetadataValueValuesEnum.EXPOSE)
2438      elif option == 'GKE_METADATA_SERVER':
2439        node_config.workloadMetadataConfig = messages.WorkloadMetadataConfig(
2440            nodeMetadata=messages.WorkloadMetadataConfig
2441            .NodeMetadataValueValuesEnum.GKE_METADATA_SERVER)
2442      else:
2443        raise util.Error(
2444            UNKNOWN_WORKLOAD_METADATA_ERROR_MSG.format(option=option))
2445
2446  def SetNetworkPolicyCommon(self, options):
2447    """Returns a SetNetworkPolicy operation."""
2448    return self.messages.NetworkPolicy(
2449        enabled=options.enabled,
2450        # Only Calico is currently supported as a network policy provider.
2451        provider=self.messages.NetworkPolicy.ProviderValueValuesEnum.CALICO)
2452
2453  def SetNetworkPolicy(self, cluster_ref, options):
2454    netpol = self.SetNetworkPolicyCommon(options)
2455    req = self.messages.SetNetworkPolicyRequest(
2456        name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
2457                                    cluster_ref.clusterId),
2458        networkPolicy=netpol)
2459    return self.ParseOperation(
2460        self.client.projects_locations_clusters.SetNetworkPolicy(req).name,
2461        cluster_ref.zone)
2462
2463  def SetMasterAuthCommon(self, options):
2464    """Returns a SetMasterAuth action."""
2465    update = self.messages.MasterAuth(
2466        username=options.username, password=options.password)
2467    if options.action == SetMasterAuthOptions.SET_PASSWORD:
2468      action = (
2469          self.messages.SetMasterAuthRequest.ActionValueValuesEnum.SET_PASSWORD)
2470    elif options.action == SetMasterAuthOptions.GENERATE_PASSWORD:
2471      action = (
2472          self.messages.SetMasterAuthRequest.ActionValueValuesEnum
2473          .GENERATE_PASSWORD)
2474    else:  # options.action == SetMasterAuthOptions.SET_USERNAME
2475      action = (
2476          self.messages.SetMasterAuthRequest.ActionValueValuesEnum.SET_USERNAME)
2477    return update, action
2478
2479  def SetMasterAuth(self, cluster_ref, options):
2480    update, action = self.SetMasterAuthCommon(options)
2481    req = self.messages.SetMasterAuthRequest(
2482        name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
2483                                    cluster_ref.clusterId),
2484        action=action,
2485        update=update)
2486    op = self.client.projects_locations_clusters.SetMasterAuth(req)
2487    return self.ParseOperation(op.name, cluster_ref.zone)
2488
2489  def StartIpRotation(self, cluster_ref, rotate_credentials):
2490    operation = self.client.projects_locations_clusters.StartIpRotation(
2491        self.messages.StartIPRotationRequest(
2492            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
2493                                        cluster_ref.clusterId),
2494            rotateCredentials=rotate_credentials))
2495    return self.ParseOperation(operation.name, cluster_ref.zone)
2496
2497  def CompleteIpRotation(self, cluster_ref):
2498    operation = self.client.projects_locations_clusters.CompleteIpRotation(
2499        self.messages.CompleteIPRotationRequest(
2500            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
2501                                        cluster_ref.clusterId)))
2502    return self.ParseOperation(operation.name, cluster_ref.zone)
2503
2504  def _SendMaintenancePolicyRequest(self, cluster_ref, policy):
2505    """Given a policy, sends a SetMaintenancePolicy request and returns the operation."""
2506    req = self.messages.SetMaintenancePolicyRequest(
2507        name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
2508                                    cluster_ref.clusterId),
2509        maintenancePolicy=policy)
2510    operation = self.client.projects_locations_clusters.SetMaintenancePolicy(
2511        req)
2512    return self.ParseOperation(operation.name, cluster_ref.zone)
2513
2514  def SetDailyMaintenanceWindow(self, cluster_ref, existing_policy,
2515                                maintenance_window):
2516    """Sets the daily maintenance window for a cluster."""
2517    # Special behavior for removing the window. This actually removes the
2518    # recurring window too, if set (since anyone using this command if there's
2519    # actually a recurring window probably intends that!).
2520    if maintenance_window == 'None':
2521      daily_window = None
2522    else:
2523      daily_window = self.messages.DailyMaintenanceWindow(
2524          startTime=maintenance_window)
2525
2526    if existing_policy is None:
2527      existing_policy = self.messages.MaintenancePolicy()
2528    if existing_policy.window is None:
2529      existing_policy.window = self.messages.MaintenanceWindow()
2530
2531    # Temporary until in GA:
2532    if hasattr(existing_policy.window, 'recurringWindow'):
2533      existing_policy.window.recurringWindow = None
2534    existing_policy.window.dailyMaintenanceWindow = daily_window
2535
2536    return self._SendMaintenancePolicyRequest(cluster_ref, existing_policy)
2537
2538  def DeleteCluster(self, cluster_ref):
2539    """Delete a running cluster.
2540
2541    Args:
2542      cluster_ref: cluster Resource to describe
2543
2544    Returns:
2545      Cluster message.
2546    Raises:
2547      Error: if cluster cannot be found or caller is missing permissions. Will
2548        attempt to find similar clusters in other zones for a more useful error
2549        if the user has list permissions.
2550    """
2551    try:
2552      operation = self.client.projects_locations_clusters.Delete(
2553          self.messages.ContainerProjectsLocationsClustersDeleteRequest(
2554              name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref
2555                                          .zone, cluster_ref.clusterId)))
2556      return self.ParseOperation(operation.name, cluster_ref.zone)
2557    except apitools_exceptions.HttpNotFoundError as error:
2558      api_error = exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
2559      # Cluster couldn't be found, maybe user got the location wrong?
2560      self.CheckClusterOtherZones(cluster_ref, api_error)
2561
2562  def ListClusters(self, project, location=None):
2563    if not location:
2564      location = '-'
2565    req = self.messages.ContainerProjectsLocationsClustersListRequest(
2566        parent=ProjectLocation(project, location))
2567    return self.client.projects_locations_clusters.List(req)
2568
2569  def CreateNodePoolCommon(self, node_pool_ref, options):
2570    """Returns a CreateNodePool operation."""
2571    node_config = self.messages.NodeConfig()
2572    if options.machine_type:
2573      node_config.machineType = options.machine_type
2574    if options.disk_size_gb:
2575      node_config.diskSizeGb = options.disk_size_gb
2576    if options.disk_type:
2577      node_config.diskType = options.disk_type
2578    if options.image_type:
2579      node_config.imageType = options.image_type
2580
2581    custom_config = self.messages.CustomImageConfig()
2582    if options.image:
2583      custom_config.image = options.image
2584    if options.image_project:
2585      custom_config.imageProject = options.image_project
2586    if options.image_family:
2587      custom_config.imageFamily = options.image_family
2588    if options.image or options.image_project or options.image_family:
2589      node_config.nodeImageConfig = custom_config
2590
2591    NodeIdentityOptionsToNodeConfig(options, node_config)
2592
2593    if options.local_ssd_count:
2594      node_config.localSsdCount = options.local_ssd_count
2595    self._AddLocalSSDVolumeConfigsToNodeConfig(node_config, options)
2596    self._AddEphemeralStorageToNodeConfig(node_config, options)
2597    if options.boot_disk_kms_key:
2598      node_config.bootDiskKmsKey = options.boot_disk_kms_key
2599    if options.tags:
2600      node_config.tags = options.tags
2601    else:
2602      node_config.tags = []
2603
2604    if options.accelerators is not None:
2605      type_name = options.accelerators['type']
2606      # Accelerator count defaults to 1.
2607      count = int(options.accelerators.get('count', 1))
2608      node_config.accelerators = [
2609          self.messages.AcceleratorConfig(
2610              acceleratorType=type_name, acceleratorCount=count)
2611      ]
2612
2613    _AddMetadataToNodeConfig(node_config, options)
2614    _AddNodeLabelsToNodeConfig(node_config, options)
2615    self._AddNodeTaintsToNodeConfig(node_config, options)
2616
2617    if options.preemptible:
2618      node_config.preemptible = options.preemptible
2619
2620    if options.min_cpu_platform is not None:
2621      node_config.minCpuPlatform = options.min_cpu_platform
2622
2623    if options.node_group is not None:
2624      node_config.nodeGroup = options.node_group
2625
2626    if options.enable_gcfs:
2627      gcfs_config = self.messages.GcfsConfig(enabled=options.enable_gcfs)
2628      node_config.gcfsConfig = gcfs_config
2629
2630    self._AddWorkloadMetadataToNodeConfig(node_config, options, self.messages)
2631    _AddLinuxNodeConfigToNodeConfig(node_config, options, self.messages)
2632    _AddShieldedInstanceConfigToNodeConfig(node_config, options, self.messages)
2633    _AddReservationAffinityToNodeConfig(node_config, options, self.messages)
2634    _AddSandboxConfigToNodeConfig(node_config, options, self.messages)
2635
2636    pool = self.messages.NodePool(
2637        name=node_pool_ref.nodePoolId,
2638        initialNodeCount=options.num_nodes,
2639        config=node_config,
2640        version=options.node_version,
2641        management=self._GetNodeManagement(options))
2642
2643    if options.enable_autoscaling:
2644      pool.autoscaling = self.messages.NodePoolAutoscaling(
2645          enabled=options.enable_autoscaling,
2646          minNodeCount=options.min_nodes,
2647          maxNodeCount=options.max_nodes)
2648
2649    if options.max_pods_per_node is not None:
2650      pool.maxPodsConstraint = self.messages.MaxPodsConstraint(
2651          maxPodsPerNode=options.max_pods_per_node)
2652
2653    if (options.max_surge_upgrade is not None or
2654        options.max_unavailable_upgrade is not None):
2655      pool.upgradeSettings = self.messages.UpgradeSettings()
2656      pool.upgradeSettings.maxSurge = options.max_surge_upgrade
2657      pool.upgradeSettings.maxUnavailable = options.max_unavailable_upgrade
2658
2659    if options.node_locations is not None:
2660      pool.locations = sorted(options.node_locations)
2661
2662    if options.system_config_from_file is not None:
2663      util.LoadSystemConfigFromYAML(node_config,
2664                                    options.system_config_from_file,
2665                                    self.messages)
2666
2667    return pool
2668
2669  def CreateNodePool(self, node_pool_ref, options):
2670    """CreateNodePool creates a node pool and returns the operation."""
2671    pool = self.CreateNodePoolCommon(node_pool_ref, options)
2672    if options.enable_autoprovisioning is not None:
2673      pool.autoscaling.autoprovisioned = options.enable_autoprovisioning
2674    req = self.messages.CreateNodePoolRequest(
2675        nodePool=pool,
2676        parent=ProjectLocationCluster(node_pool_ref.projectId,
2677                                      node_pool_ref.zone,
2678                                      node_pool_ref.clusterId))
2679    operation = self.client.projects_locations_clusters_nodePools.Create(req)
2680    return self.ParseOperation(operation.name, node_pool_ref.zone)
2681
2682  def ListNodePools(self, cluster_ref):
2683    req = self.messages.ContainerProjectsLocationsClustersNodePoolsListRequest(
2684        parent=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
2685                                      cluster_ref.clusterId))
2686    return self.client.projects_locations_clusters_nodePools.List(req)
2687
2688  def GetNodePool(self, node_pool_ref):
2689    req = self.messages.ContainerProjectsLocationsClustersNodePoolsGetRequest(
2690        name=ProjectLocationClusterNodePool(
2691            node_pool_ref.projectId, node_pool_ref.zone,
2692            node_pool_ref.clusterId, node_pool_ref.nodePoolId))
2693    return self.client.projects_locations_clusters_nodePools.Get(req)
2694
2695  def UpdateNodePoolNodeManagement(self, node_pool_ref, options):
2696    """Updates node pool's node management configuration.
2697
2698    Args:
2699      node_pool_ref: node pool Resource to update.
2700      options: node pool update options
2701
2702    Returns:
2703      Updated node management configuration.
2704    """
2705    pool = self.GetNodePool(node_pool_ref)
2706    node_management = pool.management
2707    if node_management is None:
2708      node_management = self.messages.NodeManagement()
2709    if options.enable_autorepair is not None:
2710      node_management.autoRepair = options.enable_autorepair
2711    if options.enable_autoupgrade is not None:
2712      node_management.autoUpgrade = options.enable_autoupgrade
2713    return node_management
2714
2715  def UpdateNodePoolAutoscaling(self, node_pool_ref, options):
2716    """Update node pool's autoscaling configuration.
2717
2718    Args:
2719      node_pool_ref: node pool Resource to update.
2720      options: node pool update options
2721
2722    Returns:
2723      Updated autoscaling configuration for the node pool.
2724    """
2725    pool = self.GetNodePool(node_pool_ref)
2726    autoscaling = pool.autoscaling
2727    if autoscaling is None:
2728      autoscaling = self.messages.NodePoolAutoscaling()
2729    if options.enable_autoscaling is not None:
2730      autoscaling.enabled = options.enable_autoscaling
2731      if not autoscaling.enabled:
2732        # clear limits and autoprovisioned when disabling autoscaling
2733        autoscaling.minNodeCount = 0
2734        autoscaling.maxNodeCount = 0
2735        autoscaling.autoprovisioned = False
2736    if options.enable_autoprovisioning is not None:
2737      autoscaling.autoprovisioned = options.enable_autoprovisioning
2738      if autoscaling.autoprovisioned:
2739        # clear min nodes limit when enabling autoprovisioning
2740        autoscaling.minNodeCount = 0
2741    if options.max_nodes is not None:
2742      autoscaling.maxNodeCount = options.max_nodes
2743    if options.min_nodes is not None:
2744      autoscaling.minNodeCount = options.min_nodes
2745    return autoscaling
2746
2747  def UpdateUpgradeSettings(self, node_pool_ref, options):
2748    """Updates node pool's upgrade setting."""
2749    pool = self.GetNodePool(node_pool_ref)
2750    upgrade_settings = pool.upgradeSettings
2751    if upgrade_settings is None:
2752      upgrade_settings = self.messages.UpgradeSettings()
2753    if options.max_surge_upgrade is not None:
2754      upgrade_settings.maxSurge = options.max_surge_upgrade
2755    if options.max_unavailable_upgrade is not None:
2756      upgrade_settings.maxUnavailable = options.max_unavailable_upgrade
2757    return upgrade_settings
2758
2759  def UpdateNodePoolRequest(self, node_pool_ref, options):
2760    """Creates an UpdateNodePoolRequest from the provided options.
2761
2762    Arguments:
2763      node_pool_ref: The node pool to act on.
2764      options: UpdateNodePoolOptions with the user-specified options.
2765
2766    Returns:
2767
2768      An UpdateNodePoolRequest.
2769    """
2770
2771    update_request = self.messages.UpdateNodePoolRequest(
2772        name=ProjectLocationClusterNodePool(
2773            node_pool_ref.projectId,
2774            node_pool_ref.zone,
2775            node_pool_ref.clusterId,
2776            node_pool_ref.nodePoolId,
2777        ))
2778
2779    if options.workload_metadata is not None or options.workload_metadata_from_node is not None:
2780      self._AddWorkloadMetadataToNodeConfig(update_request, options,
2781                                            self.messages)
2782    elif options.node_locations is not None:
2783      update_request.locations = sorted(options.node_locations)
2784    elif (options.max_surge_upgrade is not None or
2785          options.max_unavailable_upgrade is not None):
2786      update_request.upgradeSettings = self.UpdateUpgradeSettings(
2787          node_pool_ref, options)
2788    elif options.system_config_from_file is not None:
2789      node_config = self.messages.NodeConfig()
2790      util.LoadSystemConfigFromYAML(node_config,
2791                                    options.system_config_from_file,
2792                                    self.messages)
2793      update_request.linuxNodeConfig = node_config.linuxNodeConfig
2794      update_request.kubeletConfig = node_config.kubeletConfig
2795    elif options.node_labels is not None:
2796      node_labels = self.messages.NodeLabels()
2797      labels = node_labels.LabelsValue()
2798      props = []
2799      for key, value in six.iteritems(options.node_labels):
2800        props.append(labels.AdditionalProperty(key=key, value=value))
2801      labels.additionalProperties = props
2802      node_labels.labels = labels
2803      update_request.labels = node_labels
2804    elif options.node_taints is not None:
2805      taints = []
2806      effect_enum = self.messages.NodeTaint.EffectValueValuesEnum
2807      for key, value in sorted(six.iteritems(options.node_taints)):
2808        strs = value.split(':')
2809        if len(strs) != 2:
2810          raise util.Error(
2811              NODE_TAINT_INCORRECT_FORMAT_ERROR_MSG.format(
2812                  key=key, value=value))
2813        value = strs[0]
2814        taint_effect = strs[1]
2815        if taint_effect == 'NoSchedule':
2816          effect = effect_enum.NO_SCHEDULE
2817        elif taint_effect == 'PreferNoSchedule':
2818          effect = effect_enum.PREFER_NO_SCHEDULE
2819        elif taint_effect == 'NoExecute':
2820          effect = effect_enum.NO_EXECUTE
2821        else:
2822          raise util.Error(
2823              NODE_TAINT_INCORRECT_EFFECT_ERROR_MSG.format(effect=strs[1]))
2824        taints.append(
2825            self.messages.NodeTaint(key=key, value=value, effect=effect))
2826      node_taints = self.messages.NodeTaints()
2827      node_taints.taints = taints
2828      update_request.taints = node_taints
2829    elif options.tags is not None:
2830      node_tags = self.messages.NetworkTags()
2831      node_tags.tags = options.tags
2832      update_request.tags = node_tags
2833    return update_request
2834
2835  def UpdateNodePool(self, node_pool_ref, options):
2836    """Updates nodePool on a cluster."""
2837    if options.IsAutoscalingUpdate():
2838      autoscaling = self.UpdateNodePoolAutoscaling(node_pool_ref, options)
2839      update = self.messages.ClusterUpdate(
2840          desiredNodePoolId=node_pool_ref.nodePoolId,
2841          desiredNodePoolAutoscaling=autoscaling)
2842      operation = self.client.projects_locations_clusters.Update(
2843          self.messages.UpdateClusterRequest(
2844              name=ProjectLocationCluster(node_pool_ref.projectId,
2845                                          node_pool_ref.zone,
2846                                          node_pool_ref.clusterId),
2847              update=update))
2848      return self.ParseOperation(operation.name, node_pool_ref.zone)
2849    elif options.IsNodePoolManagementUpdate():
2850      management = self.UpdateNodePoolNodeManagement(node_pool_ref, options)
2851      req = (
2852          self.messages.SetNodePoolManagementRequest(
2853              name=ProjectLocationClusterNodePool(node_pool_ref.projectId,
2854                                                  node_pool_ref.zone,
2855                                                  node_pool_ref.clusterId,
2856                                                  node_pool_ref.nodePoolId),
2857              management=management))
2858      operation = (
2859          self.client.projects_locations_clusters_nodePools.SetManagement(req))
2860    elif options.IsUpdateNodePoolRequest():
2861      req = self.UpdateNodePoolRequest(node_pool_ref, options)
2862      operation = self.client.projects_locations_clusters_nodePools.Update(req)
2863    else:
2864      raise util.Error('Unhandled node pool update mode')
2865
2866    return self.ParseOperation(operation.name, node_pool_ref.zone)
2867
2868  def DeleteNodePool(self, node_pool_ref):
2869    operation = self.client.projects_locations_clusters_nodePools.Delete(
2870        self.messages.ContainerProjectsLocationsClustersNodePoolsDeleteRequest(
2871            name=ProjectLocationClusterNodePool(
2872                node_pool_ref.projectId, node_pool_ref.zone,
2873                node_pool_ref.clusterId, node_pool_ref.nodePoolId)))
2874    return self.ParseOperation(operation.name, node_pool_ref.zone)
2875
2876  def RollbackUpgrade(self, node_pool_ref):
2877    operation = self.client.projects_locations_clusters_nodePools.Rollback(
2878        self.messages.RollbackNodePoolUpgradeRequest(
2879            name=ProjectLocationClusterNodePool(
2880                node_pool_ref.projectId, node_pool_ref.zone,
2881                node_pool_ref.clusterId, node_pool_ref.nodePoolId)))
2882    return self.ParseOperation(operation.name, node_pool_ref.zone)
2883
2884  def CancelOperation(self, op_ref):
2885    req = self.messages.CancelOperationRequest(
2886        name=ProjectLocationOperation(op_ref.projectId, op_ref.zone,
2887                                      op_ref.operationId))
2888    return self.client.projects_locations_operations.Cancel(req)
2889
2890  def IsRunning(self, cluster):
2891    return (
2892        cluster.status == self.messages.Cluster.StatusValueValuesEnum.RUNNING)
2893
2894  def IsDegraded(self, cluster):
2895    return (
2896        cluster.status == self.messages.Cluster.StatusValueValuesEnum.DEGRADED)
2897
2898  def GetDegradedWarning(self, cluster):
2899    if cluster.conditions:
2900      codes = [condition.code for condition in cluster.conditions]
2901      messages = [condition.message for condition in cluster.conditions]
2902      return ('Codes: {0}\n' 'Messages: {1}.').format(codes, messages)
2903    else:
2904      return gke_constants.DEFAULT_DEGRADED_WARNING
2905
2906  def GetOperationError(self, operation):
2907    return operation.statusMessage
2908
2909  def ListOperations(self, project, location=None):
2910    if not location:
2911      location = '-'
2912    req = self.messages.ContainerProjectsLocationsOperationsListRequest(
2913        parent=ProjectLocation(project, location))
2914    return self.client.projects_locations_operations.List(req)
2915
2916  def IsOperationFinished(self, operation):
2917    return (
2918        operation.status == self.messages.Operation.StatusValueValuesEnum.DONE)
2919
2920  def GetServerConfig(self, project, location):
2921    req = self.messages.ContainerProjectsLocationsGetServerConfigRequest(
2922        name=ProjectLocation(project, location))
2923    return self.client.projects_locations.GetServerConfig(req)
2924
2925  def ResizeNodePool(self, cluster_ref, pool_name, size):
2926    req = self.messages.SetNodePoolSizeRequest(
2927        name=ProjectLocationClusterNodePool(cluster_ref.projectId,
2928                                            cluster_ref.zone,
2929                                            cluster_ref.clusterId, pool_name),
2930        nodeCount=size)
2931    operation = self.client.projects_locations_clusters_nodePools.SetSize(req)
2932    return self.ParseOperation(operation.name, cluster_ref.zone)
2933
2934  def _GetNodeManagement(self, options):
2935    """Gets a wrapper containing the options for how nodes are managed.
2936
2937    Args:
2938      options: node management options
2939
2940    Returns:
2941      A NodeManagement object that contains the options indicating how nodes
2942      are managed. This is currently quite simple, containing only two options.
2943      However, there are more options planned for node management.
2944    """
2945    if options.enable_autorepair is None and options.enable_autoupgrade is None:
2946      return None
2947
2948    node_management = self.messages.NodeManagement()
2949    node_management.autoRepair = options.enable_autorepair
2950    node_management.autoUpgrade = options.enable_autoupgrade
2951    return node_management
2952
2953  def _GetNetworkConfig(self, options):
2954    """Gets a wrapper containing the network config for the node pool.
2955
2956    Args:
2957      options: Network config options
2958
2959    Returns:
2960      A NetworkConfig object that contains the options for how the network
2961      for the nodepool needs to be configured.
2962    """
2963    if (options.pod_ipv4_range is None
2964        and options.create_pod_ipv4_range is None):
2965      return None
2966
2967    network_config = self.messages.NodeNetworkConfig()
2968    network_config.podRange = options.pod_ipv4_range
2969    if options.create_pod_ipv4_range is not None:
2970      for key in options.create_pod_ipv4_range:
2971        if key not in ['name', 'range']:
2972          raise util.Error(
2973              CREATE_POD_RANGE_INVALID_KEY_ERROR_MSG.format(key=key))
2974      network_config.createPodRange = True
2975      network_config.podRange = options.create_pod_ipv4_range.get('name', None)
2976      network_config.podIpv4CidrBlock = options.create_pod_ipv4_range.get(
2977          'range', None)
2978    return network_config
2979
2980  def UpdateLabelsCommon(self, cluster_ref, update_labels):
2981    """Update labels on a cluster.
2982
2983    Args:
2984      cluster_ref: cluster to update.
2985      update_labels: labels to set.
2986
2987    Returns:
2988      Operation ref for label set operation.
2989    """
2990    clus = None
2991    try:
2992      clus = self.GetCluster(cluster_ref)
2993    except apitools_exceptions.HttpNotFoundError:
2994      pass
2995    except apitools_exceptions.HttpError as error:
2996      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
2997
2998    labels = self.messages.SetLabelsRequest.ResourceLabelsValue()
2999    props = []
3000    for k, v in sorted(six.iteritems(update_labels)):
3001      props.append(labels.AdditionalProperty(key=k, value=v))
3002    labels.additionalProperties = props
3003    return labels, clus.labelFingerprint
3004
3005  def UpdateLabels(self, cluster_ref, update_labels):
3006    """Updates labels for a cluster."""
3007    labels, fingerprint = self.UpdateLabelsCommon(cluster_ref, update_labels)
3008    operation = self.client.projects_locations_clusters.SetResourceLabels(
3009        self.messages.SetLabelsRequest(
3010            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
3011                                        cluster_ref.clusterId),
3012            resourceLabels=labels,
3013            labelFingerprint=fingerprint))
3014    return self.ParseOperation(operation.name, cluster_ref.zone)
3015
3016  def RemoveLabelsCommon(self, cluster_ref, remove_labels):
3017    """Removes labels from a cluster.
3018
3019    Args:
3020      cluster_ref: cluster to update.
3021      remove_labels: labels to remove.
3022
3023    Returns:
3024      Operation ref for label set operation.
3025    """
3026    clus = None
3027    try:
3028      clus = self.GetCluster(cluster_ref)
3029    except apitools_exceptions.HttpNotFoundError:
3030      pass
3031    except apitools_exceptions.HttpError as error:
3032      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
3033
3034    clus_labels = {}
3035    if clus.resourceLabels:
3036      for item in clus.resourceLabels.additionalProperties:
3037        clus_labels[item.key] = str(item.value)
3038
3039    # if clusLabels empty, nothing to do
3040    if not clus_labels:
3041      raise util.Error(NO_LABELS_ON_CLUSTER_ERROR_MSG.format(cluster=clus.name))
3042
3043    for k in remove_labels:
3044      try:
3045        clus_labels.pop(k)
3046      except KeyError as error:
3047        # if at least one label not found on cluster, raise error
3048        raise util.Error(
3049            NO_SUCH_LABEL_ERROR_MSG.format(cluster=clus.name, name=k))
3050
3051    labels = self.messages.SetLabelsRequest.ResourceLabelsValue()
3052    for k, v in sorted(six.iteritems(clus_labels)):
3053      labels.additionalProperties.append(
3054          labels.AdditionalProperty(key=k, value=v))
3055    return labels, clus.labelFingerprint
3056
3057  def RemoveLabels(self, cluster_ref, remove_labels):
3058    """Removes labels from a cluster."""
3059    labels, fingerprint = self.RemoveLabelsCommon(cluster_ref, remove_labels)
3060    operation = self.client.projects_locations_clusters.SetResourceLabels(
3061        self.messages.SetLabelsRequest(
3062            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
3063                                        cluster_ref.clusterId),
3064            resourceLabels=labels,
3065            labelFingerprint=fingerprint))
3066    return self.ParseOperation(operation.name, cluster_ref.zone)
3067
3068  def GetIamPolicy(self, cluster_ref):
3069    raise NotImplementedError('GetIamPolicy is not overridden')
3070
3071  def SetIamPolicy(self, cluster_ref):
3072    raise NotImplementedError('GetIamPolicy is not overridden')
3073
3074  def SetRecurringMaintenanceWindow(self, cluster_ref, existing_policy,
3075                                    window_start, window_end,
3076                                    window_recurrence):
3077    """Sets a recurring maintenance window as the maintenance policy for a cluster.
3078
3079    Args:
3080      cluster_ref: The cluster to update.
3081      existing_policy: The existing maintenance policy, if any.
3082      window_start: Start time of the window as a datetime.datetime.
3083      window_end: End time of the window as a datetime.datetime.
3084      window_recurrence: RRULE str defining how the window will recur.
3085
3086    Returns:
3087      The operation from this cluster update.
3088    """
3089    recurring_window = self.messages.RecurringTimeWindow(
3090        window=self.messages.TimeWindow(
3091            startTime=window_start.isoformat(), endTime=window_end.isoformat()),
3092        recurrence=window_recurrence)
3093    if existing_policy is None:
3094      existing_policy = self.messages.MaintenancePolicy()
3095    if existing_policy.window is None:
3096      existing_policy.window = self.messages.MaintenanceWindow()
3097    existing_policy.window.dailyMaintenanceWindow = None
3098    existing_policy.window.recurringWindow = recurring_window
3099    return self._SendMaintenancePolicyRequest(cluster_ref, existing_policy)
3100
3101  def RemoveMaintenanceWindow(self, cluster_ref, existing_policy):
3102    """Removes the recurring or daily maintenance policy."""
3103    if (existing_policy is None or existing_policy.window is None or
3104        (existing_policy.window.dailyMaintenanceWindow is None and
3105         existing_policy.window.recurringWindow is None)):
3106      raise util.Error(NOTHING_TO_UPDATE_ERROR_MSG)
3107    existing_policy.window.dailyMaintenanceWindow = None
3108    existing_policy.window.recurringWindow = None
3109    return self._SendMaintenancePolicyRequest(cluster_ref, existing_policy)
3110
3111  def _NormalizeMaintenanceExclusionsForPolicy(self, policy):
3112    """Given a maintenance policy (can be None), return a normalized form.
3113
3114    This makes it easier to add and remove blackouts because the blackouts
3115    list will definitely exist.
3116
3117    Args:
3118      policy: The policy to normalize.
3119
3120    Returns:
3121      The modified policy (note: modifies in place, but there might not have
3122      even been an existing policy).
3123    """
3124    empty_excl = self.messages.MaintenanceWindow.MaintenanceExclusionsValue()
3125    if policy is None:
3126      policy = self.messages.MaintenancePolicy(
3127          window=self.messages.MaintenanceWindow(
3128              maintenanceExclusions=empty_excl))
3129    elif policy.window is None:
3130      # Shouldn't happen due to defaulting on the server, but easy enough to
3131      # handle.
3132      policy.window = self.messages.MaintenanceWindow(
3133          maintenanceExclusions=empty_excl)
3134    elif policy.window.maintenanceExclusions is None:
3135      policy.window.maintenanceExclusions = empty_excl
3136    return policy
3137
3138  def _GetMaintenanceExclusionNames(self, maintenance_policy):
3139    """Returns a list of maintenance exclusion names from the policy."""
3140    return [
3141        p.key for p in
3142        maintenance_policy.window.maintenanceExclusions.additionalProperties
3143    ]
3144
3145  def AddMaintenanceExclusion(self, cluster_ref, existing_policy, window_name,
3146                              window_start, window_end):
3147    """Adds a maintenance exclusion to the cluster's maintenance policy.
3148
3149    Args:
3150      cluster_ref: The cluster to update.
3151      existing_policy: The existing maintenance policy, if any.
3152      window_name: Unique name for the exclusion. Can be None (will be
3153        autogenerated if so).
3154      window_start: Start time of the window as a datetime.datetime. Can be
3155        None.
3156      window_end: End time of the window as a datetime.datetime.
3157
3158    Returns:
3159      Operation from this cluster update.
3160
3161    Raises:
3162      Error if a maintenance exclusion of that name already exists.
3163    """
3164    existing_policy = self._NormalizeMaintenanceExclusionsForPolicy(
3165        existing_policy)
3166
3167    if window_start is None:
3168      window_start = times.Now(times.UTC)
3169    if window_name is None:
3170      # Collisions from this shouldn't be an issue because this has millisecond
3171      # resolution.
3172      window_name = 'generated-exclusion-' + times.Now(times.UTC).isoformat()
3173
3174    if window_name in self._GetMaintenanceExclusionNames(existing_policy):
3175      raise util.Error(
3176          'A maintenance exclusion named {0} already exists.'.format(
3177              window_name))
3178
3179    # Note: we're using external/python/gcloud_deps/apitools/base/protorpclite
3180    # which does *not* handle maps very nicely. We actually have a
3181    # MaintenanceExclusionsValue field that has a repeated additionalProperties
3182    # field that has key and value fields. See
3183    # third_party/apis/container/v1alpha1/container_v1alpha1_messages.py.
3184    exclusions = existing_policy.window.maintenanceExclusions
3185    window = self.messages.TimeWindow(
3186        startTime=window_start.isoformat(), endTime=window_end.isoformat())
3187    exclusions.additionalProperties.append(
3188        exclusions.AdditionalProperty(key=window_name, value=window))
3189    return self._SendMaintenancePolicyRequest(cluster_ref, existing_policy)
3190
3191  def RemoveMaintenanceExclusion(self, cluster_ref, existing_policy,
3192                                 exclusion_name):
3193    """Removes a maintenance exclusion from the maintenance policy by name."""
3194    existing_policy = self._NormalizeMaintenanceExclusionsForPolicy(
3195        existing_policy)
3196    existing_exclusions = self._GetMaintenanceExclusionNames(existing_policy)
3197    if exclusion_name not in existing_exclusions:
3198      message = ('No maintenance exclusion with name {0} exists. Existing '
3199                 'exclusions: {1}.').format(exclusion_name,
3200                                            ', '.join(existing_exclusions))
3201      raise util.Error(message)
3202
3203    props = []
3204    for ex in existing_policy.window.maintenanceExclusions.additionalProperties:
3205      if ex.key != exclusion_name:
3206        props.append(ex)
3207    existing_policy.window.maintenanceExclusions.additionalProperties = props
3208
3209    return self._SendMaintenancePolicyRequest(cluster_ref, existing_policy)
3210
3211  def ListUsableSubnets(self, project_ref, network_project, filter_arg):
3212    """List usable subnets for a given project.
3213
3214    Args:
3215      project_ref: project where clusters will be created.
3216      network_project: project ID where clusters will be created.
3217      filter_arg: value of filter flag.
3218
3219    Returns:
3220      Response containing the list of subnetworks and a next page token.
3221    """
3222    filters = []
3223    if network_project is not None:
3224      filters.append('networkProjectId=' + network_project)
3225
3226    if filter_arg is not None:
3227      filters.append(filter_arg)
3228
3229    filters = ' AND '.join(filters)
3230
3231    req = self.messages.ContainerProjectsAggregatedUsableSubnetworksListRequest(
3232        # parent example: 'projects/abc'
3233        parent=project_ref.RelativeName(),
3234        # max pageSize accepted by GKE
3235        pageSize=500,
3236        filter=filters)
3237    return self.client.projects_aggregated_usableSubnetworks.List(req)
3238
3239  def ModifyCrossConnectSubnetworks(self,
3240                                    cluster_ref,
3241                                    existing_cross_connect_config,
3242                                    add_subnetworks=None,
3243                                    remove_subnetworks=None,
3244                                    clear_all_subnetworks=None):
3245    """Add/Remove/Clear cross connect subnetworks and schedule cluster update request."""
3246    items = existing_cross_connect_config.items
3247    if clear_all_subnetworks:
3248      items = []
3249    if remove_subnetworks:
3250      items = [x for x in items if x.subnetwork not in remove_subnetworks]
3251    if add_subnetworks:
3252      existing_subnetworks = set([x.subnetwork for x in items])
3253      items.extend([
3254          self.messages.CrossConnectItem(subnetwork=subnetwork)
3255          for subnetwork in add_subnetworks
3256          if subnetwork not in existing_subnetworks
3257      ])
3258
3259    cross_connect_config = self.messages.CrossConnectConfig(
3260        fingerprint=existing_cross_connect_config.fingerprint, items=items)
3261    private_cluster_config = self.messages.PrivateClusterConfig(
3262        crossConnectConfig=cross_connect_config)
3263    update = self.messages.ClusterUpdate(
3264        desiredPrivateClusterConfig=private_cluster_config)
3265    op = self.client.projects_locations_clusters.Update(
3266        self.messages.UpdateClusterRequest(
3267            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
3268                                        cluster_ref.clusterId),
3269            update=update))
3270
3271    return self.ParseOperation(op.name, cluster_ref.zone)
3272
3273
3274class V1Adapter(APIAdapter):
3275  """APIAdapter for v1."""
3276
3277
3278class V1Beta1Adapter(V1Adapter):
3279  """APIAdapter for v1beta1."""
3280
3281  def CreateCluster(self, cluster_ref, options):
3282    cluster = self.CreateClusterCommon(cluster_ref, options)
3283    if options.addons:
3284      # CloudRun is disabled by default.
3285      if any((v in options.addons) for v in CLOUDRUN_ADDONS):
3286        if not options.enable_stackdriver_kubernetes:
3287          raise util.Error(CLOUDRUN_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG)
3288        if INGRESS not in options.addons:
3289          raise util.Error(CLOUDRUN_INGRESS_KUBERNETES_DISABLED_ERROR_MSG)
3290        load_balancer_type = _GetCloudRunLoadBalancerType(
3291            options, self.messages)
3292        cluster.addonsConfig.cloudRunConfig = self.messages.CloudRunConfig(
3293            disabled=False, loadBalancerType=load_balancer_type)
3294      # CloudBuild is disabled by default.
3295      if CLOUDBUILD in options.addons:
3296        if not options.enable_stackdriver_kubernetes:
3297          raise util.Error(CLOUDBUILD_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG)
3298        cluster.addonsConfig.cloudBuildConfig = self.messages.CloudBuildConfig(
3299            enabled=True)
3300      # Istio is disabled by default.
3301      if ISTIO in options.addons:
3302        istio_auth = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_NONE
3303        mtls = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_MUTUAL_TLS
3304        istio_config = options.istio_config
3305        if istio_config is not None:
3306          auth_config = istio_config.get('auth')
3307          if auth_config is not None:
3308            if auth_config == 'MTLS_STRICT':
3309              istio_auth = mtls
3310        cluster.addonsConfig.istioConfig = self.messages.IstioConfig(
3311            disabled=False, auth=istio_auth)
3312    if (options.enable_autoprovisioning is not None or
3313        options.autoscaling_profile is not None):
3314      cluster.autoscaling = self.CreateClusterAutoscalingCommon(
3315          None, options, False)
3316    if options.workload_pool:
3317      cluster.workloadIdentityConfig = self.messages.WorkloadIdentityConfig(
3318          workloadPool=options.workload_pool)
3319      if options.identity_provider:
3320        cluster.workloadIdentityConfig.identityProvider = options.identity_provider
3321      if options.workload_identity_certificate_authority:
3322        cluster.workloadIdentityConfig.issuingCertificateAuthority = options.workload_identity_certificate_authority
3323    if options.enable_gke_oidc:
3324      cluster.gkeOidcConfig = self.messages.GkeOidcConfig(
3325          enabled=options.enable_gke_oidc)
3326    if options.enable_master_global_access is not None:
3327      if not options.enable_private_nodes:
3328        raise util.Error(
3329            PREREQUISITE_OPTION_ERROR_MSG.format(
3330                prerequisite='enable-private-nodes',
3331                opt='enable-master-global-access'))
3332      cluster.privateClusterConfig.masterGlobalAccessConfig = \
3333          self.messages.PrivateClusterMasterGlobalAccessConfig(
3334              enabled=options.enable_master_global_access)
3335    _AddPSCPrivateClustersOptionsToClusterForCreateCluster(
3336        cluster, options, self.messages)
3337
3338    cluster_telemetry_type = self._GetClusterTelemetryType(
3339        options, cluster.loggingService, cluster.monitoringService)
3340    if cluster_telemetry_type is not None:
3341      cluster.clusterTelemetry = self.messages.ClusterTelemetry()
3342      cluster.clusterTelemetry.type = cluster_telemetry_type
3343
3344    if cluster.clusterTelemetry:
3345      cluster.loggingService = None
3346      cluster.monitoringService = None
3347
3348    if options.enable_workload_monitoring_eap:
3349      cluster.workloadMonitoringEnabledEap = True
3350
3351    if options.datapath_provider is not None:
3352      if cluster.networkConfig is None:
3353        cluster.networkConfig = self.messages.NetworkConfig()
3354      if options.datapath_provider.lower() == 'legacy':
3355        cluster.networkConfig.datapathProvider = \
3356            self.messages.NetworkConfig.DatapathProviderValueValuesEnum.LEGACY_DATAPATH
3357      elif options.datapath_provider.lower() == 'advanced':
3358        cluster.networkConfig.datapathProvider = \
3359            self.messages.NetworkConfig.DatapathProviderValueValuesEnum.ADVANCED_DATAPATH
3360      else:
3361        raise util.Error(
3362            DATAPATH_PROVIDER_ILL_SPECIFIED_ERROR_MSG.format(
3363                provider=options.datapath_provider))
3364
3365    if options.dataplane_v2 is not None and options.dataplane_v2:
3366      if cluster.networkConfig is None:
3367        cluster.networkConfig = self.messages.NetworkConfig()
3368      cluster.networkConfig.datapathProvider = \
3369            self.messages.NetworkConfig.DatapathProviderValueValuesEnum.ADVANCED_DATAPATH
3370
3371    cluster.master = _GetMasterForClusterCreate(options, self.messages)
3372
3373    cluster.kubernetesObjectsExportConfig = _GetKubernetesObjectsExportConfigForClusterCreate(
3374        options, self.messages)
3375
3376    req = self.messages.CreateClusterRequest(
3377        parent=ProjectLocation(cluster_ref.projectId, cluster_ref.zone),
3378        cluster=cluster)
3379    operation = self.client.projects_locations_clusters.Create(req)
3380    return self.ParseOperation(operation.name, cluster_ref.zone)
3381
3382  def CreateNodePool(self, node_pool_ref, options):
3383    pool = self.CreateNodePoolCommon(node_pool_ref, options)
3384    if options.enable_autoprovisioning is not None:
3385      pool.autoscaling.autoprovisioned = options.enable_autoprovisioning
3386    pool.networkConfig = self._GetNetworkConfig(options)
3387    req = self.messages.CreateNodePoolRequest(
3388        nodePool=pool,
3389        parent=ProjectLocationCluster(node_pool_ref.projectId,
3390                                      node_pool_ref.zone,
3391                                      node_pool_ref.clusterId))
3392    operation = self.client.projects_locations_clusters_nodePools.Create(req)
3393    return self.ParseOperation(operation.name, node_pool_ref.zone)
3394
3395  def UpdateCluster(self, cluster_ref, options):
3396    update = self.UpdateClusterCommon(cluster_ref, options)
3397
3398    if options.workload_pool:
3399      update = self.messages.ClusterUpdate(
3400          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3401              workloadPool=options.workload_pool))
3402    elif options.identity_provider:
3403      update = self.messages.ClusterUpdate(
3404          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3405              identityProvider=options.identity_provider))
3406    elif options.disable_workload_identity:
3407      update = self.messages.ClusterUpdate(
3408          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3409              workloadPool=''))
3410    elif options.workload_identity_certificate_authority:
3411      update = self.messages.ClusterUpdate(
3412          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3413              issuingCertificateAuthority=options
3414              .workload_identity_certificate_authority,))
3415    elif options.disable_workload_identity_certificates:
3416      update = self.messages.ClusterUpdate(
3417          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3418              issuingCertificateAuthority='',))
3419
3420    if options.enable_gke_oidc is not None:
3421      update = self.messages.ClusterUpdate(
3422          desiredGkeOidcConfig=self.messages.GkeOidcConfig(
3423              enabled=options.enable_gke_oidc))
3424
3425    if options.disable_autopilot is not None:
3426      update = self.messages.ClusterUpdate(
3427          desiredAutopilot=self.messages.Autopilot(
3428              enabled=False))
3429
3430    if options.enable_stackdriver_kubernetes:
3431      update = self.messages.ClusterUpdate(
3432          desiredClusterTelemetry=self.messages.ClusterTelemetry(
3433              type=self.messages.ClusterTelemetry.TypeValueValuesEnum.ENABLED))
3434    elif options.enable_logging_monitoring_system_only:
3435      update = self.messages.ClusterUpdate(
3436          desiredClusterTelemetry=self.messages.ClusterTelemetry(
3437              type=self.messages.ClusterTelemetry.TypeValueValuesEnum
3438              .SYSTEM_ONLY))
3439    elif options.enable_stackdriver_kubernetes is not None:
3440      update = self.messages.ClusterUpdate(
3441          desiredClusterTelemetry=self.messages.ClusterTelemetry(
3442              type=self.messages.ClusterTelemetry.TypeValueValuesEnum.DISABLED))
3443
3444    if options.enable_workload_monitoring_eap is not None:
3445      update = self.messages.ClusterUpdate(
3446          desiredWorkloadMonitoringEapConfig=self.messages
3447          .WorkloadMonitoringEapConfig(
3448              enabled=options.enable_workload_monitoring_eap))
3449
3450    master = _GetMasterForClusterUpdate(options, self.messages)
3451    if master is not None:
3452      update = self.messages.ClusterUpdate(desiredMaster=master)
3453
3454    kubernetes_objects_export_config = _GetKubernetesObjectsExportConfigForClusterUpdate(
3455        options, self.messages)
3456    if kubernetes_objects_export_config is not None:
3457      update = self.messages.ClusterUpdate(
3458          desiredKubernetesObjectsExportConfig=kubernetes_objects_export_config)
3459
3460    if not update:
3461      # if reached here, it's possible:
3462      # - someone added update flags but not handled
3463      # - none of the update flags specified from command line
3464      # so raise an error with readable message like:
3465      #   Nothing to update
3466      # to catch this error.
3467      raise util.Error(NOTHING_TO_UPDATE_ERROR_MSG)
3468
3469    if options.disable_addons is not None:
3470      if options.disable_addons.get(ISTIO) is not None:
3471        istio_auth = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_NONE
3472        mtls = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_MUTUAL_TLS
3473        istio_config = options.istio_config
3474        if istio_config is not None:
3475          auth_config = istio_config.get('auth')
3476          if auth_config is not None:
3477            if auth_config == 'MTLS_STRICT':
3478              istio_auth = mtls
3479        update.desiredAddonsConfig.istioConfig = self.messages.IstioConfig(
3480            disabled=options.disable_addons.get(ISTIO), auth=istio_auth)
3481      if any(
3482          (options.disable_addons.get(v) is not None) for v in CLOUDRUN_ADDONS):
3483        load_balancer_type = _GetCloudRunLoadBalancerType(
3484            options, self.messages)
3485        update.desiredAddonsConfig.cloudRunConfig = (
3486            self.messages.CloudRunConfig(
3487                disabled=any(
3488                    options.disable_addons.get(v) or False
3489                    for v in CLOUDRUN_ADDONS),
3490                loadBalancerType=load_balancer_type))
3491      if options.disable_addons.get(APPLICATIONMANAGER) is not None:
3492        update.desiredAddonsConfig.kalmConfig = (
3493            self.messages.KalmConfig(
3494                enabled=(not options.disable_addons.get(APPLICATIONMANAGER))))
3495      if options.disable_addons.get(CLOUDBUILD) is not None:
3496        update.desiredAddonsConfig.cloudBuildConfig = (
3497            self.messages.CloudBuildConfig(
3498                enabled=(not options.disable_addons.get(CLOUDBUILD))))
3499
3500    op = self.client.projects_locations_clusters.Update(
3501        self.messages.UpdateClusterRequest(
3502            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
3503                                        cluster_ref.clusterId),
3504            update=update))
3505    return self.ParseOperation(op.name, cluster_ref.zone)
3506
3507  def CreateClusterAutoscalingCommon(self, cluster_ref, options, for_update):
3508    """Create cluster's autoscaling configuration.
3509
3510    Args:
3511      cluster_ref: Cluster reference.
3512      options: Either CreateClusterOptions or UpdateClusterOptions.
3513      for_update: Is function executed for update operation.
3514
3515    Returns:
3516      Cluster's autoscaling configuration.
3517    """
3518
3519    # Patch cluster autoscaling if cluster_ref is provided.
3520    autoscaling = self.messages.ClusterAutoscaling()
3521    cluster = self.GetCluster(cluster_ref) if cluster_ref else None
3522    if cluster and cluster.autoscaling:
3523      autoscaling.enableNodeAutoprovisioning = \
3524          cluster.autoscaling.enableNodeAutoprovisioning
3525
3526    resource_limits = []
3527    if options.autoprovisioning_config_file is not None:
3528      # Create using config file only.
3529      config = yaml.load(options.autoprovisioning_config_file)
3530      resource_limits = config.get(RESOURCE_LIMITS)
3531      service_account = config.get(SERVICE_ACCOUNT)
3532      scopes = config.get(SCOPES)
3533      max_surge_upgrade = None
3534      max_unavailable_upgrade = None
3535      upgrade_settings = config.get(UPGRADE_SETTINGS)
3536      if upgrade_settings:
3537        max_surge_upgrade = upgrade_settings.get(MAX_SURGE_UPGRADE)
3538        max_unavailable_upgrade = upgrade_settings.get(MAX_UNAVAILABLE_UPGRADE)
3539      management_settings = config.get(NODE_MANAGEMENT)
3540      enable_autoupgrade = None
3541      enable_autorepair = None
3542      if management_settings:
3543        enable_autoupgrade = management_settings.get(ENABLE_AUTO_UPGRADE)
3544        enable_autorepair = management_settings.get(ENABLE_AUTO_REPAIR)
3545      autoprovisioning_locations = \
3546        config.get(AUTOPROVISIONING_LOCATIONS)
3547      min_cpu_platform = config.get(MIN_CPU_PLATFORM)
3548      boot_disk_kms_key = config.get(BOOT_DISK_KMS_KEY)
3549      disk_type = config.get(DISK_TYPE)
3550      disk_size_gb = config.get(DISK_SIZE_GB)
3551      shielded_instance_config = config.get(SHIELDED_INSTANCE_CONFIG)
3552      enable_secure_boot = None
3553      enable_integrity_monitoring = None
3554      if shielded_instance_config:
3555        enable_secure_boot = shielded_instance_config.get(ENABLE_SECURE_BOOT)
3556        enable_integrity_monitoring = \
3557            shielded_instance_config.get(ENABLE_INTEGRITY_MONITORING)
3558    else:
3559      resource_limits = self.ResourceLimitsFromFlags(options)
3560      service_account = options.autoprovisioning_service_account
3561      scopes = options.autoprovisioning_scopes
3562      autoprovisioning_locations = options.autoprovisioning_locations
3563      max_surge_upgrade = options.autoprovisioning_max_surge_upgrade
3564      max_unavailable_upgrade = options.autoprovisioning_max_unavailable_upgrade
3565      enable_autoupgrade = options.enable_autoprovisioning_autoupgrade
3566      enable_autorepair = options.enable_autoprovisioning_autorepair
3567      min_cpu_platform = options.autoprovisioning_min_cpu_platform
3568      boot_disk_kms_key = None
3569      disk_type = None
3570      disk_size_gb = None
3571      enable_secure_boot = None
3572      enable_integrity_monitoring = None
3573
3574    if options.enable_autoprovisioning is not None:
3575      autoscaling.enableNodeAutoprovisioning = options.enable_autoprovisioning
3576      autoscaling.resourceLimits = resource_limits or []
3577      if scopes is None:
3578        scopes = []
3579      management = None
3580      upgrade_settings = None
3581      if max_surge_upgrade is not None or max_unavailable_upgrade is not None:
3582        upgrade_settings = self.messages.UpgradeSettings()
3583        upgrade_settings.maxUnavailable = max_unavailable_upgrade
3584        upgrade_settings.maxSurge = max_surge_upgrade
3585      if enable_autorepair is not None or enable_autoupgrade is not None:
3586        management = (
3587            self.messages.NodeManagement(
3588                autoUpgrade=enable_autoupgrade, autoRepair=enable_autorepair))
3589      shielded_instance_config = None
3590      if enable_secure_boot is not None or \
3591          enable_integrity_monitoring is not None:
3592        shielded_instance_config = self.messages.ShieldedInstanceConfig()
3593        shielded_instance_config.enableSecureBoot = enable_secure_boot
3594        shielded_instance_config.enableIntegrityMonitoring = \
3595            enable_integrity_monitoring
3596      autoscaling.autoprovisioningNodePoolDefaults = self.messages \
3597        .AutoprovisioningNodePoolDefaults(serviceAccount=service_account,
3598                                          oauthScopes=scopes,
3599                                          upgradeSettings=upgrade_settings,
3600                                          management=management,
3601                                          minCpuPlatform=min_cpu_platform,
3602                                          bootDiskKmsKey=boot_disk_kms_key,
3603                                          diskSizeGb=disk_size_gb,
3604                                          diskType=disk_type,
3605                                          shieldedInstanceConfig=
3606                                          shielded_instance_config)
3607      if autoprovisioning_locations:
3608        autoscaling.autoprovisioningLocations = \
3609          sorted(autoprovisioning_locations)
3610
3611    if options.autoscaling_profile is not None:
3612      autoscaling.autoscalingProfile = \
3613          self.CreateAutoscalingProfileCommon(options)
3614
3615    self.ValidateClusterAutoscaling(autoscaling, for_update)
3616    return autoscaling
3617
3618  def CreateAutoscalingProfileCommon(self, options):
3619    """Create and validate cluster's autoscaling profile configuration.
3620
3621    Args:
3622      options: Either CreateClusterOptions or UpdateClusterOptions.
3623
3624    Returns:
3625      Cluster's autoscaling profile configuration.
3626    """
3627
3628    profiles_enum = \
3629        self.messages.ClusterAutoscaling.AutoscalingProfileValueValuesEnum
3630    valid_choices = [
3631        arg_utils.EnumNameToChoice(n)
3632        for n in profiles_enum.names()
3633        if n != 'profile-unspecified'
3634    ]
3635    return arg_utils.ChoiceToEnum(
3636        choice=arg_utils.EnumNameToChoice(options.autoscaling_profile),
3637        enum_type=profiles_enum,
3638        valid_choices=valid_choices)
3639
3640  def ValidateClusterAutoscaling(self, autoscaling, for_update):
3641    """Validate cluster autoscaling configuration.
3642
3643    Args:
3644      autoscaling: autoscaling configuration to be validated.
3645      for_update: Is function executed for update operation.
3646
3647    Raises:
3648      Error if the new configuration is invalid.
3649    """
3650    if autoscaling.enableNodeAutoprovisioning:
3651      if not for_update or autoscaling.resourceLimits:
3652        cpu_found = any(
3653            limit.resourceType == 'cpu' for limit in autoscaling.resourceLimits)
3654        mem_found = any(limit.resourceType == 'memory'
3655                        for limit in autoscaling.resourceLimits)
3656        if not cpu_found or not mem_found:
3657          raise util.Error(NO_AUTOPROVISIONING_LIMITS_ERROR_MSG)
3658        defaults = autoscaling.autoprovisioningNodePoolDefaults
3659        if defaults:
3660          if defaults.upgradeSettings:
3661            max_surge_found = defaults.upgradeSettings.maxSurge is not None
3662            max_unavailable_found = defaults.upgradeSettings.maxUnavailable is not None
3663            if max_unavailable_found != max_surge_found:
3664              raise util.Error(BOTH_AUTOPROVISIONING_UPGRADE_SETTINGS_ERROR_MSG)
3665          if defaults.management:
3666            auto_upgrade_found = defaults.management.autoUpgrade is not None
3667            auto_repair_found = defaults.management.autoRepair is not None
3668            if auto_repair_found != auto_upgrade_found:
3669              raise util.Error(
3670                  BOTH_AUTOPROVISIONING_MANAGEMENT_SETTINGS_ERROR_MSG)
3671          if defaults.shieldedInstanceConfig:
3672            secure_boot_found = defaults.shieldedInstanceConfig.enableSecureBoot is not None
3673            integrity_monitoring_found = defaults.shieldedInstanceConfig.enableIntegrityMonitoring is not None
3674            if secure_boot_found != integrity_monitoring_found:
3675              raise util.Error(
3676                  BOTH_AUTOPROVISIONING_SHIELDED_INSTANCE_SETTINGS_ERROR_MSG)
3677    elif autoscaling.resourceLimits:
3678      raise util.Error(LIMITS_WITHOUT_AUTOPROVISIONING_MSG)
3679    elif autoscaling.autoprovisioningNodePoolDefaults and \
3680        (autoscaling.autoprovisioningNodePoolDefaults.serviceAccount or
3681         autoscaling.autoprovisioningNodePoolDefaults.oauthScopes or
3682         autoscaling.autoprovisioningNodePoolDefaults.management or
3683         autoscaling.autoprovisioningNodePoolDefaults.upgradeSettings):
3684      raise util.Error(DEFAULTS_WITHOUT_AUTOPROVISIONING_MSG)
3685
3686  def UpdateNodePool(self, node_pool_ref, options):
3687    if options.IsAutoscalingUpdate():
3688      autoscaling = self.UpdateNodePoolAutoscaling(node_pool_ref, options)
3689      update = self.messages.ClusterUpdate(
3690          desiredNodePoolId=node_pool_ref.nodePoolId,
3691          desiredNodePoolAutoscaling=autoscaling)
3692      operation = self.client.projects_locations_clusters.Update(
3693          self.messages.UpdateClusterRequest(
3694              name=ProjectLocationCluster(node_pool_ref.projectId,
3695                                          node_pool_ref.zone,
3696                                          node_pool_ref.clusterId),
3697              update=update))
3698      return self.ParseOperation(operation.name, node_pool_ref.zone)
3699    elif options.IsNodePoolManagementUpdate():
3700      management = self.UpdateNodePoolNodeManagement(node_pool_ref, options)
3701      req = (
3702          self.messages.SetNodePoolManagementRequest(
3703              name=ProjectLocationClusterNodePool(node_pool_ref.projectId,
3704                                                  node_pool_ref.zone,
3705                                                  node_pool_ref.clusterId,
3706                                                  node_pool_ref.nodePoolId),
3707              management=management))
3708      operation = (
3709          self.client.projects_locations_clusters_nodePools.SetManagement(req))
3710    elif options.IsUpdateNodePoolRequest():
3711      req = self.UpdateNodePoolRequest(node_pool_ref, options)
3712      operation = self.client.projects_locations_clusters_nodePools.Update(req)
3713    else:
3714      raise util.Error('Unhandled node pool update mode')
3715
3716    return self.ParseOperation(operation.name, node_pool_ref.zone)
3717
3718
3719class V1Alpha1Adapter(V1Beta1Adapter):
3720  """APIAdapter for v1alpha1."""
3721
3722  def CreateCluster(self, cluster_ref, options):
3723    cluster = self.CreateClusterCommon(cluster_ref, options)
3724    if (options.enable_autoprovisioning is not None or
3725        options.autoscaling_profile is not None):
3726      cluster.autoscaling = self.CreateClusterAutoscalingCommon(
3727          None, options, False)
3728    if options.addons:
3729      # CloudRun is disabled by default.
3730      if any((v in options.addons) for v in CLOUDRUN_ADDONS):
3731        if not options.enable_stackdriver_kubernetes:
3732          raise util.Error(CLOUDRUN_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG)
3733        if INGRESS not in options.addons:
3734          raise util.Error(CLOUDRUN_INGRESS_KUBERNETES_DISABLED_ERROR_MSG)
3735        enable_alpha_features = options.enable_cloud_run_alpha if \
3736            options.enable_cloud_run_alpha is not None else False
3737        load_balancer_type = _GetCloudRunLoadBalancerType(
3738            options, self.messages)
3739        cluster.addonsConfig.cloudRunConfig = self.messages.CloudRunConfig(
3740            disabled=False,
3741            enableAlphaFeatures=enable_alpha_features,
3742            loadBalancerType=load_balancer_type)
3743      # Cloud Build is disabled by default.
3744      if CLOUDBUILD in options.addons:
3745        if not options.enable_stackdriver_kubernetes:
3746          raise util.Error(CLOUDBUILD_STACKDRIVER_KUBERNETES_DISABLED_ERROR_MSG)
3747        cluster.addonsConfig.cloudBuildConfig = self.messages.CloudBuildConfig(
3748            enabled=True)
3749      # Istio is disabled by default
3750      if ISTIO in options.addons:
3751        istio_auth = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_NONE
3752        mtls = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_MUTUAL_TLS
3753        istio_config = options.istio_config
3754        if istio_config is not None:
3755          auth_config = istio_config.get('auth')
3756          if auth_config is not None:
3757            if auth_config == 'MTLS_STRICT':
3758              istio_auth = mtls
3759        cluster.addonsConfig.istioConfig = self.messages.IstioConfig(
3760            disabled=False, auth=istio_auth)
3761    if options.workload_pool:
3762      cluster.workloadIdentityConfig = self.messages.WorkloadIdentityConfig(
3763          workloadPool=options.workload_pool)
3764      if options.identity_provider:
3765        cluster.workloadIdentityConfig.identityProvider = options.identity_provider
3766      if options.workload_identity_certificate_authority:
3767        cluster.workloadIdentityConfig.issuingCertificateAuthority = options.workload_identity_certificate_authority
3768    if options.enable_gke_oidc:
3769      cluster.gkeOidcConfig = self.messages.GkeOidcConfig(
3770          enabled=options.enable_gke_oidc)
3771    if options.security_profile is not None:
3772      cluster.securityProfile = self.messages.SecurityProfile(
3773          name=options.security_profile)
3774      if options.security_profile_runtime_rules is not None:
3775        cluster.securityProfile.disableRuntimeRules = \
3776          not options.security_profile_runtime_rules
3777    if options.enable_private_ipv6_access is not None:
3778      if cluster.networkConfig is None:
3779        cluster.networkConfig = self.messages.NetworkConfig(
3780            enablePrivateIpv6Access=options.enable_private_ipv6_access)
3781      else:
3782        cluster.networkConfig.enablePrivateIpv6Access = \
3783            options.enable_private_ipv6_access
3784    if options.enable_master_global_access is not None:
3785      if not options.enable_private_nodes:
3786        raise util.Error(
3787            PREREQUISITE_OPTION_ERROR_MSG.format(
3788                prerequisite='enable-private-nodes',
3789                opt='enable-master-global-access'))
3790      cluster.privateClusterConfig.masterGlobalAccessConfig = \
3791          self.messages.PrivateClusterMasterGlobalAccessConfig(
3792              enabled=options.enable_master_global_access)
3793    _AddPSCPrivateClustersOptionsToClusterForCreateCluster(
3794        cluster, options, self.messages)
3795
3796    cluster.releaseChannel = _GetReleaseChannel(options, self.messages)
3797    if options.enable_cost_management:
3798      cluster.costManagementConfig = self.messages.CostManagementConfig(
3799          enabled=True)
3800
3801    cluster_telemetry_type = self._GetClusterTelemetryType(
3802        options, cluster.loggingService, cluster.monitoringService)
3803    if cluster_telemetry_type is not None:
3804      cluster.clusterTelemetry = self.messages.ClusterTelemetry()
3805      cluster.clusterTelemetry.type = cluster_telemetry_type
3806
3807    if cluster.clusterTelemetry:
3808      cluster.loggingService = None
3809      cluster.monitoringService = None
3810
3811    if options.enable_workload_monitoring_eap:
3812      cluster.workloadMonitoringEnabledEap = True
3813
3814    if options.datapath_provider is not None:
3815      if cluster.networkConfig is None:
3816        cluster.networkConfig = self.messages.NetworkConfig()
3817      if options.datapath_provider.lower() == 'legacy':
3818        cluster.networkConfig.datapathProvider = \
3819            self.messages.NetworkConfig.DatapathProviderValueValuesEnum.LEGACY_DATAPATH
3820      elif options.datapath_provider.lower() == 'advanced':
3821        cluster.networkConfig.datapathProvider = \
3822            self.messages.NetworkConfig.DatapathProviderValueValuesEnum.ADVANCED_DATAPATH
3823      else:
3824        raise util.Error(
3825            DATAPATH_PROVIDER_ILL_SPECIFIED_ERROR_MSG.format(
3826                provider=options.datapath_provider))
3827
3828    if options.dataplane_v2 is not None and options.dataplane_v2:
3829      if cluster.networkConfig is None:
3830        cluster.networkConfig = self.messages.NetworkConfig()
3831      cluster.networkConfig.datapathProvider = \
3832            self.messages.NetworkConfig.DatapathProviderValueValuesEnum.ADVANCED_DATAPATH
3833
3834    cluster.master = _GetMasterForClusterCreate(options, self.messages)
3835
3836    cluster.kubernetesObjectsExportConfig = _GetKubernetesObjectsExportConfigForClusterCreate(
3837        options, self.messages)
3838
3839    req = self.messages.CreateClusterRequest(
3840        parent=ProjectLocation(cluster_ref.projectId, cluster_ref.zone),
3841        cluster=cluster)
3842    operation = self.client.projects_locations_clusters.Create(req)
3843    return self.ParseOperation(operation.name, cluster_ref.zone)
3844
3845  def UpdateCluster(self, cluster_ref, options):
3846    update = self.UpdateClusterCommon(cluster_ref, options)
3847
3848    if options.workload_pool:
3849      update = self.messages.ClusterUpdate(
3850          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3851              workloadPool=options.workload_pool))
3852    elif options.identity_provider:
3853      update = self.messages.ClusterUpdate(
3854          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3855              identityProvider=options.identity_provider))
3856    elif options.disable_workload_identity:
3857      update = self.messages.ClusterUpdate(
3858          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3859              workloadPool=''))
3860    elif options.workload_identity_certificate_authority:
3861      update = self.messages.ClusterUpdate(
3862          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3863              issuingCertificateAuthority=options
3864              .workload_identity_certificate_authority,))
3865    elif options.disable_workload_identity_certificates:
3866      update = self.messages.ClusterUpdate(
3867          desiredWorkloadIdentityConfig=self.messages.WorkloadIdentityConfig(
3868              issuingCertificateAuthority='',))
3869
3870    if options.enable_gke_oidc is not None:
3871      update = self.messages.ClusterUpdate(
3872          desiredGkeOidcConfig=self.messages.GkeOidcConfig(
3873              enabled=options.enable_gke_oidc))
3874
3875    if options.enable_cost_management is not None:
3876      update = self.messages.ClusterUpdate(
3877          desiredCostManagementConfig=self.messages.CostManagementConfig(
3878              enabled=options.enable_cost_management))
3879
3880    if options.release_channel is not None:
3881      update = self.messages.ClusterUpdate(
3882          desiredReleaseChannel=_GetReleaseChannel(
3883              options, self.messages))
3884
3885    if options.disable_autopilot is not None:
3886      update = self.messages.ClusterUpdate(
3887          desiredAutopilot=self.messages.Autopilot(
3888              enabled=False))
3889
3890    if options.enable_stackdriver_kubernetes:
3891      update = self.messages.ClusterUpdate(
3892          desiredClusterTelemetry=self.messages.ClusterTelemetry(
3893              type=self.messages.ClusterTelemetry.TypeValueValuesEnum.ENABLED))
3894    elif options.enable_logging_monitoring_system_only:
3895      update = self.messages.ClusterUpdate(
3896          desiredClusterTelemetry=self.messages.ClusterTelemetry(
3897              type=self.messages.ClusterTelemetry.TypeValueValuesEnum
3898              .SYSTEM_ONLY))
3899    elif options.enable_stackdriver_kubernetes is not None:
3900      update = self.messages.ClusterUpdate(
3901          desiredClusterTelemetry=self.messages.ClusterTelemetry(
3902              type=self.messages.ClusterTelemetry.TypeValueValuesEnum.DISABLED))
3903
3904    if options.enable_workload_monitoring_eap is not None:
3905      update = self.messages.ClusterUpdate(
3906          desiredWorkloadMonitoringEapConfig=self.messages
3907          .WorkloadMonitoringEapConfig(
3908              enabled=options.enable_workload_monitoring_eap))
3909
3910    master = _GetMasterForClusterUpdate(options, self.messages)
3911    if master is not None:
3912      update = self.messages.ClusterUpdate(desiredMaster=master)
3913
3914    kubernetes_objects_export_config = _GetKubernetesObjectsExportConfigForClusterUpdate(
3915        options, self.messages)
3916    if kubernetes_objects_export_config is not None:
3917      update = self.messages.ClusterUpdate(
3918          desiredKubernetesObjectsExportConfig=kubernetes_objects_export_config)
3919
3920    if not update:
3921      # if reached here, it's possible:
3922      # - someone added update flags but not handled
3923      # - none of the update flags specified from command line
3924      # so raise an error with readable message like:
3925      #   Nothing to update
3926      # to catch this error.
3927      raise util.Error(NOTHING_TO_UPDATE_ERROR_MSG)
3928
3929    if options.disable_addons is not None:
3930      if options.disable_addons.get(ISTIO) is not None:
3931        istio_auth = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_NONE
3932        mtls = self.messages.IstioConfig.AuthValueValuesEnum.AUTH_MUTUAL_TLS
3933        istio_config = options.istio_config
3934        if istio_config is not None:
3935          auth_config = istio_config.get('auth')
3936          if auth_config is not None:
3937            if auth_config == 'MTLS_STRICT':
3938              istio_auth = mtls
3939        update.desiredAddonsConfig.istioConfig = self.messages.IstioConfig(
3940            disabled=options.disable_addons.get(ISTIO), auth=istio_auth)
3941      if any(
3942          (options.disable_addons.get(v) is not None) for v in CLOUDRUN_ADDONS):
3943        load_balancer_type = _GetCloudRunLoadBalancerType(
3944            options, self.messages)
3945        update.desiredAddonsConfig.cloudRunConfig = (
3946            self.messages.CloudRunConfig(
3947                disabled=any(
3948                    options.disable_addons.get(v) or False
3949                    for v in CLOUDRUN_ADDONS),
3950                loadBalancerType=load_balancer_type))
3951      if options.disable_addons.get(APPLICATIONMANAGER) is not None:
3952        update.desiredAddonsConfig.kalmConfig = (
3953            self.messages.KalmConfig(
3954                enabled=(not options.disable_addons.get(APPLICATIONMANAGER))))
3955      if options.disable_addons.get(CLOUDBUILD) is not None:
3956        update.desiredAddonsConfig.cloudBuildConfig = (
3957            self.messages.CloudBuildConfig(
3958                enabled=(not options.disable_addons.get(CLOUDBUILD))))
3959
3960    op = self.client.projects_locations_clusters.Update(
3961        self.messages.UpdateClusterRequest(
3962            name=ProjectLocationCluster(cluster_ref.projectId, cluster_ref.zone,
3963                                        cluster_ref.clusterId),
3964            update=update))
3965    return self.ParseOperation(op.name, cluster_ref.zone)
3966
3967  def CreateNodePool(self, node_pool_ref, options):
3968    pool = self.CreateNodePoolCommon(node_pool_ref, options)
3969    if options.enable_autoprovisioning is not None:
3970      pool.autoscaling.autoprovisioned = options.enable_autoprovisioning
3971    pool.networkConfig = self._GetNetworkConfig(options)
3972    req = self.messages.CreateNodePoolRequest(
3973        nodePool=pool,
3974        parent=ProjectLocationCluster(node_pool_ref.projectId,
3975                                      node_pool_ref.zone,
3976                                      node_pool_ref.clusterId))
3977    operation = self.client.projects_locations_clusters_nodePools.Create(req)
3978    return self.ParseOperation(operation.name, node_pool_ref.zone)
3979
3980  def CreateClusterAutoscalingCommon(self, cluster_ref, options, for_update):
3981    """Create cluster's autoscaling configuration.
3982
3983    Args:
3984      cluster_ref: Cluster reference.
3985      options: Either CreateClusterOptions or UpdateClusterOptions.
3986      for_update: Is function executed for update operation.
3987
3988    Returns:
3989      Cluster's autoscaling configuration.
3990    """
3991    # Patch cluster autoscaling if cluster_ref is provided.
3992    cluster = None
3993    autoscaling = self.messages.ClusterAutoscaling()
3994    if cluster_ref:
3995      cluster = self.GetCluster(cluster_ref)
3996    if cluster and cluster.autoscaling:
3997      autoscaling.enableNodeAutoprovisioning = \
3998          cluster.autoscaling.enableNodeAutoprovisioning
3999
4000    resource_limits = []
4001    if options.autoprovisioning_config_file is not None:
4002      # Create using config file only.
4003      config = yaml.load(options.autoprovisioning_config_file)
4004      resource_limits = config.get(RESOURCE_LIMITS)
4005      service_account = config.get(SERVICE_ACCOUNT)
4006      scopes = config.get(SCOPES)
4007      max_surge_upgrade = None
4008      max_unavailable_upgrade = None
4009      upgrade_settings = config.get(UPGRADE_SETTINGS)
4010      if upgrade_settings:
4011        max_surge_upgrade = upgrade_settings.get(MAX_SURGE_UPGRADE)
4012        max_unavailable_upgrade = upgrade_settings.get(MAX_UNAVAILABLE_UPGRADE)
4013      management_settings = config.get(NODE_MANAGEMENT)
4014      enable_autoupgrade = None
4015      enable_autorepair = None
4016      if management_settings is not None:
4017        enable_autoupgrade = management_settings.get(ENABLE_AUTO_UPGRADE)
4018        enable_autorepair = management_settings.get(ENABLE_AUTO_REPAIR)
4019      autoprovisioning_locations = \
4020          config.get(AUTOPROVISIONING_LOCATIONS)
4021      min_cpu_platform = config.get(MIN_CPU_PLATFORM)
4022      boot_disk_kms_key = config.get(BOOT_DISK_KMS_KEY)
4023      disk_type = config.get(DISK_TYPE)
4024      disk_size_gb = config.get(DISK_SIZE_GB)
4025      shielded_instance_config = config.get(SHIELDED_INSTANCE_CONFIG)
4026      enable_secure_boot = None
4027      enable_integrity_monitoring = None
4028      if shielded_instance_config:
4029        enable_secure_boot = shielded_instance_config.get(ENABLE_SECURE_BOOT)
4030        enable_integrity_monitoring = \
4031            shielded_instance_config.get(ENABLE_INTEGRITY_MONITORING)
4032    else:
4033      resource_limits = self.ResourceLimitsFromFlags(options)
4034      service_account = options.autoprovisioning_service_account
4035      scopes = options.autoprovisioning_scopes
4036      autoprovisioning_locations = options.autoprovisioning_locations
4037      max_surge_upgrade = options.autoprovisioning_max_surge_upgrade
4038      max_unavailable_upgrade = options.autoprovisioning_max_unavailable_upgrade
4039      enable_autoupgrade = options.enable_autoprovisioning_autoupgrade
4040      enable_autorepair = options.enable_autoprovisioning_autorepair
4041      min_cpu_platform = options.autoprovisioning_min_cpu_platform
4042      boot_disk_kms_key = None
4043      disk_type = None
4044      disk_size_gb = None
4045      enable_secure_boot = None
4046      enable_integrity_monitoring = None
4047
4048    if options.enable_autoprovisioning is not None:
4049      autoscaling.enableNodeAutoprovisioning = options.enable_autoprovisioning
4050      if resource_limits is None:
4051        resource_limits = []
4052      autoscaling.resourceLimits = resource_limits
4053      if scopes is None:
4054        scopes = []
4055      management = None
4056      upgrade_settings = None
4057      if max_surge_upgrade is not None or max_unavailable_upgrade is not None:
4058        upgrade_settings = self.messages.UpgradeSettings()
4059        upgrade_settings.maxUnavailable = max_unavailable_upgrade
4060        upgrade_settings.maxSurge = max_surge_upgrade
4061      if enable_autorepair is not None or enable_autorepair is not None:
4062        management = self.messages \
4063          .NodeManagement(autoUpgrade=enable_autoupgrade,
4064                          autoRepair=enable_autorepair)
4065      shielded_instance_config = None
4066      if enable_secure_boot is not None or \
4067          enable_integrity_monitoring is not None:
4068        shielded_instance_config = self.messages.ShieldedInstanceConfig()
4069        shielded_instance_config.enableSecureBoot = enable_secure_boot
4070        shielded_instance_config.enableIntegrityMonitoring = \
4071            enable_integrity_monitoring
4072      autoscaling.autoprovisioningNodePoolDefaults = self.messages \
4073        .AutoprovisioningNodePoolDefaults(serviceAccount=service_account,
4074                                          oauthScopes=scopes,
4075                                          upgradeSettings=upgrade_settings,
4076                                          management=management,
4077                                          minCpuPlatform=min_cpu_platform,
4078                                          bootDiskKmsKey=boot_disk_kms_key,
4079                                          diskSizeGb=disk_size_gb,
4080                                          diskType=disk_type,
4081                                          shieldedInstanceConfig=
4082                                          shielded_instance_config)
4083
4084      if autoprovisioning_locations:
4085        autoscaling.autoprovisioningLocations = \
4086            sorted(autoprovisioning_locations)
4087
4088    if options.autoscaling_profile is not None:
4089      autoscaling.autoscalingProfile = \
4090          self.CreateAutoscalingProfileCommon(options)
4091
4092    self.ValidateClusterAutoscaling(autoscaling, for_update)
4093    return autoscaling
4094
4095  def ParseNodePools(self, options, node_config):
4096    """Creates a list of node pools for the cluster by parsing options.
4097
4098    Args:
4099      options: cluster creation options
4100      node_config: node configuration for nodes in the node pools
4101
4102    Returns:
4103      List of node pools.
4104    """
4105    max_nodes_per_pool = options.max_nodes_per_pool or MAX_NODES_PER_POOL
4106    num_pools = (options.num_nodes + max_nodes_per_pool -
4107                 1) // max_nodes_per_pool
4108    # pool consistency with server default
4109    node_pool_name = options.node_pool_name or 'default-pool'
4110
4111    if num_pools == 1:
4112      pool_names = [node_pool_name]
4113    else:
4114      # default-pool-0, -1, ... or some-pool-0, -1 where some-pool is user
4115      # supplied
4116      pool_names = [
4117          '{0}-{1}'.format(node_pool_name, i) for i in range(0, num_pools)
4118      ]
4119
4120    pools = []
4121    nodes_per_pool = (options.num_nodes + num_pools - 1) // len(pool_names)
4122    to_add = options.num_nodes
4123    for name in pool_names:
4124      nodes = nodes_per_pool if (to_add > nodes_per_pool) else to_add
4125      pool = self.messages.NodePool(
4126          name=name,
4127          initialNodeCount=nodes,
4128          config=node_config,
4129          version=options.node_version,
4130          management=self._GetNodeManagement(options))
4131      if options.enable_autoscaling:
4132        pool.autoscaling = self.messages.NodePoolAutoscaling(
4133            enabled=options.enable_autoscaling,
4134            minNodeCount=options.min_nodes,
4135            maxNodeCount=options.max_nodes)
4136      if options.max_pods_per_node:
4137        if not options.enable_ip_alias:
4138          raise util.Error(MAX_PODS_PER_NODE_WITHOUT_IP_ALIAS_ERROR_MSG)
4139        pool.maxPodsConstraint = self.messages.MaxPodsConstraint(
4140            maxPodsPerNode=options.max_pods_per_node)
4141      if (options.max_surge_upgrade is not None or
4142          options.max_unavailable_upgrade is not None):
4143        pool.upgradeSettings = self.messages.UpgradeSettings()
4144        pool.upgradeSettings.maxSurge = options.max_surge_upgrade
4145        pool.upgradeSettings.maxUnavailable = options.max_unavailable_upgrade
4146      pools.append(pool)
4147      to_add -= nodes
4148    return pools
4149
4150  def GetIamPolicy(self, cluster_ref):
4151    return self.client.projects.GetIamPolicy(
4152        self.messages.ContainerProjectsGetIamPolicyRequest(
4153            resource=ProjectLocationCluster(cluster_ref.projectId, cluster_ref
4154                                            .zone, cluster_ref.clusterId)))
4155
4156  def SetIamPolicy(self, cluster_ref, policy):
4157    return self.client.projects.SetIamPolicy(
4158        self.messages.ContainerProjectsSetIamPolicyRequest(
4159            googleIamV1SetIamPolicyRequest=self.messages
4160            .GoogleIamV1SetIamPolicyRequest(policy=policy),
4161            resource=ProjectLocationCluster(cluster_ref.projectId,
4162                                            cluster_ref.zone,
4163                                            cluster_ref.clusterId)))
4164
4165
4166def _GetCloudRunLoadBalancerType(options, messages):
4167  if options.cloud_run_config is not None:
4168    input_load_balancer_type = options.cloud_run_config.get(
4169        'load-balancer-type')
4170    if input_load_balancer_type is not None:
4171      if input_load_balancer_type == 'INTERNAL':
4172        return messages.CloudRunConfig.LoadBalancerTypeValueValuesEnum.LOAD_BALANCER_TYPE_INTERNAL
4173      return messages.CloudRunConfig.LoadBalancerTypeValueValuesEnum.LOAD_BALANCER_TYPE_EXTERNAL
4174  return None
4175
4176
4177def _AddMetadataToNodeConfig(node_config, options):
4178  if not options.metadata:
4179    return
4180  metadata = node_config.MetadataValue()
4181  props = []
4182  for key, value in six.iteritems(options.metadata):
4183    props.append(metadata.AdditionalProperty(key=key, value=value))
4184  metadata.additionalProperties = props
4185  node_config.metadata = metadata
4186
4187
4188def _AddNodeLabelsToNodeConfig(node_config, options):
4189  if options.node_labels is None:
4190    return
4191  labels = node_config.LabelsValue()
4192  props = []
4193  for key, value in six.iteritems(options.node_labels):
4194    props.append(labels.AdditionalProperty(key=key, value=value))
4195  labels.additionalProperties = props
4196  node_config.labels = labels
4197
4198
4199def _AddLinuxNodeConfigToNodeConfig(node_config, options, messages):
4200  """Adds LinuxNodeConfig to NodeConfig."""
4201
4202  # Linux kernel parameters (sysctls).
4203  if options.linux_sysctls:
4204    if not node_config.linuxNodeConfig:
4205      node_config.linuxNodeConfig = messages.LinuxNodeConfig()
4206    linux_sysctls = node_config.linuxNodeConfig.SysctlsValue()
4207    props = []
4208    for key, value in six.iteritems(options.linux_sysctls):
4209      props.append(linux_sysctls.AdditionalProperty(key=key, value=value))
4210    linux_sysctls.additionalProperties = props
4211
4212    node_config.linuxNodeConfig.sysctls = linux_sysctls
4213
4214
4215def _AddShieldedInstanceConfigToNodeConfig(node_config, options, messages):
4216  """Adds ShieldedInstanceConfig to NodeConfig."""
4217  if (options.shielded_secure_boot is not None or
4218      options.shielded_integrity_monitoring is not None):
4219    node_config.shieldedInstanceConfig = messages.ShieldedInstanceConfig()
4220    if options.shielded_secure_boot is not None:
4221      node_config.shieldedInstanceConfig.enableSecureBoot = (
4222          options.shielded_secure_boot)
4223    else:
4224      # Workaround for API proto3->proto2 conversion, preserve enableSecureBoot
4225      # default value.
4226      #
4227      # When shieldedInstanceConfig is set in API request, server-side
4228      # defaulting logic won't kick in. Instead, default proto values for unset
4229      # fields will be used.
4230      # By default, enableSecureBoot should be true. But if it's not set in
4231      # shieldedInstanceConfig, it defaults to false on proto conversion in the
4232      # API. Always send it as true explicitly when flag isn't set (is None).
4233      node_config.shieldedInstanceConfig.enableSecureBoot = True
4234    if options.shielded_integrity_monitoring is not None:
4235      node_config.shieldedInstanceConfig.enableIntegrityMonitoring = (
4236          options.shielded_integrity_monitoring)
4237
4238
4239def _AddReservationAffinityToNodeConfig(node_config, options, messages):
4240  """Adds ReservationAffinity to NodeConfig."""
4241  affinity = options.reservation_affinity
4242  if options.reservation and affinity != 'specific':
4243    raise util.Error(
4244        RESERVATION_AFFINITY_NON_SPECIFIC_WITH_RESERVATION_NAME_ERROR_MSG
4245        .format(affinity=affinity))
4246
4247  if not options.reservation and affinity == 'specific':
4248    raise util.Error(
4249        RESERVATION_AFFINITY_SPECIFIC_WITHOUT_RESERVATION_NAME_ERROR_MSG)
4250
4251  if affinity == 'none':
4252    node_config.reservationAffinity = messages.ReservationAffinity(
4253        consumeReservationType=messages.ReservationAffinity
4254        .ConsumeReservationTypeValueValuesEnum.NO_RESERVATION)
4255  elif affinity == 'any':
4256    node_config.reservationAffinity = messages.ReservationAffinity(
4257        consumeReservationType=messages.ReservationAffinity
4258        .ConsumeReservationTypeValueValuesEnum.ANY_RESERVATION)
4259  elif affinity == 'specific':
4260    node_config.reservationAffinity = messages.ReservationAffinity(
4261        consumeReservationType=messages.ReservationAffinity
4262        .ConsumeReservationTypeValueValuesEnum.SPECIFIC_RESERVATION,
4263        key='compute.googleapis.com/reservation-name',
4264        values=[options.reservation])
4265
4266
4267def _AddSandboxConfigToNodeConfig(node_config, options, messages):
4268  """Adds SandboxConfig to NodeConfig."""
4269  if options.sandbox is not None:
4270    if 'type' not in options.sandbox:
4271      raise util.Error(SANDBOX_TYPE_NOT_PROVIDED)
4272    sandbox_types = {
4273        'unspecified': messages.SandboxConfig.TypeValueValuesEnum.UNSPECIFIED,
4274        'gvisor': messages.SandboxConfig.TypeValueValuesEnum.GVISOR,
4275    }
4276    if options.sandbox['type'] not in sandbox_types:
4277      raise util.Error(
4278          SANDBOX_TYPE_NOT_SUPPORTED.format(type=options.sandbox['type']))
4279    node_config.sandboxConfig = messages.SandboxConfig(
4280        type=sandbox_types[options.sandbox['type']])
4281
4282
4283def _AddNotificationConfigToCluster(cluster, options, messages):
4284  """Adds notification config to Cluster."""
4285  nc = options.notification_config
4286  if nc is not None:
4287    pubsub = messages.PubSub()
4288    if 'pubsub' in nc:
4289      pubsub.enabled = nc['pubsub'] == 'ENABLED'
4290    if 'pubsub-topic' in nc:
4291      pubsub.topic = nc['pubsub-topic']
4292    cluster.notificationConfig = messages.NotificationConfig(pubsub=pubsub)
4293
4294
4295def _GetReleaseChannel(options, messages):
4296  """Gets the ReleaseChannel from options."""
4297  if options.release_channel is not None:
4298    channels = {
4299        'rapid': messages.ReleaseChannel.ChannelValueValuesEnum.RAPID,
4300        'regular': messages.ReleaseChannel.ChannelValueValuesEnum.REGULAR,
4301        'stable': messages.ReleaseChannel.ChannelValueValuesEnum.STABLE,
4302        'None': messages.ReleaseChannel.ChannelValueValuesEnum.UNSPECIFIED,
4303    }
4304    return messages.ReleaseChannel(channel=channels[options.release_channel])
4305
4306
4307def _GetNotificationConfigForClusterUpdate(options, messages):
4308  """Gets the NotificationConfig from update options."""
4309  nc = options.notification_config
4310  if nc is not None:
4311    pubsub = messages.PubSub()
4312    if 'pubsub' in nc:
4313      pubsub.enabled = nc['pubsub'] == 'ENABLED'
4314    if 'pubsub-topic' in nc:
4315      pubsub.topic = nc['pubsub-topic']
4316    return messages.NotificationConfig(pubsub=pubsub)
4317
4318
4319def _GetTpuConfigForClusterUpdate(options, messages):
4320  """Gets the TpuConfig from update options."""
4321  if options.enable_tpu is not None:
4322    if options.tpu_ipv4_cidr and options.enable_tpu_service_networking:
4323      raise util.Error(TPU_SERVING_MODE_ERROR)
4324    return messages.TpuConfig(
4325        enabled=options.enable_tpu,
4326        ipv4CidrBlock=options.tpu_ipv4_cidr,
4327        useServiceNetworking=options.enable_tpu_service_networking,
4328    )
4329
4330
4331def _GetMasterForClusterCreate(options, messages):
4332  """Gets the Master from create options."""
4333  if options.master_logs is not None or options.enable_master_metrics is not None:
4334    config = messages.MasterSignalsConfig()
4335
4336    if options.master_logs is not None:
4337      if APISERVER in options.master_logs:
4338        config.logEnabledComponents.append(
4339            messages.MasterSignalsConfig
4340            .LogEnabledComponentsValueListEntryValuesEnum.APISERVER)
4341      if SCHEDULER in options.master_logs:
4342        config.logEnabledComponents.append(
4343            messages.MasterSignalsConfig
4344            .LogEnabledComponentsValueListEntryValuesEnum.SCHEDULER)
4345      if CONTROLLER_MANAGER in options.master_logs:
4346        config.logEnabledComponents.append(
4347            messages.MasterSignalsConfig
4348            .LogEnabledComponentsValueListEntryValuesEnum.CONTROLLER_MANAGER)
4349      if ADDON_MANAGER in options.master_logs:
4350        config.logEnabledComponents.append(
4351            messages.MasterSignalsConfig
4352            .LogEnabledComponentsValueListEntryValuesEnum.ADDON_MANAGER)
4353    if options.enable_master_metrics is not None:
4354      config.enableMetrics = options.enable_master_metrics
4355    return messages.Master(signalsConfig=config)
4356
4357
4358def _GetMasterForClusterUpdate(options, messages):
4359  """Gets the Master from update options."""
4360  if options.no_master_logs:
4361    options.master_logs = []
4362  if options.master_logs is not None:
4363    config = messages.MasterSignalsConfig()
4364    if APISERVER in options.master_logs:
4365      config.logEnabledComponents.append(
4366          messages.MasterSignalsConfig
4367          .LogEnabledComponentsValueListEntryValuesEnum.APISERVER)
4368    if SCHEDULER in options.master_logs:
4369      config.logEnabledComponents.append(
4370          messages.MasterSignalsConfig
4371          .LogEnabledComponentsValueListEntryValuesEnum.SCHEDULER)
4372    if CONTROLLER_MANAGER in options.master_logs:
4373      config.logEnabledComponents.append(
4374          messages.MasterSignalsConfig
4375          .LogEnabledComponentsValueListEntryValuesEnum.CONTROLLER_MANAGER)
4376    if ADDON_MANAGER in options.master_logs:
4377      config.logEnabledComponents.append(
4378          messages.MasterSignalsConfig
4379          .LogEnabledComponentsValueListEntryValuesEnum.ADDON_MANAGER)
4380    return messages.Master(signalsConfig=config)
4381
4382  if options.enable_master_metrics is not None:
4383    config = messages.MasterSignalsConfig(
4384        enableMetrics=options.enable_master_metrics,
4385        logEnabledComponents=[
4386            messages.MasterSignalsConfig
4387            .LogEnabledComponentsValueListEntryValuesEnum.COMPONENT_UNSPECIFIED
4388        ])
4389    return messages.Master(signalsConfig=config)
4390
4391
4392def _GetKubernetesObjectsExportConfigForClusterCreate(options, messages):
4393  """Gets the KubernetesObjectsExportConfig from create options."""
4394  if options.kubernetes_objects_changes_target is not None or options.kubernetes_objects_snapshots_target is not None:
4395    config = messages.KubernetesObjectsExportConfig()
4396    if options.kubernetes_objects_changes_target is not None:
4397      config.kubernetesObjectsChangesTarget = options.kubernetes_objects_changes_target
4398    if options.kubernetes_objects_snapshots_target is not None:
4399      config.kubernetesObjectsSnapshotsTarget = options.kubernetes_objects_snapshots_target
4400    return config
4401
4402
4403def _GetKubernetesObjectsExportConfigForClusterUpdate(options, messages):
4404  """Gets the KubernetesObjectsExportConfig from update options."""
4405  if options.kubernetes_objects_changes_target is not None or options.kubernetes_objects_snapshots_target is not None:
4406    changes_target = None
4407    snapshots_target = None
4408    if options.kubernetes_objects_changes_target is not None:
4409      changes_target = options.kubernetes_objects_changes_target
4410      if changes_target == 'NONE':
4411        changes_target = ''
4412    if options.kubernetes_objects_snapshots_target is not None:
4413      snapshots_target = options.kubernetes_objects_snapshots_target
4414      if snapshots_target == 'NONE':
4415        snapshots_target = ''
4416    return messages.KubernetesObjectsExportConfig(
4417        kubernetesObjectsSnapshotsTarget=snapshots_target,
4418        kubernetesObjectsChangesTarget=changes_target)
4419
4420
4421def _AddPSCPrivateClustersOptionsToClusterForCreateCluster(
4422    cluster, options, messages):
4423  """Adds all PSC private cluster options to cluster during create cluster."""
4424  if options.private_endpoint_subnetwork is not None:
4425    cluster.privateClusterConfig.privateEndpointSubnetwork = options.private_endpoint_subnetwork
4426  if options.cross_connect_subnetworks is not None:
4427    items = []
4428    for subnetwork in sorted(options.cross_connect_subnetworks):
4429      items.append(messages.CrossConnectItem(subnetwork=subnetwork))
4430    cluster.privateClusterConfig.crossConnectConfig = messages.CrossConnectConfig(
4431        items=items)
4432
4433
4434def ProjectLocation(project, location):
4435  return 'projects/' + project + '/locations/' + location
4436
4437
4438def ProjectLocationCluster(project, location, cluster):
4439  return ProjectLocation(project, location) + '/clusters/' + cluster
4440
4441
4442def ProjectLocationClusterNodePool(project, location, cluster, nodepool):
4443  return (ProjectLocationCluster(project, location, cluster) + '/nodePools/' +
4444          nodepool)
4445
4446
4447def ProjectLocationOperation(project, location, operation):
4448  return ProjectLocation(project, location) + '/operations/' + operation
4449