// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1alpha3

import (
	"fmt"
	"math"
	"strconv"
	"strings"

	apiv2 "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	auth "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth"
	v2Cluster "github.com/envoyproxy/go-control-plane/envoy/api/v2/cluster"
	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	endpoint "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
	envoy_type "github.com/envoyproxy/go-control-plane/envoy/type"
	"github.com/gogo/protobuf/types"
	structpb "github.com/golang/protobuf/ptypes/struct"
	"github.com/golang/protobuf/ptypes/wrappers"

	meshconfig "istio.io/api/mesh/v1alpha1"
	networking "istio.io/api/networking/v1alpha3"
	"istio.io/pkg/log"

	"istio.io/istio/pilot/pkg/features"
	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pilot/pkg/networking/core/v1alpha3/envoyfilter"
	"istio.io/istio/pilot/pkg/networking/core/v1alpha3/loadbalancer"
	"istio.io/istio/pilot/pkg/networking/plugin"
	"istio.io/istio/pilot/pkg/networking/util"
	authn_model "istio.io/istio/pilot/pkg/security/model"
	"istio.io/istio/pilot/pkg/serviceregistry"
	"istio.io/istio/pkg/config/constants"
	"istio.io/istio/pkg/config/host"
	"istio.io/istio/pkg/config/labels"
	"istio.io/istio/pkg/config/protocol"
	"istio.io/istio/pkg/util/gogo"
)

const (
	// DefaultLbType set to round robin
	DefaultLbType = networking.LoadBalancerSettings_ROUND_ROBIN

	// ManagementClusterHostname indicates the hostname used for building inbound clusters for management ports
	ManagementClusterHostname = "mgmtCluster"
)

var (
	// This disables circuit breaking by default by setting highest possible values.
	// See: https://www.envoyproxy.io/docs/envoy/v1.11.1/faq/disable_circuit_breaking
	defaultCircuitBreakerThresholds = v2Cluster.CircuitBreakers_Thresholds{
		// DefaultMaxRetries specifies the default for the Envoy circuit breaker parameter max_retries. This
		// defines the maximum number of parallel retries a given Envoy will allow to the upstream cluster. Envoy defaults
		// this value to 3, however that has shown to be insufficient during periods of pod churn (e.g. rolling updates),
		// where multiple endpoints in a cluster are terminated. In these scenarios the circuit breaker can kick
		// in before Pilot is able to deliver an updated endpoint list to Envoy, leading to client-facing 503s.
		MaxRetries:         &wrappers.UInt32Value{Value: math.MaxUint32},
		MaxRequests:        &wrappers.UInt32Value{Value: math.MaxUint32},
		MaxConnections:     &wrappers.UInt32Value{Value: math.MaxUint32},
		MaxPendingRequests: &wrappers.UInt32Value{Value: math.MaxUint32},
	}

	// defaultTransportSocketMatch applies to endpoints that have no security.istio.io/tlsMode label
	// or those whose label value does not match "istio"
	defaultTransportSocketMatch = &apiv2.Cluster_TransportSocketMatch{
		Name:  "tlsMode-disabled",
		Match: &structpb.Struct{},
		TransportSocket: &core.TransportSocket{
			Name: util.EnvoyRawBufferSocketName,
		},
	}
)

// getDefaultCircuitBreakerThresholds returns a copy of the default circuit breaker thresholds.
func getDefaultCircuitBreakerThresholds() *v2Cluster.CircuitBreakers_Thresholds {
	thresholds := defaultCircuitBreakerThresholds
	return &thresholds
}

// BuildClusters returns the list of clusters for the given proxy. This is the CDS output.
// For outbound: a cluster for each service/subset hostname or CIDR, with SNI set to the service hostname.
// The cluster type is based on the service's resolution.
// For inbound (sidecar only): a cluster for each inbound endpoint port and for each service port.
func (configgen *ConfigGeneratorImpl) BuildClusters(proxy *model.Proxy, push *model.PushContext) []*apiv2.Cluster {
	clusters := make([]*apiv2.Cluster, 0)
	cb := NewClusterBuilder(proxy, push)
	instances := proxy.ServiceInstances

	outboundClusters := configgen.buildOutboundClusters(proxy, push)

	switch proxy.Type {
	case model.SidecarProxy:
		// Add a blackhole and passthrough cluster for catching traffic to unresolved routes
		// DO NOT CALL PLUGINS for these two clusters.
		outboundClusters = append(outboundClusters, cb.buildBlackHoleCluster(), cb.buildDefaultPassthroughCluster())
		outboundClusters = envoyfilter.ApplyClusterPatches(networking.EnvoyFilter_SIDECAR_OUTBOUND, proxy, push, outboundClusters)
		// Let ServiceDiscovery decide which IP and Port are used for management if
		// there are multiple IPs
		managementPorts := make([]*model.Port, 0)
		for _, ip := range proxy.IPAddresses {
			managementPorts = append(managementPorts, push.ManagementPorts(ip)...)
		}
		inboundClusters := configgen.buildInboundClusters(proxy, push, instances, managementPorts)
		// Pass-through clusters for inbound traffic. These clusters bind to a loopback-ish source address to access node-local services.
		inboundClusters = append(inboundClusters, cb.buildInboundPassthroughClusters()...)
		inboundClusters = envoyfilter.ApplyClusterPatches(networking.EnvoyFilter_SIDECAR_INBOUND, proxy, push, inboundClusters)
		clusters = append(clusters, outboundClusters...)
		clusters = append(clusters, inboundClusters...)

	default: // Gateways
		// Gateways do not require the default passthrough cluster as they do not have original dst listeners.
		outboundClusters = append(outboundClusters, cb.buildBlackHoleCluster())
		if proxy.Type == model.Router && proxy.GetRouterMode() == model.SniDnatRouter {
			outboundClusters = append(outboundClusters, configgen.buildOutboundSniDnatClusters(proxy, push)...)
		}
		outboundClusters = envoyfilter.ApplyClusterPatches(networking.EnvoyFilter_GATEWAY, proxy, push, outboundClusters)
		clusters = outboundClusters
	}

	clusters = normalizeClusters(push, proxy, clusters)

	return clusters
}

// normalizeClusters resolves cluster name conflicts. There can be duplicate cluster names if there are conflicting service definitions.
// For any clusters that share the same name, the first cluster is kept and the others are discarded.
func normalizeClusters(metrics model.Metrics, proxy *model.Proxy, clusters []*apiv2.Cluster) []*apiv2.Cluster {
	have := make(map[string]bool)
	out := make([]*apiv2.Cluster, 0, len(clusters))
	for _, cluster := range clusters {
		if !have[cluster.Name] {
			out = append(out, cluster)
		} else {
			metrics.AddMetric(model.DuplicatedClusters, cluster.Name, proxy,
				fmt.Sprintf("Duplicate cluster %s found while pushing CDS", cluster.Name))
		}
		have[cluster.Name] = true
	}
	return out
}

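// buildOutboundClusters builds one default cluster per (visible service, service port) pair,
// plus any subset clusters derived from DestinationRules, skipping UDP ports.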
func (configgen *ConfigGeneratorImpl) buildOutboundClusters(proxy *model.Proxy, push *model.PushContext) []*apiv2.Cluster {
	clusters := make([]*apiv2.Cluster, 0)
	cb := NewClusterBuilder(proxy, push)
	inputParams := &plugin.InputParams{
		Push: push,
		Node: proxy,
	}
	networkView := model.GetNetworkView(proxy)

	var services []*model.Service
	if features.FilterGatewayClusterConfig && proxy.Type == model.Router {
		services = push.GatewayServices(proxy)
	} else {
		services = push.Services(proxy)
	}
	for _, service := range services {
		for _, port := range service.Ports {
			if port.Protocol == protocol.UDP {
				continue
			}
			inputParams.Service = service
			inputParams.Port = port

			lbEndpoints := buildLocalityLbEndpoints(proxy, push, networkView, service, port.Port, nil)

			// create default cluster
			discoveryType := convertResolution(proxy, service)
			clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port.Port)
			defaultCluster := cb.buildDefaultCluster(clusterName, discoveryType, lbEndpoints, model.TrafficDirectionOutbound, port, service.MeshExternal)
			if defaultCluster == nil {
				continue
			}
			// If stat name is configured, build the alternate stats name.
			if len(push.Mesh.OutboundClusterStatName) != 0 {
				defaultCluster.AltStatName = util.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", port, service.Attributes)
			}

			setUpstreamProtocol(proxy, defaultCluster, port, model.TrafficDirectionOutbound)
			clusters = append(clusters, defaultCluster)
			subsetClusters := cb.applyDestinationRule(proxy, defaultCluster, DefaultClusterMode, service, port, networkView)

			// call plugins for subset clusters.
			for _, subsetCluster := range subsetClusters {
				for _, p := range configgen.Plugins {
					p.OnOutboundCluster(inputParams, subsetCluster)
				}
			}
			clusters = append(clusters, subsetClusters...)

			// call plugins for the default cluster.
			for _, p := range configgen.Plugins {
				p.OnOutboundCluster(inputParams, defaultCluster)
			}
		}
	}

	return clusters
}

// SniDnat clusters do not have any TLS settings, as they simply forward traffic to the upstream.
// All SniDnat clusters are internal services in the mesh.
func (configgen *ConfigGeneratorImpl) buildOutboundSniDnatClusters(proxy *model.Proxy, push *model.PushContext) []*apiv2.Cluster {
	clusters := make([]*apiv2.Cluster, 0)
	cb := NewClusterBuilder(proxy, push)

	networkView := model.GetNetworkView(proxy)

	for _, service := range push.Services(proxy) {
		if service.MeshExternal {
			continue
		}
		for _, port := range service.Ports {
			if port.Protocol == protocol.UDP {
				continue
			}
			lbEndpoints := buildLocalityLbEndpoints(proxy, push, networkView, service, port.Port, nil)

			// create default cluster
			discoveryType := convertResolution(proxy, service)

			clusterName := model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port.Port)
			defaultCluster := cb.buildDefaultCluster(clusterName, discoveryType, lbEndpoints, model.TrafficDirectionOutbound, nil, service.MeshExternal)
			if defaultCluster == nil {
				continue
			}
			clusters = append(clusters, defaultCluster)
			clusters = append(clusters, cb.applyDestinationRule(proxy, defaultCluster, SniDnatClusterMode, service, port, networkView)...)
		}
	}

	return clusters
}

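// buildLocalityLbEndpoints builds locality-grouped LbEndpoints for DNS-resolved services,
// filtered by the proxy's network view and, for cluster-local services, by cluster ID.
// It returns nil for services that are not DNSLB-resolved.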
func buildLocalityLbEndpoints(proxy *model.Proxy, push *model.PushContext, proxyNetworkView map[string]bool, service *model.Service,
	port int, labels labels.Collection) []*endpoint.LocalityLbEndpoints {
	if service.Resolution != model.DNSLB {
		return nil
	}

	instances, err := push.InstancesByPort(service, port, labels)
	if err != nil {
		log.Errorf("failed to retrieve instances for %s: %v", service.Hostname, err)
		return nil
	}

	// Determine whether or not the target service is considered local to the cluster
	// and should, therefore, not be accessed from outside the cluster.
	isClusterLocal := push.IsClusterLocal(service)

	lbEndpoints := make(map[string][]*endpoint.LbEndpoint)
	for _, instance := range instances {
		// Only send endpoints from the networks in the network view requested by the proxy.
		// The default network view assigned to the Proxy is the UnnamedNetwork (""), which matches
		// the default network assigned to endpoints that don't have an explicit network
		if !proxyNetworkView[instance.Endpoint.Network] {
			// Endpoint's network doesn't match the set of networks that the proxy wants to see.
			continue
		}
		// If the downstream service is configured as cluster-local, only include endpoints that
		// reside in the same cluster.
		if isClusterLocal && (proxy.ClusterID != instance.Endpoint.Locality.ClusterID) {
			continue
		}
		addr := util.BuildAddress(instance.Endpoint.Address, instance.Endpoint.EndpointPort)
		ep := &endpoint.LbEndpoint{
			HostIdentifier: &endpoint.LbEndpoint_Endpoint{
				Endpoint: &endpoint.Endpoint{
					Address: addr,
				},
			},
			LoadBalancingWeight: &wrappers.UInt32Value{
				Value: 1,
			},
		}
		if instance.Endpoint.LbWeight > 0 {
			ep.LoadBalancingWeight.Value = instance.Endpoint.LbWeight
		}
		ep.Metadata = util.BuildLbEndpointMetadata(instance.Endpoint.UID, instance.Endpoint.Network, instance.Endpoint.TLSMode, push)
		locality := instance.Endpoint.Locality.Label
		lbEndpoints[locality] = append(lbEndpoints[locality], ep)
	}

	localityLbEndpoints := make([]*endpoint.LocalityLbEndpoints, 0, len(lbEndpoints))

	for locality, eps := range lbEndpoints {
		var weight uint32
		for _, ep := range eps {
			weight += ep.LoadBalancingWeight.GetValue()
		}
		localityLbEndpoints = append(localityLbEndpoints, &endpoint.LocalityLbEndpoints{
			Locality:    util.ConvertLocality(locality),
			LbEndpoints: eps,
			LoadBalancingWeight: &wrappers.UInt32Value{
				Value: weight,
			},
		})
	}

	return localityLbEndpoints
}

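// buildInboundLocalityLbEndpoints returns a single LocalityLbEndpoints entry pointing at the
// given bind address and port, used by the static inbound clusters.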
func buildInboundLocalityLbEndpoints(bind string, port uint32) []*endpoint.LocalityLbEndpoints {
	address := util.BuildAddress(bind, port)
	lbEndpoint := &endpoint.LbEndpoint{
		HostIdentifier: &endpoint.LbEndpoint_Endpoint{
			Endpoint: &endpoint.Endpoint{
				Address: address,
			},
		},
	}
	return []*endpoint.LocalityLbEndpoints{
		{
			LbEndpoints: []*endpoint.LbEndpoint{lbEndpoint},
		},
	}
}

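// buildInboundClusters builds the inbound (sidecar-only) clusters: one cluster per distinct
// service instance port, or one per ingress listener when the proxy's Sidecar resource defines
// custom ingress listeners, plus static clusters for the management (health check) ports.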
func (configgen *ConfigGeneratorImpl) buildInboundClusters(proxy *model.Proxy,
	push *model.PushContext, instances []*model.ServiceInstance, managementPorts []*model.Port) []*apiv2.Cluster {

	clusters := make([]*apiv2.Cluster, 0)
	cb := NewClusterBuilder(proxy, push)

	// The inbound clusters for a node depend on whether the node has a SidecarScope with inbound listeners
	// or not. If the node has a SidecarScope with ingress listeners, we only return clusters corresponding
	// to those listeners, i.e. clusters made out of the defaultEndpoint field.
	// If the node has no SidecarScope and has interception mode set to NONE, then we should skip the inbound
	// clusters, because there would be no corresponding inbound listeners.
	sidecarScope := proxy.SidecarScope
	noneMode := proxy.GetInterceptionMode() == model.InterceptionNone

	_, actualLocalHost := getActualWildcardAndLocalHost(proxy)

	if !sidecarScope.HasCustomIngressListeners {
		// No user supplied sidecar scope or the user supplied one has no ingress listeners

		// We should not create inbound listeners in NONE mode based on the service instances
		// Doing so will prevent the workloads from starting as they would be listening on the same port
		// Users are required to provide the sidecar config to define the inbound listeners
		if noneMode {
			return nil
		}

		have := make(map[*model.Port]bool)
		for _, instance := range instances {
			// Filter out service instances with the same port, as we are going to mark them as duplicates anyway
			// in the normalizeClusters method.
			if !have[instance.ServicePort] {
				pluginParams := &plugin.InputParams{
					Node:            proxy,
					ServiceInstance: instance,
					Port:            instance.ServicePort,
					Push:            push,
					Bind:            actualLocalHost,
				}
				localCluster := configgen.buildInboundClusterForPortOrUDS(pluginParams)
				clusters = append(clusters, localCluster)
				have[instance.ServicePort] = true
			}
		}

		// Add a passthrough cluster for traffic to management ports (health check ports)
		for _, port := range managementPorts {
			clusterName := model.BuildSubsetKey(model.TrafficDirectionInbound, port.Name,
				ManagementClusterHostname, port.Port)
			localityLbEndpoints := buildInboundLocalityLbEndpoints(actualLocalHost, uint32(port.Port))
			mgmtCluster := cb.buildDefaultCluster(clusterName, apiv2.Cluster_STATIC, localityLbEndpoints,
				model.TrafficDirectionInbound, nil, false)
			setUpstreamProtocol(proxy, mgmtCluster, port, model.TrafficDirectionInbound)
			clusters = append(clusters, mgmtCluster)
		}
	} else {
		rule := sidecarScope.Config.Spec.(*networking.Sidecar)
		for _, ingressListener := range rule.Ingress {
			// LDS would have set up the inbound clusters
			// as inbound|portNumber|portName|Hostname[or]SidecarScopeID
			listenPort := &model.Port{
				Port:     int(ingressListener.Port.Number),
				Protocol: protocol.Parse(ingressListener.Port.Protocol),
				Name:     ingressListener.Port.Name,
			}

			// When building an inbound cluster for the ingress listener, we take the defaultEndpoint specified
			// by the user and parse it into host:port or a unix domain socket
			// The default endpoint can be 127.0.0.1:port or :port or a unix domain socket
			endpointAddress := actualLocalHost
			port := 0
			var err error
			if strings.HasPrefix(ingressListener.DefaultEndpoint, model.UnixAddressPrefix) {
				// This is a UDS endpoint; assign it as is.
				endpointAddress = ingressListener.DefaultEndpoint
			} else {
				// Parse the IP and port. Validation guarantees the presence of ':'.
				parts := strings.Split(ingressListener.DefaultEndpoint, ":")
				if len(parts) < 2 {
					continue
				}
				if port, err = strconv.Atoi(parts[1]); err != nil {
					continue
				}
			}

			// Find the service instance that corresponds to this ingress listener by looking
			// for a service instance that matches this ingress port, as this will allow us
			// to generate the right cluster name that LDS expects: inbound|portNumber|portName|Hostname
			instance := configgen.findOrCreateServiceInstance(instances, ingressListener, sidecarScope.Config.Name, sidecarScope.Config.Namespace)
			instance.Endpoint.Address = endpointAddress
			instance.ServicePort = listenPort
			instance.Endpoint.ServicePortName = listenPort.Name
			instance.Endpoint.EndpointPort = uint32(port)

			pluginParams := &plugin.InputParams{
				Node:            proxy,
				ServiceInstance: instance,
				Port:            listenPort,
				Push:            push,
				Bind:            endpointAddress,
			}
			localCluster := configgen.buildInboundClusterForPortOrUDS(pluginParams)
			clusters = append(clusters, localCluster)
		}
	}

	return clusters
}

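// findOrCreateServiceInstance returns a copy of the service instance whose endpoint port matches
// the given ingress listener. If none matches, it returns a dummy instance named after the Sidecar
// resource so that the generated cluster name still lines up with what LDS expects.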
func (configgen *ConfigGeneratorImpl) findOrCreateServiceInstance(instances []*model.ServiceInstance,
	ingressListener *networking.IstioIngressListener, sidecar string, sidecarns string) *model.ServiceInstance {
	for _, realInstance := range instances {
		if realInstance.Endpoint.EndpointPort == ingressListener.Port.Number {
			// We need to create a copy of the instance, as it is modified later while building clusters/listeners.
			return realInstance.DeepCopy()
		}
	}
	// We didn't find a matching instance. Create a dummy one because we need the right
	// params to generate the right cluster name i.e. inbound|portNumber|portName|SidecarScopeID - which is uniformly generated by LDS/CDS.
	attrs := model.ServiceAttributes{
		Name: sidecar,
		// This will ensure that the right AuthN policies are selected
		Namespace: sidecarns,
	}
	return &model.ServiceInstance{
		Service: &model.Service{
			Hostname:   host.Name(sidecar + "." + sidecarns),
			Attributes: attrs,
		},
		Endpoint: &model.IstioEndpoint{},
	}
}

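// buildInboundClusterForPortOrUDS builds a static inbound cluster bound to the given local
// address (or unix domain socket), runs the inbound cluster plugins on it, and overlays any
// DestinationRule connection pool settings so that the server side honors user-specified
// circuit breakers.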
func (configgen *ConfigGeneratorImpl) buildInboundClusterForPortOrUDS(pluginParams *plugin.InputParams) *apiv2.Cluster {
	cb := NewClusterBuilder(pluginParams.Node, pluginParams.Push)
	instance := pluginParams.ServiceInstance
	clusterName := model.BuildSubsetKey(model.TrafficDirectionInbound, instance.ServicePort.Name,
		instance.Service.Hostname, instance.ServicePort.Port)
	localityLbEndpoints := buildInboundLocalityLbEndpoints(pluginParams.Bind, instance.Endpoint.EndpointPort)
	localCluster := cb.buildDefaultCluster(clusterName, apiv2.Cluster_STATIC, localityLbEndpoints,
		model.TrafficDirectionInbound, nil, false)
	// If stat name is configured, build the alternate stats name.
	if len(pluginParams.Push.Mesh.InboundClusterStatName) != 0 {
		localCluster.AltStatName = util.BuildStatPrefix(pluginParams.Push.Mesh.InboundClusterStatName,
			string(instance.Service.Hostname), "", instance.ServicePort, instance.Service.Attributes)
	}
	setUpstreamProtocol(pluginParams.Node, localCluster, instance.ServicePort, model.TrafficDirectionInbound)
	// call plugins
	for _, p := range configgen.Plugins {
		p.OnInboundCluster(pluginParams, localCluster)
	}

	// When users specify circuit breakers, they need to be set on the receiver end
	// (server side) as well as the client side, so that the server has enough capacity
	// (not the defaults) to handle the increased traffic volume
	// TODO: This is not foolproof - if instance is part of multiple services listening on same port,
	// choice of inbound cluster is arbitrary. So the connection pool settings may not apply cleanly.
	cfg := pluginParams.Push.DestinationRule(pluginParams.Node, instance.Service)
	if cfg != nil {
		destinationRule := cfg.Spec.(*networking.DestinationRule)
		if destinationRule.TrafficPolicy != nil {
			connectionPool, _, _, _ := SelectTrafficPolicyComponents(destinationRule.TrafficPolicy, instance.ServicePort)
			// only connection pool settings make sense on the inbound path.
			// upstream TLS settings/outlier detection/load balancer don't apply here.
			applyConnectionPool(pluginParams.Push, localCluster, connectionPool)
			localCluster.Metadata = util.BuildConfigInfoMetadata(cfg.ConfigMeta)
		}
	}
	return localCluster
}

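// convertResolution maps a service's resolution to the Envoy cluster discovery type.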
func convertResolution(proxy *model.Proxy, service *model.Service) apiv2.Cluster_DiscoveryType {
	switch service.Resolution {
	case model.ClientSideLB:
		return apiv2.Cluster_EDS
	case model.DNSLB:
		return apiv2.Cluster_STRICT_DNS
	case model.Passthrough:
		// Gateways cannot use passthrough clusters, so fall back to EDS.
		if proxy.Type == model.SidecarProxy {
			if service.Attributes.ServiceRegistry == string(serviceregistry.Kubernetes) && features.EnableEDSForHeadless {
				return apiv2.Cluster_EDS
			}

			return apiv2.Cluster_ORIGINAL_DST
		}
		return apiv2.Cluster_EDS
	default:
		return apiv2.Cluster_EDS
	}
}

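// mtlsContextType records whether the mTLS settings applied to a cluster were supplied by the
// user (e.g. via a DestinationRule) or auto-detected by Pilot.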
type mtlsContextType int

const (
	userSupplied mtlsContextType = iota
	autoDetected
)

// conditionallyConvertToIstioMtls fills key cert fields for all TLSSettings when the mode is `ISTIO_MUTUAL`.
// If the (input) TLS setting is nil (i.e. not set), *and* the service mTLS mode is STRICT, it also
// creates and populates the config as if they are set as ISTIO_MUTUAL.
func conditionallyConvertToIstioMtls(
	tls *networking.ClientTLSSettings,
	serviceAccounts []string,
	sni string,
	proxy *model.Proxy,
	autoMTLSEnabled bool,
	meshExternal bool,
	serviceMTLSMode model.MutualTLSMode,
	clusterDiscoveryType apiv2.Cluster_DiscoveryType) (*networking.ClientTLSSettings, mtlsContextType) {
	mtlsCtx := userSupplied
	if tls == nil {
		if meshExternal || !autoMTLSEnabled || serviceMTLSMode == model.MTLSUnknown || serviceMTLSMode == model.MTLSDisable {
			return nil, mtlsCtx
		}
		// Do not enable auto-mTLS when the cluster type is `Cluster_ORIGINAL_DST`, as
		// we don't know whether the headless service instance has a sidecar injected or not.
		if clusterDiscoveryType == apiv2.Cluster_ORIGINAL_DST {
			return nil, mtlsCtx
		}

		mtlsCtx = autoDetected
		// we will set up transport sockets later
		tls = &networking.ClientTLSSettings{
			Mode: networking.ClientTLSSettings_ISTIO_MUTUAL,
		}
	}
	if tls.Mode == networking.ClientTLSSettings_ISTIO_MUTUAL {
		// Use the client-provided SNI if set; otherwise, overwrite it with the auto-generated SNI.
		// User-specified SNIs in the Istio mTLS settings are useful when routing via gateways.
		sniToUse := tls.Sni
		if len(sniToUse) == 0 {
			sniToUse = sni
		}
		subjectAltNamesToUse := tls.SubjectAltNames
		if len(subjectAltNamesToUse) == 0 {
			subjectAltNamesToUse = serviceAccounts
		}
		return buildIstioMutualTLS(subjectAltNamesToUse, sniToUse, proxy), mtlsCtx
	}
	return tls, mtlsCtx
}

// buildIstioMutualTLS returns a `TLSSettings` for ISTIO_MUTUAL mode.
func buildIstioMutualTLS(serviceAccounts []string, sni string, proxy *model.Proxy) *networking.ClientTLSSettings {
	return &networking.ClientTLSSettings{
		Mode:              networking.ClientTLSSettings_ISTIO_MUTUAL,
		CaCertificates:    model.GetOrDefault(proxy.Metadata.TLSClientRootCert, constants.DefaultRootCert),
		ClientCertificate: model.GetOrDefault(proxy.Metadata.TLSClientCertChain, constants.DefaultCertChain),
		PrivateKey:        model.GetOrDefault(proxy.Metadata.TLSClientKey, constants.DefaultKey),
		SubjectAltNames:   serviceAccounts,
		Sni:               sni,
	}
}

// SelectTrafficPolicyComponents returns the components of TrafficPolicy that should be used for the given port.
func SelectTrafficPolicyComponents(policy *networking.TrafficPolicy, port *model.Port) (
	*networking.ConnectionPoolSettings, *networking.OutlierDetection, *networking.LoadBalancerSettings, *networking.ClientTLSSettings) {
	if policy == nil {
		return nil, nil, nil, nil
	}
	// Default to the traffic policy's settings.
	connectionPool := policy.ConnectionPool
	outlierDetection := policy.OutlierDetection
	loadBalancer := policy.LoadBalancer
	tls := policy.Tls

	// Check if port-level overrides exist; if so, override with them.
	if port != nil && len(policy.PortLevelSettings) > 0 {
		for _, p := range policy.PortLevelSettings {
			if p.Port != nil && uint32(port.Port) == p.Port.Number {
				connectionPool = p.ConnectionPool
				outlierDetection = p.OutlierDetection
				loadBalancer = p.LoadBalancer
				tls = p.Tls
				break
			}
		}
	}
	return connectionPool, outlierDetection, loadBalancer, tls
}

// ClusterMode defines whether the cluster is being built for SNI-DNATing (SNI passthrough) or not
type ClusterMode string

const (
	// SniDnatClusterMode indicates the cluster is being built for SNI dnat mode
	SniDnatClusterMode ClusterMode = "sni-dnat"
	// DefaultClusterMode indicates a usual cluster with mTLS et al
	DefaultClusterMode ClusterMode = "outbound"
)

type buildClusterOpts struct {
	push            *model.PushContext
	cluster         *apiv2.Cluster
	policy          *networking.TrafficPolicy
	port            *model.Port
	serviceAccounts []string
	// Used for traffic across multiple Istio clusters:
	// the ingress gateway in a remote cluster will use this value to route
	// traffic to the appropriate service
	istioMtlsSni string
	// This is used when the sidecar is sending simple TLS traffic
	// to endpoints. This is different from the previous SNI
	// because usually in this case the traffic is going to a
	// non-sidecar workload that can only understand the service's
	// hostname in the SNI.
	simpleTLSSni    string
	clusterMode     ClusterMode
	direction       model.TrafficDirection
	proxy           *model.Proxy
	meshExternal    bool
	serviceMTLSMode model.MutualTLSMode
}

type upgradeTuple struct {
	meshdefault meshconfig.MeshConfig_H2UpgradePolicy
	override    networking.ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy
}

// h2UpgradeMap specifies the truth table for when an upgrade takes place.
var h2UpgradeMap = map[upgradeTuple]bool{
	{meshconfig.MeshConfig_DO_NOT_UPGRADE, networking.ConnectionPoolSettings_HTTPSettings_UPGRADE}:        true,
	{meshconfig.MeshConfig_DO_NOT_UPGRADE, networking.ConnectionPoolSettings_HTTPSettings_DO_NOT_UPGRADE}: false,
	{meshconfig.MeshConfig_DO_NOT_UPGRADE, networking.ConnectionPoolSettings_HTTPSettings_DEFAULT}:        false,
	{meshconfig.MeshConfig_UPGRADE, networking.ConnectionPoolSettings_HTTPSettings_UPGRADE}:               true,
	{meshconfig.MeshConfig_UPGRADE, networking.ConnectionPoolSettings_HTTPSettings_DO_NOT_UPGRADE}:        false,
	{meshconfig.MeshConfig_UPGRADE, networking.ConnectionPoolSettings_HTTPSettings_DEFAULT}:               true,
}

// applyH2Upgrade upgrades the outbound cluster to HTTP/2 if specified by the configuration.
func applyH2Upgrade(opts buildClusterOpts, connectionPool *networking.ConnectionPoolSettings) {
	if shouldH2Upgrade(opts.cluster.Name, opts.direction, opts.port, opts.push.Mesh, connectionPool) {
		setH2Options(opts.cluster)
	}
}

// shouldH2Upgrade returns true if the cluster should be upgraded to HTTP/2.
func shouldH2Upgrade(clusterName string, direction model.TrafficDirection, port *model.Port, mesh *meshconfig.MeshConfig,
	connectionPool *networking.ConnectionPoolSettings) bool {
	if direction != model.TrafficDirectionOutbound {
		return false
	}

	// Do not upgrade non-HTTP ports.
	// This also ensures that we are only upgrading named ports so that
	// EnableProtocolSniffingForInbound does not interfere.
	// Protocol sniffing uses Cluster_USE_DOWNSTREAM_PROTOCOL.
	// Therefore, if the client upgrades the connection to HTTP/2, the server will send an h2 stream to the application,
	// even though the application only supports HTTP 1.1.
	if port != nil && !port.Protocol.IsHTTP() {
		return false
	}

	// TODO (mjog)
	// Upgrade if tls.GetMode() == networking.TLSSettings_ISTIO_MUTUAL
	override := networking.ConnectionPoolSettings_HTTPSettings_DEFAULT
	if connectionPool != nil && connectionPool.Http != nil {
		override = connectionPool.Http.H2UpgradePolicy
	}

	if !h2UpgradeMap[upgradeTuple{mesh.H2UpgradePolicy, override}] {
		log.Debugf("Not upgrading cluster: %v (%v %v)", clusterName, mesh.H2UpgradePolicy, override)
		return false
	}

	log.Debugf("Upgrading cluster: %v (%v %v)", clusterName, mesh.H2UpgradePolicy, override)
	return true
}

// setH2Options makes the cluster an h2 cluster by setting http2ProtocolOptions.
func setH2Options(cluster *apiv2.Cluster) {
	if cluster == nil || cluster.Http2ProtocolOptions != nil {
		return
	}
	cluster.Http2ProtocolOptions = &core.Http2ProtocolOptions{
		// Envoy default value of 100 is too low for data path.
		MaxConcurrentStreams: &wrappers.UInt32Value{
			Value: 1073741824,
		},
	}
}

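// applyTrafficPolicy applies the port-appropriate components of a traffic policy (connection
// pool, outlier detection, load balancer and, for outbound non-sni-dnat clusters, upstream TLS
// settings) to the given cluster.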
func applyTrafficPolicy(opts buildClusterOpts) {
	connectionPool, outlierDetection, loadBalancer, tls := SelectTrafficPolicyComponents(opts.policy, opts.port)

	applyH2Upgrade(opts, connectionPool)
	applyConnectionPool(opts.push, opts.cluster, connectionPool)
	applyOutlierDetection(opts.cluster, outlierDetection)
	applyLoadBalancer(opts.cluster, loadBalancer, opts.port, opts.proxy, opts.push.Mesh)

	if opts.clusterMode != SniDnatClusterMode && opts.direction != model.TrafficDirectionInbound {
		autoMTLSEnabled := opts.push.Mesh.GetEnableAutoMtls().Value
		var mtlsCtxType mtlsContextType
		tls, mtlsCtxType = conditionallyConvertToIstioMtls(tls, opts.serviceAccounts, opts.istioMtlsSni, opts.proxy,
			autoMTLSEnabled, opts.meshExternal, opts.serviceMTLSMode, opts.cluster.GetType())
		applyUpstreamTLSSettings(&opts, tls, mtlsCtxType, opts.proxy)
	}
}

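// applyConnectionPool translates ConnectionPoolSettings into Envoy circuit breaker thresholds,
// connect/idle timeouts, and TCP keepalive options on the cluster.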
// FIXME: there isn't a way to distinguish between unset values and zero values
func applyConnectionPool(push *model.PushContext, cluster *apiv2.Cluster, settings *networking.ConnectionPoolSettings) {
	if settings == nil {
		return
	}

	threshold := getDefaultCircuitBreakerThresholds()
	var idleTimeout *types.Duration

	if settings.Http != nil {
		if settings.Http.Http2MaxRequests > 0 {
			// Envoy only applies MaxRequests in HTTP/2 clusters
			threshold.MaxRequests = &wrappers.UInt32Value{Value: uint32(settings.Http.Http2MaxRequests)}
		}
		if settings.Http.Http1MaxPendingRequests > 0 {
			// Envoy only applies MaxPendingRequests in HTTP/1.1 clusters
			threshold.MaxPendingRequests = &wrappers.UInt32Value{Value: uint32(settings.Http.Http1MaxPendingRequests)}
		}

		if settings.Http.MaxRequestsPerConnection > 0 {
			cluster.MaxRequestsPerConnection = &wrappers.UInt32Value{Value: uint32(settings.Http.MaxRequestsPerConnection)}
		}

		// FIXME: zero is a valid value if explicitly set, otherwise we want to use the default
		if settings.Http.MaxRetries > 0 {
			threshold.MaxRetries = &wrappers.UInt32Value{Value: uint32(settings.Http.MaxRetries)}
		}

		idleTimeout = settings.Http.IdleTimeout
	}

	if settings.Tcp != nil {
		if settings.Tcp.ConnectTimeout != nil {
			cluster.ConnectTimeout = gogo.DurationToProtoDuration(settings.Tcp.ConnectTimeout)
		}

		if settings.Tcp.MaxConnections > 0 {
			threshold.MaxConnections = &wrappers.UInt32Value{Value: uint32(settings.Tcp.MaxConnections)}
		}

		applyTCPKeepalive(push, cluster, settings)
	}

	cluster.CircuitBreakers = &v2Cluster.CircuitBreakers{
		Thresholds: []*v2Cluster.CircuitBreakers_Thresholds{threshold},
	}

	if idleTimeout != nil {
		idleTimeoutDuration := gogo.DurationToProtoDuration(idleTimeout)
		cluster.CommonHttpProtocolOptions = &core.HttpProtocolOptions{IdleTimeout: idleTimeoutDuration}
	}
}

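// applyTCPKeepalive enables upstream TCP keepalive when configured, applying mesh-wide settings
// first and then overriding individual attributes with the DestinationRule's settings.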
func applyTCPKeepalive(push *model.PushContext, cluster *apiv2.Cluster, settings *networking.ConnectionPoolSettings) {
	// Apply Keepalive config only if it is configured in mesh config or in destination rule.
	if push.Mesh.TcpKeepalive != nil || settings.Tcp.TcpKeepalive != nil {

		// Start with empty tcp_keepalive, which would set SO_KEEPALIVE on the socket with OS default values.
		cluster.UpstreamConnectionOptions = &apiv2.UpstreamConnectionOptions{
			TcpKeepalive: &core.TcpKeepalive{},
		}

		// Apply mesh wide TCP keepalive if available.
		if push.Mesh.TcpKeepalive != nil {
			setKeepAliveSettings(cluster, push.Mesh.TcpKeepalive)
		}

		// Apply/Override individual attributes with DestinationRule TCP keepalive if set.
		if settings.Tcp.TcpKeepalive != nil {
			setKeepAliveSettings(cluster, settings.Tcp.TcpKeepalive)
		}
	}
}

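// setKeepAliveSettings copies the non-empty keepalive probe count, time, and interval values
// onto the cluster's upstream TcpKeepalive options.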
func setKeepAliveSettings(cluster *apiv2.Cluster, keepalive *networking.ConnectionPoolSettings_TCPSettings_TcpKeepalive) {
	if keepalive.Probes > 0 {
		cluster.UpstreamConnectionOptions.TcpKeepalive.KeepaliveProbes = &wrappers.UInt32Value{Value: keepalive.Probes}
	}

	if keepalive.Time != nil {
		cluster.UpstreamConnectionOptions.TcpKeepalive.KeepaliveTime = &wrappers.UInt32Value{Value: uint32(keepalive.Time.Seconds)}
	}

	if keepalive.Interval != nil {
		cluster.UpstreamConnectionOptions.TcpKeepalive.KeepaliveInterval = &wrappers.UInt32Value{Value: uint32(keepalive.Interval.Seconds)}
	}
}

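// applyOutlierDetection translates the OutlierDetection settings into Envoy's outlier detection
// config and sets the healthy panic threshold from MinHealthPercent (0 disables it).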
// FIXME: there isn't a way to distinguish between unset values and zero values
func applyOutlierDetection(cluster *apiv2.Cluster, outlier *networking.OutlierDetection) {
	if outlier == nil {
		return
	}

	out := &v2Cluster.OutlierDetection{}

	// SuccessRate-based outlier detection should be disabled.
	out.EnforcingSuccessRate = &wrappers.UInt32Value{Value: 0}

	if outlier.BaseEjectionTime != nil {
		out.BaseEjectionTime = gogo.DurationToProtoDuration(outlier.BaseEjectionTime)
	}
	if outlier.ConsecutiveErrors > 0 {
		// Only listen to gateway errors, see https://github.com/istio/api/pull/617
		out.EnforcingConsecutiveGatewayFailure = &wrappers.UInt32Value{Value: uint32(100)} // defaults to 0
		out.EnforcingConsecutive_5Xx = &wrappers.UInt32Value{Value: uint32(0)}             // defaults to 100
		out.ConsecutiveGatewayFailure = &wrappers.UInt32Value{Value: uint32(outlier.ConsecutiveErrors)}
	}

	if e := outlier.Consecutive_5XxErrors; e != nil {
		v := e.GetValue()

		out.Consecutive_5Xx = &wrappers.UInt32Value{Value: v}

		if v > 0 {
			v = 100
		}
		out.EnforcingConsecutive_5Xx = &wrappers.UInt32Value{Value: v}
	}
	if e := outlier.ConsecutiveGatewayErrors; e != nil {
		v := e.GetValue()

		out.ConsecutiveGatewayFailure = &wrappers.UInt32Value{Value: v}

		if v > 0 {
			v = 100
		}
		out.EnforcingConsecutiveGatewayFailure = &wrappers.UInt32Value{Value: v}
	}

	if outlier.Interval != nil {
		out.Interval = gogo.DurationToProtoDuration(outlier.Interval)
	}
	if outlier.MaxEjectionPercent > 0 {
		out.MaxEjectionPercent = &wrappers.UInt32Value{Value: uint32(outlier.MaxEjectionPercent)}
	}

	cluster.OutlierDetection = out

	// Disable the panic threshold by default, as it's not typically applicable in k8s environments
	// with few pods per service.
	// To do so, set the healthy_panic_threshold field even if its value is 0 (defaults to 50).
	// FIXME: we can't distinguish between it being unset or being explicitly set to 0
	if outlier.MinHealthPercent >= 0 {
		if cluster.CommonLbConfig == nil {
			cluster.CommonLbConfig = &apiv2.Cluster_CommonLbConfig{}
		}
		cluster.CommonLbConfig.HealthyPanicThreshold = &envoy_type.Percent{Value: float64(outlier.MinHealthPercent)} // defaults to 50
	}
}

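// applyLoadBalancer sets the cluster's LbPolicy from the LoadBalancerSettings, wiring up
// locality-aware load balancing and special-casing ORIGINAL_DST clusters (CLUSTER_PROVIDED),
// Redis ports (MAGLEV, when the Redis filter is enabled), and consistent hashing (RING_HASH).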
func applyLoadBalancer(cluster *apiv2.Cluster, lb *networking.LoadBalancerSettings, port *model.Port, proxy *model.Proxy, meshConfig *meshconfig.MeshConfig) {
	lbSetting := loadbalancer.GetLocalityLbSetting(meshConfig.GetLocalityLbSetting(), lb.GetLocalityLbSetting())
	if cluster.OutlierDetection != nil {
		if cluster.CommonLbConfig == nil {
			cluster.CommonLbConfig = &apiv2.Cluster_CommonLbConfig{}
		}
		// Locality-weighted load balancing - set it only if locality load balancing is enabled.
		if lbSetting != nil {
			cluster.CommonLbConfig.LocalityConfigSpecifier = &apiv2.Cluster_CommonLbConfig_LocalityWeightedLbConfig_{
				LocalityWeightedLbConfig: &apiv2.Cluster_CommonLbConfig_LocalityWeightedLbConfig{},
			}
		}
	}

	// Use locality lb settings from load balancer settings if present, else use mesh wide locality lb settings
	applyLocalityLBSetting(proxy.Locality, cluster, lbSetting)

	// The following order is important. If the cluster type has been identified as Original DST (since Resolution is PassThrough)
	// and the port is named redis-xxx, we would otherwise end up creating a cluster with type Original DST and LbPolicy MAGLEV,
	// which would be rejected by Envoy.

	// Original destination service discovery must be used with the original destination load balancer.
	if cluster.GetType() == apiv2.Cluster_ORIGINAL_DST {
		cluster.LbPolicy = apiv2.Cluster_CLUSTER_PROVIDED
		return
	}

	// Redis protocol must be defaulted with MAGLEV to benefit from client side sharding.
	if features.EnableRedisFilter && port != nil && port.Protocol == protocol.Redis {
		cluster.LbPolicy = apiv2.Cluster_MAGLEV
		return
	}

	if lb == nil {
		return
	}

	// Do not use if-else here, since lb.GetSimple returns an enum value (not a pointer).
	switch lb.GetSimple() {
	case networking.LoadBalancerSettings_LEAST_CONN:
		cluster.LbPolicy = apiv2.Cluster_LEAST_REQUEST
	case networking.LoadBalancerSettings_RANDOM:
		cluster.LbPolicy = apiv2.Cluster_RANDOM
	case networking.LoadBalancerSettings_ROUND_ROBIN:
		cluster.LbPolicy = apiv2.Cluster_ROUND_ROBIN
	case networking.LoadBalancerSettings_PASSTHROUGH:
		cluster.LbPolicy = apiv2.Cluster_CLUSTER_PROVIDED
		cluster.ClusterDiscoveryType = &apiv2.Cluster_Type{Type: apiv2.Cluster_ORIGINAL_DST}
	}

	consistentHash := lb.GetConsistentHash()
	if consistentHash != nil {
		// TODO: MinimumRingSize is an int, and zero could potentially be a valid value;
		// we are currently unable to distinguish between the set and unset cases (GregHanson).
		// 1024 is the default value for Envoy.
		minRingSize := &wrappers.UInt64Value{Value: 1024}
		if consistentHash.MinimumRingSize != 0 {
			minRingSize = &wrappers.UInt64Value{Value: consistentHash.GetMinimumRingSize()}
		}
		cluster.LbPolicy = apiv2.Cluster_RING_HASH
		cluster.LbConfig = &apiv2.Cluster_RingHashLbConfig_{
			RingHashLbConfig: &apiv2.Cluster_RingHashLbConfig{
				MinimumRingSize: minRingSize,
			},
		}
	}
}

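// applyLocalityLBSetting applies locality-weighted load balancing to the cluster's load
// assignment, enabling locality failover only when outlier detection is configured.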
func applyLocalityLBSetting(
	locality *core.Locality,
	cluster *apiv2.Cluster,
	localityLB *networking.LocalityLoadBalancerSetting,
) {
	if locality == nil || localityLB == nil {
		return
	}

	// Failover should only be applied with outlier detection, or traffic will never failover.
	enabledFailover := cluster.OutlierDetection != nil
	if cluster.LoadAssignment != nil {
		loadbalancer.ApplyLocalityLBSetting(locality, cluster.LoadAssignment, localityLB, enabledFailover)
	}
}

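// applyUpstreamTLSSettings configures the cluster's upstream transport socket from the client
// TLS settings: plaintext for DISABLE, server certificate verification for SIMPLE, and client
// certificates (file-mounted or served over SDS) for MUTUAL/ISTIO_MUTUAL, along with the
// appropriate ALPN advertisement. For auto-detected mTLS, it emits TransportSocketMatches keyed
// on the endpoint's tlsMode label instead of a single transport socket.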
func applyUpstreamTLSSettings(opts *buildClusterOpts, tls *networking.ClientTLSSettings, mtlsCtxType mtlsContextType, node *model.Proxy) {
	if tls == nil {
		return
	}

	cluster := opts.cluster
	proxy := opts.proxy

	certValidationContext := &auth.CertificateValidationContext{}
	var trustedCa *core.DataSource
	if len(tls.CaCertificates) != 0 {
		trustedCa = &core.DataSource{
			Specifier: &core.DataSource_Filename{
				Filename: model.GetOrDefault(proxy.Metadata.TLSClientRootCert, tls.CaCertificates),
			},
		}
	}
	if trustedCa != nil || len(tls.SubjectAltNames) > 0 {
		certValidationContext = &auth.CertificateValidationContext{
			TrustedCa:            trustedCa,
			MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames),
		}
	}

	tlsContext := &auth.UpstreamTlsContext{}
	switch tls.Mode {
	case networking.ClientTLSSettings_DISABLE:
		tlsContext = nil
	case networking.ClientTLSSettings_SIMPLE:
		tlsContext = &auth.UpstreamTlsContext{
			CommonTlsContext: &auth.CommonTlsContext{
				ValidationContextType: &auth.CommonTlsContext_ValidationContext{
					ValidationContext: certValidationContext,
				},
			},
			Sni: tls.Sni,
		}
		if cluster.Http2ProtocolOptions != nil {
			// This is an HTTP/2 cluster, advertise it with ALPN.
			tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNH2Only
		}
	case networking.ClientTLSSettings_MUTUAL, networking.ClientTLSSettings_ISTIO_MUTUAL:
		if tls.ClientCertificate == "" || tls.PrivateKey == "" {
			log.Errorf("failed to apply tls setting for %s: client certificate and private key must not be empty",
				cluster.Name)
			return
		}

		tlsContext = &auth.UpstreamTlsContext{
			CommonTlsContext: &auth.CommonTlsContext{},
			Sni:              tls.Sni,
		}

		// Fall back to file-mounted secrets instead of SDS if meshConfig.sdsUdsPath isn't set or tls.mode is TLSSettings_MUTUAL.
		if !node.Metadata.SdsEnabled || opts.push.Mesh.SdsUdsPath == "" {
			tlsContext.CommonTlsContext.ValidationContextType = &auth.CommonTlsContext_ValidationContext{
				ValidationContext: certValidationContext,
			}
			tlsContext.CommonTlsContext.TlsCertificates = []*auth.TlsCertificate{
				{
					CertificateChain: &core.DataSource{
						Specifier: &core.DataSource_Filename{
							Filename: model.GetOrDefault(proxy.Metadata.TLSClientCertChain, tls.ClientCertificate),
						},
					},
					PrivateKey: &core.DataSource{
						Specifier: &core.DataSource_Filename{
							Filename: model.GetOrDefault(proxy.Metadata.TLSClientKey, tls.PrivateKey),
						},
					},
				},
			}
		} else if tls.Mode == networking.ClientTLSSettings_MUTUAL {
			// These are certs being mounted from within the pod. Rather than reading directly in Envoy,
			// which does not support rotation, we will serve them over SDS by reading the files.
			res := model.SdsCertificateConfig{
				CertificatePath:   model.GetOrDefault(proxy.Metadata.TLSClientCertChain, tls.ClientCertificate),
				PrivateKeyPath:    model.GetOrDefault(proxy.Metadata.TLSClientKey, tls.PrivateKey),
				CaCertificatePath: model.GetOrDefault(proxy.Metadata.TLSClientRootCert, tls.CaCertificates),
			}
			tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs = append(tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs,
				authn_model.ConstructSdsSecretConfig(res.GetResourceName(), opts.push.Mesh.SdsUdsPath))

			tlsContext.CommonTlsContext.ValidationContextType = &auth.CommonTlsContext_CombinedValidationContext{
				CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{
					DefaultValidationContext:         &auth.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)},
					ValidationContextSdsSecretConfig: authn_model.ConstructSdsSecretConfig(res.GetRootResourceName(), opts.push.Mesh.SdsUdsPath),
				},
			}
		} else {
			tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs = append(tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs,
				authn_model.ConstructSdsSecretConfig(authn_model.SDSDefaultResourceName, opts.push.Mesh.SdsUdsPath))

			tlsContext.CommonTlsContext.ValidationContextType = &auth.CommonTlsContext_CombinedValidationContext{
				CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{
					DefaultValidationContext:         &auth.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)},
					ValidationContextSdsSecretConfig: authn_model.ConstructSdsSecretConfig(authn_model.SDSRootResourceName, opts.push.Mesh.SdsUdsPath),
				},
			}
		}

		// Set the default SNI of cluster name for istio_mutual if SNI is not set.
		if len(tls.Sni) == 0 && tls.Mode == networking.ClientTLSSettings_ISTIO_MUTUAL {
			tlsContext.Sni = cluster.Name
		}

		// The `istio-peer-exchange` ALPN is only used for mTLS communication between peers.
		// We add `istio-peer-exchange` to the list of ALPN strings.
		// The code has repeated snippets because we want to use predefined ALPN strings for efficiency.
		if cluster.Http2ProtocolOptions != nil {
			// This is an HTTP/2 in-mesh cluster, advertise it with ALPN.
			if tls.Mode == networking.ClientTLSSettings_ISTIO_MUTUAL {
				// Enable sending the `istio-peer-exchange` ALPN in the ALPN list if TCP
				// metadata exchange is enabled.
				if util.IsTCPMetadataExchangeEnabled(node) {
					tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMeshH2WithMxc
				} else {
					tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMeshH2
				}
			} else {
				tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNH2Only
			}
		} else if tls.Mode == networking.ClientTLSSettings_ISTIO_MUTUAL {
			// This is an in-mesh cluster, advertise it with ALPN.
			// Also, enable sending the `istio-peer-exchange` ALPN in the ALPN list if TCP
			// metadata exchange is enabled.
			if util.IsTCPMetadataExchangeEnabled(node) {
				tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMeshWithMxc
			} else {
				tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMesh
			}
		}
	}

	if tlsContext != nil {
		cluster.TransportSocket = &core.TransportSocket{
			Name:       util.EnvoyTLSSocketName,
			ConfigType: &core.TransportSocket_TypedConfig{TypedConfig: util.MessageToAny(tlsContext)},
		}
	}

	// For headless services, the discovery type will be `Cluster_ORIGINAL_DST`.
	// Apply auto-mTLS to clusters, excluding this kind of headless service.
	if cluster.GetType() != apiv2.Cluster_ORIGINAL_DST {
		// convert to a transport socket matcher if the mode was auto detected
		if tls.Mode == networking.ClientTLSSettings_ISTIO_MUTUAL && mtlsCtxType == autoDetected {
			transportSocket := cluster.TransportSocket
			cluster.TransportSocket = nil
			cluster.TransportSocketMatches = []*apiv2.Cluster_TransportSocketMatch{
				{
					Name: "tlsMode-" + model.IstioMutualTLSModeLabel,
					Match: &structpb.Struct{
						Fields: map[string]*structpb.Value{
							model.TLSModeLabelShortname: {Kind: &structpb.Value_StringValue{StringValue: model.IstioMutualTLSModeLabel}},
						},
					},
					TransportSocket: transportSocket,
				},
				defaultTransportSocketMatch,
			}
		} else {
			// Since previous calls to applyTrafficPolicy may have set TransportSocketMatches for a subset cluster,
			// make sure they are reset. See https://github.com/istio/istio/issues/23910
			cluster.TransportSocketMatches = nil
		}
	}
}

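// setUpstreamProtocol enables HTTP/2 for h2 ports and, for sidecars with protocol sniffing
// enabled on the port, makes the cluster reuse the downstream connection protocol.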
func setUpstreamProtocol(node *model.Proxy, cluster *apiv2.Cluster, port *model.Port, direction model.TrafficDirection) {
	if port.Protocol.IsHTTP2() {
		setH2Options(cluster)
	}

	// Add use_downstream_protocol for sidecar proxies only if protocol sniffing is enabled.
	// Protocol detection is disabled for gateways, and use_downstream_protocol is only meaningful
	// under protocol detection (it lets the cluster select the upstream connection protocol when
	// the service port is unnamed), so use_downstream_protocol should be disabled for gateways.
	if node.Type == model.SidecarProxy && ((util.IsProtocolSniffingEnabledForInboundPort(port) && direction == model.TrafficDirectionInbound) ||
		(util.IsProtocolSniffingEnabledForOutboundPort(port) && direction == model.TrafficDirectionOutbound)) {
		// set up http2 protocol options for the upstream connection.
		setH2Options(cluster)

		// Use the downstream protocol: if the incoming traffic uses HTTP 1.1, the
		// upstream cluster will use HTTP 1.1; if incoming traffic uses HTTP/2,
		// the upstream cluster will use HTTP/2.
		cluster.ProtocolSelection = apiv2.Cluster_USE_DOWNSTREAM_PROTOCOL
	}
}