/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package upgrade

import (
	"context"
	"os"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns"
	"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy"
	"k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo"
	nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
	kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet"
	patchnodephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/patchnode"
	"k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
	dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	errorsutil "k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	"github.com/pkg/errors"
)
// PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do.
// Note that the mark-control-plane phase is left out as it is not needed, and no token is created, as that doesn't belong to the upgrade.
func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, dryRun bool) error {
	errs := []error{}

	// Upload the currently used configuration to the cluster
	// Note: This is also done right at the beginning of cluster initialization, as we might want to make other phases
	// depend on centralized information from this source in the future
	if err := uploadconfig.UploadConfiguration(cfg, client); err != nil {
		errs = append(errs, err)
	}

	// Create the new, version-branched kubelet ComponentConfig ConfigMap
	if err := kubeletphase.CreateConfigMap(&cfg.ClusterConfiguration, client); err != nil {
		errs = append(errs, errors.Wrap(err, "error creating kubelet configuration ConfigMap"))
	}

	// Write the new kubelet config down to disk and the env file if needed
	if err := writeKubeletConfigFiles(client, cfg, dryRun); err != nil {
		errs = append(errs, err)
	}

	// Annotate the node with the crisocket information, sourced either from the InitConfiguration struct or
	// the --cri-socket flag.
	// TODO: In the future we want to use something more official like NodeStatus or similar for detecting this properly
	if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil {
		errs = append(errs, errors.Wrap(err, "error uploading crisocket"))
	}

	// Create RBAC rules that make the bootstrap tokens able to get nodes
	if err := nodebootstraptoken.AllowBoostrapTokensToGetNodes(client); err != nil {
		errs = append(errs, err)
	}

	// Create/update RBAC rules that make the bootstrap tokens able to post CSRs
	if err := nodebootstraptoken.AllowBootstrapTokensToPostCSRs(client); err != nil {
		errs = append(errs, err)
	}

	// Create/update RBAC rules that make the bootstrap tokens able to get their CSRs approved automatically
	if err := nodebootstraptoken.AutoApproveNodeBootstrapTokens(client); err != nil {
		errs = append(errs, err)
	}

	// Create/update RBAC rules that make the nodes able to rotate certificates and get their CSRs approved automatically
	if err := nodebootstraptoken.AutoApproveNodeCertificateRotation(client); err != nil {
		errs = append(errs, err)
	}

	// TODO: Is this needed here? Updating the cluster-info ConfigMap should probably be kept separate from a normal upgrade
	// Create the cluster-info ConfigMap with the associated RBAC rules
	// if err := clusterinfo.CreateBootstrapConfigMapIfNotExists(client, kubeadmconstants.GetAdminKubeConfigPath()); err != nil {
	// 	return err
	// }
	// Create/update RBAC rules that make the cluster-info ConfigMap reachable
	if err := clusterinfo.CreateClusterInfoRBACRules(client); err != nil {
		errs = append(errs, err)
	}

	// If the CoreDNS ConfigMap is missing, show a warning and assume that the
	// DNS addon was skipped during "kubeadm init", and that its redeployment on upgrade is not desired.
	//
	// TODO: remove this once "kubeadm upgrade apply" phases are supported:
	//   https://github.com/kubernetes/kubeadm/issues/1318
	var missingCoreDNSConfigMap bool
	if _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(
		context.TODO(),
		kubeadmconstants.CoreDNSConfigMap,
		metav1.GetOptions{},
	); err != nil && apierrors.IsNotFound(err) {
		missingCoreDNSConfigMap = true
	}
	if missingCoreDNSConfigMap {
		klog.Warningf("the ConfigMap %q in the namespace %q was not found. "+
			"Assuming that a DNS server was not deployed for this cluster. "+
			"Note that once 'kubeadm upgrade apply' supports phases you "+
			"will have to skip the DNS upgrade manually",
			kubeadmconstants.CoreDNSConfigMap,
			metav1.NamespaceSystem)
	} else {
		// Upgrade CoreDNS
		if err := dns.EnsureDNSAddon(&cfg.ClusterConfiguration, client); err != nil {
			errs = append(errs, err)
		}
	}

	// If the kube-proxy ConfigMap is missing, show a warning and assume that kube-proxy
	// was skipped during "kubeadm init", and that its redeployment on upgrade is not desired.
	//
	// TODO: remove this once "kubeadm upgrade apply" phases are supported:
	//   https://github.com/kubernetes/kubeadm/issues/1318
	if _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(
		context.TODO(),
		kubeadmconstants.KubeProxyConfigMap,
		metav1.GetOptions{},
	); err != nil && apierrors.IsNotFound(err) {
		klog.Warningf("the ConfigMap %q in the namespace %q was not found. "+
			"Assuming that kube-proxy was not deployed for this cluster. "+
			"Note that once 'kubeadm upgrade apply' supports phases you "+
			"will have to skip the kube-proxy upgrade manually",
			kubeadmconstants.KubeProxyConfigMap,
			metav1.NamespaceSystem)
	} else {
		// Upgrade kube-proxy
		if err := proxy.EnsureProxyAddon(&cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, client); err != nil {
			errs = append(errs, err)
		}
	}

	return errorsutil.NewAggregate(errs)
}
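
// A minimal sketch of how this function might be driven by a caller; the
// surrounding wiring is hypothetical and shown for illustration only:
//
//	if err := PerformPostUpgradeTasks(client, initCfg, dryRun); err != nil {
//		return errors.Wrap(err, "error performing post-upgrade tasks")
//	}
//
// All sub-task errors are collected and returned as a single aggregate, so a
// failure in one task does not prevent the remaining tasks from running.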

func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, dryRun bool) error {
	kubeletDir, err := GetKubeletDir(dryRun)
	if err != nil {
		// The error here should never occur in reality; it would only happen if the temporary directory for a dry run could not be created.
		return err
	}
	errs := []error{}
	// Write the configuration for the kubelet down to disk so the upgraded kubelet can start with a fresh config
	if err := kubeletphase.WriteConfigToDisk(&cfg.ClusterConfiguration, kubeletDir); err != nil {
		errs = append(errs, errors.Wrap(err, "error writing kubelet configuration to file"))
	}

	if dryRun { // Print what contents would be written
		if err := dryrunutil.PrintDryRunFile(kubeadmconstants.KubeletConfigurationFileName, kubeletDir, kubeadmconstants.KubeletRunDirectory, os.Stdout); err != nil {
			errs = append(errs, errors.Wrap(err, "error printing kubelet configuration file on dry run"))
		}
	}
	return errorsutil.NewAggregate(errs)
}

// GetKubeletDir gets the kubelet directory based on whether the user is dry-running this command or not.
func GetKubeletDir(dryRun bool) (string, error) {
	if dryRun {
		return kubeadmconstants.CreateTempDirForKubeadm("", "kubeadm-upgrade-dryrun")
	}
	return kubeadmconstants.KubeletRunDirectory, nil
}
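
// For illustration: with dryRun=true the kubelet configuration is written to a
// freshly created "kubeadm-upgrade-dryrun" temporary directory (and printed by
// writeKubeletConfigFiles above), while with dryRun=false it goes to the
// regular kubelet run directory:
//
//	dir, err := GetKubeletDir(false)
//	// dir == kubeadmconstants.KubeletRunDirectory, err == nil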

// moveFiles moves each file from its source path (map key) to its destination path (map value),
// rolling back any completed moves if a rename fails.
func moveFiles(files map[string]string) error {
	filesToRecover := map[string]string{}
	for from, to := range files {
		if err := os.Rename(from, to); err != nil {
			return rollbackFiles(filesToRecover, err)
		}
		// Record the inverse rename so the moves done so far can be undone on a later failure.
		filesToRecover[to] = from
	}
	return nil
}

// rollbackFiles moves the files back to the original directory.
func rollbackFiles(files map[string]string, originalErr error) error {
	errs := []error{originalErr}
	for from, to := range files {
		if err := os.Rename(from, to); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Errorf("couldn't move these files: %v. Got errors: %v", files, errorsutil.NewAggregate(errs))
}
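
// A small usage sketch for the pair above (the paths are hypothetical, for
// illustration only):
//
//	err := moveFiles(map[string]string{
//		"/etc/kubernetes/tmp/kube-apiserver.yaml": "/etc/kubernetes/manifests/kube-apiserver.yaml",
//		"/etc/kubernetes/tmp/kube-scheduler.yaml": "/etc/kubernetes/manifests/kube-scheduler.yaml",
//	})
//
// If any rename fails partway through, the files already moved are renamed
// back via rollbackFiles (using the inverse mapping recorded so far) and the
// combined error is returned.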

// LabelOldControlPlaneNodes finds all nodes with the legacy node-role label and also applies
// the "control-plane" node-role label to them.
// TODO: https://github.com/kubernetes/kubeadm/issues/2200
func LabelOldControlPlaneNodes(client clientset.Interface) error {
	selectorOldControlPlane := labels.SelectorFromSet(labels.Set(map[string]string{
		kubeadmconstants.LabelNodeRoleOldControlPlane: "",
	}))
	nodesWithOldLabel, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
		LabelSelector: selectorOldControlPlane.String(),
	})
	if err != nil {
		return errors.Wrapf(err, "could not list nodes labeled with %q", kubeadmconstants.LabelNodeRoleOldControlPlane)
	}

	for _, n := range nodesWithOldLabel.Items {
		if _, hasNewLabel := n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleControlPlane]; hasNewLabel {
			continue
		}
		err = apiclient.PatchNode(client, n.Name, func(n *v1.Node) {
			n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleControlPlane] = ""
		})
		if err != nil {
			return err
		}
	}
	return nil
}
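
// The migration above is roughly equivalent to running, for every node that
// carries the legacy label but not the new one (assuming the usual
// "node-role.kubernetes.io/..." label keys; shown for illustration only):
//
//	kubectl label node <node-name> node-role.kubernetes.io/control-plane=
//
// Nodes that already carry the new label are skipped, so the function is safe
// to run repeatedly.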