package kubernetes

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"path"
	"strings"
	"sync"
	"time"

	"github.com/docker/cli/cli/config/types"
	"github.com/jpillora/backoff"
	"golang.org/x/net/context"
	api "k8s.io/api/core/v1"
	kubeerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth" // Register all available authentication methods
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/util/exec"

	"gitlab.com/gitlab-org/gitlab-runner/common"
	"gitlab.com/gitlab-org/gitlab-runner/executors"
	"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/internal/pull"
	"gitlab.com/gitlab-org/gitlab-runner/helpers/container/helperimage"
	"gitlab.com/gitlab-org/gitlab-runner/helpers/dns"
	"gitlab.com/gitlab-org/gitlab-runner/helpers/docker/auth"
	"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags"
	"gitlab.com/gitlab-org/gitlab-runner/helpers/retry"
	"gitlab.com/gitlab-org/gitlab-runner/session/proxy"
	"gitlab.com/gitlab-org/gitlab-runner/shells"
)

const (
	buildContainerName  = "build"
	helperContainerName = "helper"

	detectShellScriptName = "detect_shell_script"

	// The `.ps1` extension is added to the script name to fix a strange behavior
	// where stage scripts wouldn't be executed otherwise
	parsePwshScriptName = "parse_pwsh_script.ps1"

	waitLogFileTimeout = time.Minute

	outputLogFileNotExistsExitCode = 100
	unknownLogProcessorExitCode    = 1000
)

var (
	executorOptions = executors.ExecutorOptions{
		DefaultCustomBuildsDirEnabled: true,
		DefaultBuildsDir:              "/builds",
		DefaultCacheDir:               "/cache",
		SharedBuildsDir:               true,
		Shell: common.ShellScriptInfo{
			Shell:         "bash",
			Type:          common.NormalShell,
			RunnerCommand: "/usr/bin/gitlab-runner-helper",
		},
		ShowHostname: true,
	}

	errIncorrectShellType = fmt.Errorf("kubernetes executor incorrect shell type")
)

// GetDefaultCapDrop returns the default capabilities that should be dropped
// from a build container.
func GetDefaultCapDrop() []string {
	return []string{
		// Reasons for disabling NET_RAW by default were
		// discussed in https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26833
		"NET_RAW",
	}
}

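// commandTerminatedError is returned when a command run in the build pod
// exits with a non-zero exit code.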
type commandTerminatedError struct {
	exitCode int
}

func (c *commandTerminatedError) Error() string {
	return fmt.Sprintf("command terminated with exit code %d", c.exitCode)
}

func (c *commandTerminatedError) Is(err error) bool {
	_, ok := err.(*commandTerminatedError)
	return ok
}

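// podPhaseError is returned when the build pod is no longer in the
// running phase, for example after being evicted or failing.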
type podPhaseError struct {
	name  string
	phase api.PodPhase
}

func (p *podPhaseError) Error() string {
	return fmt.Sprintf("pod %q status is %q", p.name, p.phase)
}

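// kubernetesOptions holds the build image and the services resolved for the job.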
type kubernetesOptions struct {
	Image    common.Image
	Services common.Services
}

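// executor runs each job in its own Kubernetes pod, with dedicated build and
// helper containers plus one container per requested service.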
type executor struct {
	executors.AbstractExecutor

	kubeClient  *kubernetes.Clientset
	kubeConfig  *restclient.Config
	pod         *api.Pod
	configMap   *api.ConfigMap
	credentials *api.Secret
	options     *kubernetesOptions
	services    []api.Service

	configurationOverwrites *overwrites
	pullManager             pull.Manager

	helperImageInfo helperimage.Info

	featureChecker featureChecker

	newLogProcessor func() logProcessor

	remoteProcessTerminated chan shells.TrapCommandExitStatus

	// Flags whether a repo mount and emptyDir volume are needed
	requireDefaultBuildsDirVolume *bool
}

type serviceDeleteResponse struct {
	serviceName string
	err         error
}

type serviceCreateResponse struct {
	service *api.Service
	err     error
}

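// Prepare validates the executor configuration, builds the Kubernetes client
// and resolves the image and pull policies used for the job.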
func (s *executor) Prepare(options common.ExecutorPrepareOptions) (err error) {
	if err = s.AbstractExecutor.Prepare(options); err != nil {
		return fmt.Errorf("prepare AbstractExecutor: %w", err)
	}

	if s.BuildShell.PassFile {
		return fmt.Errorf("kubernetes doesn't support shells that require a script file")
	}

	if err = s.prepareOverwrites(options.Build.GetAllVariables()); err != nil {
		return fmt.Errorf("couldn't prepare overwrites: %w", err)
	}

	var pullPolicies []api.PullPolicy
	if pullPolicies, err = s.Config.Kubernetes.GetPullPolicies(); err != nil {
		return fmt.Errorf("couldn't get pull policy: %w", err)
	}
	s.pullManager = pull.NewPullManager(pullPolicies, &s.BuildLogger)

	s.prepareOptions(options.Build)

	if err = s.checkDefaults(); err != nil {
		return fmt.Errorf("check defaults error: %w", err)
	}

	s.kubeConfig, err = getKubeClientConfig(s.Config.Kubernetes, s.configurationOverwrites)
	if err != nil {
		return fmt.Errorf("getting Kubernetes config: %w", err)
	}

	s.kubeClient, err = kubernetes.NewForConfig(s.kubeConfig)
	if err != nil {
		return fmt.Errorf("connecting to Kubernetes: %w", err)
	}

	s.helperImageInfo, err = s.prepareHelperImage()
	if err != nil {
		return fmt.Errorf("prepare helper image: %w", err)
	}

	s.featureChecker = &kubeClientFeatureChecker{kubeClient: s.kubeClient}

	imageName := s.Build.GetAllVariables().ExpandValue(s.options.Image.Name)

	s.Println("Using Kubernetes executor with image", imageName, "...")
	if !s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {
		s.Println("Using attach strategy to execute scripts...")
	}

	return nil
}

func (s *executor) prepareHelperImage() (helperimage.Info, error) {
	return helperimage.Get(common.REVISION, helperimage.Config{
		OSType:         helperimage.OSTypeLinux,
		Architecture:   "amd64",
		GitLabRegistry: s.Build.IsFeatureFlagOn(featureflags.GitLabRegistryHelperImage),
		Shell:          s.Config.Shell,
		Flavor:         s.Config.Kubernetes.HelperImageFlavor,
	})
}

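// Run executes the given command in the build pod. When an image pull fails
// and the pull manager allows another pull policy, the pod is recreated and
// the command retried.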
func (s *executor) Run(cmd common.ExecutorCommand) error {
	for attempt := 1; ; attempt++ {
		var err error

		if s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {
			s.Debugln("Starting Kubernetes command...")
			err = s.runWithExecLegacy(cmd)
		} else {
			s.Debugln("Starting Kubernetes command with attach...")
			err = s.runWithAttach(cmd)
		}

		var imagePullErr *pull.ImagePullError
		if errors.As(err, &imagePullErr) {
			if s.pullManager.UpdatePolicyForImage(attempt, imagePullErr) {
				s.cleanupResources()
				s.pod = nil
				continue
			}
		}
		return err
	}
}

func (s *executor) runWithExecLegacy(cmd common.ExecutorCommand) error {
	if s.pod == nil {
		err := s.setupCredentials()
		if err != nil {
			return err
		}

		err = s.setupBuildPod(nil)
		if err != nil {
			return err
		}
	}

	containerName := buildContainerName
	containerCommand := s.BuildShell.DockerCommand
	if cmd.Predefined {
		containerName = helperContainerName
		containerCommand = s.helperImageInfo.Cmd
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s.Debugln(fmt.Sprintf(
		"Starting in container %q the command %q with script: %s",
		containerName,
		containerCommand,
		cmd.Script,
	))

	select {
	case err := <-s.runInContainerWithExecLegacy(ctx, containerName, containerCommand, cmd.Script):
		s.Debugln(fmt.Sprintf("Container %q exited with error: %v", containerName, err))
		var exitError exec.CodeExitError
		if err != nil && errors.As(err, &exitError) {
			return &common.BuildError{Inner: err, ExitCode: exitError.ExitStatus()}
		}
		return err

	case <-cmd.Context.Done():
		return fmt.Errorf("build aborted")
	}
}

func (s *executor) runWithAttach(cmd common.ExecutorCommand) error {
	err := s.ensurePodsConfigured(cmd.Context)
	if err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(cmd.Context)
	defer cancel()

	containerName, containerCommand := s.getContainerInfo(cmd)

	s.Debugln(fmt.Sprintf(
		"Starting in container %q the command %q with script: %s",
		containerName,
		containerCommand,
		cmd.Script,
	))

	podStatusCh := s.watchPodStatus(ctx)

	select {
	case err := <-s.runInContainer(containerName, containerCommand):
		s.Debugln(fmt.Sprintf("Container %q exited with error: %v", containerName, err))
		var terminatedError *commandTerminatedError
		if err != nil && errors.As(err, &terminatedError) {
			return &common.BuildError{Inner: err, ExitCode: terminatedError.exitCode}
		}

		return err
	case err := <-podStatusCh:
		if IsKubernetesPodNotFoundError(err) {
			return err
		}

		return &common.BuildError{Inner: err}
	case <-ctx.Done():
		return fmt.Errorf("build aborted")
	}
}

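// ensurePodsConfigured lazily creates the credentials secret, the scripts
// ConfigMap and the build pod on the first command, waits for the pod to be
// running and then starts streaming its logs.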
func (s *executor) ensurePodsConfigured(ctx context.Context) error {
	if s.pod != nil {
		return nil
	}

	err := s.setupCredentials()
	if err != nil {
		return fmt.Errorf("setting up credentials: %w", err)
	}

	err = s.setupScriptsConfigMap()
	if err != nil {
		return fmt.Errorf("setting up scripts configMap: %w", err)
	}

	permissionsInitContainer, err := s.buildLogPermissionsInitContainer()
	if err != nil {
		return fmt.Errorf("building log permissions init container: %w", err)
	}
	err = s.setupBuildPod([]api.Container{permissionsInitContainer})
	if err != nil {
		return fmt.Errorf("setting up build pod: %w", err)
	}

	status, err := waitForPodRunning(ctx, s.kubeClient, s.pod, s.Trace, s.Config.Kubernetes)
	if err != nil {
		return fmt.Errorf("waiting for pod running: %w", err)
	}

	if status != api.PodRunning {
		return fmt.Errorf("pod failed to enter running state: %s", status)
	}

	go s.processLogs(ctx)

	return nil
}

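// getContainerInfo returns the container and command used to run the given
// stage script. For a bash build stage, for example, the resulting command is
// roughly:
//
//	sh <scriptsDir>/detect_shell_script <scriptsDir>/<stage> 2>&1 | tee -a <logsDir>/output.log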
func (s *executor) getContainerInfo(cmd common.ExecutorCommand) (string, []string) {
	var containerCommand []string
	containerName := buildContainerName

	switch s.Shell().Shell {
	case shells.SNPwsh:
		// Translates to roughly "/path/to/parse_pwsh_script.ps1 /path/to/stage_script /path/to/logFile"
		containerCommand = []string{
			s.scriptPath(parsePwshScriptName),
			s.scriptPath(cmd.Stage),
			s.logFile(),
			s.buildRedirectionCmd(),
		}
		if cmd.Predefined {
			containerName = helperContainerName
			containerCommand = []string{fmt.Sprintf("Get-Content -Path %s | ", s.scriptPath(cmd.Stage))}
			containerCommand = append(containerCommand, s.helperImageInfo.Cmd...)
			containerCommand = append(containerCommand, s.buildRedirectionCmd())
		}
	default:
		// Translates to roughly "sh /detect/shell/path.sh /stage/script/path.sh",
		// which when the detect shell exits becomes something like "bash /stage/script/path.sh".
		// Unlike "gitlab-runner-build", this works because the detect shell passes arguments with "$@"
		containerCommand = []string{
			"sh",
			s.scriptPath(detectShellScriptName),
			s.scriptPath(cmd.Stage),
			s.buildRedirectionCmd(),
		}
		if cmd.Predefined {
			containerName = helperContainerName
			// We use redirection here since the "gitlab-runner-build" helper doesn't pass input args
			// to the shell it executes, so we technically pass the script to the stdin of the
			// underlying shell. Translates roughly to "gitlab-runner-build <<< /stage/script/path.sh"
			containerCommand = append(
				s.helperImageInfo.Cmd,
				"<<<",
				s.scriptPath(cmd.Stage),
				s.buildRedirectionCmd(),
			)
		}
	}

	return containerName, containerCommand
}

func (s *executor) buildLogPermissionsInitContainer() (api.Container, error) {
	// We need to create the log file to which all scripts will append their output.
	// The log file is created by the current user. There are three scenarios for that user:
	// 1. The user in all images and containers is root. In that case the chmod is redundant,
	// since every container already has permission to write to the file.
	// 2. The user of the helper image is root, but the build image's user is not.
	// In that case we need to allow the build user to write to the log file from inside the
	// build container. That's where the chmod comes into play.
	// 3. No user is root, but all containers share the same user ID. In that case we just create
	// the file; it will have the same user and group owner across all containers. This is the case
	// for Kubernetes when the PodSecurityContext is set manually, or for OpenShift where each pod
	// gets its own user ID.
	// 4. Different user IDs across containers are not supported. If that ever changes,
	// we might need to try to chown the log file for the group only.
	logFile := s.logFile()
	chmod := fmt.Sprintf("touch %s && (chmod 777 %s || exit 0)", logFile, logFile)

	pullPolicy, err := s.pullManager.GetPullPolicyFor(s.getHelperImage())
	if err != nil {
		return api.Container{}, fmt.Errorf("getting pull policy for log permissions init container: %w", err)
	}

	return api.Container{
		Name:            "init-logs",
		Image:           s.getHelperImage(),
		Command:         []string{"sh", "-c", chmod},
		VolumeMounts:    s.getVolumeMounts(),
		ImagePullPolicy: pullPolicy,
	}, nil
}

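// buildRedirectionCmd returns the shell fragment appended to each command so
// that its output is mirrored into the shared log file.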
func (s *executor) buildRedirectionCmd() string {
	return fmt.Sprintf("2>&1 | tee -a %s", s.logFile())
}

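// processLogs forwards log lines from the build pod to the job trace and
// signals s.remoteProcessTerminated whenever a line carries a trap-command
// exit status.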
func (s *executor) processLogs(ctx context.Context) {
	processor := s.newLogProcessor()
	logsCh, errCh := processor.Process(ctx)

	for {
		select {
		case line, ok := <-logsCh:
			if !ok {
				return
			}
			var status shells.TrapCommandExitStatus
			if status.TryUnmarshal(line) {
				s.remoteProcessTerminated <- status
				continue
			}

			_, err := s.Trace.Write(append([]byte(line), '\n'))
			if err != nil {
				s.Warningln(fmt.Sprintf("Error writing log line to trace: %v", err))
			}
		case err, ok := <-errCh:
			if !ok {
				continue
			}

			exitCode := getExitCode(err)
			s.Warningln(fmt.Sprintf("%v", err))
			// The script field can be left nil since it isn't used after the exit status is received (L1223)
			s.remoteProcessTerminated <- shells.TrapCommandExitStatus{CommandExitCode: &exitCode}
		}
	}
}

// getExitCode tries to extract the exit code from an inner exec.CodeExitError.
// This error may be returned by the underlying Kubernetes connection stream,
// but that is not guaranteed. getExitCode returns unknownLogProcessorExitCode
// if err is nil or isn't of type exec.CodeExitError.
func getExitCode(err error) int {
	var exitErr exec.CodeExitError
	if errors.As(err, &exitErr) {
		return exitErr.Code
	}
	return unknownLogProcessorExitCode
}

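// setupScriptsConfigMap generates the per-stage scripts and stores them in a
// ConfigMap that is later mounted into the build pod.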
func (s *executor) setupScriptsConfigMap() error {
	s.Debugln("Setting up scripts config map")

	// After issue https://gitlab.com/gitlab-org/gitlab-runner/issues/10342 is resolved and
	// the legacy execution mode is removed, we can remove the manual construction of the trap
	// shell and just use "bash+trap" in the exec options
	shell, err := s.retrieveShell()
	if err != nil {
		return err
	}

	scripts, err := s.generateScripts(shell)
	if err != nil {
		return err
	}

	configMap := &api.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("%s-scripts", s.Build.ProjectUniqueName()),
			Namespace:    s.configurationOverwrites.namespace,
		},
		Data: scripts,
	}

	// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932
	s.configMap, err = s.kubeClient.
		CoreV1().
		ConfigMaps(s.configurationOverwrites.namespace).
		Create(context.TODO(), configMap, metav1.CreateOptions{})
	if err != nil {
		return fmt.Errorf("generating scripts config map: %w", err)
	}

	return nil
}

func (s *executor) retrieveShell() (common.Shell, error) {
	bashShell, ok := common.GetShell(s.Shell().Shell).(*shells.BashShell)
	if ok {
		return &shells.BashTrapShell{BashShell: bashShell, LogFile: s.logFile()}, nil
	}

	pwshShell, ok := common.GetShell(s.Shell().Shell).(*shells.PowerShell)
	if ok {
		return &shells.PwshTrapShell{PowerShell: pwshShell, LogFile: s.logFile()}, nil
	}

	return nil, errIncorrectShellType
}

func (s *executor) generateScripts(shell common.Shell) (map[string]string, error) {
	scripts := map[string]string{}
	switch s.Shell().Shell {
	case shells.SNPwsh:
		scripts[parsePwshScriptName] = shells.PwshValidationScript
	default:
		scripts[detectShellScriptName] = shells.BashDetectShellScript
	}

	for _, stage := range s.Build.BuildStages() {
		script, err := shell.GenerateScript(stage, *s.Shell())
		if errors.Is(err, common.ErrSkipBuildStage) {
			continue
		} else if err != nil {
			return nil, fmt.Errorf("generating trap shell script: %w", err)
		}

		scripts[string(stage)] = script
	}

	return scripts, nil
}

func (s *executor) Finish(err error) {
	if IsKubernetesPodNotFoundError(err) {
		// Avoid an additional error message when trying to
		// clean up a pod that we know no longer exists
		s.pod = nil
	}

	s.AbstractExecutor.Finish(err)
}

func (s *executor) Cleanup() {
	s.cleanupResources()
	closeKubeClient(s.kubeClient)
	s.AbstractExecutor.Cleanup()
}

func (s *executor) cleanupServices() {
	ch := make(chan serviceDeleteResponse)
	var wg sync.WaitGroup
	wg.Add(len(s.services))

	for _, service := range s.services {
		go s.deleteKubernetesService(service.ObjectMeta.Name, ch, &wg)
	}

	go func() {
		wg.Wait()
		close(ch)
	}()

	for res := range ch {
		if res.err != nil {
			s.Errorln(fmt.Sprintf("Error cleaning up the pod service %q: %v", res.serviceName, res.err))
		}
	}
}

func (s *executor) deleteKubernetesService(serviceName string, ch chan<- serviceDeleteResponse, wg *sync.WaitGroup) {
	defer wg.Done()

	// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932
	err := s.kubeClient.CoreV1().
		Services(s.configurationOverwrites.namespace).
		Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
	ch <- serviceDeleteResponse{serviceName: serviceName, err: err}
}

func (s *executor) cleanupResources() {
	if s.pod != nil {
		// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932
		err := s.kubeClient.
			CoreV1().
			Pods(s.pod.Namespace).
			Delete(context.TODO(), s.pod.Name, metav1.DeleteOptions{})
		if err != nil {
			s.Errorln(fmt.Sprintf("Error cleaning up pod: %s", err.Error()))
		}
	}
	if s.credentials != nil {
		// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932
		err := s.kubeClient.CoreV1().
			Secrets(s.configurationOverwrites.namespace).
			Delete(context.TODO(), s.credentials.Name, metav1.DeleteOptions{})
		if err != nil {
			s.Errorln(fmt.Sprintf("Error cleaning up secrets: %s", err.Error()))
		}
	}
	if s.configMap != nil {
		// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932
		err := s.kubeClient.CoreV1().
			ConfigMaps(s.configurationOverwrites.namespace).
			Delete(context.TODO(), s.configMap.Name, metav1.DeleteOptions{})
		if err != nil {
			s.Errorln(fmt.Sprintf("Error cleaning up configmap: %s", err.Error()))
		}
	}

	s.cleanupServices()
}

//nolint:funlen
func (s *executor) buildContainer(
	name, image string,
	imageDefinition common.Image,
	requests, limits api.ResourceList,
	containerCommand ...string,
) (api.Container, error) {
	privileged := false
	var allowPrivilegeEscalation *bool
	containerPorts := make([]api.ContainerPort, len(imageDefinition.Ports))
	proxyPorts := make([]proxy.Port, len(imageDefinition.Ports))

	for i, port := range imageDefinition.Ports {
		proxyPorts[i] = proxy.Port{Name: port.Name, Number: port.Number, Protocol: port.Protocol}
		containerPorts[i] = api.ContainerPort{ContainerPort: int32(port.Number)}
	}

	if len(proxyPorts) > 0 {
		serviceName := imageDefinition.Alias

		if serviceName == "" {
			serviceName = name
			if name != buildContainerName {
				serviceName = fmt.Sprintf("proxy-%s", name)
			}
		}

		s.ProxyPool[serviceName] = s.newProxy(serviceName, proxyPorts)
	}

	if s.Config.Kubernetes != nil {
		privileged = s.Config.Kubernetes.Privileged
		allowPrivilegeEscalation = s.Config.Kubernetes.AllowPrivilegeEscalation
	}

	pullPolicy, err := s.pullManager.GetPullPolicyFor(image)
	if err != nil {
		return api.Container{}, err
	}

	command, args := s.getCommandAndArgs(imageDefinition, containerCommand...)

	return api.Container{
			Name:            name,
			Image:           image,
			ImagePullPolicy: pullPolicy,
			Command:         command,
			Args:            args,
			Env:             buildVariables(s.Build.GetAllVariables().PublicOrInternal()),
			Resources: api.ResourceRequirements{
				Limits:   limits,
				Requests: requests,
			},
			Ports:        containerPorts,
			VolumeMounts: s.getVolumeMounts(),
			SecurityContext: &api.SecurityContext{
				Privileged:               &privileged,
				AllowPrivilegeEscalation: allowPrivilegeEscalation,
				Capabilities: getCapabilities(
					GetDefaultCapDrop(),
					s.Config.Kubernetes.CapAdd,
					s.Config.Kubernetes.CapDrop,
				),
			},
			Stdin: true,
		},
		nil
}

func (s *executor) getCommandAndArgs(imageDefinition common.Image, command ...string) ([]string, []string) {
	if len(command) == 0 && len(imageDefinition.Entrypoint) > 0 {
		command = imageDefinition.Entrypoint
	}

	var args []string
	if len(imageDefinition.Command) > 0 {
		args = imageDefinition.Command
	}

	return command, args
}

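// logFile returns the path of the shared output log inside the pod,
// e.g. /logs-<project-id>-<job-id>/output.log.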
func (s *executor) logFile() string {
	return path.Join(s.logsDir(), "output.log")
}

func (s *executor) logsDir() string {
	return fmt.Sprintf("/logs-%d-%d", s.Build.JobInfo.ProjectID, s.Build.JobResponse.ID)
}

func (s *executor) scriptsDir() string {
	return fmt.Sprintf("/scripts-%d-%d", s.Build.JobInfo.ProjectID, s.Build.JobResponse.ID)
}

func (s *executor) scriptPath(stage common.BuildStage) string {
	return path.Join(s.scriptsDir(), string(stage))
}

func (s *executor) getVolumeMounts() []api.VolumeMount {
	var mounts []api.VolumeMount

	// The configMap is nil when using legacy execution
	if s.configMap != nil {
		// These volume mounts **MUST NOT** be mounted inside another volume mount.
		// E.g. mounting them inside the "repo" volume mount will cause the whole volume
		// to be owned by root instead of the current user of the image. Something similar
		// is explained here https://github.com/kubernetes/kubernetes/issues/2630#issuecomment-64679120
		// where the first container determines the ownership of a volume. However, it seems like
		// when mounting a volume inside another volume the first container or the first point of contact
		// becomes root, regardless of SecurityContext or Image settings changing the user ID of the container.
		// This causes builds to stop working in environments such as OpenShift where there's no root access
		// resulting in an inability to modify anything inside the parent volume.
		mounts = append(
			mounts,
			api.VolumeMount{
				Name:      "scripts",
				MountPath: s.scriptsDir(),
			},
			api.VolumeMount{
				Name:      "logs",
				MountPath: s.logsDir(),
			})
	}

	mounts = append(mounts, s.getVolumeMountsForConfig()...)

	if s.isDefaultBuildsDirVolumeRequired() {
		mounts = append(mounts, api.VolumeMount{
			Name:      "repo",
			MountPath: s.Build.RootDir,
		})
	}

	return mounts
}

func (s *executor) getVolumeMountsForConfig() []api.VolumeMount {
	var mounts []api.VolumeMount

	for _, mount := range s.Config.Kubernetes.Volumes.HostPaths {
		mounts = append(mounts, api.VolumeMount{
			Name:      mount.Name,
			MountPath: mount.MountPath,
			SubPath:   mount.SubPath,
			ReadOnly:  mount.ReadOnly,
		})
	}

	for _, mount := range s.Config.Kubernetes.Volumes.Secrets {
		mounts = append(mounts, api.VolumeMount{
			Name:      mount.Name,
			MountPath: mount.MountPath,
			SubPath:   mount.SubPath,
			ReadOnly:  mount.ReadOnly,
		})
	}

	for _, mount := range s.Config.Kubernetes.Volumes.PVCs {
		mounts = append(mounts, api.VolumeMount{
			Name:      mount.Name,
			MountPath: mount.MountPath,
			SubPath:   mount.SubPath,
			ReadOnly:  mount.ReadOnly,
		})
	}

	for _, mount := range s.Config.Kubernetes.Volumes.ConfigMaps {
		mounts = append(mounts, api.VolumeMount{
			Name:      mount.Name,
			MountPath: mount.MountPath,
			SubPath:   mount.SubPath,
			ReadOnly:  mount.ReadOnly,
		})
	}

	for _, mount := range s.Config.Kubernetes.Volumes.EmptyDirs {
		mounts = append(mounts, api.VolumeMount{
			Name:      mount.Name,
			MountPath: mount.MountPath,
			SubPath:   mount.SubPath,
		})
	}

	for _, mount := range s.Config.Kubernetes.Volumes.CSIs {
		mounts = append(mounts, api.VolumeMount{
			Name:      mount.Name,
			MountPath: mount.MountPath,
			SubPath:   mount.SubPath,
			ReadOnly:  mount.ReadOnly,
		})
	}

	return mounts
}

func (s *executor) getVolumes() []api.Volume {
	volumes := s.getVolumesForConfig()

	if s.isDefaultBuildsDirVolumeRequired() {
		volumes = append(volumes, api.Volume{
			Name: "repo",
			VolumeSource: api.VolumeSource{
				EmptyDir: &api.EmptyDirVolumeSource{},
			},
		})
	}

	// The configMap is nil when using legacy execution
	if s.configMap == nil {
		return volumes
	}

	mode := int32(0777)
	optional := false
	volumes = append(
		volumes,
		api.Volume{
			Name: "scripts",
			VolumeSource: api.VolumeSource{
				ConfigMap: &api.ConfigMapVolumeSource{
					LocalObjectReference: api.LocalObjectReference{
						Name: s.configMap.Name,
					},
					DefaultMode: &mode,
					Optional:    &optional,
				},
			},
		},
		api.Volume{
			Name: "logs",
			VolumeSource: api.VolumeSource{
				EmptyDir: &api.EmptyDirVolumeSource{},
			},
		})

	return volumes
}

func (s *executor) getVolumesForConfig() []api.Volume {
	var volumes []api.Volume

	volumes = append(volumes, s.getVolumesForHostPaths()...)
	volumes = append(volumes, s.getVolumesForSecrets()...)
	volumes = append(volumes, s.getVolumesForPVCs()...)
	volumes = append(volumes, s.getVolumesForConfigMaps()...)
	volumes = append(volumes, s.getVolumesForEmptyDirs()...)
	volumes = append(volumes, s.getVolumesForCSIs()...)

	return volumes
}

func (s *executor) getVolumesForHostPaths() []api.Volume {
	var volumes []api.Volume

	for _, volume := range s.Config.Kubernetes.Volumes.HostPaths {
		path := volume.HostPath
		// Make backward compatible with syntax introduced in version 9.3.0
		if path == "" {
			path = volume.MountPath
		}

		volumes = append(volumes, api.Volume{
			Name: volume.Name,
			VolumeSource: api.VolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: path,
				},
			},
		})
	}

	return volumes
}

func (s *executor) getVolumesForSecrets() []api.Volume {
	var volumes []api.Volume

	for _, volume := range s.Config.Kubernetes.Volumes.Secrets {
		var items []api.KeyToPath
		for key, path := range volume.Items {
			items = append(items, api.KeyToPath{Key: key, Path: path})
		}

		volumes = append(volumes, api.Volume{
			Name: volume.Name,
			VolumeSource: api.VolumeSource{
				Secret: &api.SecretVolumeSource{
					SecretName: volume.Name,
					Items:      items,
				},
			},
		})
	}

	return volumes
}

func (s *executor) getVolumesForPVCs() []api.Volume {
	var volumes []api.Volume

	for _, volume := range s.Config.Kubernetes.Volumes.PVCs {
		volumes = append(volumes, api.Volume{
			Name: volume.Name,
			VolumeSource: api.VolumeSource{
				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
					ClaimName: volume.Name,
					ReadOnly:  volume.ReadOnly,
				},
			},
		})
	}

	return volumes
}

func (s *executor) getVolumesForConfigMaps() []api.Volume {
	var volumes []api.Volume

	for _, volume := range s.Config.Kubernetes.Volumes.ConfigMaps {
		var items []api.KeyToPath
		for key, path := range volume.Items {
			items = append(items, api.KeyToPath{Key: key, Path: path})
		}

		volumes = append(volumes, api.Volume{
			Name: volume.Name,
			VolumeSource: api.VolumeSource{
				ConfigMap: &api.ConfigMapVolumeSource{
					LocalObjectReference: api.LocalObjectReference{
						Name: volume.Name,
					},
					Items: items,
				},
			},
		})
	}

	return volumes
}

func (s *executor) getVolumesForEmptyDirs() []api.Volume {
	var volumes []api.Volume

	for _, volume := range s.Config.Kubernetes.Volumes.EmptyDirs {
		volumes = append(volumes, api.Volume{
			Name: volume.Name,
			VolumeSource: api.VolumeSource{
				EmptyDir: &api.EmptyDirVolumeSource{
					Medium: api.StorageMedium(volume.Medium),
				},
			},
		})
	}
	return volumes
}

func (s *executor) getVolumesForCSIs() []api.Volume {
	var volumes []api.Volume

	for _, volume := range s.Config.Kubernetes.Volumes.CSIs {
		volumes = append(volumes, api.Volume{
			Name: volume.Name,
			VolumeSource: api.VolumeSource{
				CSI: &api.CSIVolumeSource{
					Driver:           volume.Driver,
					FSType:           &volume.FSType,
					ReadOnly:         &volume.ReadOnly,
					VolumeAttributes: volume.VolumeAttributes,
				},
			},
		})
	}
	return volumes
}

func (s *executor) isDefaultBuildsDirVolumeRequired() bool {
	if s.requireDefaultBuildsDirVolume != nil {
		return *s.requireDefaultBuildsDirVolume
	}

	var required = true
	for _, mount := range s.getVolumeMountsForConfig() {
		if mount.MountPath == s.Build.RootDir {
			required = false
			break
		}
	}

	s.requireDefaultBuildsDirVolume = &required

	return required
}

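// setupCredentials stores the resolved Docker registry credentials in a
// dockercfg-type secret that is attached to the pod as an image pull secret.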
func (s *executor) setupCredentials() error {
	s.Debugln("Setting up secrets")

	authConfigs, err := auth.ResolveConfigs(s.Build.GetDockerAuthConfig(), s.Shell().User, s.Build.Credentials)
	if err != nil {
		return err
	}

	if len(authConfigs) == 0 {
		return nil
	}

	dockerCfgs := make(map[string]types.AuthConfig)
	for registry, registryInfo := range authConfigs {
		dockerCfgs[registry] = registryInfo.AuthConfig
	}

	dockerCfgContent, err := json.Marshal(dockerCfgs)
	if err != nil {
		return err
	}

	secret := api.Secret{}
	secret.GenerateName = s.Build.ProjectUniqueName()
	secret.Namespace = s.configurationOverwrites.namespace
	secret.Type = api.SecretTypeDockercfg
	secret.Data = map[string][]byte{}
	secret.Data[api.DockerConfigKey] = dockerCfgContent

	// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932
	creds, err := s.kubeClient.
		CoreV1().
		Secrets(s.configurationOverwrites.namespace).
		Create(context.TODO(), &secret, metav1.CreateOptions{})
	if err != nil {
		return err
	}

	s.credentials = creds
	return nil
}

func (s *executor) getHostAliases() ([]api.HostAlias, error) {
	supportsHostAliases, err := s.featureChecker.IsHostAliasSupported()
	switch {
	case errors.Is(err, &badVersionError{}):
		s.Warningln("Checking for host alias support. Host aliases will be disabled.", err)
		return nil, nil
	case err != nil:
		return nil, err
	case !supportsHostAliases:
		return nil, nil
	}

	return createHostAliases(s.options.Services, s.Config.Kubernetes.GetHostAliases())
}

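// setupBuildPod creates the build pod (build, helper and service containers
// plus the given init containers) and the proxy services attached to it.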
//nolint:funlen
func (s *executor) setupBuildPod(initContainers []api.Container) error {
	s.Debugln("Setting up build pod")

	podServices := make([]api.Container, len(s.options.Services))

	for i, service := range s.options.Services {
		resolvedImage := s.Build.GetAllVariables().ExpandValue(service.Name)
		var err error
		podServices[i], err = s.buildContainer(
			fmt.Sprintf("svc-%d", i),
			resolvedImage,
			service,
			s.configurationOverwrites.serviceRequests,
			s.configurationOverwrites.serviceLimits,
		)
		if err != nil {
			return err
		}
	}

	// We set a default label on the pod. The services created later use
	// this label to link each service to the pod
	labels := map[string]string{"pod": s.Build.ProjectUniqueName()}
	for k, v := range s.Build.Runner.Kubernetes.PodLabels {
		labels[k] = s.Build.Variables.ExpandValue(v)
	}

	annotations := make(map[string]string)
	for key, val := range s.configurationOverwrites.podAnnotations {
		annotations[key] = s.Build.Variables.ExpandValue(val)
	}

	var imagePullSecrets []api.LocalObjectReference
	for _, imagePullSecret := range s.Config.Kubernetes.ImagePullSecrets {
		imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: imagePullSecret})
	}

	if s.credentials != nil {
		imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: s.credentials.Name})
	}

	hostAliases, err := s.getHostAliases()
	if err != nil {
		return err
	}

	podConfig, err :=
		s.preparePodConfig(labels, annotations, podServices, imagePullSecrets, hostAliases, initContainers)
	if err != nil {
		return err
	}

	s.Debugln("Creating build pod")

	// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932
	pod, err := s.kubeClient.
		CoreV1().
		Pods(s.configurationOverwrites.namespace).
		Create(context.TODO(), &podConfig, metav1.CreateOptions{})
	if err != nil {
		return err
	}

	s.pod = pod
	s.services, err = s.makePodProxyServices()
	if err != nil {
		return err
	}

	return nil
}

//nolint:funlen
func (s *executor) preparePodConfig(
	labels, annotations map[string]string,
	services []api.Container,
	imagePullSecrets []api.LocalObjectReference,
	hostAliases []api.HostAlias,
	initContainers []api.Container,
) (api.Pod, error) {
	buildImage := s.Build.GetAllVariables().ExpandValue(s.options.Image.Name)

	buildContainer, err := s.buildContainer(
		buildContainerName,
		buildImage,
		s.options.Image,
		s.configurationOverwrites.buildRequests,
		s.configurationOverwrites.buildLimits,
		s.BuildShell.DockerCommand...,
	)
	if err != nil {
		return api.Pod{}, fmt.Errorf("building build container: %w", err)
	}

	helperContainer, err := s.buildContainer(
		helperContainerName,
		s.getHelperImage(),
		common.Image{},
		s.configurationOverwrites.helperRequests,
		s.configurationOverwrites.helperLimits,
		s.BuildShell.DockerCommand...,
	)
	if err != nil {
		return api.Pod{}, fmt.Errorf("building helper container: %w", err)
	}

	pod := api.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: s.Build.ProjectUniqueName(),
			Namespace:    s.configurationOverwrites.namespace,
			Labels:       labels,
			Annotations:  annotations,
		},
		Spec: api.PodSpec{
			Volumes:            s.getVolumes(),
			ServiceAccountName: s.configurationOverwrites.serviceAccount,
			RestartPolicy:      api.RestartPolicyNever,
			NodeSelector:       s.Config.Kubernetes.NodeSelector,
			Tolerations:        s.Config.Kubernetes.GetNodeTolerations(),
			InitContainers:     initContainers,
			Containers: append([]api.Container{
				buildContainer,
				helperContainer,
			}, services...),
			TerminationGracePeriodSeconds: &s.Config.Kubernetes.TerminationGracePeriodSeconds,
			ImagePullSecrets:              imagePullSecrets,
			SecurityContext:               s.Config.Kubernetes.GetPodSecurityContext(),
			HostAliases:                   hostAliases,
			Affinity:                      s.Config.Kubernetes.GetAffinity(),
			DNSPolicy:                     s.getDNSPolicy(),
			DNSConfig:                     s.Config.Kubernetes.GetDNSConfig(),
		},
	}

	return pod, nil
}

func (s *executor) getDNSPolicy() api.DNSPolicy {
	dnsPolicy, err := s.Config.Kubernetes.DNSPolicy.Get()
	if err != nil {
		s.Warningln(fmt.Sprintf("falling back to cluster's default policy: %v", err))
	}
	return dnsPolicy
}

func (s *executor) getHelperImage() string {
	if len(s.Config.Kubernetes.HelperImage) > 0 {
		return common.AppVersion.Variables().ExpandValue(s.Config.Kubernetes.HelperImage)
	}

	if !s.Build.IsFeatureFlagOn(featureflags.GitLabRegistryHelperImage) {
		s.Warningln(helperimage.DockerHubWarningMessage)
	}

	return s.helperImageInfo.String()
}

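// makePodProxyServices concurrently creates one ClusterIP service per entry
// in the proxy pool and collects the results.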
func (s *executor) makePodProxyServices() ([]api.Service, error) {
	s.Debugln("Creating pod proxy services")

	ch := make(chan serviceCreateResponse)
	var wg sync.WaitGroup
	wg.Add(len(s.ProxyPool))

	for serviceName, serviceProxy := range s.ProxyPool {
		serviceName = dns.MakeRFC1123Compatible(serviceName)
		servicePorts := make([]api.ServicePort, len(serviceProxy.Settings.Ports))
		for i, port := range serviceProxy.Settings.Ports {
			// When there is more than one port, Kubernetes requires a port name
			portName := fmt.Sprintf("%s-%d", serviceName, port.Number)
			servicePorts[i] = api.ServicePort{
				Port:       int32(port.Number),
				TargetPort: intstr.FromInt(port.Number),
				Name:       portName,
			}
		}

		serviceConfig := s.prepareServiceConfig(serviceName, servicePorts)
		go s.createKubernetesService(&serviceConfig, serviceProxy.Settings, ch, &wg)
	}

	go func() {
		wg.Wait()
		close(ch)
	}()

	var proxyServices []api.Service
	for res := range ch {
		if res.err != nil {
			err := fmt.Errorf("error creating the proxy service %q: %w", res.service.Name, res.err)
			s.Errorln(err)

			return []api.Service{}, err
		}

		proxyServices = append(proxyServices, *res.service)
	}

	return proxyServices, nil
}

func (s *executor) prepareServiceConfig(name string, ports []api.ServicePort) api.Service {
	return api.Service{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: name,
			Namespace:    s.configurationOverwrites.namespace,
		},
		Spec: api.ServiceSpec{
			Ports:    ports,
			Selector: map[string]string{"pod": s.Build.ProjectUniqueName()},
			Type:     api.ServiceTypeClusterIP,
		},
	}
}

func (s *executor) createKubernetesService(
	service *api.Service,
	proxySettings *proxy.Settings,
	ch chan<- serviceCreateResponse,
	wg *sync.WaitGroup,
) {
	defer wg.Done()

	// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932
	service, err := s.kubeClient.
		CoreV1().
		Services(s.pod.Namespace).
		Create(context.TODO(), service, metav1.CreateOptions{})
	if err == nil {
		// Updating the internal service name reference and activating the proxy
		proxySettings.ServiceName = service.Name
	}

	ch <- serviceCreateResponse{service: service, err: err}
}

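// watchPodStatus polls the build pod at the configured interval and sends an
// error on the returned channel when the pod disappears or leaves the
// running phase.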
func (s *executor) watchPodStatus(ctx context.Context) <-chan error {
	// Buffer of 1, in case the context is cancelled while the timer tick case is being
	// executed and the consumer is no longer reading from the channel when we try to write to it
	ch := make(chan error, 1)

	go func() {
		defer close(ch)

		t := time.NewTicker(time.Duration(s.Config.Kubernetes.GetPollInterval()) * time.Second)
		defer t.Stop()

		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				err := s.checkPodStatus()
				if err != nil {
					ch <- err
					return
				}
			}
		}
	}()

	return ch
}

func (s *executor) checkPodStatus() error {
	// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932
	pod, err := s.kubeClient.
		CoreV1().
		Pods(s.pod.Namespace).
		Get(context.TODO(), s.pod.Name, metav1.GetOptions{})
	if IsKubernetesPodNotFoundError(err) {
		return err
	}

	if err != nil {
		// General request failure
		s.Warningln("Getting job pod status", err)
		return nil
	}

	if pod.Status.Phase != api.PodRunning {
		return &podPhaseError{
			name:  s.pod.Name,
			phase: pod.Status.Phase,
		}
	}

	return nil
}

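// runInContainer attaches to the given container and runs the command there,
// reporting the exit status signalled by the trap shell through processLogs.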
func (s *executor) runInContainer(name string, command []string) <-chan error {
	errCh := make(chan error, 1)
	go func() {
		defer close(errCh)

		attach := AttachOptions{
			PodName:       s.pod.Name,
			Namespace:     s.pod.Namespace,
			ContainerName: name,
			Command:       command,

			Config:   s.kubeConfig,
			Client:   s.kubeClient,
			Executor: &DefaultRemoteExecutor{},
		}

		retryable := retry.New(retry.WithBuildLog(&attach, &s.BuildLogger))
		err := retryable.Run()
		if err != nil {
			errCh <- err
		}

		exitStatus := <-s.remoteProcessTerminated
		if *exitStatus.CommandExitCode == 0 {
			errCh <- nil
			return
		}

		errCh <- &commandTerminatedError{exitCode: *exitStatus.CommandExitCode}
	}()

	return errCh
}

func (s *executor) runInContainerWithExecLegacy(
	ctx context.Context,
	name string,
	command []string,
	script string,
) <-chan error {
	errCh := make(chan error, 1)
	go func() {
		defer close(errCh)

		status, err := waitForPodRunning(ctx, s.kubeClient, s.pod, s.Trace, s.Config.Kubernetes)
		if err != nil {
			errCh <- err
			return
		}

		if status != api.PodRunning {
			errCh <- fmt.Errorf("pod failed to enter running state: %s", status)
			return
		}

		exec := ExecOptions{
			PodName:       s.pod.Name,
			Namespace:     s.pod.Namespace,
			ContainerName: name,
			Command:       command,
			In:            strings.NewReader(script),
			Out:           s.Trace,
			Err:           s.Trace,
			Stdin:         true,
			Config:        s.kubeConfig,
			Client:        s.kubeClient,
			Executor:      &DefaultRemoteExecutor{},
		}

		retryable := retry.New(retry.WithBuildLog(&exec, &s.BuildLogger))
		errCh <- retryable.Run()
	}()

	return errCh
}

func (s *executor) prepareOverwrites(variables common.JobVariables) error {
	values, err := createOverwrites(s.Config.Kubernetes, variables, s.BuildLogger)
	if err != nil {
		return err
	}

	s.configurationOverwrites = values
	return nil
}

func (s *executor) prepareOptions(build *common.Build) {
	s.options = &kubernetesOptions{}
	s.options.Image = build.Image

	s.getServices(build)
}

func (s *executor) getServices(build *common.Build) {
	for _, service := range s.Config.Kubernetes.Services {
		if service.Name == "" {
			continue
		}
		s.options.Services = append(s.options.Services, service.ToImageDefinition())
	}

	for _, service := range build.Services {
		if service.Name == "" {
			continue
		}
		s.options.Services = append(s.options.Services, service)
	}
}

// checkDefaults ensures that a build image and a namespace are set,
// falling back to the values configured for the executor
func (s *executor) checkDefaults() error {
	if s.options.Image.Name == "" {
		if s.Config.Kubernetes.Image == "" {
			return fmt.Errorf("no image specified and no default set in config")
		}

		s.options.Image = common.Image{
			Name: s.Config.Kubernetes.Image,
		}
	}

	if s.configurationOverwrites.namespace == "" {
		s.Warningln("Namespace is empty, therefore assuming 'default'.")
		s.configurationOverwrites.namespace = "default"
	}

	s.Println("Using Kubernetes namespace:", s.configurationOverwrites.namespace)

	return nil
}

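// IsKubernetesPodNotFoundError reports whether err is a Kubernetes API status
// error for a pod that does not exist.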
func IsKubernetesPodNotFoundError(err error) bool {
	var statusErr *kubeerrors.StatusError
	return errors.As(err, &statusErr) &&
		statusErr.ErrStatus.Code == http.StatusNotFound &&
		statusErr.ErrStatus.Details != nil &&
		statusErr.ErrStatus.Details.Kind == "pods"
}

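// newExecutor constructs a Kubernetes executor with the default options and a
// log processor factory bound to the executor's pod.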
func newExecutor() *executor {
	e := &executor{
		AbstractExecutor: executors.AbstractExecutor{
			ExecutorOptions: executorOptions,
		},
		remoteProcessTerminated: make(chan shells.TrapCommandExitStatus),
	}

	e.newLogProcessor = func() logProcessor {
		return newKubernetesLogProcessor(
			e.kubeClient,
			e.kubeConfig,
			&backoff.Backoff{Min: time.Second, Max: 30 * time.Second},
			e.Build.Log(),
			kubernetesLogProcessorPodConfig{
				namespace:          e.pod.Namespace,
				pod:                e.pod.Name,
				container:          helperContainerName,
				logPath:            e.logFile(),
				waitLogFileTimeout: waitLogFileTimeout,
			},
		)
	}

	return e
}

func featuresFn(features *common.FeaturesInfo) {
	features.Variables = true
	features.Image = true
	features.Services = true
	features.Artifacts = true
	features.Cache = true
	features.Session = true
	features.Terminal = true
	features.Proxy = true
}

func init() {
	common.RegisterExecutorProvider("kubernetes", executors.DefaultExecutorProvider{
		Creator: func() common.Executor {
			return newExecutor()
		},
		FeaturesUpdater:  featuresFn,
		DefaultShellName: executorOptions.Shell.Shell,
	})
}