############################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import os
import pexpect
import re
import tempfile

from uuid import uuid4
try:
    from collections.abc import Mapping
except ImportError:
    from collections import Mapping

from six import iteritems, string_types

from ansible_runner import defaults
from ansible_runner.output import debug
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.loader import ArtifactLoader
from ansible_runner.utils import (
    open_fifo_write,
    args2cmdline,
    sanitize_container_name,
    cli_mounts
)

logger = logging.getLogger('ansible-runner')


class BaseExecutionMode():
    NONE = 0
    # run ansible commands either locally or within EE
    ANSIBLE_COMMANDS = 1
    # execute generic commands
    GENERIC_COMMANDS = 2


class BaseConfig(object):
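    """
    Base configuration shared by ansible-runner config objects.

    It collects the parameters common to every invocation (private data directory
    layout, environment variables, passwords, container/process-isolation options)
    and prepares the environment and command wrapping consumed by
    :py:class:`ansible_runner.runner.Runner`.

    A minimal illustrative sketch (parameter values here are hypothetical)::

        config = BaseConfig(private_data_dir='/tmp/demo', process_isolation=True,
                            process_isolation_executable='podman')
        config._prepare_env(runner_mode='pexpect')
    """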

    def __init__(self,
                 private_data_dir=None, host_cwd=None, envvars=None, passwords=None, settings=None,
                 project_dir=None, artifact_dir=None, fact_cache_type='jsonfile', fact_cache=None,
                 process_isolation=False, process_isolation_executable=None,
                 container_image=None, container_volume_mounts=None, container_options=None, container_workdir=None,
                 ident=None, rotate_artifacts=0, timeout=None, ssh_key=None, quiet=False, json_mode=False, check_job_event_data=False):
        # common params
        self.host_cwd = host_cwd
        self.envvars = envvars
        self.ssh_key_data = ssh_key

        # container params
        self.process_isolation = process_isolation
        self.process_isolation_executable = process_isolation_executable or defaults.default_process_isolation_executable
        self.container_image = container_image or defaults.default_container_image
        self.container_volume_mounts = container_volume_mounts
        self.container_workdir = container_workdir
        self.container_name = None  # like other properties, not accurate until prepare is called
        self.container_options = container_options
        self._volume_mount_paths = []

        # runner params
        self.private_data_dir = private_data_dir
        self.rotate_artifacts = rotate_artifacts
        self.quiet = quiet
        self.json_mode = json_mode
        self.passwords = passwords
        self.settings = settings
        self.timeout = timeout
        self.check_job_event_data = check_job_event_data

        # setup initial environment
        if private_data_dir:
            self.private_data_dir = os.path.abspath(private_data_dir)
            # Note that os.makedirs with exist_ok=True is dangerous.  If there's a directory writable
            # by someone other than the user anywhere in the path to be created, an attacker can
            # attempt to compromise the directories via a race.
            os.makedirs(self.private_data_dir, exist_ok=True, mode=0o700)
        else:
            self.private_data_dir = tempfile.mkdtemp(prefix=".ansible-runner-")

        if artifact_dir is None:
            artifact_dir = os.path.join(self.private_data_dir, 'artifacts')
        else:
            artifact_dir = os.path.abspath(artifact_dir)

        if ident is None:
            self.ident = str(uuid4())
        else:
            self.ident = ident

        self.artifact_dir = os.path.join(artifact_dir, "{}".format(self.ident))

        if not project_dir:
            self.project_dir = os.path.join(self.private_data_dir, 'project')
        else:
            self.project_dir = project_dir

        self.fact_cache_type = fact_cache_type
        self.fact_cache = os.path.join(self.artifact_dir, fact_cache or 'fact_cache') if self.fact_cache_type == 'jsonfile' else None

        self.loader = ArtifactLoader(self.private_data_dir)

        if self.host_cwd:
            self.host_cwd = os.path.abspath(self.host_cwd)
            self.cwd = self.host_cwd
        else:
            self.cwd = os.getcwd()

        os.makedirs(self.artifact_dir, exist_ok=True, mode=0o700)

    _CONTAINER_ENGINES = ('docker', 'podman')

    @property
    def containerized(self):
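        """True when process isolation is requested with a container engine (podman or docker)."""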
        return self.process_isolation and self.process_isolation_executable in self._CONTAINER_ENGINES

    def _prepare_env(self, runner_mode='pexpect'):
        """
        Manages reading environment metadata files under ``private_data_dir`` and merging/updating
        with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
        """
        self.runner_mode = runner_mode
        try:
            if self.settings and isinstance(self.settings, dict):
                self.settings.update(self.loader.load_file('env/settings', Mapping))
            else:
                self.settings = self.loader.load_file('env/settings', Mapping)
        except ConfigurationError:
            debug("Not loading settings")
            self.settings = dict()

        if self.runner_mode == 'pexpect':
            try:
                if self.passwords and isinstance(self.passwords, dict):
                    self.passwords.update(self.loader.load_file('env/passwords', Mapping))
                else:
                    self.passwords = self.passwords or self.loader.load_file('env/passwords', Mapping)
                self.expect_passwords = {
                    re.compile(pattern, re.M): password
                    for pattern, password in iteritems(self.passwords)
                }
            except ConfigurationError:
                debug('Not loading passwords')
                self.expect_passwords = dict()

            self.expect_passwords[pexpect.TIMEOUT] = None
            self.expect_passwords[pexpect.EOF] = None

            self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
            self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
            self.idle_timeout = self.settings.get('idle_timeout', None)

            if self.timeout:
                self.job_timeout = int(self.timeout)
            else:
                self.job_timeout = self.settings.get('job_timeout', None)

        elif self.runner_mode == 'subprocess':
            if self.timeout:
                self.subprocess_timeout = int(self.timeout)
            else:
                self.subprocess_timeout = self.settings.get('subprocess_timeout', None)

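        # Values present in env/settings take precedence over the corresponding
        # constructor arguments for the process isolation and container options below.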
        self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
        self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)

        self.container_image = self.settings.get('container_image', self.container_image)
        self.container_volume_mounts = self.settings.get('container_volume_mounts', self.container_volume_mounts)
        self.container_options = self.settings.get('container_options', self.container_options)

        if self.containerized:
            self.container_name = "ansible_runner_{}".format(sanitize_container_name(self.ident))
            self.env = {}
            # Special flags to convey info to entrypoint or process in container
            self.env['LAUNCHED_BY_RUNNER'] = '1'

            if self.process_isolation_executable == 'podman':
                # A kernel bug in RHEL < 8.5 causes podman to use the fuse-overlayfs driver. This results in errors when
                # trying to set extended file attributes. Setting this environment variable allows modules to take advantage
                # of a fallback to work around this bug when failures are encountered.
                #
                # See the following for more information:
                #    https://github.com/ansible/ansible/pull/73282
                #    https://github.com/ansible/ansible/issues/73310
                #    https://issues.redhat.com/browse/AAP-476
                self.env['ANSIBLE_UNSAFE_WRITES'] = '1'

            artifact_dir = os.path.join("/runner/artifacts", "{}".format(self.ident))
            self.env['AWX_ISOLATED_DATA_DIR'] = artifact_dir
            if self.fact_cache_type == 'jsonfile':
                self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = os.path.join(artifact_dir, 'fact_cache')
        else:
            # seed env with existing shell env
            self.env = os.environ.copy()

        if self.envvars and isinstance(self.envvars, dict):
            self.env.update(self.envvars)

        try:
            envvars = self.loader.load_file('env/envvars', Mapping)
            if envvars:
                self.env.update({str(k): str(v) for k, v in envvars.items()})
        except ConfigurationError:
            debug("Not loading environment vars")
            # Still need to pass default environment to pexpect

        try:
            if self.ssh_key_data is None:
                self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
        except ConfigurationError:
            debug("Not loading ssh key")
            self.ssh_key_data = None

        # write the SSH key data into a fifo read by ssh-agent
        if self.ssh_key_data:
            self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
            open_fifo_write(self.ssh_key_path, self.ssh_key_data)

        self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)

        if 'fact_cache' in self.settings:
            if 'fact_cache_type' in self.settings:
                if self.settings['fact_cache_type'] == 'jsonfile':
                    self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
            else:
                self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])

        # Use local callback directory
        if not self.containerized:
            callback_dir = self.env.get('AWX_LIB_DIRECTORY', os.getenv('AWX_LIB_DIRECTORY'))
            if callback_dir is None:
                callback_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], "..", "callbacks")
            python_path = self.env.get('PYTHONPATH', os.getenv('PYTHONPATH', ''))
            if python_path and not python_path.endswith(':'):
                python_path += ':'
            self.env['PYTHONPATH'] = python_path + callback_dir
            self.env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(filter(None, (self.env.get('ANSIBLE_CALLBACK_PLUGINS'), callback_dir)))

        if 'AD_HOC_COMMAND_ID' in self.env:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
        else:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
        self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
        if 'ANSIBLE_HOST_KEY_CHECKING' not in self.env:
            self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
        if not self.containerized:
            self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir

        if self.fact_cache_type == 'jsonfile':
            self.env['ANSIBLE_CACHE_PLUGIN'] = 'jsonfile'
            if not self.containerized:
                self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = self.fact_cache

        debug('env:')
        for k, v in sorted(self.env.items()):
            debug(f' {k}: {v}')

    def _handle_command_wrap(self, execution_mode, cmdline_args):
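        """
        Wrap the already-built ``self.command``: add the ``ssh-agent`` wrapper when SSH key
        data was supplied, and the container engine invocation when containerized execution
        is enabled.
        """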
        if self.ssh_key_data:
            logger.debug('ssh key data added')
            self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)

        if self.containerized:
            logger.debug('containerization enabled')
            self.command = self.wrap_args_for_containerization(self.command, execution_mode, cmdline_args)
        else:
            logger.debug('containerization disabled')

        if hasattr(self, 'command') and isinstance(self.command, list):
            logger.debug(f"command: {' '.join(self.command)}")

    def _ensure_path_safe_to_mount(self, path):
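        """
        Refuse to bind-mount overly broad host paths.

        For a file path, the containing directory is checked instead. Paths such as
        ``/``, ``/home`` or ``/usr`` raise :class:`ConfigurationError`.
        """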
        if os.path.isfile(path):
            path = os.path.dirname(path)
        if os.path.join(path, "") in ('/', '/home/', '/usr/'):
            raise ConfigurationError("When using containerized execution, cannot mount '/' or '/home' or '/usr'")

    def _get_playbook_path(self, cmdline_args):
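        """
        Best-effort extraction of the playbook path from ``ansible-playbook`` command line
        arguments, skipping inventory options and other flagged values.

        Illustrative example (hypothetical arguments)::

            self._get_playbook_path(['-i', 'hosts', 'site.yml'])  # -> 'site.yml'
        """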
        _playbook = ""
        _book_keeping_copy = cmdline_args.copy()
        for arg in cmdline_args:
            if arg in ['-i', '--inventory', '--inventory-file']:
                _book_keeping_copy_inventory_index = _book_keeping_copy.index(arg)
                _book_keeping_copy.pop(_book_keeping_copy_inventory_index)
                try:
                    _book_keeping_copy.pop(_book_keeping_copy_inventory_index)
                except IndexError:
                    # invalid command, pass through for execution
                    # to return correct error from ansible-core
                    return None

        if len(_book_keeping_copy) == 1:
            # it's probably safe to assume this is the playbook
            _playbook = _book_keeping_copy[0]
        elif _book_keeping_copy[0][0] != '-':
            # this should be the playbook, it's the only "naked" arg
            _playbook = _book_keeping_copy[0]
        else:
            # parse everything beyond the first arg because we checked that
            # in the previous case already
            for arg in _book_keeping_copy[1:]:
                if arg[0] == '-':
                    continue
                elif _book_keeping_copy[(_book_keeping_copy.index(arg) - 1)][0] != '-':
                    _playbook = arg
                    break

        return _playbook

    def _update_volume_mount_paths(
        self, args_list, src_mount_path, dst_mount_path=None, labels=None
    ):
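        """
        Append a ``-v src/:dst/[:labels]`` volume mount for an existing host path to
        ``args_list``, normalizing both sides to absolute directories with a trailing slash.

        For example (hypothetical path), a ``src_mount_path`` of ``/tmp/inventory/hosts``
        with no ``dst_mount_path`` results in ``-v /tmp/inventory/:/tmp/inventory/``.
        """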

        if src_mount_path is None or not os.path.exists(src_mount_path):
            logger.debug("Source volume mount path does not exist: {0}".format(src_mount_path))
            return

        # ensure source is abs
        src_path = os.path.abspath(os.path.expanduser(os.path.expandvars(src_mount_path)))

        # if no destination was given, reuse the source path; a relative destination
        # is resolved against the container workdir
        if dst_mount_path is None:
            dst_path = src_path
        elif self.container_workdir and not os.path.isabs(dst_mount_path):
            dst_path = os.path.abspath(
                os.path.expanduser(
                    os.path.expandvars(os.path.join(self.container_workdir, dst_mount_path))
                )
            )
        else:
            dst_path = os.path.abspath(os.path.expanduser(os.path.expandvars(dst_mount_path)))

        # ensure each is a directory, not a file; the check uses src for dest
        # because dest may not exist locally
        src_dir = src_path if os.path.isdir(src_path) else os.path.dirname(src_path)
        dst_dir = dst_path if os.path.isdir(src_path) else os.path.dirname(dst_path)

        # always ensure a trailing slash
        src_dir = os.path.join(src_dir, "")
        dst_dir = os.path.join(dst_dir, "")

        # ensure the src and dest are safe mount points
        # after stripping off the file and resolving
        self._ensure_path_safe_to_mount(src_dir)
        self._ensure_path_safe_to_mount(dst_dir)

        # format the src:dest mount string
        volume_mount_path = "{}:{}".format(src_dir, dst_dir)

        # add labels as needed
        if labels:
            if not labels.startswith(":"):
                volume_mount_path += ":"
            volume_mount_path += labels

        # only add the mount path if it is not already in the args list
        if volume_mount_path not in args_list:
            args_list.extend(["-v", volume_mount_path])

    def _handle_ansible_cmd_options_bind_mounts(self, args_list, cmdline_args):
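        """
        Inspect ``cmdline_args`` for options that reference host files (playbook, inventory,
        vault password files, private keys) and add bind mounts for them so the files are
        visible inside the execution container.
        """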
        inventory_file_options = ['-i', '--inventory', '--inventory-file']
        vault_file_options = ['--vault-password-file', '--vault-pass-file']
        private_key_file_options = ['--private-key', '--key-file']

        optional_mount_args = inventory_file_options + vault_file_options + private_key_file_options

        if not cmdline_args:
            return

        if '-h' in cmdline_args or '--help' in cmdline_args:
            return

        for value in self.command:
            if 'ansible-playbook' in value:
                playbook_file_path = self._get_playbook_path(cmdline_args)
                if playbook_file_path:
                    self._update_volume_mount_paths(args_list, playbook_file_path)
                    break

        cmdline_args_copy = cmdline_args.copy()
        optional_arg_paths = []
        for arg in cmdline_args:

            if arg not in optional_mount_args:
                continue

            optional_arg_index = cmdline_args_copy.index(arg)
            optional_arg_paths.append(cmdline_args[optional_arg_index + 1])
            cmdline_args_copy.pop(optional_arg_index)
            try:
                optional_arg_value = cmdline_args_copy.pop(optional_arg_index)
            except IndexError:
                # invalid command, pass through for execution
                # to return valid error from ansible-core
                return

            if arg in inventory_file_options and optional_arg_value.endswith(','):
                # comma separated host list provided as value
                continue

            self._update_volume_mount_paths(args_list, optional_arg_value)

    def wrap_args_for_containerization(self, args, execution_mode, cmdline_args):
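        """
        Build the container engine command line that wraps ``args``.

        The resulting invocation is roughly of the form (illustrative only; details vary
        by engine, execution mode and configuration)::

            podman run --rm --tty --interactive --workdir /runner/project
                -v <private_data_dir>/:/runner/:Z --env-file <artifact_dir>/env.list
                --quiet --name ansible_runner_<ident> <container_image> <args>
        """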
        new_args = [self.process_isolation_executable]
        new_args.extend(['run', '--rm'])

        if self.runner_mode == 'pexpect' or (hasattr(self, 'input_fd') and self.input_fd is not None):
            new_args.extend(['--tty'])

        new_args.append('--interactive')

        if self.container_workdir:
            workdir = self.container_workdir
        elif self.host_cwd is not None and os.path.exists(self.host_cwd):
            # mount the current host working directory if it was passed and exists
            self._ensure_path_safe_to_mount(self.host_cwd)
            self._update_volume_mount_paths(new_args, self.host_cwd)
            workdir = self.host_cwd
        else:
            workdir = "/runner/project"

        self.cwd = workdir
        new_args.extend(["--workdir", workdir])

        # For the run() and run_async() APIs the base execution_mode is 'BaseExecutionMode.NONE'
        # and the container volume mounts are handled separately via 'container_volume_mounts',
        # hence no additional mounts are derived from the command line here
        if execution_mode != BaseExecutionMode.NONE:
            if execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS:
                self._handle_ansible_cmd_options_bind_mounts(new_args, cmdline_args)

            # Handle automounts for .ssh config
            self._handle_automounts(new_args)

            if 'podman' in self.process_isolation_executable:
                # container namespace stuff
                new_args.extend(["--group-add=root"])
                new_args.extend(["--ipc=host"])

            self._ensure_path_safe_to_mount(self.private_data_dir)
            # Relative paths are mounted relative to /runner/project
            for subdir in ('project', 'artifacts'):
                subdir_path = os.path.join(self.private_data_dir, subdir)
                if not os.path.exists(subdir_path):
                    os.mkdir(subdir_path, 0o700)

            # runtime commands need artifacts mounted to output data
            self._update_volume_mount_paths(new_args,
                                            "{}/artifacts".format(self.private_data_dir),
                                            dst_mount_path="/runner/artifacts",
                                            labels=":Z")

            # Mount the entire private_data_dir;
            # mounting individual paths inside private_data_dir does not make sense
            self._update_volume_mount_paths(new_args,
                                            "{}".format(self.private_data_dir),
                                            dst_mount_path="/runner",
                                            labels=":Z")
        else:
            subdir_path = os.path.join(self.private_data_dir, 'artifacts')
            if not os.path.exists(subdir_path):
                os.mkdir(subdir_path, 0o700)

            # Mount the entire private_data_dir;
            # mounting individual paths inside private_data_dir does not make sense
            self._update_volume_mount_paths(new_args, "{}".format(self.private_data_dir), dst_mount_path="/runner", labels=":Z")

        if self.container_volume_mounts:
            for mapping in self.container_volume_mounts:
                volume_mounts = mapping.split(':', 2)
                self._ensure_path_safe_to_mount(volume_mounts[0])
                labels = None
                if len(volume_mounts) == 3:
                    labels = ":%s" % volume_mounts[2]
                self._update_volume_mount_paths(new_args, volume_mounts[0], dst_mount_path=volume_mounts[1], labels=labels)

        # Reference the file with the list of keys to pass into the container;
        # this file will be written by ansible_runner.runner
        env_file_host = os.path.join(self.artifact_dir, 'env.list')
        new_args.extend(['--env-file', env_file_host])

        if 'podman' in self.process_isolation_executable:
            # docker doesn't support this option
            new_args.extend(['--quiet'])

        if 'docker' in self.process_isolation_executable:
            new_args.extend([f'--user={os.getuid()}'])

        new_args.extend(['--name', self.container_name])

        if self.container_options:
            new_args.extend(self.container_options)

        new_args.extend([self.container_image])
        new_args.extend(args)
        logger.debug(f"container engine invocation: {' '.join(new_args)}")
        return new_args

    def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
        """
        Given an existing command line and parameterization, this will return the same command line wrapped with the
        necessary calls to ``ssh-agent``.
        """
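        # The wrapped command is roughly of the form (illustrative):
        #   ssh-agent [-a <auth_sock>] sh -c 'trap "rm -f <key_path>" EXIT && ssh-add <key_path> && rm -f <key_path> && <original command>'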
        if self.containerized:
            artifact_dir = os.path.join("/runner/artifacts", "{}".format(self.ident))
            ssh_key_path = os.path.join(artifact_dir, "ssh_key_data")

        if ssh_key_path:
            ssh_add_command = args2cmdline('ssh-add', ssh_key_path)
            if silence_ssh_add:
                ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
            ssh_key_cleanup_command = 'rm -f {}'.format(ssh_key_path)
            # The trap ensures the fifo is cleaned up even if the call to ssh-add fails.
            # This prevents getting into certain scenarios where subsequent reads will
            # hang forever.
            cmd = ' && '.join([args2cmdline('trap', ssh_key_cleanup_command, 'EXIT'),
                               ssh_add_command,
                               ssh_key_cleanup_command,
                               args2cmdline(*args)])
            args = ['ssh-agent']
            if ssh_auth_sock:
                args.extend(['-a', ssh_auth_sock])
            args.extend(['sh', '-c', cmd])
        return args

    def _handle_automounts(self, new_args):
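        """
        Apply the CLI automount definitions from :func:`ansible_runner.utils.cli_mounts`
        (for example SSH-related environment variables and paths): existing host paths are
        bind-mounted into the container and the corresponding environment variables are
        passed through, with paths under ``$HOME`` remapped to ``/home/runner``.
        """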
        for cli_automount in cli_mounts():
            for env in cli_automount['ENVS']:
                if env in os.environ:
                    dest_path = os.environ[env]

                    if os.path.exists(os.environ[env]):
                        if os.environ[env].startswith(os.environ['HOME']):
                            # str.lstrip() strips a set of characters rather than a prefix,
                            # so compute the path relative to HOME explicitly
                            dest_path = '/home/runner/{}'.format(os.path.relpath(os.environ[env], os.environ['HOME']))
                        elif os.environ[env].startswith('~'):
                            dest_path = '/home/runner/{}'.format(os.environ[env].lstrip('~/'))
                        else:
                            dest_path = os.environ[env]

                        self._update_volume_mount_paths(new_args, os.environ[env], dst_mount_path=dest_path)

                    new_args.extend(["-e", "{}={}".format(env, dest_path)])

            for paths in cli_automount['PATHS']:
                if os.path.exists(paths['src']):
                    self._update_volume_mount_paths(new_args, paths['src'], dst_mount_path=paths['dest'])