1"""Execute Ansible tests."""
2from __future__ import (absolute_import, division, print_function)
3__metaclass__ = type
4
5import atexit
6import json
7import os
8import datetime
9import re
10import time
11import textwrap
12import functools
13import hashlib
14import difflib
15import filecmp
16import random
17import string
18import shutil
19
20from . import types as t
21
22from .thread import (
23    WrappedThread,
24)
25
26from .core_ci import (
27    AnsibleCoreCI,
28    SshKey,
29)
30
31from .manage_ci import (
32    ManageWindowsCI,
33    ManageNetworkCI,
34)
35
36from .cloud import (
37    cloud_filter,
38    cloud_init,
39    get_cloud_environment,
40    get_cloud_platforms,
41    CloudEnvironmentConfig,
42)
43
44from .io import (
45    make_dirs,
46    open_text_file,
47    read_binary_file,
48    read_text_file,
49    write_text_file,
50)
51
52from .util import (
53    ApplicationWarning,
54    ApplicationError,
55    SubprocessError,
56    display,
57    remove_tree,
58    find_executable,
59    raw_command,
60    get_available_port,
61    generate_pip_command,
62    find_python,
63    cmd_quote,
64    ANSIBLE_LIB_ROOT,
65    ANSIBLE_TEST_DATA_ROOT,
66    ANSIBLE_TEST_CONFIG_ROOT,
67    get_ansible_version,
68    tempdir,
69    open_zipfile,
70    SUPPORTED_PYTHON_VERSIONS,
71    str_to_version,
72    version_to_str,
73)
74
75from .util_common import (
76    get_docker_completion,
77    get_network_settings,
78    get_remote_completion,
79    get_python_path,
80    intercept_command,
81    named_temporary_file,
82    run_command,
83    write_json_test_results,
84    ResultType,
85    handle_layout_messages,
86    CommonConfig,
87)
88
89from .docker_util import (
90    docker_pull,
91    docker_run,
92    docker_available,
93    docker_rm,
94    get_docker_container_id,
95    get_docker_container_ip,
96    get_docker_hostname,
97    get_docker_preferred_network_name,
98    is_docker_user_defined_network,
99)
100
101from .ansible_util import (
102    ansible_environment,
103    check_pyyaml,
104)
105
106from .target import (
107    IntegrationTarget,
108    walk_internal_targets,
109    walk_posix_integration_targets,
110    walk_network_integration_targets,
111    walk_windows_integration_targets,
112    TIntegrationTarget,
113)
114
115from .ci import (
116    get_ci_provider,
117)
118
119from .classification import (
120    categorize_changes,
121)
122
123from .config import (
124    TestConfig,
125    EnvironmentConfig,
126    IntegrationConfig,
127    NetworkIntegrationConfig,
128    PosixIntegrationConfig,
129    ShellConfig,
130    WindowsIntegrationConfig,
131    TIntegrationConfig,
132    UnitsConfig,
133    SanityConfig,
134)
135
136from .metadata import (
137    ChangeDescription,
138)
139
140from .integration import (
141    integration_test_environment,
142    integration_test_config_file,
143    setup_common_temp_dir,
144    get_inventory_relative_path,
145    check_inventory,
146    delegate_inventory,
147)
148
149from .data import (
150    data_context,
151)
152
153from .http import (
154    urlparse,
155)
156
# Hostnames served by the http-tester container; targets tagged 'needs/httptester/'
# reach these names either directly or through SSH port forwarding set up below.
HTTPTESTER_HOSTS = (
    'ansible.http.tests',
    'sni1.ansible.http.tests',
    'fail.ansible.http.tests',
)
162
163
def check_startup():
    """Perform sanity checks at startup, before any command runs."""
    check_legacy_modules()
167
168
def check_legacy_modules():
    """Detect conflicts with legacy core/extras module directories to avoid problems later."""
    for legacy_name in ('core', 'extras'):
        legacy_path = 'lib/ansible/modules/%s' % legacy_name

        for walk_root, _dir_names, walk_files in os.walk(legacy_path):
            # the directory shouldn't exist, but if it does, it must contain no files
            if walk_files:
                raise ApplicationError('Files prohibited in "%s". '
                                       'These are most likely legacy modules from version 2.2 or earlier.' % walk_root)
179
180
def create_shell_command(command):
    """
    Wrap the given command in /usr/bin/env, forwarding selected environment variables.

    :type command: list[str]
    :rtype: list[str]
    """
    optional_vars = (
        'TERM',  # keep terminal type so interactive shells behave correctly
    )

    env_settings = ['%s=%s' % (name, os.environ[name]) for name in optional_vars if name in os.environ]

    return ['/usr/bin/env'] + env_settings + list(command)
195
196
def get_openssl_version(args, python, python_version):  # type: (EnvironmentConfig, str, str) -> t.Optional[t.Tuple[int, ...]]
    """Return the openssl version."""
    if not python_version.startswith('2.'):
        # Ask the interpreter itself. This only works on Python 3.x, but is the most
        # accurate source since it is the Python that will actually be used.
        sslcheck_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sslcheck.py')
        stdout = run_command(args, [python, sslcheck_path], capture=True, always=True)[0]
        version = json.loads(stdout)['version']

        if version:
            display.info('Detected OpenSSL version %s under Python %s.' % (version_to_str(version), python_version), verbosity=1)

            return tuple(version)

    # Fall back to parsing the version from the OpenSSL CLI.
    # This provides an adequate answer on Python 2.x.
    openssl_path = find_executable('openssl', required=False)

    if openssl_path:
        try:
            output = raw_command([openssl_path, 'version'], capture=True)[0]
        except SubprocessError:
            output = ''

        match = re.search(r'^OpenSSL (?P<version>[0-9]+\.[0-9]+\.[0-9]+)', output)

        if match:
            version = str_to_version(match.group('version'))

            display.info('Detected OpenSSL version %s using the openssl CLI.' % version_to_str(version), verbosity=1)

            return version

    display.info('Unable to detect OpenSSL version.', verbosity=1)

    return None
231
232
def get_setuptools_version(args, python):  # type: (EnvironmentConfig, str) -> t.Tuple[int]
    """Return the setuptools version for the given python."""
    probe = [python, '-c', 'import setuptools; print(setuptools.__version__)']

    try:
        stdout = raw_command(probe, capture=True)[0]
    except SubprocessError:
        if not args.explain:
            raise

        return tuple()  # ignore errors in explain mode in case setuptools is not already installed

    return str_to_version(stdout)
242
243
def get_cryptography_requirement(args, python_version):  # type: (EnvironmentConfig, str) -> str
    """
    Return the correct cryptography requirement for the given python version.
    The version of cryptography installed depends on the python version, setuptools version and openssl version.
    """
    # both probes run unconditionally so their diagnostic output is always produced
    python = find_python(python_version)
    setuptools_version = get_setuptools_version(args, python)
    openssl_version = get_openssl_version(args, python, python_version)

    if setuptools_version < (18, 5):
        # cryptography 2.1+ requires setuptools 18.5+
        # see https://github.com/pyca/cryptography/blob/62287ae18383447585606b9d0765c0f1b8a9777c/setup.py#L26
        return 'cryptography < 2.1'

    if python_version == '2.6':
        # cryptography 2.2+ requires python 2.7+
        # see https://github.com/pyca/cryptography/blob/master/CHANGELOG.rst#22---2018-03-19
        return 'cryptography < 2.2'

    if openssl_version and openssl_version < (1, 1, 0):
        # cryptography 3.2 requires openssl 1.1.x or later
        # see https://cryptography.io/en/latest/changelog.html#v3-2
        return 'cryptography < 3.2'

    # cryptography 3.4+ fails to install on many systems
    # this is a temporary work-around until a more permanent solution is available
    return 'cryptography < 3.4'
272
273
def install_command_requirements(args, python_version=None, context=None, enable_pyyaml_check=False):
    """
    Install the pip requirements needed to run the current command, skipping work already done.

    :type args: EnvironmentConfig
    :type python_version: str | None
    :type context: str | None
    :type enable_pyyaml_check: bool
    """
    if not args.explain:
        make_dirs(ResultType.COVERAGE.path)
        make_dirs(ResultType.DATA.path)

    if isinstance(args, ShellConfig):
        if args.raw:
            # raw shells get no environment preparation at all
            return

    if not args.requirements:
        return

    if isinstance(args, ShellConfig):
        # non-raw shells still skip requirement installation
        return

    packages = []

    if isinstance(args, TestConfig):
        if args.coverage:
            packages.append('coverage')
        if args.junit:
            packages.append('junit-xml')

    if not python_version:
        python_version = args.python_version

    pip = generate_pip_command(find_python(python_version))

    # skip packages which have already been installed for python_version

    # the cache is stored as a function attribute so it persists across calls within one run
    try:
        package_cache = install_command_requirements.package_cache
    except AttributeError:
        package_cache = install_command_requirements.package_cache = {}

    installed_packages = package_cache.setdefault(python_version, set())
    skip_packages = [package for package in packages if package in installed_packages]

    for package in skip_packages:
        packages.remove(package)

    installed_packages.update(packages)

    if args.command != 'sanity':
        install_ansible_test_requirements(args, pip)

        # make sure setuptools is available before trying to install cryptography
        # the installed version of setuptools affects the version of cryptography to install
        run_command(args, generate_pip_install(pip, '', packages=['setuptools']))

        # install the latest cryptography version that the current requirements can support
        # use a custom constraints file to avoid the normal constraints file overriding the chosen version of cryptography
        # if not installed here later install commands may try to install an unsupported version due to the presence of older setuptools
        # this is done instead of upgrading setuptools to allow tests to function with older distribution provided versions of setuptools
        run_command(args, generate_pip_install(pip, '',
                                               packages=[get_cryptography_requirement(args, python_version)],
                                               constraints=os.path.join(ANSIBLE_TEST_DATA_ROOT, 'cryptography-constraints.txt')))

    commands = [generate_pip_install(pip, args.command, packages=packages, context=context)]

    if isinstance(args, IntegrationConfig):
        for cloud_platform in get_cloud_platforms(args):
            commands.append(generate_pip_install(pip, '%s.cloud.%s' % (args.command, cloud_platform)))

    # generate_pip_install returns None when there is nothing to install
    commands = [cmd for cmd in commands if cmd]

    if not commands:
        return  # no need to detect changes or run pip check since we are not making any changes

    # only look for changes when more than one requirements file is needed
    detect_pip_changes = len(commands) > 1

    # first pass to install requirements, changes expected unless environment is already set up
    install_ansible_test_requirements(args, pip)
    changes = run_pip_commands(args, pip, commands, detect_pip_changes)

    if changes:
        # second pass to check for conflicts in requirements, changes are not expected here
        changes = run_pip_commands(args, pip, commands, detect_pip_changes)

        if changes:
            raise ApplicationError('Conflicts detected in requirements. The following commands reported changes during verification:\n%s' %
                                   '\n'.join((' '.join(cmd_quote(c) for c in cmd) for cmd in changes)))

    if args.pip_check:
        # ask pip to check for conflicts between installed packages
        try:
            run_command(args, pip + ['check', '--disable-pip-version-check'], capture=True)
        except SubprocessError as ex:
            # older pip versions do not support the check sub-command
            if ex.stderr.strip() == 'ERROR: unknown command "check"':
                display.warning('Cannot check pip requirements for conflicts because "pip check" is not supported.')
            else:
                raise

    if enable_pyyaml_check:
        # pyyaml may have been one of the requirements that was installed, so perform an optional check for it
        check_pyyaml(args, python_version, required=False)
377
378
def install_ansible_test_requirements(args, pip):  # type: (EnvironmentConfig, t.List[str]) -> None
    """Install requirements for ansible-test for the given pip if not already installed."""
    # the cache is shared with install_command_requirements via a function attribute
    installed = getattr(install_command_requirements, 'installed', None)

    if installed is None:
        installed = install_command_requirements.installed = set()

    cache_key = tuple(pip)

    if cache_key in installed:
        return

    # make sure basic ansible-test requirements are met, including making sure that pip is recent enough to support constraints
    # virtualenvs created by older distributions may include very old pip versions, such as those created in the centos6 test container (pip 6.0.8)
    run_command(args, generate_pip_install(pip, 'ansible-test', use_constraints=False))

    installed.add(cache_key)
394
395
def run_pip_commands(args, pip, commands, detect_pip_changes=False):
    """
    Run the given pip commands, returning the ones which changed the installed package set.

    :type args: EnvironmentConfig
    :type pip: list[str]
    :type commands: list[list[str]]
    :type detect_pip_changes: bool
    :rtype: list[list[str]]
    """
    changed_commands = []
    current_listing = pip_list(args, pip) if detect_pip_changes else None

    for command in commands:
        if not command:
            continue

        previous_listing = current_listing

        run_command(args, command)

        current_listing = pip_list(args, pip) if detect_pip_changes else None

        if previous_listing != current_listing:
            changed_commands.append(command)

    return changed_commands
422
423
def pip_list(args, pip):
    """
    Return the output of `pip list` for the given pip.

    :type args: EnvironmentConfig
    :type pip: list[str]
    :rtype: str
    """
    return run_command(args, pip + ['list'], capture=True)[0]
432
433
def _add_requirements_file(options, requirements):  # type: (t.List[str], str) -> None
    """Append a pip `-r` option for the given requirements file if it exists and is not empty."""
    if os.path.exists(requirements) and os.path.getsize(requirements):
        options += ['-r', requirements]


def generate_pip_install(pip, command, packages=None, constraints=None, use_constraints=True, context=None):
    """
    Build the pip install command line for the given command's requirements.
    Returns None when there is nothing to install.

    :type pip: list[str]
    :type command: str
    :type packages: list[str] | None
    :type constraints: str | None
    :type use_constraints: bool
    :type context: str | None
    :rtype: list[str] | None
    """
    constraints = constraints or os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'constraints.txt')
    requirements = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', '%s.txt' % ('%s.%s' % (command, context) if context else command))
    content_constraints = None

    options = []

    _add_requirements_file(options, requirements)

    if command == 'sanity' and data_context().content.is_ansible:
        _add_requirements_file(options, os.path.join(data_context().content.sanity_path, 'code-smell', '%s.requirements.txt' % context))

    if command == 'units':
        _add_requirements_file(options, os.path.join(data_context().content.unit_path, 'requirements.txt'))

        content_constraints = os.path.join(data_context().content.unit_path, 'constraints.txt')

    if command in ('integration', 'windows-integration', 'network-integration'):
        _add_requirements_file(options, os.path.join(data_context().content.integration_path, 'requirements.txt'))
        _add_requirements_file(options, os.path.join(data_context().content.integration_path, '%s.requirements.txt' % command))

        content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt')

    if command.startswith('integration.cloud.'):
        content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt')

    if packages:
        options += packages

    if not options:
        return None

    if use_constraints:
        if content_constraints and os.path.exists(content_constraints) and os.path.getsize(content_constraints):
            # listing content constraints first gives them priority over constraints provided by ansible-test
            options.extend(['-c', content_constraints])

        options.extend(['-c', constraints])

    return pip + ['install', '--disable-pip-version-check'] + options
497
498
def command_shell(args):
    """
    Open an interactive shell in the test environment.

    :type args: ShellConfig
    """
    if args.delegate:
        raise Delegate()

    install_command_requirements(args)

    if args.inject_httptester:
        inject_httptester(args)

    run_command(args, create_shell_command(['bash', '-i']))
513
514
def command_posix_integration(args):
    """
    Run POSIX integration tests.

    :type args: PosixIntegrationConfig
    """
    handle_layout_messages(data_context().content.integration_messages)

    relative_inventory = get_inventory_relative_path(args)
    inventory_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, os.path.basename(relative_inventory))

    all_targets = tuple(walk_posix_integration_targets(include_hidden=True))
    internal_targets = command_integration_filter(args, all_targets)

    command_integration_filtered(args, internal_targets, all_targets, inventory_path)
527
528
def command_network_integration(args):
    """
    Run network integration tests, provisioning remote network instances when --platform is used.

    :type args: NetworkIntegrationConfig
    """
    handle_layout_messages(data_context().content.integration_messages)

    inventory_relative_path = get_inventory_relative_path(args)
    template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'

    if args.inventory:
        inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory)
    else:
        inventory_path = os.path.join(data_context().content.root, inventory_relative_path)

    if args.no_temp_workdir:
        # temporary solution to keep DCI tests working
        inventory_exists = os.path.exists(inventory_path)
    else:
        inventory_exists = os.path.isfile(inventory_path)

    if not args.explain and not args.platform and not inventory_exists:
        raise ApplicationError(
            'Inventory not found: %s\n'
            'Use --inventory to specify the inventory path.\n'
            'Use --platform to provision resources and generate an inventory file.\n'
            'See also inventory template: %s' % (inventory_path, template_path)
        )

    check_inventory(args, inventory_path)
    delegate_inventory(args, inventory_path)

    all_targets = tuple(walk_network_integration_targets(include_hidden=True))
    # network_init provisions instances as a side effect and records them in args.metadata
    internal_targets = command_integration_filter(args, all_targets, init_callback=network_init)
    instances = []  # type: t.List[WrappedThread]

    if args.platform:
        get_python_path(args, args.python_executable)  # initialize before starting threads

        # map platform/version to the instance config recorded by network_init
        configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)

        for platform_version in args.platform:
            platform, version = platform_version.split('/', 1)
            config = configs.get(platform_version)

            if not config:
                # network_init skipped this platform because no selected tests target it
                continue

            # connect to each provisioned instance in parallel
            instance = WrappedThread(functools.partial(network_run, args, platform, version, config))
            instance.daemon = True
            instance.start()
            instances.append(instance)

        while any(instance.is_alive() for instance in instances):
            time.sleep(1)

        remotes = [instance.wait_for_result() for instance in instances]
        inventory = network_inventory(remotes)

        display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3)

        if not args.explain:
            write_text_file(inventory_path, inventory)

    success = False

    try:
        command_integration_filtered(args, internal_targets, all_targets, inventory_path)
        success = True
    finally:
        # tear down remote instances according to the termination policy
        if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
            for instance in instances:
                instance.result.stop()
601
602
def network_init(args, internal_targets):  # type: (NetworkIntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> None
    """Initialize platforms for network integration tests."""
    if not args.platform:
        return

    if args.metadata.instance_config is not None:
        return  # instances were already provisioned (e.g. restored from delegation metadata)

    platform_targets = set(alias for target in internal_targets for alias in target.aliases if alias.startswith('network/'))

    threads = []  # type: t.List[WrappedThread]

    # generate an ssh key (if needed) up front once, instead of for each instance
    SshKey(args)

    for platform_version in args.platform:
        platform, version = platform_version.split('/', 1)

        if 'network/%s/' % platform not in platform_targets:
            display.warning('Skipping "%s" because selected tests do not target the "%s" platform.' % (
                platform_version, platform))
            continue

        # provision each requested platform in parallel
        thread = WrappedThread(functools.partial(network_start, args, platform, version))
        thread.daemon = True
        thread.start()
        threads.append(thread)

    while any(thread.is_alive() for thread in threads):
        time.sleep(1)

    args.metadata.instance_config = [thread.wait_for_result() for thread in threads]
636
637
def network_start(args, platform, version):
    """
    Provision a new remote network instance and return its saved state.

    :type args: NetworkIntegrationConfig
    :type platform: str
    :type version: str
    :rtype: AnsibleCoreCI
    """
    core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
    core_ci.start()
    return core_ci.save()
649
650
def network_run(args, platform, version, config):
    """
    Reconnect to an already provisioned remote network instance and wait until it is usable.

    :type args: NetworkIntegrationConfig
    :type platform: str
    :type version: str
    :type config: dict[str, str]
    :rtype: AnsibleCoreCI
    """
    # load the previously saved instance state instead of provisioning a new one
    core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider, load=False)
    core_ci.load(config)
    core_ci.wait()

    # wait for the network device itself to accept connections
    ManageNetworkCI(core_ci).wait()

    return core_ci
667
668
def network_inventory(remotes):
    """
    Build an INI-style inventory string for the given remote network instances.

    :type remotes: list[AnsibleCoreCI]
    :rtype: str
    """
    groups = dict((remote.platform, []) for remote in remotes)
    net = []

    for remote in remotes:
        host_vars = dict(
            ansible_host=remote.connection.hostname,
            ansible_user=remote.connection.username,
            ansible_ssh_private_key_file=os.path.abspath(remote.ssh_key.key),
        )

        # platform-specific connection settings (e.g. ansible_connection, ansible_network_os)
        settings = get_network_settings(remote.args, remote.platform, remote.version)
        host_vars.update(settings.inventory_vars)

        host_line = '%s %s' % (
            remote.name.replace('.', '-'),
            ' '.join('%s="%s"' % (key, host_vars[key]) for key in sorted(host_vars)),
        )

        groups[remote.platform].append(host_line)
        net.append(remote.platform)

    # expose every platform group as a child of the 'net' group
    groups['net:children'] = net

    sections = []

    for group, hosts in groups.items():
        sections.append(textwrap.dedent("""
        [%s]
        %s
        """) % (group, '\n'.join(hosts)))

    return ''.join(sections)
712
713
def command_windows_integration(args):
    """
    Run Windows integration tests, provisioning remote Windows instances when --windows is used.

    :type args: WindowsIntegrationConfig
    """
    handle_layout_messages(data_context().content.integration_messages)

    inventory_relative_path = get_inventory_relative_path(args)
    template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'

    if args.inventory:
        inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory)
    else:
        inventory_path = os.path.join(data_context().content.root, inventory_relative_path)

    if not args.explain and not args.windows and not os.path.isfile(inventory_path):
        raise ApplicationError(
            'Inventory not found: %s\n'
            'Use --inventory to specify the inventory path.\n'
            'Use --windows to provision resources and generate an inventory file.\n'
            'See also inventory template: %s' % (inventory_path, template_path)
        )

    check_inventory(args, inventory_path)
    delegate_inventory(args, inventory_path)

    all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
    # windows_init provisions instances as a side effect and records them in args.metadata
    internal_targets = command_integration_filter(args, all_targets, init_callback=windows_init)
    instances = []  # type: t.List[WrappedThread]
    pre_target = None  # callback run before each target that needs the http-tester
    post_target = None  # callback run after each such target to clean up
    httptester_id = None  # docker container id when we started the http-tester ourselves

    if args.windows:
        get_python_path(args, args.python_executable)  # initialize before starting threads

        # map platform/version to the instance config recorded by windows_init
        configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)

        for version in args.windows:
            config = configs['windows/%s' % version]

            # connect to each provisioned instance in parallel
            instance = WrappedThread(functools.partial(windows_run, args, version, config))
            instance.daemon = True
            instance.start()
            instances.append(instance)

        while any(instance.is_alive() for instance in instances):
            time.sleep(1)

        remotes = [instance.wait_for_result() for instance in instances]
        inventory = windows_inventory(remotes)

        display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3)

        if not args.explain:
            write_text_file(inventory_path, inventory)

        use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in internal_targets)
        # if running under Docker delegation, the httptester may have already been started
        docker_httptester = bool(os.environ.get("HTTPTESTER", False))

        if use_httptester and not docker_available() and not docker_httptester:
            display.warning('Assuming --disable-httptester since `docker` is not available.')
        elif use_httptester:
            if docker_httptester:
                # we are running in a Docker container that is linked to the httptester container, we just need to
                # forward these requests to the linked hostname
                first_host = HTTPTESTER_HOSTS[0]
                ssh_options = ["-R", "8080:%s:80" % first_host, "-R", "8443:%s:443" % first_host]
            else:
                # we are running directly and need to start the httptester container ourselves and forward the port
                # from there manually set so HTTPTESTER env var is set during the run
                args.inject_httptester = True
                httptester_id, ssh_options = start_httptester(args)

            # to get this SSH command to run in the background we need to set to run in background (-f) and disable
            # the pty allocation (-T)
            ssh_options.insert(0, "-fT")

            # create a script that will continue to run in the background until the script is deleted, this will
            # cleanup and close the connection
            def forward_ssh_ports(target):
                """
                Upload and launch the http-tester watcher script on each remote, forwarding the tester ports over SSH.

                :type target: IntegrationTarget
                """
                if 'needs/httptester/' not in target.aliases:
                    return

                # Server 2008 is excluded; the watcher approach is not used there
                for remote in [r for r in remotes if r.version != '2008']:
                    manage = ManageWindowsCI(remote)
                    manage.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'windows-httptester.ps1'), watcher_path)

                    # We cannot pass an array of string with -File so we just use a delimiter for multiple values
                    script = "powershell.exe -NoProfile -ExecutionPolicy Bypass -File .\\%s -Hosts \"%s\"" \
                             % (watcher_path, "|".join(HTTPTESTER_HOSTS))
                    if args.verbosity > 3:
                        script += " -Verbose"
                    manage.ssh(script, options=ssh_options, force_pty=False)

            def cleanup_ssh_ports(target):
                """
                Delete the watcher script on each remote, which stops the background forwarder.

                :type target: IntegrationTarget
                """
                if 'needs/httptester/' not in target.aliases:
                    return

                for remote in [r for r in remotes if r.version != '2008']:
                    # delete the tmp file that keeps the http-tester alive
                    manage = ManageWindowsCI(remote)
                    manage.ssh("cmd.exe /c \"del %s /F /Q\"" % watcher_path, force_pty=False)

            # timestamped name keeps concurrent runs from clobbering each other's watcher
            watcher_path = "ansible-test-http-watcher-%s.ps1" % time.time()
            pre_target = forward_ssh_ports
            post_target = cleanup_ssh_ports

    def run_playbook(playbook, run_playbook_vars):  # type: (str, t.Dict[str, t.Any]) -> None
        """Run one of the bundled playbooks against the generated inventory."""
        playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
        command = ['ansible-playbook', '-i', inventory_path, playbook_path, '-e', json.dumps(run_playbook_vars)]
        if args.verbosity:
            command.append('-%s' % ('v' * args.verbosity))

        env = ansible_environment(args)
        intercept_command(args, command, '', env, disable_coverage=True)

    remote_temp_path = None

    if args.coverage and not args.coverage_check:
        # Create the remote directory that is writable by everyone. Use Ansible to talk to the remote host.
        remote_temp_path = 'C:\\ansible_test_coverage_%s' % time.time()
        playbook_vars = {'remote_temp_path': remote_temp_path}
        run_playbook('windows_coverage_setup.yml', playbook_vars)

    success = False

    try:
        command_integration_filtered(args, internal_targets, all_targets, inventory_path, pre_target=pre_target,
                                     post_target=post_target, remote_temp_path=remote_temp_path)
        success = True
    finally:
        if httptester_id:
            # remove the http-tester container we started
            docker_rm(args, httptester_id)

        if remote_temp_path:
            # Zip up the coverage files that were generated and fetch it back to localhost.
            with tempdir() as local_temp_path:
                playbook_vars = {'remote_temp_path': remote_temp_path, 'local_temp_path': local_temp_path}
                run_playbook('windows_coverage_teardown.yml', playbook_vars)

                for filename in os.listdir(local_temp_path):
                    with open_zipfile(os.path.join(local_temp_path, filename)) as coverage_zip:
                        coverage_zip.extractall(ResultType.COVERAGE.path)

        # tear down remote instances according to the termination policy
        if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
            for instance in instances:
                instance.result.stop()
868
869
# noinspection PyUnusedLocal
def windows_init(args, internal_targets):  # pylint: disable=locally-disabled, unused-argument
    """
    Provision one Windows instance per requested version using background threads.

    :type args: WindowsIntegrationConfig
    :type internal_targets: tuple[IntegrationTarget]
    """
    if not args.windows:
        return  # no Windows versions were requested

    if args.metadata.instance_config is not None:
        return  # instances were already provisioned

    threads = []  # type: t.List[WrappedThread]

    for version in args.windows:
        thread = WrappedThread(functools.partial(windows_start, args, version))
        thread.daemon = True
        thread.start()
        threads.append(thread)

    # poll until every provisioning thread has finished
    while any(thread.is_alive() for thread in threads):
        time.sleep(1)

    args.metadata.instance_config = [thread.wait_for_result() for thread in threads]
894
895
def windows_start(args, version):
    """
    Start a new Windows instance and return its saved instance config.

    The returned value is the result of AnsibleCoreCI.save(), which is later
    passed to windows_run() as its ``config`` argument and stored in
    ``args.metadata.instance_config`` by windows_init().

    :type args: WindowsIntegrationConfig
    :type version: str
    :rtype: dict[str, str]
    """
    core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider)
    core_ci.start()

    # NOTE: the original docstring claimed ``:rtype: AnsibleCoreCI``, but save()
    # produces the instance config consumed by windows_run(config).
    return core_ci.save()
906
907
def windows_run(args, version, config):
    """
    Reconstruct a core-ci instance from saved config and wait until it is usable.

    :type args: WindowsIntegrationConfig
    :type version: str
    :type config: dict[str, str]
    :rtype: AnsibleCoreCI
    """
    core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider, load=False)
    core_ci.load(config)
    core_ci.wait()

    # wait for the Windows host itself to respond, not just the instance record
    ManageWindowsCI(core_ci).wait()

    return core_ci
923
924
def windows_inventory(remotes):
    """
    Render an INI-format Ansible inventory for the given Windows remotes.

    :type remotes: list[AnsibleCoreCI]
    :rtype: str
    """
    host_lines = []

    for remote in remotes:
        options = dict(
            ansible_host=remote.connection.hostname,
            ansible_user=remote.connection.username,
            ansible_password=remote.connection.password,
            ansible_port=remote.connection.port,
        )

        # used for the connection_windows_ssh test target
        if remote.ssh_key:
            options["ansible_ssh_private_key_file"] = os.path.abspath(remote.ssh_key.key)

        if remote.name == 'windows-2008':
            # force 2008 to use PSRP for the connection plugin
            extra = dict(
                ansible_connection='psrp',
                ansible_psrp_auth='basic',
                ansible_psrp_cert_validation='ignore',
            )
        elif remote.name == 'windows-2016':
            # force 2016 to use NTLM + HTTP message encryption
            extra = dict(
                ansible_connection='winrm',
                ansible_winrm_server_cert_validation='ignore',
                ansible_winrm_transport='ntlm',
                ansible_winrm_scheme='http',
                ansible_port='5985',
            )
        else:
            extra = dict(
                ansible_connection='winrm',
                ansible_winrm_server_cert_validation='ignore',
            )

        options.update(extra)

        # inventory host names must not contain slashes
        safe_name = remote.name.replace('/', '_')
        rendered_options = ' '.join('%s="%s"' % (key, options[key]) for key in sorted(options))
        host_lines.append('%s %s' % (safe_name, rendered_options))

    template = """
    [windows]
    %s

    # support winrm binary module tests (temporary solution)
    [testhost:children]
    windows
    """

    return textwrap.dedent(template) % '\n'.join(host_lines)
986
987
def command_integration_filter(args,  # type: TIntegrationConfig
                               targets,  # type: t.Iterable[TIntegrationTarget]
                               init_callback=None,  # type: t.Callable[[TIntegrationConfig, t.Tuple[TIntegrationTarget, ...]], None]
                               ):  # type: (...) -> t.Tuple[TIntegrationTarget, ...]
    """Filter the given integration test targets."""
    # hidden targets are never selected for direct execution
    targets = tuple(target for target in targets if 'hidden/' not in target.aliases)
    changes = get_changes_filter(args)

    # special behavior when the --changed-all-target target is selected based on changes
    if args.changed_all_target in changes:
        # act as though the --changed-all-target target was in the include list
        if args.changed_all_mode == 'include' and args.changed_all_target not in args.include:
            args.include.append(args.changed_all_target)
            args.delegate_args += ['--include', args.changed_all_target]
        # act as though the --changed-all-target target was in the exclude list
        elif args.changed_all_mode == 'exclude' and args.changed_all_target not in args.exclude:
            args.exclude.append(args.changed_all_target)

    require = args.require + changes
    # NOTE(review): exclude aliases args.exclude, so the `exclude +=` below also
    # extends args.exclude in place -- presumably intentional so later consumers
    # (such as delegation) see the environment-based exclusions; confirm before changing.
    exclude = args.exclude

    internal_targets = walk_internal_targets(targets, args.include, exclude, require)
    environment_exclude = get_integration_filter(args, internal_targets)

    environment_exclude += cloud_filter(args, internal_targets)

    if environment_exclude:
        # re-filter after excluding targets the current environment cannot run
        exclude += environment_exclude
        internal_targets = walk_internal_targets(targets, args.include, exclude, require)

    if not internal_targets:
        raise AllTargetsSkipped()

    if args.start_at and not any(target.name == args.start_at for target in internal_targets):
        raise ApplicationError('Start at target matches nothing: %s' % args.start_at)

    if init_callback:
        init_callback(args, internal_targets)

    cloud_init(args, internal_targets)

    vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)

    if os.path.exists(vars_file_src):
        def integration_config_callback(files):  # type: (t.List[t.Tuple[str, str]]) -> None
            """
            Add the integration config vars file to the payload file list.
            This will preserve the file during delegation even if the file is ignored by source control.
            """
            files.append((vars_file_src, data_context().content.integration_vars_path))

        data_context().register_payload_callback(integration_config_callback)

    if args.delegate:
        # hand off execution to a delegated environment instead of running here
        raise Delegate(require=require, exclude=exclude, integration_targets=internal_targets)

    install_command_requirements(args)

    return internal_targets
1047
1048
def command_integration_filtered(args, targets, all_targets, inventory_path, pre_target=None, post_target=None,
                                 remote_temp_path=None):
    """
    Run the given integration test targets, including their setup targets,
    with optional retry on error and environment validation between targets.

    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :type all_targets: tuple[IntegrationTarget]
    :type inventory_path: str
    :type pre_target: (IntegrationTarget) -> None | None
    :type post_target: (IntegrationTarget) -> None | None
    :type remote_temp_path: str | None
    """
    found = False  # set once the --start-at target has been reached
    passed = []
    failed = []

    targets_iter = iter(targets)
    all_targets_dict = dict((target.name, target) for target in all_targets)

    setup_errors = []
    setup_targets_executed = set()

    # validate setup target references before running anything
    for target in all_targets:
        for setup_target in target.setup_once + target.setup_always:
            if setup_target not in all_targets_dict:
                setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))

    if setup_errors:
        raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))

    check_pyyaml(args, args.python_version)

    test_dir = os.path.join(ResultType.TMP.path, 'output_dir')

    # make sure a local SSH service is reachable before running targets that need it
    if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
        max_tries = 20
        display.info('SSH service required for tests. Checking to make sure we can connect.')
        for i in range(1, max_tries + 1):
            try:
                run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
                display.info('SSH service responded.')
                break
            except SubprocessError:
                if i == max_tries:
                    raise
                seconds = 3
                display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
                time.sleep(seconds)

    # Windows is different as Ansible execution is done locally but the host is remote
    if args.inject_httptester and not isinstance(args, WindowsIntegrationConfig):
        inject_httptester(args)

    start_at_task = args.start_at_task

    results = {}

    current_environment = None  # type: t.Optional[EnvironmentDescription]

    # common temporary directory path that will be valid on both the controller and the remote
    # it must be common because it will be referenced in environment variables that are shared across multiple hosts
    common_temp_path = '/tmp/ansible-test-%s' % ''.join(random.choice(string.ascii_letters + string.digits) for _idx in range(8))

    setup_common_temp_dir(args, common_temp_path)

    try:
        for target in targets_iter:
            if args.start_at and not found:
                found = target.name == args.start_at

                if not found:
                    continue

            if args.list_targets:
                print(target.name)
                continue

            tries = 2 if args.retry_on_error else 1
            verbosity = args.verbosity  # original verbosity, restored after each target

            cloud_environment = get_cloud_environment(args, target)

            # reuse the environment snapshot taken after the previous target when available
            original_environment = current_environment if current_environment else EnvironmentDescription(args)
            current_environment = None

            display.info('>>> Environment Description\n%s' % original_environment, verbosity=3)

            try:
                while tries:
                    tries -= 1

                    try:
                        if cloud_environment:
                            cloud_environment.setup_once()

                        run_setup_targets(args, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, False)

                        start_time = time.time()

                        run_setup_targets(args, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, True)

                        if not args.explain:
                            # create a fresh test directory for each test target
                            remove_tree(test_dir)
                            make_dirs(test_dir)

                        if pre_target:
                            pre_target(target)

                        try:
                            if target.script_path:
                                command_integration_script(args, target, test_dir, inventory_path, common_temp_path,
                                                           remote_temp_path=remote_temp_path)
                            else:
                                command_integration_role(args, target, start_at_task, test_dir, inventory_path,
                                                         common_temp_path, remote_temp_path=remote_temp_path)
                                # --start-at-task only applies to the first role executed
                                start_at_task = None
                        finally:
                            if post_target:
                                post_target(target)

                        end_time = time.time()

                        results[target.name] = dict(
                            name=target.name,
                            type=target.type,
                            aliases=target.aliases,
                            modules=target.modules,
                            run_time_seconds=int(end_time - start_time),
                            setup_once=target.setup_once,
                            setup_always=target.setup_always,
                            coverage=args.coverage,
                            coverage_label=args.coverage_label,
                            python_version=args.python_version,
                        )

                        break
                    except SubprocessError:
                        if cloud_environment:
                            cloud_environment.on_failure(target, tries)

                        # do not retry if the failure also corrupted the environment
                        if not original_environment.validate(target.name, throw=False):
                            raise

                        if not tries:
                            raise

                        display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
                        display.verbosity = args.verbosity = 6

                start_time = time.time()
                # snapshot the environment for validation and for reuse by the next target
                current_environment = EnvironmentDescription(args)
                end_time = time.time()

                EnvironmentDescription.check(original_environment, current_environment, target.name, throw=True)

                results[target.name]['validation_seconds'] = int(end_time - start_time)

                passed.append(target)
            except Exception as ex:
                failed.append(target)

                if args.continue_on_error:
                    display.error(ex)
                    continue

                display.notice('To resume at this test target, use the option: --start-at %s' % target.name)

                next_target = next(targets_iter, None)

                if next_target:
                    display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)

                raise
            finally:
                # restore the verbosity that may have been raised for a retry
                display.verbosity = args.verbosity = verbosity

    finally:
        if not args.explain:
            if args.coverage:
                coverage_temp_path = os.path.join(common_temp_path, ResultType.COVERAGE.name)
                coverage_save_path = ResultType.COVERAGE.path

                # preserve coverage files before the common temp directory is removed
                for filename in os.listdir(coverage_temp_path):
                    shutil.copy(os.path.join(coverage_temp_path, filename), os.path.join(coverage_save_path, filename))

            remove_tree(common_temp_path)

            result_name = '%s-%s.json' % (
                args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))

            data = dict(
                targets=results,
            )

            write_json_test_results(ResultType.DATA, result_name, data)

    if failed:
        raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
            len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
1248
1249
def start_httptester(args):
    """
    Start the httptester container and build the SSH options needed to forward its ports.

    :type args: EnvironmentConfig
    :rtype: str, list[str]
    """

    # map ports from remote -> localhost -> container
    # passing through localhost is only used when ansible-test is not already running inside a docker container
    ports = [
        dict(
            remote=8080,
            container=80,
        ),
        dict(
            remote=8443,
            container=443,
        ),
    ]

    container_id = get_docker_container_id()

    if not container_id:
        # expose each container port on an ephemeral localhost port
        for port in ports:
            port['localhost'] = get_available_port()

    docker_pull(args, args.httptester)

    forwards = dict((port['localhost'], port['container']) for port in ports if 'localhost' in port)
    httptester_id = run_httptester(args, forwards)

    if container_id:
        container_host = get_docker_container_ip(args, httptester_id)
        display.info('Found httptester container address: %s' % container_host, verbosity=1)
    else:
        container_host = get_docker_hostname()

    ssh_options = []

    for port in ports:
        ssh_options.extend(['-R', '%d:%s:%d' % (port['remote'], container_host, port.get('localhost', port['container']))])

    return httptester_id, ssh_options
1291
1292
def run_httptester(args, ports=None):
    """
    Start the httptester docker container, optionally publishing the given ports.

    :type args: EnvironmentConfig
    :type ports: dict[int, int] | None
    :rtype: str
    """
    options = ['--detach']

    for localhost_port, container_port in (ports or {}).items():
        options += ['-p', '%d:%d' % (localhost_port, container_port)]

    network = get_docker_preferred_network_name(args)

    if is_docker_user_defined_network(network):
        # network-scoped aliases are only supported for containers in user defined networks
        for alias in HTTPTESTER_HOSTS:
            options.extend(['--network-alias', alias])

    container_id = docker_run(args, args.httptester, options=options)[0]

    # in explain mode nothing actually ran, so substitute a placeholder id
    return 'httptester_id' if args.explain else container_id.strip()
1322
1323
def inject_httptester(args):
    """
    Make the httptester hostnames resolvable locally and redirect the standard
    HTTP/HTTPS ports to the forwarded httptester ports.

    :type args: CommonConfig
    """
    comment = ' # ansible-test httptester\n'
    append_lines = ['127.0.0.1 %s%s' % (host, comment) for host in HTTPTESTER_HOSTS]
    hosts_path = '/etc/hosts'

    original_lines = read_text_file(hosts_path).splitlines(True)

    # the trailing comment marks entries added previously; only append once
    if not any(line.endswith(comment) for line in original_lines):
        write_text_file(hosts_path, ''.join(original_lines + append_lines))

    # determine which forwarding mechanism to use
    pfctl = find_executable('pfctl', required=False)
    iptables = find_executable('iptables', required=False)

    if pfctl:
        kldload = find_executable('kldload', required=False)

        if kldload:
            try:
                run_command(args, ['kldload', 'pf'], capture=True)
            except SubprocessError:
                pass  # already loaded

        rules = '''
rdr pass inet proto tcp from any to any port 80 -> 127.0.0.1 port 8080
rdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8443
'''
        try:
            run_command(args, ['pfctl', '-ef', '-'], capture=True, data=rules)
        except SubprocessError:
            pass  # non-zero exit status on success

    elif iptables:
        for src, dst in ((80, 8080), (443, 8443)):
            rule = ['-o', 'lo', '-p', 'tcp', '--dport', str(src), '-j', 'REDIRECT', '--to-port', str(dst)]

            try:
                # check for existing rule
                run_command(args, ['iptables', '-t', 'nat', '-C', 'OUTPUT'] + rule, capture=True)
            except SubprocessError:
                # append rule when it does not exist
                run_command(args, ['iptables', '-t', 'nat', '-A', 'OUTPUT'] + rule, capture=True)
    else:
        raise ApplicationError('No supported port forwarding mechanism detected.')
1380
1381
def run_pypi_proxy(args):  # type: (EnvironmentConfig) -> t.Tuple[t.Optional[str], t.Optional[str]]
    """Run a PyPI proxy container, returning the container ID and proxy endpoint."""
    use_proxy = args.docker_raw == 'centos6'  # python 2.6 is the only version available

    if args.docker_raw == 'default':
        if args.python == '2.6':
            use_proxy = True  # python 2.6 requested
        elif not args.python and isinstance(args, (SanityConfig, UnitsConfig, ShellConfig)):
            use_proxy = True  # multiple versions (including python 2.6) can be used

    if args.docker_raw and args.pypi_proxy:
        use_proxy = True  # manual override to force proxy usage

    if not use_proxy:
        return None, None

    proxy_image = 'quay.io/ansible/pypi-test-container:1.0.0'
    port = 3141

    docker_pull(args, proxy_image)

    container_id = docker_run(args, proxy_image, options=['--detach'])[0]

    if args.explain:
        # nothing actually ran, so substitute placeholder values
        container_id = 'pypi_id'
        container_ip = '127.0.0.1'
    else:
        container_id = container_id.strip()
        container_ip = get_docker_container_ip(args, container_id)

    endpoint = 'http://%s:%d/root/pypi/+simple/' % (container_ip, port)

    return container_id, endpoint
1422
1423
def configure_pypi_proxy(args):  # type: (CommonConfig) -> None
    """Configure the environment to use a PyPI proxy, if present."""
    if not isinstance(args, EnvironmentConfig):
        return  # pypi_endpoint is only available on EnvironmentConfig

    if not args.pypi_endpoint:
        return  # no proxy endpoint configured

    configure_pypi_block_access()
    configure_pypi_proxy_pip(args)
    configure_pypi_proxy_easy_install(args)
1433
1434
def configure_pypi_block_access():  # type: () -> None
    """Block direct access to PyPI to ensure proxy configurations are always used."""
    if os.getuid() != 0:
        # /etc/hosts cannot be modified without root
        display.warning('Skipping custom hosts block for PyPI for non-root user.')
        return

    hosts_path = '/etc/hosts'
    hosts_block = '''
127.0.0.1 pypi.org pypi.python.org files.pythonhosted.org
'''

    def hosts_cleanup():
        """Strip the hosts entries added below back out of the hosts file."""
        display.info('Removing custom PyPI hosts entries: %s' % hosts_path, verbosity=1)

        with open(hosts_path) as hosts_file_read:
            content = hosts_file_read.read()

        with open(hosts_path, 'w') as hosts_file_write:
            hosts_file_write.write(content.replace(hosts_block, ''))

    display.info('Injecting custom PyPI hosts entries: %s' % hosts_path, verbosity=1)
    display.info('Config: %s\n%s' % (hosts_path, hosts_block), verbosity=3)

    with open(hosts_path, 'a') as hosts_file:
        hosts_file.write(hosts_block)

    # undo the hosts file change when the process exits
    atexit.register(hosts_cleanup)
1464
1465
def configure_pypi_proxy_pip(args):  # type: (EnvironmentConfig) -> None
    """Configure a custom index for pip based installs."""
    pypi_hostname = urlparse(args.pypi_endpoint)[1].split(':')[0]

    pip_conf_path = os.path.expanduser('~/.pip/pip.conf')
    pip_conf = '''
[global]
index-url = {0}
trusted-host = {1}
'''.format(args.pypi_endpoint, pypi_hostname).strip()

    if os.path.exists(pip_conf_path):
        # never clobber an existing pip configuration
        raise ApplicationError('Refusing to overwrite existing file: %s' % pip_conf_path)

    def pip_conf_cleanup():
        """Remove the pip config written below."""
        display.info('Removing custom PyPI config: %s' % pip_conf_path, verbosity=1)
        os.remove(pip_conf_path)

    display.info('Injecting custom PyPI config: %s' % pip_conf_path, verbosity=1)
    display.info('Config: %s\n%s' % (pip_conf_path, pip_conf), verbosity=3)

    write_text_file(pip_conf_path, pip_conf, True)
    # remove the generated config when the process exits
    atexit.register(pip_conf_cleanup)
1489
1490
def configure_pypi_proxy_easy_install(args):  # type: (EnvironmentConfig) -> None
    """Configure a custom index for easy_install based installs."""
    pydistutils_cfg_path = os.path.expanduser('~/.pydistutils.cfg')
    pydistutils_cfg = '''
[easy_install]
index_url = {0}
'''.format(args.pypi_endpoint).strip()

    if os.path.exists(pydistutils_cfg_path):
        # never clobber an existing distutils configuration
        raise ApplicationError('Refusing to overwrite existing file: %s' % pydistutils_cfg_path)

    def pydistutils_cfg_cleanup():
        """Remove the distutils config written below."""
        display.info('Removing custom PyPI config: %s' % pydistutils_cfg_path, verbosity=1)
        os.remove(pydistutils_cfg_path)

    display.info('Injecting custom PyPI config: %s' % pydistutils_cfg_path, verbosity=1)
    display.info('Config: %s\n%s' % (pydistutils_cfg_path, pydistutils_cfg), verbosity=3)

    write_text_file(pydistutils_cfg_path, pydistutils_cfg, True)
    # remove the generated config when the process exits
    atexit.register(pydistutils_cfg_cleanup)
1511
1512
def run_setup_targets(args, test_dir, target_names, targets_dict, targets_executed, inventory_path, temp_path, always):
    """
    Run the named setup targets, skipping ones already executed unless always is set.

    :type args: IntegrationConfig
    :type test_dir: str
    :type target_names: list[str]
    :type targets_dict: dict[str, IntegrationTarget]
    :type targets_executed: set[str]
    :type inventory_path: str
    :type temp_path: str
    :type always: bool
    """
    for name in target_names:
        if name in targets_executed and not always:
            continue  # setup target already ran and is not marked as always

        setup_target = targets_dict[name]

        if not args.explain:
            # create a fresh test directory for each test target
            remove_tree(test_dir)
            make_dirs(test_dir)

        if setup_target.script_path:
            command_integration_script(args, setup_target, test_dir, inventory_path, temp_path)
        else:
            command_integration_role(args, setup_target, None, test_dir, inventory_path, temp_path)

        targets_executed.add(name)
1541
1542
def integration_environment(args, target, test_dir, inventory_path, ansible_config, env_config):
    """
    Build the environment variables used to run an integration test target.

    :type args: IntegrationConfig
    :type target: IntegrationTarget
    :type test_dir: str
    :type inventory_path: str
    :type ansible_config: str | None
    :type env_config: CloudEnvironmentConfig | None
    :rtype: dict[str, str]
    """
    env = ansible_environment(args, ansible_config=ansible_config)

    if args.inject_httptester:
        env['HTTPTESTER'] = '1'

    callback_plugins = ['junit']

    if env_config:
        callback_plugins += env_config.callback_plugins or []

    if args.coverage_check:
        coverage_setting = 'check'
    elif args.coverage:
        coverage_setting = 'yes'
    else:
        coverage_setting = ''

    integration = dict(
        JUNIT_OUTPUT_DIR=ResultType.JUNIT.path,
        ANSIBLE_CALLBACK_WHITELIST=','.join(sorted(set(callback_plugins))),
        ANSIBLE_TEST_CI=args.metadata.ci_provider or get_ci_provider().code,
        ANSIBLE_TEST_COVERAGE=coverage_setting,
        OUTPUT_DIR=test_dir,
        INVENTORY_PATH=os.path.abspath(inventory_path),
    )

    if args.debug_strategy:
        env['ANSIBLE_STRATEGY'] = 'debug'

    if 'non_local/' in target.aliases:
        if args.coverage:
            display.warning('Skipping coverage reporting on Ansible modules for non-local test: %s' % target.name)

        env['ANSIBLE_TEST_REMOTE_INTERPRETER'] = ''

    env.update(integration)

    return env
1583
1584
def command_integration_script(args, target, test_dir, inventory_path, temp_path, remote_temp_path=None):
    """
    Run a script-based integration test target.

    :type args: IntegrationConfig
    :type target: IntegrationTarget
    :type test_dir: str
    :type inventory_path: str
    :type temp_path: str
    :type remote_temp_path: str | None
    """
    display.info('Running %s integration test script' % target.name)

    env_config = None

    if isinstance(args, PosixIntegrationConfig):
        cloud_environment = get_cloud_environment(args, target)

        if cloud_environment:
            env_config = cloud_environment.get_environment_config()

    with integration_test_environment(args, target, inventory_path) as test_env:
        cmd = ['./%s' % os.path.basename(target.script_path)]

        if args.verbosity:
            cmd.append('-' + ('v' * args.verbosity))

        env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
        cwd = os.path.join(test_env.targets_dir, target.relative_path)

        # support use of adhoc ansible commands in collections without specifying the fully qualified collection name
        env['ANSIBLE_PLAYBOOK_DIR'] = cwd

        if env_config and env_config.env_vars:
            env.update(env_config.env_vars)

        with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path:
            if config_path:
                cmd.extend(['-e', '@%s' % config_path])

            module_coverage = 'non_local/' not in target.aliases
            intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
                              remote_temp_path=remote_temp_path, module_coverage=module_coverage)
1628
1629
def command_integration_role(args, target, start_at_task, test_dir, inventory_path, temp_path, remote_temp_path=None):
    """Run a role-based integration test target by generating a single-play playbook and executing it with ansible-playbook.

    :type args: IntegrationConfig
    :type target: IntegrationTarget
    :type start_at_task: str | None
    :type test_dir: str
    :type inventory_path: str
    :type temp_path: str
    :type remote_temp_path: str | None
    """
    display.info('Running %s integration test role' % target.name)

    env_config = None

    vars_files = []
    variables = dict(
        output_dir=test_dir,
    )

    # choose the play's hosts and fact gathering based on the type of integration test being run
    if isinstance(args, WindowsIntegrationConfig):
        hosts = 'windows'
        gather_facts = False
        variables.update(dict(
            win_output_dir=r'C:\ansible_testing',
        ))
    elif isinstance(args, NetworkIntegrationConfig):
        hosts = target.network_platform
        gather_facts = False
    else:
        hosts = 'testhost'
        gather_facts = True

        # POSIX targets may use a cloud plugin which supplies extra environment/vars configuration
        cloud_environment = get_cloud_environment(args, target)

        if cloud_environment:
            env_config = cloud_environment.get_environment_config()

    with integration_test_environment(args, target, inventory_path) as test_env:
        # only reference the vars file if it actually exists for this layout
        if os.path.exists(test_env.vars_file):
            vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir))

        play = dict(
            hosts=hosts,
            gather_facts=gather_facts,
            vars_files=vars_files,
            vars=variables,
            roles=[
                target.name,
            ],
        )

        if env_config:
            if env_config.ansible_vars:
                # mutates the same dict already referenced by play['vars']
                variables.update(env_config.ansible_vars)

            play.update(dict(
                environment=env_config.env_vars,
                module_defaults=env_config.module_defaults,
            ))

        # JSON is valid YAML, so the generated playbook can be fed directly to ansible-playbook
        playbook = json.dumps([play], indent=4, sort_keys=True)

        with named_temporary_file(args=args, directory=test_env.integration_dir, prefix='%s-' % target.name, suffix='.yml', content=playbook) as playbook_path:
            filename = os.path.basename(playbook_path)

            display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3)

            cmd = ['ansible-playbook', filename, '-i', os.path.relpath(test_env.inventory_path, test_env.integration_dir)]

            if start_at_task:
                cmd += ['--start-at-task', start_at_task]

            if args.tags:
                cmd += ['--tags', args.tags]

            if args.skip_tags:
                cmd += ['--skip-tags', args.skip_tags]

            if args.diff:
                cmd += ['--diff']

            if isinstance(args, NetworkIntegrationConfig):
                if args.testcase:
                    cmd += ['-e', 'testcase=%s' % args.testcase]

            if args.verbosity:
                cmd.append('-' + ('v' * args.verbosity))

            env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
            cwd = test_env.integration_dir

            env.update(dict(
                # support use of adhoc ansible commands in collections without specifying the fully qualified collection name
                ANSIBLE_PLAYBOOK_DIR=cwd,
            ))

            # make the test roles resolvable by name from the generated playbook
            env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir

            # targets marked non_local run modules outside this host, so module coverage cannot be collected
            module_coverage = 'non_local/' not in target.aliases
            intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
                              remote_temp_path=remote_temp_path, module_coverage=module_coverage)
1731
1732
def get_changes_filter(args):
    """
    Return the list of target names to test based on change detection, or an empty list when change detection is disabled.
    :type args: TestConfig
    :rtype: list[str]
    """
    paths = detect_changes(args)

    if not args.metadata.change_description:
        args.metadata.change_description = categorize_changes(args, paths, args.command) if paths else ChangeDescription()

    if paths is None:
        return []  # change detection not enabled, do not filter targets

    if not paths:
        raise NoChangesDetected()

    description = args.metadata.change_description

    if description.targets is None:
        raise NoTestsForChanges()

    return description.targets
1758
1759
def detect_changes(args):
    """
    Return the list of changed paths, or None when change detection is not enabled.
    :type args: TestConfig
    :rtype: list[str] | None
    """
    if args.changed:
        paths = get_ci_provider().detect_changes(args)
    elif args.changed_from or args.changed_path:
        paths = args.changed_path or []

        if args.changed_from:
            # combine explicitly listed paths with those read from the --changed-from file
            paths.extend(read_text_file(args.changed_from).splitlines())
    else:
        return None  # change detection not enabled

    if paths is None:
        return None  # act as though change detection not enabled, do not filter targets

    display.info('Detected changes in %d file(s).' % len(paths))

    for changed_path in paths:
        display.info(changed_path, verbosity=1)

    return paths
1783
1784
def get_integration_filter(args, targets):
    """
    Dispatch to the exclusion filter matching the execution environment (docker, remote or local).
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :rtype: list[str]
    """
    filter_func = get_integration_local_filter

    if args.docker:
        filter_func = get_integration_docker_filter
    elif args.remote:
        filter_func = get_integration_remote_filter

    return filter_func(args, targets)
1798
1799
1800def common_integration_filter(args, targets, exclude):
1801    """
1802    :type args: IntegrationConfig
1803    :type targets: tuple[IntegrationTarget]
1804    :type exclude: list[str]
1805    """
1806    override_disabled = set(target for target in args.include if target.startswith('disabled/'))
1807
1808    if not args.allow_disabled:
1809        skip = 'disabled/'
1810        override = [target.name for target in targets if override_disabled & set(target.aliases)]
1811        skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
1812        if skipped:
1813            exclude.extend(skipped)
1814            display.warning('Excluding tests marked "%s" which require --allow-disabled or prefixing with "disabled/": %s'
1815                            % (skip.rstrip('/'), ', '.join(skipped)))
1816
1817    override_unsupported = set(target for target in args.include if target.startswith('unsupported/'))
1818
1819    if not args.allow_unsupported:
1820        skip = 'unsupported/'
1821        override = [target.name for target in targets if override_unsupported & set(target.aliases)]
1822        skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
1823        if skipped:
1824            exclude.extend(skipped)
1825            display.warning('Excluding tests marked "%s" which require --allow-unsupported or prefixing with "unsupported/": %s'
1826                            % (skip.rstrip('/'), ', '.join(skipped)))
1827
1828    override_unstable = set(target for target in args.include if target.startswith('unstable/'))
1829
1830    if args.allow_unstable_changed:
1831        override_unstable |= set(args.metadata.change_description.focused_targets or [])
1832
1833    if not args.allow_unstable:
1834        skip = 'unstable/'
1835        override = [target.name for target in targets if override_unstable & set(target.aliases)]
1836        skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
1837        if skipped:
1838            exclude.extend(skipped)
1839            display.warning('Excluding tests marked "%s" which require --allow-unstable or prefixing with "unstable/": %s'
1840                            % (skip.rstrip('/'), ', '.join(skipped)))
1841
1842    # only skip a Windows test if using --windows and all the --windows versions are defined in the aliases as skip/windows/%s
1843    if isinstance(args, WindowsIntegrationConfig) and args.windows:
1844        all_skipped = []
1845        not_skipped = []
1846
1847        for target in targets:
1848            if "skip/windows/" not in target.aliases:
1849                continue
1850
1851            skip_valid = []
1852            skip_missing = []
1853            for version in args.windows:
1854                if "skip/windows/%s/" % version in target.aliases:
1855                    skip_valid.append(version)
1856                else:
1857                    skip_missing.append(version)
1858
1859            if skip_missing and skip_valid:
1860                not_skipped.append((target.name, skip_valid, skip_missing))
1861            elif skip_valid:
1862                all_skipped.append(target.name)
1863
1864        if all_skipped:
1865            exclude.extend(all_skipped)
1866            skip_aliases = ["skip/windows/%s/" % w for w in args.windows]
1867            display.warning('Excluding tests marked "%s" which are set to skip with --windows %s: %s'
1868                            % ('", "'.join(skip_aliases), ', '.join(args.windows), ', '.join(all_skipped)))
1869
1870        if not_skipped:
1871            for target, skip_valid, skip_missing in not_skipped:
1872                # warn when failing to skip due to lack of support for skipping only some versions
1873                display.warning('Including test "%s" which was marked to skip for --windows %s but not %s.'
1874                                % (target, ', '.join(skip_valid), ', '.join(skip_missing)))
1875
1876
def get_integration_local_filter(args, targets):
    """
    Build the list of target exclusions which apply when running tests locally.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :rtype: list[str]
    """
    exclude = []

    common_integration_filter(args, targets, exclude)

    if not (args.allow_root or os.getuid() == 0):
        root_alias = 'needs/root/'
        needs_root = [target.name for target in targets if root_alias in target.aliases]

        if needs_root:
            exclude.append(root_alias)
            display.warning('Excluding tests marked "%s" which require --allow-root or running as root: %s'
                            % ('needs/root', ', '.join(needs_root)))

    destructive_overrides = set(name for name in args.include if name.startswith('destructive/'))

    if not args.allow_destructive:
        destructive_alias = 'destructive/'
        overridden = [target.name for target in targets if destructive_overrides & set(target.aliases)]
        destructive_names = [target.name for target in targets if destructive_alias in target.aliases and target.name not in overridden]

        if destructive_names:
            exclude.extend(destructive_names)
            display.warning('Excluding tests marked "%s" which require --allow-destructive or prefixing with "destructive/" to run locally: %s'
                            % ('destructive', ', '.join(destructive_names)))

    exclude_targets_by_python_version(targets, args.python_version, exclude)

    return exclude
1909
1910
def get_integration_docker_filter(args, targets):
    """
    Build the list of target exclusions which apply when running tests under docker.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :rtype: list[str]
    """
    exclude = []

    common_integration_filter(args, targets, exclude)

    docker_skip_alias = 'skip/docker/'
    docker_skipped = [target.name for target in targets if docker_skip_alias in target.aliases]

    if docker_skipped:
        exclude.append(docker_skip_alias)
        display.warning('Excluding tests marked "%s" which cannot run under docker: %s'
                        % ('skip/docker', ', '.join(docker_skipped)))

    if not args.docker_privileged:
        privileged_alias = 'needs/privileged/'
        privileged_skipped = [target.name for target in targets if privileged_alias in target.aliases]

        if privileged_skipped:
            exclude.append(privileged_alias)
            display.warning('Excluding tests marked "%s" which require --docker-privileged to run under docker: %s'
                            % ('needs/privileged', ', '.join(privileged_skipped)))

    python_version = get_python_version(args, get_docker_completion(), args.docker_raw)

    exclude_targets_by_python_version(targets, python_version, exclude)

    return exclude
1941
1942
def get_integration_remote_filter(args, targets):
    """
    Build the list of target exclusions which apply to the remote platform being tested.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :rtype: list[str]
    """
    remote = args.parsed_remote

    exclude = []

    common_integration_filter(args, targets, exclude)

    platform_version = '%s %s' % (remote.platform, remote.version)

    # insertion order determines the order in which warnings are reported
    skips = {}
    skips['skip/%s' % remote.platform] = remote.platform
    skips['skip/%s/%s' % (remote.platform, remote.version)] = platform_version
    skips['skip/%s%s' % (remote.platform, remote.version)] = platform_version  # legacy syntax, use the slash-separated form above

    if remote.arch:
        skips['skip/%s/%s' % (remote.arch, remote.platform)] = '%s on %s' % (remote.platform, remote.arch)
        skips['skip/%s/%s/%s' % (remote.arch, remote.platform, remote.version)] = '%s %s on %s' % (remote.platform, remote.version, remote.arch)

    for alias, description in skips.items():
        matched = [target.name for target in targets if alias in target.skips]

        if matched:
            exclude.append(alias + '/')
            display.warning('Excluding tests marked "%s" which are not supported on %s: %s' % (alias, description, ', '.join(matched)))

    python_version = get_python_version(args, get_remote_completion(), args.remote)

    exclude_targets_by_python_version(targets, python_version, exclude)

    return exclude
1978
1979
def exclude_targets_by_python_version(targets, python_version, exclude):
    """
    Extend ``exclude`` with targets that do not support the given python version.
    :type targets: tuple[IntegrationTarget]
    :type python_version: str
    :type exclude: list[str]
    """
    if not python_version:
        display.warning('Python version unknown. Unable to skip tests based on Python version.')
        return

    major_version = python_version.split('.')[0]

    # check the exact X.Y version first, then the major version, preserving the original warning order
    for skip in ('skip/python%s/' % python_version, 'skip/python%s/' % major_version):
        skipped = [target.name for target in targets if skip in target.aliases]

        if skipped:
            exclude.append(skip)
            display.warning('Excluding tests marked "%s" which are not supported on python %s: %s'
                            % (skip.rstrip('/'), python_version, ', '.join(skipped)))
2005
2006
def get_python_version(args, configs, name):
    """
    Return the python version to use for the named completion entry, preferring --python when given.
    :type args: EnvironmentConfig
    :type configs: dict[str, dict[str, str]]
    :type name: str
    """
    config_python = configs.get(name, {}).get('python')

    if not config_python:
        # no completion data available, fall back to the --python option if provided
        if args.python:
            return args.python

        display.warning('No Python version specified. '
                        'Use completion config or the --python option to specify one.', unique=True)

        return ''  # failure to provide a version may result in failures or reduced functionality later

    supported = config_python.split(',')

    if args.python and args.python not in supported:
        raise ApplicationError('Python %s is not supported by %s. Supported Python version(s) are: %s' % (
            args.python, name, ', '.join(sorted(supported))))

    # the first listed version is the default when --python was not given
    return args.python or supported[0]
2035
2036
def get_python_interpreter(args, configs, name):
    """
    Return the path of the python interpreter to use for the named completion entry.
    :type args: EnvironmentConfig
    :type configs: dict[str, dict[str, str]]
    :type name: str
    """
    if args.python_interpreter:
        return args.python_interpreter

    config = configs.get(name, {})

    if not config:
        # no completion data available, guess an interpreter name from the --python option
        guess = 'python%s' % args.python if args.python else 'python'

        display.warning('Using "%s" as the Python interpreter. '
                        'Use completion config or the --python-interpreter option to specify the path.' % guess, unique=True)

        return guess

    python_version = get_python_version(args, configs, name)

    # an explicit per-version interpreter path in the config overrides the python_dir default
    default_path = os.path.join(config.get('python_dir', '/usr/bin'), 'python%s' % python_version)

    return config.get('python%s' % python_version, default_path)
2066
2067
class EnvironmentDescription:
    """Description of current running environment.

    Captures a snapshot of the python/pip executables available on the host
    (paths, reported versions and pip shebangs) plus a hash of the user's
    known_hosts file, so that environment changes made by a test target can
    be detected and reported after the target runs.
    """
    def __init__(self, args):
        """Initialize snapshot of environment configuration.
        :type args: IntegrationConfig
        """
        self.args = args

        if self.args.explain:
            # explain mode does not run anything, so there is nothing to snapshot
            self.data = {}
            return

        warnings = []

        # probe the bare 'python'/'pip' names ('') plus every supported X.Y version and each distinct major version
        versions = ['']
        versions += SUPPORTED_PYTHON_VERSIONS
        versions += list(set(v.split('.')[0] for v in SUPPORTED_PYTHON_VERSIONS))

        version_check = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'versions.py')
        python_paths = dict((v, find_executable('python%s' % v, required=False)) for v in sorted(versions))
        pip_paths = dict((v, find_executable('pip%s' % v, required=False)) for v in sorted(versions))
        # only query versions/shebangs for executables that were actually found
        program_versions = dict((v, self.get_version([python_paths[v], version_check], warnings)) for v in sorted(python_paths) if python_paths[v])
        pip_interpreters = dict((v, self.get_shebang(pip_paths[v])) for v in sorted(pip_paths) if pip_paths[v])
        known_hosts_hash = self.get_hash(os.path.expanduser('~/.ssh/known_hosts'))

        for version in sorted(versions):
            self.check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings)

        for warning in warnings:
            display.warning(warning, unique=True)

        self.data = dict(
            python_paths=python_paths,
            pip_paths=pip_paths,
            program_versions=program_versions,
            pip_interpreters=pip_interpreters,
            known_hosts_hash=known_hosts_hash,
            warnings=warnings,
        )

    @staticmethod
    def check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings):
        """Append a warning when the pip found for a python version does not match that python interpreter.
        :type version: str
        :type python_paths: dict[str, str]
        :type pip_paths: dict[str, str]
        :type pip_interpreters: dict[str, str]
        :type warnings: list[str]
        """
        python_label = 'Python%s' % (' %s' % version if version else '')

        pip_path = pip_paths.get(version)
        python_path = python_paths.get(version)

        if not python_path and not pip_path:
            # neither python or pip is present for this version
            return

        if not python_path:
            warnings.append('A %s interpreter was not found, yet a matching pip was found at "%s".' % (python_label, pip_path))
            return

        if not pip_path:
            warnings.append('A %s interpreter was found at "%s", yet a matching pip was not found.' % (python_label, python_path))
            return

        pip_shebang = pip_interpreters.get(version)

        # extract the interpreter command from the pip script's shebang line
        match = re.search(r'#!\s*(?P<command>[^\s]+)', pip_shebang)

        if not match:
            warnings.append('A %s pip was found at "%s", but it does not have a valid shebang: %s' % (python_label, pip_path, pip_shebang))
            return

        # resolve symlinks so e.g. python3 -> python3.8 comparisons work
        pip_interpreter = os.path.realpath(match.group('command'))
        python_interpreter = os.path.realpath(python_path)

        if pip_interpreter == python_interpreter:
            return

        # different paths may still be the same interpreter (e.g. copies instead of symlinks)
        try:
            identical = filecmp.cmp(pip_interpreter, python_interpreter)
        except OSError:
            identical = False

        if identical:
            return

        warnings.append('A %s pip was found at "%s", but it uses interpreter "%s" instead of "%s".' % (
            python_label, pip_path, pip_interpreter, python_interpreter))

    def __str__(self):
        """Return the snapshot as stable, pretty-printed JSON (used for comparison and display).
        :rtype: str
        """
        return json.dumps(self.data, sort_keys=True, indent=4)

    def validate(self, target_name, throw):
        """Compare this snapshot against a freshly captured one and report differences.
        :type target_name: str
        :type throw: bool
        :rtype: bool
        """
        current = EnvironmentDescription(self.args)

        return self.check(self, current, target_name, throw)

    @staticmethod
    def check(original, current, target_name, throw):
        """Compare two environment snapshots; raise or report an error when they differ.
        :type original: EnvironmentDescription
        :type current: EnvironmentDescription
        :type target_name: str
        :type throw: bool
        :rtype: bool
        """
        original_json = str(original)
        current_json = str(current)

        if original_json == current_json:
            return True

        unified_diff = '\n'.join(difflib.unified_diff(
            a=original_json.splitlines(),
            b=current_json.splitlines(),
            fromfile='original.json',
            tofile='current.json',
            lineterm='',
        ))

        message = ('Test target "%s" has changed the test environment!\n'
                   'If these changes are necessary, they must be reverted before the test finishes.\n'
                   '>>> Original Environment\n'
                   '%s\n'
                   '>>> Current Environment\n'
                   '%s\n'
                   '>>> Environment Diff\n'
                   '%s'
                   % (target_name, original_json, current_json, unified_diff))

        if throw:
            raise ApplicationError(message)

        display.error(message)

        return False

    @staticmethod
    def get_version(command, warnings):
        """Run the given version-reporting command and return its output lines.
        :type command: list[str]
        :type warnings: list[str]
        :rtype: list[str] | None
        """
        try:
            stdout, stderr = raw_command(command, capture=True, cmd_verbosity=2)
        except SubprocessError as ex:
            warnings.append(u'%s' % ex)
            return None  # all failures are equal, we don't care why it failed, only that it did

        return [line.strip() for line in ((stdout or '').strip() + (stderr or '').strip()).splitlines()]

    @staticmethod
    def get_shebang(path):
        """Return the first line (shebang) of the script at the given path.
        :type path: str
        :rtype: str
        """
        with open_text_file(path) as script_fd:
            return script_fd.readline().strip()

    @staticmethod
    def get_hash(path):
        """Return the SHA-256 hex digest of the file at the given path, or None if it does not exist.
        :type path: str
        :rtype: str | None
        """
        if not os.path.exists(path):
            return None

        file_hash = hashlib.sha256()

        file_hash.update(read_binary_file(path))

        return file_hash.hexdigest()
2253
2254
class NoChangesDetected(ApplicationWarning):
    """Warning raised when change detection ran but found no changed files."""
    def __init__(self):
        ApplicationWarning.__init__(self, 'No changes detected.')
2259
2260
class NoTestsForChanges(ApplicationWarning):
    """Warning raised when changes were detected but no tests apply to them."""
    def __init__(self):
        ApplicationWarning.__init__(self, 'No tests found for detected changes.')
2265
2266
class Delegate(Exception):
    """Raised to request that the current command be delegated to another environment."""
    def __init__(self, exclude=None, require=None, integration_targets=None):
        """
        :type exclude: list[str] | None
        :type require: list[str] | None
        :type integration_targets: tuple[IntegrationTarget] | None
        """
        Exception.__init__(self)

        # normalize omitted arguments to empty containers
        self.exclude = exclude or []
        self.require = require or []
        self.integration_targets = integration_targets or ()
2280
2281
class AllTargetsSkipped(ApplicationWarning):
    """Warning raised when every selected target was skipped."""
    def __init__(self):
        ApplicationWarning.__init__(self, 'All targets skipped.')
2286