# coding=utf-8
"""
Behave BDD runner.
See _bdd_utils#get_path_by_env for information on how to pass the list of features here.
Each feature may be a file, a folder with feature files, or a folder with a "features" subfolder.

Other args are tag expressions in the format (--tags=.. --tags=..).
See https://pythonhosted.org/behave/behave.html#tag-expression
"""

import functools
import glob
import re
import sys
import traceback
from behave import __version__ as behave_version
from behave.formatter.base import Formatter
from behave.model import Step, ScenarioOutline, Feature, Scenario
from behave.tag_expression import TagExpression
from distutils import version
from _jb_django_behave import run_as_django_behave
import _bdd_utils
import tcmessages
from _jb_utils import VersionAgnosticUtils

_MAX_STEPS_SEARCH_FEATURES = 5000  # Do not look for features in a folder that has more than this number of children
_FEATURES_FOLDER = 'features'  # "features" folder name.

__author__ = 'Ilya.Kazakevich'

from behave import configuration, runner

import os


def _get_dirs_to_run(base_dir_to_search):
    """
    Searches for "features" dirs under some base dir.
    :return: list of feature dirs to run
    :rtype: list
    :param base_dir_to_search: root directory to search (should not have too many children!)
    :type base_dir_to_search: str
    """
    result = set()
    for (step, (folder, sub_folders, files)) in enumerate(os.walk(base_dir_to_search)):
        if os.path.basename(folder) == _FEATURES_FOLDER and os.path.isdir(folder):
            result.add(os.path.abspath(folder))
        if step == _MAX_STEPS_SEARCH_FEATURES:  # Guard
            err = "Folder {0} is too deep to find any features folder. Please provide a concrete folder".format(
                base_dir_to_search)
            raise Exception(err)
    return list(result)
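
# Illustrative only (not part of the runner): given a layout such as
#   project/app_one/features/login.feature
#   project/app_two/features/search.feature
# calling _get_dirs_to_run("project") returns the absolute paths of both
# "features" folders; order is not guaranteed because a set is used.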


def _merge_hooks_wrapper(*hooks):
    """
    Creates a wrapper that runs the provided behave hooks sequentially
    :param hooks: hooks to run
    :return: wrapper
    """
    # TODO: Wheel reinvented!!!!
    def wrapper(*args, **kwargs):
        for hook in hooks:
            hook(*args, **kwargs)

    return wrapper
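
# Illustrative only: the merged hook calls every wrapped hook in order with the
# same arguments, e.g.
#   merged = _merge_hooks_wrapper(user_before_scenario, reporting_before_scenario)
#   merged(context, scenario)  # runs user_before_scenario, then reporting_before_scenario
# (both hook names above are hypothetical).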


class _RunnerWrapper(runner.Runner):
    """
    Wrapper around the native behave runner. Has nothing to do with BddRunner!
    We need it to support dry runs (to fetch data from scenarios) and the hooks API
    """

    def __init__(self, config, hooks):
        """
        :type config: configuration.Configuration
        :param config: behave configuration
        :type hooks: dict (empty in the new runner mode)
        :param hooks: hooks in the format "before_scenario" => f(context, scenario), to be merged with the before/after hooks provided by the user
        """
        super(_RunnerWrapper, self).__init__(config)
        self.dry_run = False
        """
        Does not run tests (only fetches "self.features") if true. Runs tests otherwise.
        """
        self.__hooks = hooks

    def load_hooks(self, filename='environment.py'):
        """
        Overrides parent "load_hooks" to add "self.__hooks"
        :param filename: env. file name
        """
        super(_RunnerWrapper, self).load_hooks(filename)
        for (hook_name, hook) in self.__hooks.items():
            hook_to_add = hook
            if hook_name in self.hooks:
                user_hook = self.hooks[hook_name]
                if hook_name.startswith("before"):
                    user_and_custom_hook = [user_hook, hook]
                else:
                    user_and_custom_hook = [hook, user_hook]
                hook_to_add = _merge_hooks_wrapper(*user_and_custom_hook)
            self.hooks[hook_name] = hook_to_add
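
    # Illustrative ordering after load_hooks (as implemented above): for a
    # "before_*" hook the user's environment.py hook runs first, then the
    # reporting hook; for an "after_*" hook the reporting hook runs first,
    # then the user's.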

    def run_model(self, features=None):
        """
        Overrides the parent method to stop (do nothing) in case of "dry_run"
        :param features: features to run
        :return:
        """
        if self.dry_run:  # To stop further execution
            return
        return super(_RunnerWrapper, self).run_model(features)

    def clean(self):
        """
        Cleans the runner after a dry run (clears hooks, features, etc.). To be called before a real run!
        """
        self.dry_run = False
        self.hooks.clear()
        self.features = []


class _BehaveRunner(_bdd_utils.BddRunner):
    """
    BddRunner for behave
    """

    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features, etc.
        :param is_started: true if the test/feature/scenario has started
        :type is_started: bool
        :param context: behave context
        :type context: behave.runner.Context
        :param element: feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve the _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name))
            duration_ms = element.duration * 1000
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, duration_ms)
            elif element.status == 'failed':
                # The correct way is to use element.error_message,
                # but assertions do not have a trace there (due to Behave internals),
                # so we collect it manually
                error_message = element.error_message
                fetch_log = not error_message  # If no error_message is provided, we need to fetch the log manually
                trace = ""
                if isinstance(element.exception, AssertionError) or not error_message:
                    trace = self._collect_trace(element, utils)

                # May be empty https://github.com/behave/behave/issues/468 for some exceptions
                if not trace and not error_message:
                    try:
                        error_message = traceback.format_exc()
                    except AttributeError:
                        # The exception may have an empty stack trace, and traceback.format_exc() throws
                        # AttributeError in this case
                        trace = self._collect_trace(element, utils)
                if not error_message:
                    # Format the exception as a last resort
                    error_message = element.exception
                message_as_string = utils.to_unicode(error_message)
                if fetch_log and self.__real_runner.config.log_capture:
                    try:
                        capture = self.__real_runner.log_capture  # 1.2.5
                    except AttributeError:
                        capture = self.__real_runner.capture_controller.log_capture  # 1.2.6

                    message_as_string += u"\n" + utils.to_unicode(capture.getvalue())
                self._test_failed(step_name, message_as_string, trace, duration=duration_ms)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in ['passed', 'failed']:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name, element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples), element.location)
        else:
            self._feature_or_scenario(is_started, element.name, element.location)

    def _collect_trace(self, element, utils):
        return u"".join([utils.to_unicode(l) for l in traceback.format_tb(element.exc_traceback)])
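
    # Illustrative only: for a step that raised AssertionError,
    # element.exc_traceback holds the traceback object, and
    # traceback.format_tb renders it as a list of frame strings such as
    #   '  File "steps.py", line 12, in step_impl\n    assert response.ok\n'
    # (the file name and assertion above are made-up examples).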

    def __init__(self, config, base_dir, use_old_runner):
        """
        :type config: configuration.Configuration
        """
        super(_BehaveRunner, self).__init__(base_dir)
        self.__config = config
        # Install hooks
        self.__real_runner = _RunnerWrapper(config, {
            "before_feature": functools.partial(self.__process_hook, True),
            "after_feature": functools.partial(self.__process_hook, False),
            "before_scenario": functools.partial(self.__process_hook, True),
            "after_scenario": functools.partial(self.__process_hook, False),
            "before_step": functools.partial(self.__process_hook, True),
            "after_step": functools.partial(self.__process_hook, False)
        } if use_old_runner else dict())

    def _run_tests(self):
        self.__real_runner.run()

    def __filter_scenarios_by_args(self, scenario):
        """
        Filters out scenarios that should be skipped because of tags or scenario names
        :param scenario: scenario to check
        :return: true if the scenario should run
        """
        assert isinstance(scenario, Scenario), scenario
        # TODO: share with lettuce_runner.py#_get_features_to_run
        expected_tags = self.__config.tags
        scenario_name_re = self.__config.name_re
        if scenario_name_re and not scenario_name_re.match(scenario.name):
            return False
        if not expected_tags:
            return True  # Neither tags nor names are required
        return isinstance(expected_tags, TagExpression) and expected_tags.check(scenario.tags)
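
    # Illustrative only: with --tags=@smoke, behave builds a TagExpression for
    # which expected_tags.check(["smoke"]) is True, so a scenario tagged
    # @smoke passes the filter while an untagged one does not ("smoke" is a
    # made-up tag name).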

    def _get_features_to_run(self):
        self.__real_runner.dry_run = True
        self.__real_runner.run()
        features_to_run = self.__real_runner.features
        self.__real_runner.clean()  # To make sure nothing is left after the dry run

        # Replace outline scenario skeletons with real scenarios
        for feature in features_to_run:
            assert isinstance(feature, Feature), feature
            scenarios = []
            for scenario in feature.scenarios:
                try:
                    scenario.tags.extend(feature.tags)
                except AttributeError:
                    pass
                if isinstance(scenario, ScenarioOutline):
                    scenarios.extend(scenario.scenarios)
                else:
                    scenarios.append(scenario)
            # list() matters on Python 3, where filter() returns a lazy iterator
            feature.scenarios = list(filter(self.__filter_scenarios_by_args, scenarios))

        return features_to_run
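
# Illustrative only: a ScenarioOutline with a three-row examples table
# contributes three generated Scenario objects via its "scenarios" property,
# so _get_features_to_run above presents each example row to the filter as an
# individual scenario.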


if __name__ == "__main__":
    # TODO: support all other params instead
    command_args = list(filter(None, sys.argv[1:]))
    if command_args:
        if "--junit" in command_args:
            raise Exception("--junit report type for Behave is unsupported in PyCharm. \n "
                            "See: https://youtrack.jetbrains.com/issue/PY-14219")
        _bdd_utils.fix_win_drive(command_args[0])
    (base_dir, scenario_names, what_to_run) = _bdd_utils.get_what_to_run_by_env(os.environ)

    for scenario_name in scenario_names:
        command_args += ["-n", re.escape(scenario_name)]  # TODO: rewrite pythonically

    my_config = configuration.Configuration(command_args=command_args)

    loose_version = version.LooseVersion(behave_version)
    assert loose_version >= version.LooseVersion("1.2.5"), "Version not supported, please upgrade Behave"

    # The new runner mode supports behave 1.2.6+ only
    use_old_runner = "PYCHARM_BEHAVE_OLD_RUNNER" in os.environ or loose_version < version.LooseVersion("1.2.6")
    from behave.formatter import _registry

    FORMAT_NAME = "com.jetbrains.pycharm.formatter"
    if use_old_runner:
        class _Null(Formatter):
            """
            Null formatter to prevent stdout output
            """
            pass


        _registry.register_as(FORMAT_NAME, _Null)
    else:
        custom_messages = tcmessages.TeamcityServiceMessages()
        # Not safe to import it in old mode
        from teamcity.jb_behave_formatter import TeamcityFormatter


        class TeamcityFormatterWithLocation(TeamcityFormatter):
            """
            TeamcityFormatter that also reports element locations, so the IDE can navigate to them
            """

            def _report_suite_started(self, suite, suite_name):
                location = suite.location
                custom_messages.testSuiteStarted(suite_name,
                                                 _bdd_utils.get_location(base_dir, location.filename, location.line))

            def _report_test_started(self, test, test_name):
                location = test.location
                custom_messages.testStarted(test_name,
                                            _bdd_utils.get_location(base_dir, location.filename, location.line))


        _registry.register_as(FORMAT_NAME, TeamcityFormatterWithLocation)

    my_config.format = [FORMAT_NAME]  # To prevent output to stdout
    my_config.reporters = []  # To prevent a summary on stdout
    my_config.stdout_capture = False  # For test output
    my_config.stderr_capture = False  # For test output
    features = set()
    for feature in what_to_run:
        if os.path.isfile(feature) or glob.glob(
                os.path.join(feature, "*.feature")):  # A file, or a folder with feature files, was provided: use it as-is
            features.add(feature)
        elif os.path.isdir(feature):
            features |= set(_get_dirs_to_run(feature))  # Find "features" subfolders
    my_config.paths = list(features)
    if what_to_run and not my_config.paths:
        raise Exception("Nothing to run in {0}".format(what_to_run))
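
    # Illustrative resolution (all paths below are made-up examples):
    #   "/x/y/login.feature"                -> used as-is (a feature file)
    #   "/x/y" containing "*.feature" files -> used as-is
    #   any other directory                 -> scanned for "features" subfolders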

    # Run as Django if supported, run plain otherwise
    if not run_as_django_behave(FORMAT_NAME, what_to_run, command_args):
        _BehaveRunner(my_config, base_dir, use_old_runner).run()