# xref: /openbsd/gnu/llvm/llvm/utils/lit/lit/cl_arguments.py (revision d415bd75)
import argparse
import enum
import os
import shlex
import sys

import lit.reports
import lit.util


@enum.unique
class TestOrder(enum.Enum):
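    """Test orderings selectable via --order (SMART runs previously failing tests first)."""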
    LEXICAL = 'lexical'
    RANDOM = 'random'
    SMART = 'smart'


def parse_args():
    parser = argparse.ArgumentParser(prog='lit', fromfile_prefix_chars='@')
    parser.add_argument('test_paths',
            nargs='+',
            metavar="TEST_PATH",
            help='File or path to include in the test suite')

    parser.add_argument('--version',
            action='version',
            version='%(prog)s ' + lit.__version__)

    parser.add_argument("-j", "--threads", "--workers",
            dest="workers",
            metavar="N",
            help="Number of workers used for testing",
            type=_positive_int,
            default=lit.util.usable_core_count())
    parser.add_argument("--config-prefix",
            dest="configPrefix",
            metavar="NAME",
            help="Prefix for 'lit' config files")
    parser.add_argument("-D", "--param",
            dest="user_params",
            metavar="NAME=VAL",
            help="Add 'NAME' = 'VAL' to the user defined parameters",
            action="append",
            default=[])
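    # Illustrative example (not from this file): passing `-D BUILD_MODE=Release`
    # appends 'BUILD_MODE=Release' to user_params; test configs conventionally
    # read such values via lit_config.params.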

    format_group = parser.add_argument_group("Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    format_group.add_argument("-q", "--quiet",
            help="Suppress non-error output",
            action="store_true")
    format_group.add_argument("-s", "--succinct",
            help="Reduce amount of output."
                 " Additionally, show a progress bar,"
                 " unless --no-progress-bar is specified.",
            action="store_true")
    format_group.add_argument("-v", "--verbose",
            dest="showOutput",
            help="Show test output for failures",
            action="store_true")
    format_group.add_argument("-vv", "--echo-all-commands",
            dest="echoAllCommands",
            action="store_true",
            help="Echo all commands as they are executed to stdout. In case of "
                 "failure, the last command shown will be the failing one.")
    format_group.add_argument("-a", "--show-all",
            dest="showAllOutput",
            help="Display all commandlines and output",
            action="store_true")
    format_group.add_argument("-o", "--output",
            type=lit.reports.JsonReport,
            help="Write test results to the provided path",
            metavar="PATH")
    format_group.add_argument("--no-progress-bar",
            dest="useProgressBar",
            help="Do not use curses based progress bar",
            action="store_false")

    # Note: this does not generate flags for user-defined result codes.
    success_codes = [c for c in lit.Test.ResultCode.all_codes()
                     if not c.isFailure]
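    # For the standard codes this yields flags such as --show-pass, --show-xfail
    # and --show-unsupported; the exact set depends on ResultCode.all_codes().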
    for code in success_codes:
        format_group.add_argument(
            "--show-{}".format(code.name.lower()),
            dest="shown_codes",
            help="Show {} tests ({})".format(code.label.lower(), code.name),
            action="append_const",
            const=code,
            default=[])

    execution_group = parser.add_argument_group("Test Execution")
    execution_group.add_argument("--path",
            help="Additional paths to add to testing environment",
            action="append",
            default=[],
            type=os.path.abspath)
    execution_group.add_argument("--vg",
            dest="useValgrind",
            help="Run tests under valgrind",
            action="store_true")
    execution_group.add_argument("--vg-leak",
            dest="valgrindLeakCheck",
            help="Check for memory leaks under valgrind",
            action="store_true")
    execution_group.add_argument("--vg-arg",
            dest="valgrindArgs",
            metavar="ARG",
            help="Specify an extra argument for valgrind",
            action="append",
            default=[])
    execution_group.add_argument("--time-tests",
            help="Track elapsed wall time for each test",
            action="store_true")
    execution_group.add_argument("--no-execute",
            dest="noExecute",
            help="Don't execute any tests (assume PASS)",
            action="store_true")
    execution_group.add_argument("--xunit-xml-output",
            type=lit.reports.XunitReport,
            help="Write XUnit-compatible XML test reports to the specified file")
    execution_group.add_argument("--resultdb-output",
            type=lit.reports.ResultDBReport,
            help="Write LuCI ResultDB compatible JSON to the specified file")
    execution_group.add_argument("--time-trace-output",
            type=lit.reports.TimeTraceReport,
            help="Write Chrome tracing compatible JSON to the specified file")
    execution_group.add_argument("--timeout",
            dest="maxIndividualTestTime",
            help="Maximum time to spend running a single test (in seconds). "
                 "0 means no time limit. [Default: 0]",
            type=_non_negative_int)
    execution_group.add_argument("--max-failures",
            help="Stop execution after the given number of failures.",
            type=_positive_int)
    execution_group.add_argument("--allow-empty-runs",
            help="Do not fail the run if all tests are filtered out",
            action="store_true")
    execution_group.add_argument("--ignore-fail",
            dest="ignoreFail",
            action="store_true",
            help="Exit with status zero even if some tests fail")
    execution_group.add_argument("--no-indirectly-run-check",
            dest="indirectlyRunCheck",
            help="Do not error if a test would not be run if the user had "
                 "specified the containing directory instead of naming the "
                 "test directly.",
            action="store_false")

    selection_group = parser.add_argument_group("Test Selection")
    selection_group.add_argument("--max-tests",
            metavar="N",
            help="Maximum number of tests to run",
            type=_positive_int)
    selection_group.add_argument("--max-time",
            dest="timeout",
            metavar="N",
            help="Maximum time to spend testing (in seconds)",
            type=_positive_int)
    selection_group.add_argument("--order",
            choices=[x.value for x in TestOrder],
            default=TestOrder.SMART,
            help="Test order to use (default: smart)")
    selection_group.add_argument("--shuffle",
            dest="order",
            help="Run tests in random order (DEPRECATED: use --order=random)",
            action="store_const",
            const=TestOrder.RANDOM)
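    # Note: --shuffle shares dest="order" with --order, so it simply stores
    # TestOrder.RANDOM there; as usual with argparse, whichever of the two
    # options appears last on the command line wins.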
    selection_group.add_argument("-i", "--incremental",
            help="Run failed tests first (DEPRECATED: use --order=smart)",
            action="store_true")
    selection_group.add_argument("--filter",
            metavar="REGEX",
            type=_case_insensitive_regex,
            help="Only run tests with paths matching the given regular expression",
            default=os.environ.get("LIT_FILTER", ".*"))
    selection_group.add_argument("--filter-out",
            metavar="REGEX",
            type=_case_insensitive_regex,
            help="Filter out tests with paths matching the given regular expression",
            default=os.environ.get("LIT_FILTER_OUT", "^$"))
    selection_group.add_argument("--xfail",
            metavar="LIST",
            type=_semicolon_list,
            help="XFAIL tests with paths in the semicolon separated list",
            default=os.environ.get("LIT_XFAIL", ""))
    selection_group.add_argument("--xfail-not",
            metavar="LIST",
            type=_semicolon_list,
            help="Do not XFAIL tests with paths in the semicolon separated list",
            default=os.environ.get("LIT_XFAIL_NOT", ""))
    selection_group.add_argument("--num-shards",
            dest="numShards",
            metavar="M",
            help="Split testsuite into M pieces and only run one",
            type=_positive_int,
            default=os.environ.get("LIT_NUM_SHARDS"))
    selection_group.add_argument("--run-shard",
            dest="runShard",
            metavar="N",
            help="Run shard #N of the testsuite",
            type=_positive_int,
            default=os.environ.get("LIT_RUN_SHARD"))
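    # Illustrative example (assumed usage): --num-shards=4 --run-shard=2, or
    # LIT_NUM_SHARDS=4 LIT_RUN_SHARD=2 in the environment, runs only the second
    # of four roughly equal slices of the suite; the pair is validated below.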

    debug_group = parser.add_argument_group("Debug and Experimental Options")
    debug_group.add_argument("--debug",
            help="Enable debugging (for 'lit' development)",
            action="store_true")
    debug_group.add_argument("--show-suites",
            help="Show discovered test suites and exit",
            action="store_true")
    debug_group.add_argument("--show-tests",
            help="Show all discovered tests and exit",
            action="store_true")
    debug_group.add_argument("--show-used-features",
            help="Show all features used in the test suite (in XFAIL, UNSUPPORTED and REQUIRES) and exit",
            action="store_true")

    # LIT is special: environment variables override command line arguments.
    env_args = shlex.split(os.environ.get("LIT_OPTS", ""))
    args = sys.argv[1:] + env_args
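    # Appending env_args after sys.argv is what gives them precedence: for
    # single-value and store_true options the last occurrence wins, while
    # "append" options accumulate values from both sources.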
    opts = parser.parse_args(args)

    # Validate command line options
    if opts.echoAllCommands:
        opts.showOutput = True

    if opts.incremental:
        print('WARNING: --incremental is deprecated. Failing tests now always run first.')

    if opts.numShards or opts.runShard:
        if not opts.numShards or not opts.runShard:
            parser.error("--num-shards and --run-shard must be used together")
        if opts.runShard > opts.numShards:
            parser.error("--run-shard must be between 1 and --num-shards (inclusive)")
        opts.shard = (opts.runShard, opts.numShards)
    else:
        opts.shard = None

    opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.resultdb_output, opts.time_trace_output])

    return opts


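# argparse "type=" validators: raising argparse.ArgumentTypeError from one of
# these callables makes the parser report a standard usage error for the
# offending option instead of a traceback.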
def _positive_int(arg):
    return _int(arg, 'positive', lambda i: i > 0)


def _non_negative_int(arg):
    return _int(arg, 'non-negative', lambda i: i >= 0)


def _int(arg, kind, pred):
    desc = "requires {} integer, but found '{}'"
    try:
        i = int(arg)
    except ValueError:
        raise _error(desc, kind, arg)
    if not pred(i):
        raise _error(desc, kind, arg)
    return i


def _case_insensitive_regex(arg):
    import re
    try:
        return re.compile(arg, re.IGNORECASE)
    except re.error as reason:
        raise _error("invalid regular expression: '{}', {}", arg, reason)


def _semicolon_list(arg):
    return arg.split(';')


def _error(desc, *args):
    msg = desc.format(*args)
    return argparse.ArgumentTypeError(msg)
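# Illustrative behaviour of the helpers above (comments only, not executed here):
#   _positive_int("8")       -> 8
#   _non_negative_int("0")   -> 0
#   _positive_int("0")       -> raises ArgumentTypeError: requires positive integer, but found '0'
#   _semicolon_list("a;b;c") -> ['a', 'b', 'c']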
278