#!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# run_tests.py - run the tests in the regression test suite.
#

'''usage: python run_tests.py
            [--verbose] [--log-to-stdout] [--cleanup] [--bin=<path>]
            [--parallel | --parallel=<n>] [--global-scheduler]
            [--url=<base-url>] [--http-library=<http-library>] [--enable-sasl]
            [--fs-type=<fs-type>] [--fsfs-packing] [--fsfs-sharding=<n>]
            [--list] [--milestone-filter=<regex>] [--mode-filter=<type>]
            [--server-minor-version=<version>] [--http-proxy=<host>:<port>]
            [--httpd-version=<version>] [--httpd-whitelist=<version>]
            [--config-file=<file>] [--ssl-cert=<file>]
            [--exclusive-wc-locks] [--memcached-server=<url:port>]
            [--fsfs-compression=<type>] [--fsfs-dir-deltification=<true|false>]
            [--allow-remote-http-connection]
            <abs_srcdir> <abs_builddir>
            <prog ...>

The optional flags and the first two parameters are passed unchanged
to the TestHarness constructor.  All other parameters are names of
test programs.

Each <prog> should be the full path (absolute or from the current directory)
and filename of a test program, optionally followed by '#' and a comma-
separated list of test numbers; the default is to run all the tests in it.
'''
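
# Example invocation (paths are hypothetical):
#
#   python run_tests.py --parallel=4 --log-to-stdout \
#       /path/to/srcdir /path/to/builddir \
#       subversion/tests/cmdline/basic_tests.py#1,3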

import os, sys, shutil
import re
import logging
import optparse, subprocess, threading, traceback
from datetime import datetime

try:
  # Python >=3.0
  import queue
except ImportError:
  # Python <3.0
  import Queue as queue

if sys.version_info < (3, 0):
  # Python >= 3.0 already has these built in
  import exceptions

if sys.version_info < (3, 5):
  import imp
else:
  # The imp module is deprecated since Python 3.4; the replacement we use,
  # module_from_spec(), is available since Python 3.5.
  import importlib.util

# Ensure the compiled C tests use a known locale (Python tests set the locale
# explicitly).
os.environ['LC_ALL'] = 'C'

# Placeholder for the svntest module
svntest = None

class TextColors:
  '''Some ANSI terminal constants for output color'''
  ENDC = '\033[0;m'
  FAILURE = '\033[1;31m'
  SUCCESS = '\033[1;32m'

  @classmethod
  def disable(cls):
    cls.ENDC = ''
    cls.FAILURE = ''
    cls.SUCCESS = ''


def _get_term_width():
  'Attempt to discern the width of the terminal'
  # This may not work on all platforms, in which case the default of 80
  # characters is used.  Improvements welcomed.

  def ioctl_GWINSZ(fd):
    try:
      import fcntl, termios, struct, os
      cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
                                           struct.pack('hh', 0, 0)))
    except Exception:
      return None
    return cr

  try:
    cr = (os.environ['SVN_MAKE_CHECK_LINES'],
          os.environ['SVN_MAKE_CHECK_COLUMNS'])
  except KeyError:
    cr = None
  if not cr:
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
  if not cr:
    try:
      fd = os.open(os.ctermid(), os.O_RDONLY)
      cr = ioctl_GWINSZ(fd)
      os.close(fd)
    except Exception:
      pass
  if not cr:
    try:
      cr = (os.environ['LINES'], os.environ['COLUMNS'])
    except KeyError:
      cr = None
  if not cr:
    # Default
    if sys.platform == 'win32':
      cr = (25, 79)
    else:
      cr = (25, 80)
  return int(cr[1])

def ensure_str(s):
  '''If S is not a string already, convert it to a string'''
  if isinstance(s, str):
    return s
  else:
    return s.decode("latin-1")
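
# For example, ensure_str(b'PASS: basic_tests 1\n') returns the text
# 'PASS: basic_tests 1\n'.  Byte output captured from test subprocesses is
# decoded as latin-1, which cannot fail for arbitrary byte values.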

def open_logfile(filename, mode, encoding='utf-8'):
  if sys.version_info[0] != 2:
    return open(filename, mode, encoding=encoding, errors='surrogateescape')
  else:
    class Wrapper(object):
      def __init__(self, stream, encoding):
        self._stream = stream
        self.encoding = encoding
      def __getattr__(self, name):
        return getattr(self._stream, name)
    return Wrapper(open(filename, mode), encoding)
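
# Note: on Python 2, open() has no 'encoding' parameter, so the Wrapper above
# merely records the requested encoding for callers that inspect it, while
# delegating all actual I/O to the underlying stream.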

class TestHarness:
  '''Test harness for Subversion tests.'''

  def __init__(self, abs_srcdir, abs_builddir, logfile, faillogfile, opts):
    '''Construct a TestHarness instance.

    ABS_SRCDIR and ABS_BUILDDIR are the source and build directories.
    LOGFILE is the name of the log file. If LOGFILE is None, let tests
    print their output to stdout and stderr, and don't print a summary
    at the end (since there's no log file to analyze).
    OPTS are the options that will be sent to the tests.
    '''

    # Canonicalize the test base URL
    if opts.url is not None and opts.url[-1] == '/':
      opts.url = opts.url[:-1]

    # Make the configfile path absolute
    if opts.config_file is not None:
      opts.config_file = os.path.abspath(opts.config_file)

    # Parse out the FSFS version number
    if (opts.fs_type is not None
         and opts.fs_type.startswith('fsfs-v')):
      opts.fsfs_version = int(opts.fs_type[6:])
      opts.fs_type = 'fsfs'
    else:
      opts.fsfs_version = None

    self.srcdir = abs_srcdir
    self.builddir = abs_builddir
    self.logfile = logfile
    self.faillogfile = faillogfile
    self.log = None
    self.opts = opts

    if not sys.stdout.isatty() or sys.platform == 'win32':
      TextColors.disable()

  def _init_c_tests(self):
    cmdline = [None, None]   # Program name and source dir

    if self.opts.config_file is not None:
      cmdline.append('--config-file=' + self.opts.config_file)
    elif self.opts.memcached_server is not None:
      cmdline.append('--memcached-server=' + self.opts.memcached_server)

    if self.opts.url is not None:
      subdir = 'subversion/tests/cmdline/svn-test-work'
      cmdline.append('--repos-url=%s' % self.opts.url +
                        '/svn-test-work/repositories')
      cmdline.append('--repos-dir=%s'
                     % os.path.abspath(
                         os.path.join(self.builddir,
                                      subdir, 'repositories')))

      # Enable access for http
      if self.opts.url.startswith('http'):
        authzparent = os.path.join(self.builddir, subdir)
        if not os.path.exists(authzparent):
          os.makedirs(authzparent)
        with open(os.path.join(authzparent, 'authz'), 'w') as authz_file:
          authz_file.write('[/]\n'
                           '* = rw\n')

    # ### Support --repos-template
    if self.opts.list_tests is not None:
      cmdline.append('--list')
    if (self.opts.set_log_level is not None
        and self.opts.set_log_level <= logging.DEBUG):
      cmdline.append('--verbose')
    if self.opts.cleanup is not None:
      cmdline.append('--cleanup')
    if self.opts.fs_type is not None:
      cmdline.append('--fs-type=%s' % self.opts.fs_type)
    if self.opts.fsfs_version is not None:
      cmdline.append('--fsfs-version=%d' % self.opts.fsfs_version)
    if self.opts.server_minor_version is not None:
      cmdline.append('--server-minor-version=%d' %
                     self.opts.server_minor_version)
    if self.opts.mode_filter is not None:
      cmdline.append('--mode-filter=' + self.opts.mode_filter)
    if self.opts.parallel is not None:
      cmdline.append('--parallel')

    self.c_test_cmdline = cmdline


  def _init_py_tests(self, basedir):
    cmdline = ['--srcdir=%s' % self.srcdir]
    if self.opts.list_tests is not None:
      cmdline.append('--list')
    if self.opts.cleanup is not None:
      cmdline.append('--cleanup')
    if self.opts.parallel is not None:
      if self.opts.parallel == 1:
        cmdline.append('--parallel')
      else:
        cmdline.append('--parallel-instances=%d' % self.opts.parallel)
    if self.opts.svn_bin is not None:
      cmdline.append('--bin=%s' % self.opts.svn_bin)
    if self.opts.url is not None:
      cmdline.append('--url=%s' % self.opts.url)
    if self.opts.fs_type is not None:
      cmdline.append('--fs-type=%s' % self.opts.fs_type)
    if self.opts.http_library is not None:
      cmdline.append('--http-library=%s' % self.opts.http_library)
    if self.opts.fsfs_sharding is not None:
      cmdline.append('--fsfs-sharding=%d' % self.opts.fsfs_sharding)
    if self.opts.fsfs_packing is not None:
      cmdline.append('--fsfs-packing')
    if self.opts.fsfs_version is not None:
      cmdline.append('--fsfs-version=%d' % self.opts.fsfs_version)
    if self.opts.server_minor_version is not None:
      cmdline.append('--server-minor-version=%d' % self.opts.server_minor_version)
    if self.opts.dump_load_cross_check is not None:
      cmdline.append('--dump-load-cross-check')
    if self.opts.enable_sasl is not None:
      cmdline.append('--enable-sasl')
    if self.opts.config_file is not None:
      cmdline.append('--config-file=%s' % self.opts.config_file)
    if self.opts.milestone_filter is not None:
      cmdline.append('--milestone-filter=%s' % self.opts.milestone_filter)
    if self.opts.mode_filter is not None:
      cmdline.append('--mode-filter=%s' % self.opts.mode_filter)
    if self.opts.set_log_level is not None:
      cmdline.append('--set-log-level=%s' % self.opts.set_log_level)
    if self.opts.ssl_cert is not None:
      cmdline.append('--ssl-cert=%s' % self.opts.ssl_cert)
    if self.opts.http_proxy is not None:
      cmdline.append('--http-proxy=%s' % self.opts.http_proxy)
    if self.opts.http_proxy_username is not None:
      cmdline.append('--http-proxy-username=%s' % self.opts.http_proxy_username)
    if self.opts.http_proxy_password is not None:
      cmdline.append('--http-proxy-password=%s' % self.opts.http_proxy_password)
    if self.opts.httpd_version is not None:
      cmdline.append('--httpd-version=%s' % self.opts.httpd_version)
    if self.opts.httpd_whitelist is not None:
      cmdline.append('--httpd-whitelist=%s' % self.opts.httpd_whitelist)
    if self.opts.exclusive_wc_locks is not None:
      cmdline.append('--exclusive-wc-locks')
    if self.opts.memcached_server is not None:
      cmdline.append('--memcached-server=%s' % self.opts.memcached_server)
    if self.opts.fsfs_compression is not None:
      cmdline.append('--fsfs-compression=%s' % self.opts.fsfs_compression)
    if self.opts.fsfs_dir_deltification is not None:
      cmdline.append('--fsfs-dir-deltification=%s' % self.opts.fsfs_dir_deltification)
    if self.opts.allow_remote_http_connection is not None:
      cmdline.append('--allow-remote-http-connection')

    self.py_test_cmdline = cmdline

    # The svntest module is very pedantic about the current working directory
    old_cwd = os.getcwd()
    try:
      os.chdir(basedir)
      sys.path.insert(0, os.path.abspath(os.path.join(self.srcdir, basedir)))

      global svntest
      __import__('svntest')
      __import__('svntest.main')
      __import__('svntest.testcase')
      svntest = sys.modules['svntest']
      svntest.main = sys.modules['svntest.main']
      svntest.testcase = sys.modules['svntest.testcase']

      svntest.main.parse_options(cmdline, optparse.SUPPRESS_USAGE)
      svntest.testcase.TextColors.disable()
    finally:
      os.chdir(old_cwd)

  class Job:
    '''A single test or test suite to execute. After execution, the results
    can be taken from the respective data fields.'''

    def __init__(self, number, is_python, progabs, progdir, progbase):
      '''NUMBER is the test count for C test programs and the test number
      for Python tests.'''
      self.number = number
      self.is_python = is_python
      self.progabs = progabs
      self.progdir = progdir
      self.progbase = progbase
      self.result = None
      self.stdout_lines = []
      self.stderr_lines = []
      self.taken = 0

    def test_count(self):
      if self.is_python:
        return 1
      else:
        return self.number

    def _command_line(self, harness):
      if self.is_python:
        cmdline = list(harness.py_test_cmdline)
        cmdline.insert(0, sys.executable)
        cmdline.insert(1, self.progabs)
        # Run the test apps in "child process" mode,
        # i.e. w/o cleaning up global directories etc.
        cmdline.append('-c')
        cmdline.append(str(self.number))
      else:
        cmdline = list(harness.c_test_cmdline)
        cmdline[0] = self.progabs
        cmdline[1] = '--srcdir=%s' % os.path.join(harness.srcdir, self.progdir)
      return cmdline
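
    # For a Python job the resulting command line looks roughly like
    # (hypothetical paths):
    #   [sys.executable, '/abs/basic_tests.py', '--srcdir=...', '-c', '3']
    # For a C job, the test binary itself is the first element.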

    def execute(self, harness):
      start_time = datetime.now()
      prog = subprocess.Popen(self._command_line(harness),
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              cwd=self.progdir)

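      # Note: draining stdout to EOF before reading stderr could block if a
      # test filled the stderr pipe buffer; in practice the test programs
      # write very little to stderr, so this simple approach suffices.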
      self.stdout_lines = prog.stdout.readlines()
      self.stderr_lines = prog.stderr.readlines()
      prog.wait()
      self.result = prog.returncode
      self.taken = datetime.now() - start_time

  class CollectingThread(threading.Thread):
    '''A thread that lists the individual tests in a given test case and
    creates job objects for them.  The resulting jobs are saved in the
    result field.'''
    def __init__(self, srcdir, builddir, testcase):
      threading.Thread.__init__(self)
      self.srcdir = srcdir
      self.builddir = builddir
      self.testcase = testcase
      self.result = []

    def _count_c_tests(self, progabs, progdir, progbase):
      'List the tests in a C test program and create one job covering them.'
      cmdline = [ progabs, '--list' ]
      prog = subprocess.Popen(cmdline, stdout=subprocess.PIPE, cwd=progdir)
      lines = prog.stdout.readlines()
      # The first two lines of the --list output are headers, not tests.
      self.result.append(TestHarness.Job(len(lines) - 2, False, progabs,
                                         progdir, progbase))
      prog.wait()

    def _count_py_tests(self, progabs, progdir, progbase):
      'List the tests in a Python test script and create one job per test.'
      cmdline = [ sys.executable, progabs, '--list' ]
      prog = subprocess.Popen(cmdline, stdout=subprocess.PIPE, cwd=progdir)
      lines = prog.stdout.readlines()

      # The first two lines of the --list output are headers, not tests.
      for i in range(0, len(lines) - 2):
        self.result.append(TestHarness.Job(i + 1, True, progabs,
                                           progdir, progbase))
      prog.wait()

    def run(self):
      'List the tests in this test program and create jobs for them.'

      progdir, progbase, test_nums = self.testcase

      if progbase[-3:] == '.py':
        progabs = os.path.abspath(os.path.join(self.srcdir, progdir, progbase))
        self._count_py_tests(progabs, progdir, progbase)
      else:
        progabs = os.path.abspath(os.path.join(self.builddir, progdir,
                                               progbase))
        self._count_c_tests(progabs, progdir, progbase)

    def get_result(self):
      return self.result

  class TestSpawningThread(threading.Thread):
    '''A thread that runs test cases in their own processes.
    Receives test jobs to run from the queue, and shows some progress
    indication on stdout.  The detailed test results are stored inside
    the job objects.'''
    def __init__(self, queue, harness):
      threading.Thread.__init__(self)
      self.queue = queue
      self.harness = harness
      self.results = []

    def run(self):
      while True:
        try:
          job = self.queue.get_nowait()
        except queue.Empty:
          return

        job.execute(self.harness)

        if job.result:
          os.write(sys.stdout.fileno(), b'!' * job.test_count())
        else:
          os.write(sys.stdout.fileno(), b'.' * job.test_count())


  def _run_global_scheduler(self, testlist, has_py_tests):
    # Collect all tests to execute (separate jobs for each test in python
    # test cases, one job for each c test case).  Do that concurrently to
    # mask latency.  This takes .5s instead of about 3s.
    threads = [ ]
    for count, testcase in enumerate(testlist):
      threads.append(self.CollectingThread(self.srcdir, self.builddir,
                                           testcase))

    for t in threads:
      t.start()

    jobs = []
    for t in threads:
      t.join()
      jobs.extend(t.result)

    # Put all jobs into our "todo" queue, ordered for more even resource
    # utilization: "big" jobs (test programs with 30 or more tests - an
    # arbitrary threshold) go to the front of the queue, so that long-running
    # suites start early and the load stays level towards the end of the run.
    job_queue = queue.Queue()
    total_count = 0
    scrambled = list(jobs)
    scrambled.sort(key=lambda x: ("1" if x.test_count() < 30 else "0") + str(x.number))
    for job in scrambled:
      total_count += job.test_count()
      job_queue.put(job)

    # Use the svntest infrastructure to initialize the common test template
    # wc and repos.
    if has_py_tests:
      old_cwd = os.getcwd()
      os.chdir(jobs[-1].progdir)
      svntest.main.options.keep_local_tmp = True
      svntest.main.execute_tests([])
      os.chdir(old_cwd)

    # Some more prep work
    if self.log:
      log = self.log
    else:
      log = sys.stdout

    if self.opts.parallel is None:
      thread_count = 1
    else:
      if self.opts.parallel == 1:
        thread_count = 5
      else:
        thread_count = self.opts.parallel

    # Actually run the tests in concurrent sub-processes
    print('Tests to execute: %d' % total_count)
    sys.stdout.flush()

    threads = [ TestHarness.TestSpawningThread(job_queue, self)
                for i in range(thread_count) ]
    for t in threads:
      t.start()
    for t in threads:
      t.join()

    print("")

    # Aggregate and log the results
    failed = 0
    taken = 0
    last_test_name = ""
    for job in jobs:
      if last_test_name != job.progbase:
        if last_test_name != "":
          log.write('ELAPSED: %s %s\n\n' % (last_test_name, str(taken)))
        last_test_name = job.progbase
        taken = job.taken
      else:
        taken += job.taken

      for line in job.stderr_lines:
        log.write(ensure_str(line))

      for line in job.stdout_lines:
        self._process_test_output_line(ensure_str(line))

      self._check_for_unknown_failure(log, job.progbase, job.result)
      failed = job.result or failed

    log.write('ELAPSED: %s %s\n\n' % (last_test_name, str(taken)))

    return failed

  def _run_local_schedulers(self, testlist):
    '''Serial execution of all test suites using their respective internal
    schedulers.'''
    testcount = len(testlist)

    failed = 0
    for count, testcase in enumerate(testlist):
      failed = self._run_test(testcase, count, testcount) or failed

    return failed

  def run(self, testlist):
    '''Run all test programs given in TESTLIST. Print a summary of results, if
       there is a log file. Return zero iff all test programs passed.'''
    self._open_log('w')
    failed = 0

    # Filter tests into Python and native groups and prepare arguments
    # for each group. The resulting lists will contain tuples of
    # (program dir, program name, test numbers), where the list of test
    # numbers may be empty.

    def split_nums(prog):
      test_nums = []
      if '#' in prog:
        prog, test_nums = prog.split('#')
        if test_nums:
          test_nums = test_nums.split(',')
      return prog, test_nums
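
    # For example (hypothetical input):
    #   split_nums('cmdline/basic_tests.py#2,5')
    #     -> ('cmdline/basic_tests.py', ['2', '5'])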

    py_basedir = set()
    py_tests = []
    c_tests = []

    for prog in testlist:
      progpath, testnums = split_nums(prog)
      progdir, progbase = os.path.split(progpath)
      if progpath.endswith('.py'):
        py_basedir.add(progdir)
        py_tests.append((progdir, progbase, testnums))
      elif not self.opts.skip_c_tests:
        c_tests.append((progdir, progbase, testnums))

    # Initialize svntest.main.options for Python tests. Load the
    # svntest.main module from the Python test path.
    if len(py_tests):
      if len(py_basedir) > 1:
        sys.stderr.write('The test harness requires all Python tests'
                         ' to be in the same directory.\n')
        sys.exit(1)
      self._init_py_tests(list(py_basedir)[0])
      py_tests.sort(key=lambda x: x[1])

    # Create the common command line for C tests
    if len(c_tests):
      self._init_c_tests()
      c_tests.sort(key=lambda x: x[1])

    # Run the tests
    testlist = c_tests + py_tests
    if self.opts.global_scheduler is None:
      failed = self._run_local_schedulers(testlist)
    else:
      failed = self._run_global_scheduler(testlist, len(py_tests) > 0)

    # Open the log again for filtering.
    if self.logfile:
      self._open_log('r')
      log_lines = self.log.readlines()
    else:
      log_lines = []

    # Remove \r characters introduced by opening the log as binary
    if sys.platform == 'win32':
      log_lines = [x.replace('\r', '') for x in log_lines]

    # Print the results, from least interesting to most interesting.

    # Helper for Work-In-Progress indications for XFAIL tests.
    wimptag = ' [[WIMP: '
    def printxfail(x):
      wip = x.find(wimptag)
      if wip < 0:
        sys.stdout.write(x)
      else:
        sys.stdout.write('%s\n       [[%s'
                         % (x[:wip], x[wip + len(wimptag):]))

    if self.opts.list_tests:
      passed = [x for x in log_lines if x[8:13] == '     ']
    else:
      passed = [x for x in log_lines if x[:6] == 'PASS: ']

    if self.opts.list_tests:
      skipped = [x for x in log_lines if x[8:12] == 'SKIP']
    else:
      skipped = [x for x in log_lines if x[:6] == 'SKIP: ']

    if skipped and not self.opts.list_tests:
      print('At least one test was SKIPPED, checking ' + self.logfile)
      for x in skipped:
        sys.stdout.write(x)

    if self.opts.list_tests:
      xfailed = [x for x in log_lines if x[8:13] == 'XFAIL']
    else:
      xfailed = [x for x in log_lines if x[:6] == 'XFAIL:']
    if xfailed and not self.opts.list_tests:
      print('At least one test XFAILED, checking ' + self.logfile)
      for x in xfailed:
        printxfail(x)

    xpassed = [x for x in log_lines if x[:6] == 'XPASS:']
    if xpassed:
      print('At least one test XPASSED, checking ' + self.logfile)
      for x in xpassed:
        printxfail(x)

    failed_list = [x for x in log_lines if x[:6] == 'FAIL: ']
    if failed_list:
      print('At least one test FAILED, checking ' + self.logfile)
      for x in failed_list:
        sys.stdout.write(x)

    # Print summaries, from least interesting to most interesting.
    if self.opts.list_tests:
      print('Summary of test listing:')
    else:
      print('Summary of test results:')
    if passed:
      if self.opts.list_tests:
        print('  %d test%s are set to PASS'
              % (len(passed), 's'*min(len(passed) - 1, 1)))
      else:
        print('  %d test%s PASSED'
              % (len(passed), 's'*min(len(passed) - 1, 1)))
    if skipped:
      if self.opts.list_tests:
        print('  %d test%s are set as SKIP'
              % (len(skipped), 's'*min(len(skipped) - 1, 1)))
      else:
        print('  %d test%s SKIPPED'
              % (len(skipped), 's'*min(len(skipped) - 1, 1)))
    if xfailed:
      passwimp = [x for x in xfailed if wimptag in x]
      if passwimp:
        if self.opts.list_tests:
          print('  %d test%s are set to XFAIL (%d WORK-IN-PROGRESS)'
                % (len(xfailed), 's'*min(len(xfailed) - 1, 1), len(passwimp)))
        else:
          print('  %d test%s XFAILED (%d WORK-IN-PROGRESS)'
                % (len(xfailed), 's'*min(len(xfailed) - 1, 1), len(passwimp)))
      else:
        if self.opts.list_tests:
          print('  %d test%s are set as XFAIL'
                % (len(xfailed), 's'*min(len(xfailed) - 1, 1)))
        else:
          print('  %d test%s XFAILED'
                % (len(xfailed), 's'*min(len(xfailed) - 1, 1)))
    if xpassed:
      failwimp = [x for x in xpassed if wimptag in x]
      if failwimp:
        print('  %d test%s XPASSED (%d WORK-IN-PROGRESS)'
              % (len(xpassed), 's'*min(len(xpassed) - 1, 1), len(failwimp)))
      else:
        print('  %d test%s XPASSED'
              % (len(xpassed), 's'*min(len(xpassed) - 1, 1)))
    if failed_list:
      print('  %d test%s FAILED'
            % (len(failed_list), 's'*min(len(failed_list) - 1, 1)))

    # Copy the truly interesting verbose logs to a separate file, for easier
    # viewing.
    if xpassed or failed_list:
      faillog = open_logfile(self.faillogfile, 'w')
      last_start_lineno = None
      last_start_re = re.compile('^(FAIL|SKIP|XFAIL|PASS|START|CLEANUP|END):')
      for lineno, line in enumerate(log_lines):
        # Iterate the lines.  If it ends a test we're interested in, dump that
        # test to FAILLOG.  If it starts a test (at all), remember the line
        # number (in case we need it later).
        if line in xpassed or line in failed_list:
          faillog.write('[[[\n')
          faillog.writelines(log_lines[last_start_lineno : lineno+1])
          faillog.write(']]]\n\n')
        if last_start_re.match(line):
          last_start_lineno = lineno + 1
      faillog.close()
    elif self.faillogfile and os.path.exists(self.faillogfile):
      print("WARNING: no failures, but '%s' exists from a previous run."
            % self.faillogfile)

    # Summary.
    if failed or xpassed or failed_list:
      summary = "Some tests failed"
    else:
      summary = "All tests successful"
    print("Python version: %d.%d.%d." % sys.version_info[:3])
    print("SUMMARY: %s\n" % summary)

    self._close_log()
    return failed

  def _open_log(self, mode):
    'Open the log file with the required MODE.'
    if self.logfile:
      self._close_log()
      self.log = open_logfile(self.logfile, mode)

  def _close_log(self):
    'Close the log file.'
    if self.log is not None:
      self.log.close()
      self.log = None

  def _process_test_output_line(self, line):
    if sys.platform == 'win32':
      # Remove CRs inserted because we parse the output as binary.
      line = line.replace('\r', '')

    # When using --log-to-stdout, self.log is None.
    if self.log:
      self.log.write(line)

    if line.startswith('PASS') or line.startswith('FAIL') \
        or line.startswith('XFAIL') or line.startswith('XPASS') \
        or line.startswith('SKIP'):
      return 1

    return 0

  def _check_for_unknown_failure(self, log, progbase, test_failed):
    # We always return 1 for failed tests. Some other failure than 1
    # probably means the test didn't run at all and probably didn't
    # output any failure info. In that case, log a generic failure message.
    # ### Even if failure==1 it could be that the test didn't run at all.
    if test_failed and test_failed != 1:
      if self.log:
        log.write('FAIL:  %s: Unknown test failure; see tests.log.\n' % progbase)
        log.flush()
      else:
        log.write('FAIL:  %s: Unknown test failure.\n' % progbase)

  def _run_c_test(self, progabs, progdir, progbase, test_nums, dot_count):
    'Run a C test, escaping parameters as required.'
    if self.opts.list_tests and self.opts.milestone_filter:
      print('WARNING: --milestone-filter option does not currently work with C tests')

    if not os.access(progbase, os.X_OK):
      print("\nNot an executable file: " + progbase)
      sys.exit(1)

    cmdline = self.c_test_cmdline[:]
    cmdline[0] = './' + progbase
    cmdline[1] = '--srcdir=%s' % os.path.join(self.srcdir, progdir)

    if test_nums:
      cmdline.extend(test_nums)
      total = len(test_nums)
    else:
      total_cmdline = [cmdline[0], '--list']
      prog = subprocess.Popen(total_cmdline, stdout=subprocess.PIPE)
      lines = prog.stdout.readlines()
      # The first two lines of the --list output are headers, not tests.
      total = len(lines) - 2

    # Kept on self because progress_func() below must update it, and
    # Python 2 closures cannot rebind outer local variables.
    self.dots_written = 0
    def progress_func(completed):
      if not self.log or self.dots_written >= dot_count:
        return
      dots = (completed * dot_count) // total
      if dots > dot_count:
        dots = dot_count
      dots_to_write = dots - self.dots_written
      os.write(sys.stdout.fileno(), b'.' * dots_to_write)
      self.dots_written = dots
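
    # For example, with total=100 and dot_count=50, one dot is written for
    # every two completed tests.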

    tests_completed = 0
    prog = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
                            stderr=self.log)
    line = prog.stdout.readline()
    while line:
      line = ensure_str(line)
      if self._process_test_output_line(line):
        tests_completed += 1
        progress_func(tests_completed)

      line = prog.stdout.readline()

    # If we didn't run any tests, still print out the dots
    if not tests_completed:
      os.write(sys.stdout.fileno(), b'.' * dot_count)

    prog.wait()
    return prog.returncode

  def _run_py_test(self, progabs, progdir, progbase, test_nums, dot_count):
    'Run a Python test, passing parameters as needed.'
    try:
      if sys.version_info < (3, 0):
        prog_mod = imp.load_module(progbase[:-3], open(progabs, 'r'), progabs,
                                   ('.py', 'U', imp.PY_SOURCE))
      elif sys.version_info < (3, 5):
        prog_mod = imp.load_module(progbase[:-3],
                                   open(progabs, 'r', encoding="utf-8"),
                                   progabs, ('.py', 'U', imp.PY_SOURCE))
      else:
        spec = importlib.util.spec_from_file_location(progbase[:-3], progabs)
        prog_mod = importlib.util.module_from_spec(spec)
        sys.modules[progbase[:-3]] = prog_mod
        spec.loader.exec_module(prog_mod)
    except Exception:
      print("\nError loading test (details in following traceback): " + progbase)
      traceback.print_exc()
      sys.exit(1)

    # Set up the output pipes
    old_stdout = sys.stdout.fileno()
    if self.log:
      sys.stdout.flush()
      sys.stderr.flush()
      self.log.flush()
      saved_stds = sys.stdout, sys.stderr
      sys.stdout = sys.stderr = self.log

    # Kept on self because progress_func() below must update them, and
    # Python 2 closures cannot rebind outer local variables.
    self.dots_written = 0
    self.progress_lock = threading.Lock()
    def progress_func(completed, total):
      """Report test suite progress. Can be called from multiple threads
         in parallel mode."""
      if not self.log:
        return
      dots = (completed * dot_count) // total
      if dots > dot_count:
        dots = dot_count
      self.progress_lock.acquire()
      if self.dots_written < dot_count:
        dots_to_write = dots - self.dots_written
        self.dots_written = dots
        os.write(old_stdout, b'.' * dots_to_write)
      self.progress_lock.release()

    serial_only = hasattr(prog_mod, 'serial_only') and prog_mod.serial_only

    # run the tests
    if self.opts.list_tests:
      prog_f = None
    else:
      prog_f = progress_func

    try:
      failed = svntest.main.execute_tests(prog_mod.test_list,
                                          serial_only=serial_only,
                                          test_name=progbase,
                                          progress_func=prog_f,
                                          test_selection=test_nums)
    except svntest.Failure:
      if self.log:
        os.write(old_stdout, b'.' * dot_count)
      failed = True

    # restore some values
    if self.log:
      self.log.flush()
      sys.stdout, sys.stderr = saved_stds

    return failed

  def _run_test(self, testcase, test_nr, total_tests):
    "Run a single test. Return the test's exit code."

    if self.log:
      log = self.log
    else:
      log = sys.stdout

    progdir, progbase, test_nums = testcase
    if self.log:
      # Using write here because we don't want even a trailing space
      test_info = '[%s/%d] %s' % (str(test_nr + 1).zfill(len(str(total_tests))),
                                  total_tests, progbase)
      if self.opts.list_tests:
        sys.stdout.write('Listing tests in %s' % (test_info, ))
      else:
        sys.stdout.write('%s' % (test_info, ))
      sys.stdout.flush()
    else:
      # ### Hack for --log-to-stdout to work (but not print any dots).
      test_info = ''

    if self.opts.list_tests:
      log.write('LISTING: %s\n' % progbase)
    else:
      log.write('START: %s\n' % progbase)

    log.flush()

    start_time = datetime.now()

    progabs = os.path.abspath(os.path.join(self.srcdir, progdir, progbase))
    old_cwd = os.getcwd()
    line_length = _get_term_width()
    dots_needed = line_length \
                    - len(test_info) \
                    - len('success')
    try:
      os.chdir(progdir)
      if progbase[-3:] == '.py':
        testcase = self._run_py_test
      else:
        testcase = self._run_c_test
      failed = testcase(progabs, progdir, progbase, test_nums, dots_needed)
    finally:
      os.chdir(old_cwd)

    self._check_for_unknown_failure(log, progbase, failed)

    if not self.opts.list_tests:
      # Log the elapsed time.
      elapsed_time = str(datetime.now() - start_time)
      log.write('END: %s\n' % progbase)
      log.write('ELAPSED: %s %s\n' % (progbase, elapsed_time))

    log.write('\n')

    # If we are only listing the tests, just add a newline; otherwise,
    # finish the progress line printed above with the test result.
    if self.log:
      if self.opts.list_tests:
        print()
      else:
        if failed:
          print(TextColors.FAILURE + 'FAILURE' + TextColors.ENDC)
        else:
          print(TextColors.SUCCESS + 'success' + TextColors.ENDC)

    return failed


def create_parser():
  def set_log_level(option, opt, value, parser, level=None):
    # --verbose passes a numeric level via callback_args, while
    # --set-log-level passes a string that may be symbolic ('DEBUG')
    # or numeric ('10').
    if level is None:
      level = value
    if isinstance(level, str):
      level = getattr(logging, level, None) or int(level)
    parser.values.set_log_level = level
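
  # For example, '--set-log-level=DEBUG' and '--set-log-level=10' are
  # equivalent, and '-v' is shorthand for '--set-log-level=DEBUG'.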

  parser = optparse.OptionParser(usage=__doc__)

  parser.add_option('-l', '--list', action='store_true', dest='list_tests',
                    help='Print test doc strings instead of running them')
  parser.add_option('-v', '--verbose', action='callback',
                    callback=set_log_level, callback_args=(logging.DEBUG, ),
                    help='Print binary command-lines')
  parser.add_option('-c', '--cleanup', action='store_true',
                    help='Clean up after successful tests')
  parser.add_option('-p', '--parallel', action='store', type='int',
                    help='Run the tests in parallel')
  parser.add_option('-u', '--url', action='store',
                    help='Base url to the repos (e.g. svn://localhost)')
  parser.add_option('-f', '--fs-type', action='store',
                    help='Subversion file system type (fsfs(-v[46]), bdb or fsx)')
  parser.add_option('-g', '--global-scheduler', action='store_true',
                    help='Run tests from all scripts together')
  parser.add_option('--http-library', action='store',
                    help="Make svn use this DAV library (neon or serf)")
  parser.add_option('--bin', action='store', dest='svn_bin',
                    help='Use the svn binaries installed in this path')
  parser.add_option('--fsfs-sharding', action='store', type='int',
                    help='Default shard size (for fsfs)')
  parser.add_option('--fsfs-packing', action='store_true',
                    help="Run 'svnadmin pack' automatically")
  parser.add_option('--server-minor-version', type='int', action='store',
                    help="Set the minor version for the server")
  parser.add_option('--skip-c-tests', '--skip-C-tests', action='store_true',
                    help="Run only the Python tests")
  parser.add_option('--dump-load-cross-check', action='store_true',
                    help="After every test, run a series of dump and load " +
                         "tests with svnadmin, svnrdump and svndumpfilter " +
                         "on the testcase repositories to cross-check " +
                         "dump file compatibility.")
  parser.add_option('--enable-sasl', action='store_true',
                    help='Whether to enable SASL authentication')
  parser.add_option('--config-file', action='store',
                    help="Configuration file for tests.")
  parser.add_option('--log-to-stdout', action='store_true',
                    help='Print test progress to stdout instead of a log file')
  parser.add_option('--milestone-filter', action='store', dest='milestone_filter',
                    help='Limit --list to those with target milestone specified')
  parser.add_option('--mode-filter', action='store', dest='mode_filter',
                    default='ALL',
                    help='Limit tests to those with type specified (e.g. XFAIL)')
  parser.add_option('--set-log-level', action='callback', type='str',
                    callback=set_log_level,
                    help="Set log level (numerically or symbolically). " +
                         "Symbolic levels are: CRITICAL, ERROR, WARNING, " +
                         "INFO, DEBUG")
  parser.add_option('--ssl-cert', action='store',
                    help='Path to SSL server certificate.')
  parser.add_option('--http-proxy', action='store',
                    help='Use the HTTP Proxy at hostname:port.')
  parser.add_option('--http-proxy-username', action='store',
                    help='Username for the HTTP Proxy.')
  parser.add_option('--http-proxy-password', action='store',
                    help='Password for the HTTP Proxy.')
  parser.add_option('--httpd-version', action='store',
                    help='Assume HTTPD is this version.')
  parser.add_option('--httpd-whitelist', action='store',
                    help='Assume HTTPD whitelist is this version.')
  parser.add_option('--exclusive-wc-locks', action='store_true',
                    help='Use sqlite exclusive locking for working copies')
  parser.add_option('--memcached-server', action='store',
                    help='Use memcached server at specified URL (FSFS only)')
  parser.add_option('--fsfs-compression', action='store', type='str',
                    help='Set compression type (for fsfs)')
  parser.add_option('--fsfs-dir-deltification', action='store', type='str',
                    help='Set directory deltification option (for fsfs)')
  parser.add_option('--allow-remote-http-connection', action='store_true',
                    help='Run tests that connect to remote HTTP(S) servers')

  parser.set_defaults(set_log_level=None)
  return parser

def main():
  (opts, args) = create_parser().parse_args(sys.argv[1:])

  if len(args) < 3:
    print("{}: at least three positional arguments required; got {!r}".format(
      os.path.basename(sys.argv[0]), args
    ))
    sys.exit(2)

  if opts.log_to_stdout:
    logfile = None
    faillogfile = None
  else:
    logfile = os.path.abspath('tests.log')
    faillogfile = os.path.abspath('fails.log')

  th = TestHarness(args[0], args[1], logfile, faillogfile, opts)
  failed = th.run(args[2:])
  if failed:
    sys.exit(1)


# Run main if not imported as a module
if __name__ == '__main__':
  main()