1#
2#  main.py: a shared, automated test suite for Subversion
3#
4#  Subversion is a tool for revision control.
5#  See http://subversion.tigris.org for more information.
6#
7# ====================================================================
8#    Licensed to the Apache Software Foundation (ASF) under one
9#    or more contributor license agreements.  See the NOTICE file
10#    distributed with this work for additional information
11#    regarding copyright ownership.  The ASF licenses this file
12#    to you under the Apache License, Version 2.0 (the
13#    "License"); you may not use this file except in compliance
14#    with the License.  You may obtain a copy of the License at
15#
16#      http://www.apache.org/licenses/LICENSE-2.0
17#
18#    Unless required by applicable law or agreed to in writing,
19#    software distributed under the License is distributed on an
20#    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21#    KIND, either express or implied.  See the License for the
22#    specific language governing permissions and limitations
23#    under the License.
24######################################################################
25
26import sys
27import os
28import shutil
29import re
30import stat
31import subprocess
32import time
33import threading
34import optparse
35import xml
36import urllib
37import logging
38import hashlib
39import zipfile
40import codecs
41
42try:
43  # Python >=3.0
44  import queue
45  from urllib.parse import quote as urllib_parse_quote
46  from urllib.parse import unquote as urllib_parse_unquote
47  from urllib.parse import urlparse
48except ImportError:
49  # Python <3.0
50  import Queue as queue
51  from urllib import quote as urllib_parse_quote
52  from urllib import unquote as urllib_parse_unquote
53  from urlparse import urlparse
54
55import svntest
56from svntest import Failure
57from svntest import Skip
58from svntest.wc import StateItem as Item
59
60SVN_VER_MINOR = 14
61
62######################################################################
63#
64#  HOW TO USE THIS MODULE:
65#
66#  Write a new python script that
67#
68#     1) imports this 'svntest' package
69#
70#     2) contains a number of related 'test' routines.  (Each test
71#        routine should take no arguments, and return None on success
72#        or throw a Failure exception on failure.  Each test should
73#        also contain a short docstring.)
74#
75#     3) places all the tests into a list that begins with None.
76#
77#     4) calls svntest.main.client_test() on the list.
78#
79#  Also, your tests will probably want to use some of the common
80#  routines in the 'Utilities' section below.
81#
82#####################################################################
83# Global stuff
84
85default_num_threads = 5
86
87# Don't try to use this before calling execute_tests()
88logger = None
89
90
91class SVNProcessTerminatedBySignal(Failure):
92  "Exception raised if a spawned process segfaulted, aborted, etc."
93  pass
94
95class SVNLineUnequal(Failure):
96  "Exception raised if two lines are unequal"
97  pass
98
99class SVNUnmatchedError(Failure):
100  "Exception raised if an expected error is not found"
101  pass
102
103class SVNCommitFailure(Failure):
104  "Exception raised if a commit failed"
105  pass
106
107class SVNRepositoryCopyFailure(Failure):
108  "Exception raised if unable to copy a repository"
109  pass
110
111class SVNRepositoryCreateFailure(Failure):
112  "Exception raised if unable to create a repository"
113  pass
114
115# Windows specifics
116if sys.platform == 'win32':
117  windows = True
118  file_scheme_prefix = 'file:///'
119  _exe = '.exe'
120  _bat = '.bat'
121  os.environ['SVN_DBG_STACKTRACES_TO_STDERR'] = 'y'
122else:
123  windows = False
124  file_scheme_prefix = 'file://'
125  _exe = ''
126  _bat = ''
127
128# The location of our mock svneditor script.
129if windows:
130  svneditor_script = os.path.join(sys.path[0], 'svneditor.bat')
131else:
132  svneditor_script = os.path.join(sys.path[0], 'svneditor.py')
133
134# Username and password used by the working copies
135wc_author = 'jrandom'
136wc_passwd = 'rayjandom'
137
138# Username and password used by svnrdump in dump/load cross-checks
139crosscheck_username = '__dumpster__'
140crosscheck_password = '__loadster__'
141
142# Username and password used by the working copies for "second user"
143# scenarios
144wc_author2 = 'jconstant' # use the same password as wc_author
145
146stack_trace_regexp = r'(?:.*subversion[\\//].*\.c:[0-9]*,$|.*apr_err=.*)'
147
148# Set C locale for command line programs
149os.environ['LC_ALL'] = 'C'
150
151######################################################################
152# Permission constants used with e.g. chmod() and open().
153# Define them here at a central location, so people aren't tempted to
154# use octal literals which are not portable between Python 2 and 3.
155
156S_ALL_READ  = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
157S_ALL_WRITE = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
158S_ALL_EXEC  = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
159
160S_ALL_RW  = S_ALL_READ | S_ALL_WRITE
161S_ALL_RX  = S_ALL_READ | S_ALL_EXEC
162S_ALL_RWX = S_ALL_READ | S_ALL_WRITE | S_ALL_EXEC
163
164######################################################################
165# The locations of the svn binaries.
166# Use --bin to override these defaults.
167def P(relpath,
168      head=os.path.dirname(os.path.dirname(os.path.abspath('.')))
169      ):
170  if sys.platform=='win32':
171    return os.path.join(head, relpath + '.exe')
172  else:
173    return os.path.join(head, relpath)
174svn_binary = P('svn/svn')
175svnadmin_binary = P('svnadmin/svnadmin')
176svnlook_binary = P('svnlook/svnlook')
177svnrdump_binary = P('svnrdump/svnrdump')
178svnsync_binary = P('svnsync/svnsync')
179svnversion_binary = P('svnversion/svnversion')
180svndumpfilter_binary = P('svndumpfilter/svndumpfilter')
181svnmucc_binary = P('svnmucc/svnmucc')
182svnfsfs_binary = P('svnfsfs/svnfsfs')
183entriesdump_binary = P('tests/cmdline/entries-dump')
184lock_helper_binary = P('tests/cmdline/lock-helper')
185atomic_ra_revprop_change_binary = P('tests/cmdline/atomic-ra-revprop-change')
186wc_lock_tester_binary = P('tests/libsvn_wc/wc-lock-tester')
187wc_incomplete_tester_binary = P('tests/libsvn_wc/wc-incomplete-tester')
188del P
189
190######################################################################
191# The location of svnauthz binary, relative to the only scripts that
192# import this file right now (they live in ../).
193# Use --tools to overide these defaults.
194svnauthz_binary = os.path.abspath('../../../tools/server-side/svnauthz' + _exe)
195svnauthz_validate_binary = os.path.abspath(
196    '../../../tools/server-side/svnauthz-validate' + _exe
197)
198svnmover_binary = os.path.abspath('../../../tools/dev/svnmover/svnmover' + _exe)
199
200# Location to the pristine repository, will be calculated from test_area_url
201# when we know what the user specified for --url.
202pristine_greek_repos_url = None
203pristine_trojan_repos_url = None
204
205# Global variable to track all of our options
206options = None
207
208# End of command-line-set global variables.
209######################################################################
210
211# All temporary repositories and working copies are created underneath
212# this dir, so there's one point at which to mount, e.g., a ramdisk.
213work_dir = "svn-test-work"
214
215# Constant for the merge info property.
216SVN_PROP_MERGEINFO = "svn:mergeinfo"
217
218# Constant for the inheritable auto-props property.
219SVN_PROP_INHERITABLE_AUTOPROPS = "svn:auto-props"
220
221# Constant for the inheritable ignores property.
222SVN_PROP_INHERITABLE_IGNORES = "svn:global-ignores"
223
224# Where we want all the repositories and working copies to live.
225# Each test will have its own!
226general_repo_dir = os.path.join(work_dir, "repositories")
227general_wc_dir = os.path.join(work_dir, "working_copies")
228
229# Directories used for DAV tests
230other_dav_root_dir = os.path.join(work_dir, "fsdavroot")
231non_dav_root_dir = os.path.join(work_dir, "nodavroot")
232
233# temp directory in which we will create our 'pristine' local
234# repository and other scratch data.  This should be removed when we
235# quit and when we startup.
236temp_dir = os.path.join(work_dir, 'local_tmp')
237
238# (derivatives of the tmp dir.)
239pristine_greek_repos_dir = os.path.join(temp_dir, "repos")
240pristine_trojan_repos_dir = os.path.join(temp_dir, "trojan")
241greek_dump_dir = os.path.join(temp_dir, "greekfiles")
242trojan_dump_dir = os.path.join(temp_dir, "trojanfiles")
243default_config_dir = os.path.abspath(os.path.join(temp_dir, "config"))
244
245#
246# Our pristine greek-tree state.
247#
248# If a test wishes to create an "expected" working-copy tree, it should
249# call main.greek_state.copy().  That method will return a copy of this
250# State object which can then be edited.
251#
252greek_state = svntest.wc.State('', {
253  'iota'        : Item("This is the file 'iota'.\n"),
254  'A'           : Item(),
255  'A/mu'        : Item("This is the file 'mu'.\n"),
256  'A/B'         : Item(),
257  'A/B/lambda'  : Item("This is the file 'lambda'.\n"),
258  'A/B/E'       : Item(),
259  'A/B/E/alpha' : Item("This is the file 'alpha'.\n"),
260  'A/B/E/beta'  : Item("This is the file 'beta'.\n"),
261  'A/B/F'       : Item(),
262  'A/C'         : Item(),
263  'A/D'         : Item(),
264  'A/D/gamma'   : Item("This is the file 'gamma'.\n"),
265  'A/D/G'       : Item(),
266  'A/D/G/pi'    : Item("This is the file 'pi'.\n"),
267  'A/D/G/rho'   : Item("This is the file 'rho'.\n"),
268  'A/D/G/tau'   : Item("This is the file 'tau'.\n"),
269  'A/D/H'       : Item(),
270  'A/D/H/chi'   : Item("This is the file 'chi'.\n"),
271  'A/D/H/psi'   : Item("This is the file 'psi'.\n"),
272  'A/D/H/omega' : Item("This is the file 'omega'.\n"),
273  })
274
275# Likewise our pristine trojan-tree state (for peg revision parsing tests)
276# NOTE: We don't use precooked trojan repositories.
277trojan_state = svntest.wc.State('', {
278  'iota'        : Item("This is the file 'iota'.\n"),
279  '@zeta'       : Item("This is the file 'zeta'.\n"),
280  '_@theta'     : Item("This is the file 'theta'.\n"),
281  '.@kappa'     : Item("This is the file 'kappa'.\n"),
282  'lambda@'     : Item("This is the file 'lambda'.\n"),
283  '@omicron@'   : Item("This is the file 'omicron'.\n"),
284  '@'           : Item(),
285  '@@'          : Item(),
286  '_@'          : Item(),
287  '.@'          : Item(),
288  'A'           : Item(),
289  'A/@@'        : Item("This is the file 'A/@@'.\n"),
290  'A/alpha'     : Item("This is the file 'alpha'.\n"),
291  'A/@omega@'   : Item("This is the file 'omega'.\n"),
292  'B'           : Item(),
293  'B/@'         : Item("This is the file 'B/@'.\n"),
294  'B/@beta'     : Item("This is the file 'beta'.\n"),
295  'B/pi@'       : Item("This is the file 'pi'.\n"),
296  'G'           : Item(),
297  'G/_@'        : Item("This is the file 'G/_@'.\n"),
298  'G/_@gamma'   : Item("This is the file 'gamma'.\n"),
299  'D'           : Item(),
300  'D/.@'        : Item("This is the file 'D/.@'.\n"),
301  'D/.@delta'   : Item("This is the file 'delta'.\n"),
302  'E'           : Item(),
303  })
304
305
306######################################################################
307# Utilities shared by the tests
308def wrap_ex(func, output):
309  "Wrap a function, catch, print and ignore exceptions"
310  def w(*args, **kwds):
311    try:
312      return func(*args, **kwds)
313    except Failure as ex:
314      if ex.__class__ != Failure or ex.args:
315        ex_args = str(ex)
316        if ex_args:
317          logger.warn('EXCEPTION: %s: %s', ex.__class__.__name__, ex_args)
318        else:
319          logger.warn('EXCEPTION: %s', ex.__class__.__name__)
320  return w
321
322def setup_development_mode():
323  "Wraps functions in module actions"
324  l = [ 'run_and_verify_svn',
325        'run_and_verify_svnversion',
326        'run_and_verify_load',
327        'run_and_verify_dump',
328        'run_and_verify_checkout',
329        'run_and_verify_export',
330        'run_and_verify_update',
331        'run_and_verify_merge',
332        'run_and_verify_switch',
333        'run_and_verify_commit',
334        'run_and_verify_unquiet_status',
335        'run_and_verify_status',
336        'run_and_verify_diff_summarize',
337        'run_and_verify_diff_summarize_xml',
338        'run_and_validate_lock']
339
340  for func in l:
341    setattr(svntest.actions, func, wrap_ex(getattr(svntest.actions, func)))
342
343def get_admin_name():
344  "Return name of SVN administrative subdirectory."
345
346  if (windows or sys.platform == 'cygwin') \
347      and 'SVN_ASP_DOT_NET_HACK' in os.environ:
348    return '_svn'
349  else:
350    return '.svn'
351
352def wc_is_singledb(wcpath):
353  """Temporary function that checks whether a working copy directory looks
354  like it is part of a single-db working copy."""
355
356  pristine = os.path.join(wcpath, get_admin_name(), 'pristine')
357  if not os.path.exists(pristine):
358    return True
359
360  # Now we must be looking at a multi-db WC dir or the root dir of a
361  # single-DB WC.  Sharded 'pristine' dir => single-db, else => multi-db.
362  for name in os.listdir(pristine):
363    if len(name) == 2:
364      return True
365    elif len(name) == 40:
366      return False
367
368  return False
369
370def get_start_commit_hook_path(repo_dir):
371  "Return the path of the start-commit-hook conf file in REPO_DIR."
372
373  return os.path.join(repo_dir, "hooks", "start-commit")
374
375def get_pre_commit_hook_path(repo_dir):
376  "Return the path of the pre-commit-hook conf file in REPO_DIR."
377
378  return os.path.join(repo_dir, "hooks", "pre-commit")
379
380def get_post_commit_hook_path(repo_dir):
381  "Return the path of the post-commit-hook conf file in REPO_DIR."
382
383  return os.path.join(repo_dir, "hooks", "post-commit")
384
385def get_pre_revprop_change_hook_path(repo_dir):
386  "Return the path of the pre-revprop-change hook script in REPO_DIR."
387
388  return os.path.join(repo_dir, "hooks", "pre-revprop-change")
389
390def get_pre_lock_hook_path(repo_dir):
391  "Return the path of the pre-lock hook script in REPO_DIR."
392
393  return os.path.join(repo_dir, "hooks", "pre-lock")
394
395def get_pre_unlock_hook_path(repo_dir):
396  "Return the path of the pre-unlock hook script in REPO_DIR."
397
398  return os.path.join(repo_dir, "hooks", "pre-unlock")
399
400def get_svnserve_conf_file_path(repo_dir):
401  "Return the path of the svnserve.conf file in REPO_DIR."
402
403  return os.path.join(repo_dir, "conf", "svnserve.conf")
404
405def get_fsfs_conf_file_path(repo_dir):
406  "Return the path of the fsfs.conf file in REPO_DIR."
407
408  return os.path.join(repo_dir, "db", "fsfs.conf")
409
410def get_fsfs_format_file_path(repo_dir):
411  "Return the path of the format file in REPO_DIR."
412
413  return os.path.join(repo_dir, "db", "format")
414
415def ensure_list(item):
416  "If ITEM is not already a list, convert it to a list."
417  if isinstance(item, list):
418    return item
419  elif isinstance(item, bytes) or isinstance(item, str):
420    return [ item ]
421  else:
422    return list(item)
423
424def filter_dbg(lines, binary = False):
425  if binary:
426    excluded = filter(lambda line: line.startswith(b'DBG:'), lines)
427    excluded = map(bytes.decode, excluded)
428    included = filter(lambda line: not line.startswith(b'DBG:'), lines)
429  else:
430    excluded = filter(lambda line: line.startswith('DBG:'), lines)
431    included = filter(lambda line: not line.startswith('DBG:'), lines)
432
433  sys.stdout.write(''.join(excluded))
434  return ensure_list(included)
435
436# Run any binary, logging the command line and return code
437def run_command(command, error_expected, binary_mode=False, *varargs):
438  """Run COMMAND with VARARGS. Return exit code as int; stdout, stderr
439  as lists of lines (including line terminators).  See run_command_stdin()
440  for details.  If ERROR_EXPECTED is None, any stderr output will be
441  printed and any stderr output or a non-zero exit code will raise an
442  exception."""
443
444  return run_command_stdin(command, error_expected, 0, binary_mode,
445                           None, *varargs)
446
447# Frequently used constants:
448# If any of these relative path strings show up in a server response,
449# then we can assume that the on-disk repository path was leaked to the
450# client.  Having these here as constants means we don't need to construct
451# them over and over again.
452_repos_diskpath1 = os.path.join('cmdline', 'svn-test-work', 'repositories')
453_repos_diskpath2 = os.path.join('cmdline', 'svn-test-work', 'local_tmp',
454                                'repos')
455_repos_diskpath1_bytes = _repos_diskpath1.encode()
456_repos_diskpath2_bytes = _repos_diskpath2.encode()
457
458# A regular expression that matches arguments that are trivially safe
459# to pass on a command line without quoting on any supported operating
460# system:
461_safe_arg_re = re.compile(r'^[A-Za-z\d\.\_\/\-\:\@]+$')
462
463def _quote_arg(arg):
464  """Quote ARG for a command line.
465
466  Return a quoted version of the string ARG, or just ARG if it contains
467  only universally harmless characters.
468
469  WARNING: This function cannot handle arbitrary command-line
470  arguments: it is just good enough for what we need here."""
471
472  arg = str(arg)
473  if _safe_arg_re.match(arg):
474    return arg
475
476  if windows:
477    # Note: subprocess.list2cmdline is Windows-specific.
478    return subprocess.list2cmdline([arg])
479  else:
480    # Quoting suitable for most Unix shells.
481    return "'" + arg.replace("'", "'\\''") + "'"
482
483def open_pipe(command, bufsize=-1, stdin=None, stdout=None, stderr=None):
484  """Opens a subprocess.Popen pipe to COMMAND using STDIN,
485  STDOUT, and STDERR.  BUFSIZE is passed to subprocess.Popen's
486  argument of the same name.
487
488  Returns (infile, outfile, errfile, waiter); waiter
489  should be passed to wait_on_pipe."""
490  command = [str(x) for x in command]
491
492  # Always run python scripts under the same Python executable as used
493  # for the test suite.
494  if command[0].endswith('.py'):
495    command.insert(0, sys.executable)
496
497  command_string = command[0] + ' ' + ' '.join(map(_quote_arg, command[1:]))
498
499  if not stdin:
500    stdin = subprocess.PIPE
501  if not stdout:
502    stdout = subprocess.PIPE
503  if not stderr:
504    stderr = subprocess.PIPE
505
506  p = subprocess.Popen(command,
507                       bufsize,
508                       stdin=stdin,
509                       stdout=stdout,
510                       stderr=stderr,
511                       close_fds=not windows)
512  return p.stdin, p.stdout, p.stderr, (p, command_string)
513
514def wait_on_pipe(waiter, binary_mode, stdin=None):
515  """WAITER is (KID, COMMAND_STRING).  Wait for KID (opened with open_pipe)
516  to finish, dying if it does.  If KID fails, create an error message
517  containing any stdout and stderr from the kid.  Show COMMAND_STRING in
518  diagnostic messages.  Normalize Windows line endings of stdout and stderr
519  if not BINARY_MODE.  Return KID's exit code as int; stdout, stderr as
520  lists of lines (including line terminators)."""
521  if waiter is None:
522    return
523
524  kid, command_string = waiter
525  stdout, stderr = kid.communicate(stdin)
526  exit_code = kid.returncode
527
528  # We always expect STDERR to be strings, not byte-arrays.
529  if not isinstance(stderr, str):
530    stderr = stderr.decode("utf-8", 'surrogateescape')
531  if not binary_mode:
532    if not isinstance(stdout, str):
533      stdout = stdout.decode("utf-8", 'surrogateescape')
534
535    # Normalize Windows line endings if in text mode.
536    if windows:
537      stdout = stdout.replace('\r\n', '\n')
538      stderr = stderr.replace('\r\n', '\n')
539
540  # Convert output strings to lists.
541  stdout_lines = stdout.splitlines(True)
542  stderr_lines = stderr.splitlines(True)
543
544  if exit_code < 0:
545    if not windows:
546      exit_signal = os.WTERMSIG(-exit_code)
547    else:
548      exit_signal = exit_code
549
550    if stdout_lines is not None:
551      logger.info("".join(stdout_lines))
552    if stderr_lines is not None:
553      logger.warning("".join(stderr_lines))
554    # show the whole path to make it easier to start a debugger
555    logger.warning("CMD: %s terminated by signal %d"
556                     % (command_string, exit_signal))
557    raise SVNProcessTerminatedBySignal
558  else:
559    if exit_code:
560      logger.info("CMD: %s exited with %d" % (command_string, exit_code))
561    return stdout_lines, stderr_lines, exit_code
562
563def spawn_process(command, bufsize=-1, binary_mode=False, stdin_lines=None,
564                  *varargs):
565  """Run any binary, supplying input text, logging the command line.
566
567  BUFSIZE dictates the pipe buffer size used in communication with the
568  subprocess: quoting from subprocess.Popen(), "0 means unbuffered,
569  1 means line buffered, any other positive value means use a buffer of
570  (approximately) that size. A negative bufsize means to use the system
571  default, which usually means fully buffered."
572
573  Normalize Windows line endings of stdout and stderr if not BINARY_MODE.
574  Return exit code as int; stdout, stderr as lists of lines (including
575  line terminators).
576  """
577  if stdin_lines and not isinstance(stdin_lines, list):
578    raise TypeError("stdin_lines should have list type")
579
580  # Log the command line
581  if not command.endswith('.py'):
582    logger.info('CMD: %s %s' % (os.path.basename(command),
583                                  ' '.join([_quote_arg(x) for x in varargs])))
584
585  infile, outfile, errfile, kid = open_pipe([command] + list(varargs), bufsize)
586
587  if stdin_lines:
588    for x in stdin_lines:
589      infile.write(x)
590
591  stdout_lines, stderr_lines, exit_code = wait_on_pipe(kid, binary_mode)
592  infile.close()
593
594  outfile.close()
595  errfile.close()
596
597  return exit_code, stdout_lines, stderr_lines
598
599def run_command_stdin(command, error_expected, bufsize=-1, binary_mode=False,
600                      stdin_lines=None, *varargs):
601  """Run COMMAND with VARARGS; input STDIN_LINES (a list of strings
602  which should include newline characters) to program via stdin - this
603  should not be very large, as if the program outputs more than the OS
604  is willing to buffer, this will deadlock, with both Python and
605  COMMAND waiting to write to each other for ever.  For tests where this
606  is a problem, setting BUFSIZE to a sufficiently large value will prevent
607  the deadlock, see spawn_process().
608  Normalize Windows line endings of stdout and stderr if not BINARY_MODE.
609  Return exit code as int; stdout, stderr as lists of lines (including
610  line terminators).
611  If ERROR_EXPECTED is None, any stderr output will be printed and any
612  stderr output or a non-zero exit code will raise an exception."""
613
614  start = time.time()
615
616  exit_code, stdout_lines, stderr_lines = spawn_process(command,
617                                                        bufsize,
618                                                        binary_mode,
619                                                        stdin_lines,
620                                                        *varargs)
621
622  def _line_contains_repos_diskpath(line):
623    # ### Note: this assumes that either svn-test-work isn't a symlink,
624    # ### or the diskpath isn't realpath()'d somewhere on the way from
625    # ### the server's configuration and the client's stderr.  We could
626    # ### check for both the symlinked path and the realpath.
627    if isinstance(line, str):
628      return _repos_diskpath1 in line or _repos_diskpath2 in line
629    else:
630      return _repos_diskpath1_bytes in line or _repos_diskpath2_bytes in line
631
632  for lines, name in [[stdout_lines, "stdout"], [stderr_lines, "stderr"]]:
633    if is_ra_type_file() or 'svnadmin' in command or 'svnlook' in command:
634      break
635    # Does the server leak the repository on-disk path?
636    # (prop_tests-12 installs a hook script that does that intentionally)
637    if any(map(_line_contains_repos_diskpath, lines)) \
638       and not any(map(lambda arg: 'prop_tests-12' in arg, varargs)):
639      raise Failure("Repository diskpath in %s: %r" % (name, lines))
640
641  valgrind_diagnostic = False
642  # A valgrind diagnostic will raise a failure if the command is
643  # expected to run without error.  When an error is expected any
644  # subsequent error pattern matching is usually lenient and will not
645  # detect the diagnostic so make sure a failure is raised here.
646  if error_expected and stderr_lines:
647    if any(map(lambda arg: re.match('==[0-9]+==', arg), stderr_lines)):
648      valgrind_diagnostic = True
649
650  stop = time.time()
651  logger.info('<TIME = %.6f>' % (stop - start))
652  for x in stdout_lines:
653    logger.info(x.rstrip())
654  for x in stderr_lines:
655    logger.info(x.rstrip())
656
657  if (((not error_expected) and ((stderr_lines) or (exit_code != 0)))
658      or valgrind_diagnostic):
659    for x in stderr_lines:
660      logger.warning(x.rstrip())
661    if len(varargs) <= 5:
662      brief_command = ' '.join((command,) + varargs)
663    else:
664      brief_command = ' '.join(((command,) + varargs)[:4]) + ' ...'
665    raise Failure('Command failed: "' + brief_command +
666                  '"; exit code ' + str(exit_code))
667
668  return exit_code, \
669         filter_dbg(stdout_lines, binary_mode), \
670         stderr_lines
671
672def create_config_dir(cfgdir, config_contents=None, server_contents=None,
673                      ssl_cert=None, ssl_url=None, http_proxy=None,
674                      exclusive_wc_locks=None):
675  "Create config directories and files"
676
677  # config file names
678  cfgfile_cfg = os.path.join(cfgdir, 'config')
679  cfgfile_srv = os.path.join(cfgdir, 'servers')
680
681  # create the directory
682  if not os.path.isdir(cfgdir):
683    os.makedirs(cfgdir)
684
685  # define default config file contents if none provided
686  if config_contents is None:
687    config_contents = """
688#
689[auth]
690password-stores =
691
692[miscellany]
693interactive-conflicts = false
694"""
695    if exclusive_wc_locks:
696      config_contents += """
697[working-copy]
698exclusive-locking = true
699"""
700  # define default server file contents if none provided
701  if server_contents is None:
702    http_library_str = ""
703    if options.http_library:
704      http_library_str = "http-library=%s" % (options.http_library)
705    http_proxy_str = ""
706    http_proxy_username_str = ""
707    http_proxy_password_str = ""
708    if options.http_proxy:
709      http_proxy_parsed = urlparse("//" + options.http_proxy)
710      http_proxy_str = "http-proxy-host=%s\n" % (http_proxy_parsed.hostname) + \
711                       "http-proxy-port=%d" % (http_proxy_parsed.port or 80)
712    if options.http_proxy_username:
713      http_proxy_username_str = "http-proxy-username=%s" % \
714                                     (options.http_proxy_username)
715    if options.http_proxy_password:
716      http_proxy_password_str = "http-proxy-password=%s" % \
717                                     (options.http_proxy_password)
718
719    server_contents = """
720#
721[global]
722%s
723%s
724%s
725%s
726store-plaintext-passwords=yes
727store-passwords=yes
728""" % (http_library_str, http_proxy_str, http_proxy_username_str,
729       http_proxy_password_str)
730
731  file_write(cfgfile_cfg, config_contents)
732  file_write(cfgfile_srv, server_contents)
733
734  if (ssl_cert and ssl_url):
735    trust_ssl_cert(cfgdir, ssl_cert, ssl_url)
736  elif cfgdir != default_config_dir:
737    copy_trust(cfgdir, default_config_dir)
738
739
740def trust_ssl_cert(cfgdir, ssl_cert, ssl_url):
741  """Setup config dir to trust the given ssl_cert for the given ssl_url
742  """
743
744  cert_rep = ''
745  fp = open(ssl_cert, 'r')
746  for line in fp.readlines()[1:-1]:
747    cert_rep = cert_rep + line.strip()
748
749  parsed_url = urlparse(ssl_url)
750  netloc_url = '%s://%s' % (parsed_url.scheme, parsed_url.netloc)
751  ssl_dir = os.path.join(cfgdir, 'auth', 'svn.ssl.server')
752  if not os.path.isdir(ssl_dir):
753    os.makedirs(ssl_dir)
754  md5_name = hashlib.md5(netloc_url).hexdigest()
755  md5_file = os.path.join(ssl_dir, md5_name)
756  md5_file_contents = """K 10
757ascii_cert
758V %d
759%s
760K 8
761failures
762V 1
7638
764K 15
765svn:realmstring
766V %d
767%s
768END
769""" % (len(cert_rep), cert_rep, len(netloc_url), netloc_url)
770  file_write(md5_file, md5_file_contents, mode='wb')
771
772def copy_trust(dst_cfgdir, src_cfgdir):
773  """Copy svn.ssl.server files from one config dir to another.
774  """
775
776  src_ssl_dir = os.path.join(src_cfgdir, 'auth', 'svn.ssl.server')
777  dst_ssl_dir = os.path.join(dst_cfgdir, 'auth', 'svn.ssl.server')
778  if not os.path.isdir(dst_ssl_dir):
779    os.makedirs(dst_ssl_dir)
780  for f in os.listdir(src_ssl_dir):
781    shutil.copy(os.path.join(src_ssl_dir, f), os.path.join(dst_ssl_dir, f))
782
783def _with_config_dir(args):
784  if '--config-dir' in args:
785    return args
786  else:
787    return args + ('--config-dir', default_config_dir)
788
789class svnrdump_crosscheck_authentication:
790  pass
791
792def _with_auth(args):
793  assert '--password' not in args
794  if svnrdump_crosscheck_authentication in args:
795    args = filter(lambda x: x is not svnrdump_crosscheck_authentication, args)
796    auth_username = crosscheck_username
797    auth_password = crosscheck_password
798  else:
799    auth_username = wc_author
800    auth_password = wc_passwd
801
802  args = args + ('--password', auth_password,
803                 '--no-auth-cache' )
804  if '--username' in args:
805    return args
806  else:
807    return args + ('--username', auth_username )
808
809# For running subversion and returning the output
810def run_svn(error_expected, *varargs):
811  """Run svn with VARARGS; return exit code as int; stdout, stderr as
812  lists of lines (including line terminators).  If ERROR_EXPECTED is
813  None, any stderr output will be printed and any stderr output or a
814  non-zero exit code will raise an exception.  If
815  you're just checking that something does/doesn't come out of
816  stdout/stderr, you might want to use actions.run_and_verify_svn()."""
817  return run_command(svn_binary, error_expected, False,
818                     *(_with_auth(_with_config_dir(varargs))))
819
820# For running svnadmin.  Ignores the output.
821def run_svnadmin(*varargs):
822  """Run svnadmin with VARARGS, returns exit code as int; stdout, stderr as
823  list of lines (including line terminators)."""
824
825  use_binary = ('dump' in varargs) or ('dump-revprops' in varargs)
826
827  exit_code, stdout_lines, stderr_lines = \
828                       run_command(svnadmin_binary, 1, use_binary, *varargs)
829
830  if use_binary and sys.platform == 'win32':
831    # Callers don't expect binary output on stderr
832    stderr_lines = [x.replace('\r', '') for x in stderr_lines]
833
834  return exit_code, stdout_lines, stderr_lines
835
836# For running svnlook.  Ignores the output.
837def run_svnlook(*varargs):
838  """Run svnlook with VARARGS, returns exit code as int; stdout, stderr as
839  list of lines (including line terminators)."""
840  return run_command(svnlook_binary, 1, False, *varargs)
841
842def run_svnrdump(stdin_input, *varargs):
843  """Run svnrdump with VARARGS, returns exit code as int; stdout, stderr as
844  list of lines (including line terminators).  Use binary mode for output."""
845  if stdin_input:
846    return run_command_stdin(svnrdump_binary, 1, 1, True, stdin_input,
847                             *(_with_auth(_with_config_dir(varargs))))
848  else:
849    return run_command(svnrdump_binary, 1, True,
850                       *(_with_auth(_with_config_dir(varargs))))
851
852def run_svnsync(*varargs):
853  """Run svnsync with VARARGS, returns exit code as int; stdout, stderr as
854  list of lines (including line terminators)."""
855  return run_command(svnsync_binary, 1, False,
856                     *(_with_auth(_with_config_dir(varargs))))
857
858def run_svnversion(*varargs):
859  """Run svnversion with VARARGS, returns exit code as int; stdout, stderr
860  as list of lines (including line terminators)."""
861  return run_command(svnversion_binary, 1, False, *varargs)
862
863def run_svnmover(*varargs):
864  """Run svnmover with VARARGS, returns exit code as int; stdout, stderr as
865  list of lines (including line terminators)."""
866  return run_command(svnmover_binary, 1, False,
867                     *(_with_auth(_with_config_dir(varargs))))
868
869def run_svnmucc(*varargs):
870  """Run svnmucc with VARARGS, returns exit code as int; stdout, stderr as
871  list of lines (including line terminators).  Use binary mode for output."""
872  return run_command(svnmucc_binary, 1, True,
873                     *(_with_auth(_with_config_dir(varargs))))
874
875def run_svnauthz(*varargs):
876  """Run svnauthz with VARARGS, returns exit code as int; stdout, stderr
877  as list of lines (including line terminators)."""
878  return run_command(svnauthz_binary, 1, False, *varargs)
879
880def run_svnauthz_validate(*varargs):
881  """Run svnauthz-validate with VARARGS, returns exit code as int; stdout,
882  stderr as list of lines (including line terminators)."""
883  return run_command(svnauthz_validate_binary, 1, False, *varargs)
884
885def run_svnfsfs(*varargs):
886  """Run svnfsfs with VARARGS, returns exit code as int; stdout, stderr
887  as list of lines (including line terminators)."""
888  return run_command(svnfsfs_binary, 1, False, *varargs)
889
890def run_lock_helper(repo, path, user, seconds):
891  """Run lock-helper to lock path in repo by username for seconds"""
892
893  return run_command(lock_helper_binary, 1, False, repo, path, user, seconds)
894
895def run_entriesdump(path):
896  """Run the entries-dump helper, returning a dict of Entry objects."""
897  # use spawn_process rather than run_command to avoid copying all the data
898  # to stdout in verbose mode.
899  exit_code, stdout_lines, stderr_lines = spawn_process(entriesdump_binary,
900                                                        0, False, None, path)
901  if exit_code or stderr_lines:
902    ### report on this? or continue to just skip it?
903    return None
904
905  entries = { }
906  exec(''.join(filter_dbg(stdout_lines)))
907  return entries
908
909def run_entriesdump_subdirs(path):
910  """Run the entries-dump helper, returning a list of directory names."""
911  # use spawn_process rather than run_command to avoid copying all the data
912  # to stdout in verbose mode.
913  exit_code, stdout_lines, stderr_lines = spawn_process(entriesdump_binary,
914                                                        0, False, None, '--subdirs', path)
915  return map(lambda line: line.strip(), filter_dbg(stdout_lines))
916
917def run_entriesdump_tree(path):
918  """Run the entries-dump helper, returning a dict of a dict of Entry objects."""
919  # use spawn_process rather than run_command to avoid copying all the data
920  # to stdout in verbose mode.
921  exit_code, stdout_lines, stderr_lines = spawn_process(entriesdump_binary,
922                                                        0, False, None,
923                                                        '--tree-dump', path)
924  if exit_code or stderr_lines:
925    ### report on this? or continue to just skip it?
926    return None
927
928  dirs = { }
929  exec(''.join(filter_dbg(stdout_lines)))
930  return dirs
931
932def run_atomic_ra_revprop_change(url, revision, propname, skel, want_error):
933  """Run the atomic-ra-revprop-change helper, returning its exit code, stdout,
934  and stderr.  For HTTP, default HTTP library is used."""
935  # use spawn_process rather than run_command to avoid copying all the data
936  # to stdout in verbose mode.
937  #exit_code, stdout_lines, stderr_lines = spawn_process(entriesdump_binary,
938  #                                                      0, False, None, path)
939
940  # This passes HTTP_LIBRARY in addition to our params.
941  return run_command(atomic_ra_revprop_change_binary, True, False,
942                     url, revision, propname, skel,
943                     want_error and 1 or 0, default_config_dir)
944
945def run_wc_lock_tester(recursive, path, work_queue=False):
946  "Run the wc-lock obtainer tool, returning its exit code, stdout and stderr"
947  if work_queue:
948    option = "-w"
949  elif recursive:
950    option = "-r"
951  else:
952    option = "-1"
953  return run_command(wc_lock_tester_binary, False, False, option, path)
954
955def run_wc_incomplete_tester(wc_dir, revision):
956  "Run the wc-incomplete tool, returning its exit code, stdout and stderr"
957  return run_command(wc_incomplete_tester_binary, False, False,
958                     wc_dir, revision)
959
960def youngest(repos_path):
961  "run 'svnlook youngest' on REPOS_PATH, returns revision as int"
962  exit_code, stdout_lines, stderr_lines = run_command(svnlook_binary, None, False,
963                                                      'youngest', repos_path)
964  if exit_code or stderr_lines:
965    raise Failure("Unexpected failure of 'svnlook youngest':\n%s" % stderr_lines)
966  if len(stdout_lines) != 1:
967    raise Failure("Wrong output from 'svnlook youngest':\n%s" % stdout_lines)
968  return int(stdout_lines[0].rstrip())
969
970# Chmod recursively on a whole subtree
971def chmod_tree(path, mode, mask):
972  """For each node in the OS filesystem tree PATH, subtract MASK from its
973  permissions and add MODE to them."""
974  for dirpath, dirs, files in os.walk(path):
975    for name in dirs + files:
976      fullname = os.path.join(dirpath, name)
977      if not os.path.islink(fullname):
978        new_mode = (os.stat(fullname)[stat.ST_MODE] & ~mask) | mode
979        os.chmod(fullname, new_mode)
980
981# For clearing away working copies
982def safe_rmtree(dirname, retry=0):
983  """Remove the tree at DIRNAME, making it writable first.
984     If DIRNAME is a symlink, only remove the symlink, not its target."""
985  def rmtree(dirname):
986    chmod_tree(dirname, S_ALL_RW, S_ALL_RW)
987    shutil.rmtree(dirname)
988
989  if os.path.islink(dirname):
990    os.unlink(dirname)
991    return
992
993  if not os.path.exists(dirname):
994    return
995
996  if retry:
997    for delay in (0.5, 1, 2, 4):
998      try:
999        rmtree(dirname)
1000        break
1001      except:
1002        time.sleep(delay)
1003    else:
1004      rmtree(dirname)
1005  else:
1006    rmtree(dirname)
1007
1008# For creating new files, and making local mods to existing files.
1009def file_write(path, contents, mode='w'):
1010  """Write the CONTENTS to the file at PATH, opening file using MODE,
1011  which is (w)rite by default."""
1012
1013  if sys.version_info < (3, 0):
1014    with open(path, mode) as f:
1015      f.write(contents)
1016  else:
1017    # Python 3:  Write data in the format required by MODE, i.e. byte arrays
1018    #            to 'b' files, utf-8 otherwise."""
1019    if 'b' in mode:
1020      if isinstance(contents, str):
1021        contents = contents.encode()
1022    else:
1023      if not isinstance(contents, str):
1024        contents = contents.decode("utf-8")
1025
1026    if isinstance(contents, str):
1027      with codecs.open(path, mode, "utf-8") as f:
1028        f.write(contents)
1029    else:
1030      with open(path, mode) as f:
1031        f.write(contents)
1032
1033# For making local mods to files
1034def file_append(path, new_text):
1035  "Append NEW_TEXT to file at PATH"
1036  file_write(path, new_text, 'a')
1037
1038# Append in binary mode
1039def file_append_binary(path, new_text):
1040  "Append NEW_TEXT to file at PATH in binary mode"
1041  file_write(path, new_text, 'ab')
1042
1043# For replacing parts of contents in an existing file, with new content.
1044def file_substitute(path, contents, new_contents):
1045  """Replace the CONTENTS in the file at PATH using the NEW_CONTENTS"""
1046  fcontent = open(path, 'r').read().replace(contents, new_contents)
1047  with open(path, 'w') as f:
1048    f.write(fcontent)
1049
1050# For setting up authz, hooks and making other tweaks to created repos
1051def _post_create_repos(path, minor_version = None):
1052  """Set default access right configurations for svnserve and mod_dav,
1053  install hooks and perform other various tweaks according to the test
1054  options in the SVN repository at PATH."""
1055
1056  # Require authentication to write to the repos, for ra_svn testing.
1057  file_write(get_svnserve_conf_file_path(path),
1058             "[general]\nauth-access = write\n");
1059  if options.enable_sasl:
1060    file_append(get_svnserve_conf_file_path(path),
1061                "realm = svntest\n[sasl]\nuse-sasl = true\n")
1062  else:
1063    file_append(get_svnserve_conf_file_path(path), "password-db = passwd\n")
1064    # This actually creates TWO [users] sections in the file (one of them is
1065    # uncommented in `svnadmin create`'s template), so we exercise the .ini
1066    # files reading code's handling of duplicates, too. :-)
1067    users = ("[users]\n"
1068             "jrandom = rayjandom\n"
1069             "jconstant = rayjandom\n")
1070    if tests_verify_dump_load_cross_check():
1071      # Insert a user for the dump/load cross-check.
1072      users += (crosscheck_username + " = " + crosscheck_password + "\n")
1073    file_append(os.path.join(path, "conf", "passwd"), users)
1074
1075  if options.fs_type is None or options.fs_type == 'fsfs' or \
1076     options.fs_type == 'fsx':
1077    # fsfs.conf file
1078    if (minor_version is None or minor_version >= 6):
1079      confpath = get_fsfs_conf_file_path(path)
1080      if options.config_file is not None:
1081        shutil.copy(options.config_file, confpath)
1082
1083      if options.memcached_server is not None or \
1084         options.fsfs_compression is not None or \
1085         options.fsfs_dir_deltification is not None and \
1086         os.path.exists(confpath):
1087        with open(confpath, 'r') as conffile:
1088          newlines = []
1089          for line in conffile.readlines():
1090            if line.startswith('# compression ') and \
1091               options.fsfs_compression is not None:
1092              line = 'compression = %s\n' % options.fsfs_compression
1093            if line.startswith('# enable-dir-deltification ') and \
1094               options.fsfs_dir_deltification is not None:
1095              line = 'enable-dir-deltification = %s\n' % \
1096                options.fsfs_dir_deltification
1097            newlines += line
1098            if options.memcached_server is not None and \
1099               line == '[memcached-servers]\n':
1100              newlines += ('key = %s\n' % options.memcached_server)
1101        with open(confpath, 'w') as conffile:
1102          conffile.writelines(newlines)
1103
1104    # format file
1105    if options.fsfs_sharding is not None:
1106      def transform_line(line):
1107        if line.startswith('layout '):
1108          if options.fsfs_sharding > 0:
1109            line = 'layout sharded %d' % options.fsfs_sharding
1110          else:
1111            line = 'layout linear'
1112        return line
1113
1114      # read it
1115      format_file_path = get_fsfs_format_file_path(path)
1116      contents = open(format_file_path, 'rb').read()
1117
1118      # tweak it
1119      new_contents = "".join([transform_line(line) + "\n"
1120                              for line in contents.split("\n")])
1121      if new_contents[-1] == "\n":
1122        # we don't currently allow empty lines (\n\n) in the format file.
1123        new_contents = new_contents[:-1]
1124
1125      # replace it
1126      os.chmod(format_file_path, S_ALL_RW)
1127      file_write(format_file_path, new_contents, 'wb')
1128
1129    # post-commit
1130    # Note that some tests (currently only commit_tests) create their own
1131    # post-commit hooks, which would override this one. :-(
1132    if options.fsfs_packing and minor_version >=6:
1133      # some tests chdir.
1134      abs_path = os.path.abspath(path)
1135      create_python_hook_script(get_post_commit_hook_path(abs_path),
1136          "import subprocess\n"
1137          "import sys\n"
1138          "command = %s\n"
1139          "sys.exit(subprocess.Popen(command).wait())\n"
1140          % repr([svnadmin_binary, 'pack', abs_path]))
1141
1142  # make the repos world-writeable, for mod_dav_svn's sake.
1143  chmod_tree(path, S_ALL_RW, S_ALL_RW)
1144
1145def _unpack_precooked_repos(path, template):
1146  testdir = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
1147  repozip = os.path.join(os.path.dirname(testdir), "templates", template)
1148  zipfile.ZipFile(repozip, 'r').extractall(path)
1149
1150# For creating new, pre-cooked greek repositories
1151def unpack_greek_repos(path):
1152  template = "greek-fsfs-v%d.zip" % options.fsfs_version
1153  _unpack_precooked_repos(path, template)
1154  _post_create_repos(path, options.server_minor_version)
1155
1156# For creating blank new repositories
1157def create_repos(path, minor_version = None):
1158  """Create a brand-new SVN repository at PATH.  If PATH does not yet
1159  exist, create it."""
1160
1161  if not os.path.exists(path):
1162    os.makedirs(path) # this creates all the intermediate dirs, if necessary
1163
1164  if options.fsfs_version is None:
1165    if options.fs_type == "bdb":
1166      opts = ("--bdb-txn-nosync",)
1167    else:
1168      opts = ()
1169    if minor_version is None or minor_version > options.server_minor_version:
1170      minor_version = options.server_minor_version
1171    opts += ("--compatible-version=1.%d" % (minor_version),)
1172    if options.fs_type is not None:
1173      opts += ("--fs-type=" + options.fs_type,)
1174    exit_code, stdout, stderr = run_command(svnadmin_binary, 1, False,
1175                                            "create", path, *opts)
1176  else:
1177    # Copy a pre-cooked FSFS repository
1178    assert options.fs_type == "fsfs"
1179    template = "empty-fsfs-v%d.zip" % options.fsfs_version
1180    _unpack_precooked_repos(path, template)
1181    exit_code, stdout, stderr = run_command(svnadmin_binary, 1, False,
1182                                            "setuuid", path)
1183
1184  # Skip tests if we can't create the repository.
1185  if stderr:
1186    stderr_lines = 0
1187    not_using_fsfs_backend = (options.fs_type != "fsfs")
1188    backend_deprecation_warning = False
1189    for line in stderr:
1190      stderr_lines += 1
1191      if line.find('Unknown FS type') != -1:
1192        raise Skip
1193      if not_using_fsfs_backend:
1194        if 0 < line.find('repository back-end is deprecated, consider using'):
1195          backend_deprecation_warning = True
1196
1197    # Creating BDB repositories will cause svnadmin to print a warning
1198    # which should be ignored.
1199    if (stderr_lines == 1
1200        and not_using_fsfs_backend
1201        and backend_deprecation_warning):
1202      pass
1203    else:
1204      # If the FS type is known and we noticed more than just the
1205      # BDB-specific warning, assume the repos couldn't be created
1206      # (e.g. due to a missing 'svnadmin' binary).
1207      raise SVNRepositoryCreateFailure("".join(stderr).rstrip())
1208
1209  # Configure the new repository.
1210  _post_create_repos(path, minor_version)
1211
1212# For copying a repository
1213def copy_repos(src_path, dst_path, head_revision, ignore_uuid = 1,
1214               minor_version = None):
1215  "Copy the repository SRC_PATH, with head revision HEAD_REVISION, to DST_PATH"
1216
1217  # Save any previous value of SVN_DBG_QUIET
1218  saved_quiet = os.environ.get('SVN_DBG_QUIET')
1219  os.environ['SVN_DBG_QUIET'] = 'y'
1220
1221  # Do an svnadmin dump|svnadmin load cycle. Print a fake pipe command so that
1222  # the displayed CMDs can be run by hand
1223  create_repos(dst_path, minor_version)
1224  dump_args = ['dump', src_path]
1225  load_args = ['load', dst_path]
1226
1227  if ignore_uuid:
1228    load_args = load_args + ['--ignore-uuid']
1229
1230  logger.info('CMD: %s %s | %s %s' %
1231                     (os.path.basename(svnadmin_binary), ' '.join(dump_args),
1232                      os.path.basename(svnadmin_binary), ' '.join(load_args)))
1233  start = time.time()
1234
1235  dump_in, dump_out, dump_err, dump_kid = open_pipe(
1236    [svnadmin_binary] + dump_args)
1237  load_in, load_out, load_err, load_kid = open_pipe(
1238    [svnadmin_binary] + load_args,
1239    stdin=dump_out) # Attached to dump_kid
1240
1241  load_stdout, load_stderr, load_exit_code = wait_on_pipe(load_kid, True)
1242  dump_stdout, dump_stderr, dump_exit_code = wait_on_pipe(dump_kid, True)
1243
1244  dump_in.close()
1245  dump_out.close()
1246  dump_err.close()
1247  #load_in is dump_out so it's already closed.
1248  load_out.close()
1249  load_err.close()
1250
1251  stop = time.time()
1252  logger.info('<TIME = %.6f>' % (stop - start))
1253
1254  if saved_quiet is None:
1255    del os.environ['SVN_DBG_QUIET']
1256  else:
1257    os.environ['SVN_DBG_QUIET'] = saved_quiet
1258
1259  dump_re = re.compile(r'^\* Dumped revision (\d+)\.\r?$')
1260  expect_revision = 0
1261  dump_failed = False
1262  for dump_line in dump_stderr:
1263    match = dump_re.match(dump_line)
1264    if not match or match.group(1) != str(expect_revision):
1265      logger.warn('ERROR:  dump failed: %s', dump_line.strip())
1266      dump_failed = True
1267    else:
1268      expect_revision += 1
1269  if dump_failed:
1270    raise SVNRepositoryCopyFailure
1271  if expect_revision != head_revision + 1:
1272    logger.warn('ERROR:  dump failed; did not see revision %s', head_revision)
1273    raise SVNRepositoryCopyFailure
1274
1275  load_re = re.compile(b'^------- Committed revision (\\d+) >>>\\r?$')
1276  expect_revision = 1
1277  for load_line in filter_dbg(load_stdout, True):
1278    match = load_re.match(load_line)
1279    if match:
1280      if match.group(1).decode() != str(expect_revision):
1281        logger.warn('ERROR:  load failed: %s', load_line.strip())
1282        raise SVNRepositoryCopyFailure
1283      expect_revision += 1
1284  if expect_revision != head_revision + 1:
1285    logger.warn('ERROR:  load failed; did not see revision %s', head_revision)
1286    raise SVNRepositoryCopyFailure
1287
1288
1289def canonicalize_url(input):
1290  "Canonicalize the url, if the scheme is unknown, returns intact input"
1291
1292  m = re.match(r"^((file://)|((svn|svn\+ssh|http|https)(://)))", input)
1293  if m:
1294    scheme = m.group(1)
1295    return scheme + re.sub(r'//*', '/', input[len(scheme):])
1296  else:
1297    return input
1298
1299
1300def create_python_hook_script(hook_path, hook_script_code,
1301                              cmd_alternative=None):
1302  """Create a Python hook script at HOOK_PATH with the specified
1303     HOOK_SCRIPT_CODE."""
1304
1305  if windows:
1306    if cmd_alternative is not None:
1307      file_write("%s.bat" % hook_path,
1308                  cmd_alternative)
1309    else:
1310      # Use an absolute path since the working directory is not guaranteed
1311      hook_path = os.path.abspath(hook_path)
1312      # Fill the python file.
1313      file_write("%s.py" % hook_path, hook_script_code)
1314      # Fill the batch wrapper file.
1315      file_write("%s.bat" % hook_path,
1316                 "@\"%s\" %s.py %%*\n" % (sys.executable, hook_path))
1317  else:
1318    # For all other platforms
1319    file_write(hook_path, "#!%s\n%s" % (sys.executable, hook_script_code))
1320    os.chmod(hook_path, S_ALL_RW | stat.S_IXUSR)
1321
1322def create_http_connection(url, debuglevel=9):
1323  """Create an http(s) connection to the host specified by URL.
1324     Set the debugging level (the amount of debugging output printed when
1325     working with this connection) to DEBUGLEVEL.  By default, all debugging
1326     output is printed. """
1327
1328  if sys.version_info < (3, 0):
1329    # Python <3.0
1330    import httplib
1331  else:
1332    # Python >=3.0
1333    import http.client as httplib
1334
1335  loc = urlparse(url)
1336  if loc.scheme == 'http':
1337    h = httplib.HTTPConnection(loc.hostname, loc.port)
1338  else:
1339    try:
1340      import ssl # new in python 2.6
1341      c = ssl.create_default_context()
1342      c.check_hostname = False
1343      c.verify_mode = ssl.CERT_NONE
1344      h = httplib.HTTPSConnection(loc.hostname, loc.port, context=c)
1345    except:
1346      h = httplib.HTTPSConnection(loc.hostname, loc.port)
1347  h.set_debuglevel(debuglevel)
1348  return h
1349
1350def write_restrictive_svnserve_conf(repo_dir, anon_access="none",
1351                                    separate_groups_db=False):
1352  "Create a restrictive authz file ( no anynomous access )."
1353
1354  fp = open(get_svnserve_conf_file_path(repo_dir), 'w')
1355  fp.write("[general]\n"
1356           "anon-access = %s\n"
1357           "auth-access = write\n"
1358           "authz-db = authz\n" % anon_access);
1359  if separate_groups_db:
1360    fp.write("groups-db = groups\n")
1361  if options.enable_sasl:
1362    fp.write("realm = svntest\n"
1363             "[sasl]\n",
1364             "use-sasl = true\n");
1365  else:
1366    fp.write("password-db = passwd\n")
1367  fp.close()
1368
1369def write_restrictive_svnserve_conf_with_groups(repo_dir, anon_access="none"):
1370  "Create a restrictive configuration with groups stored in a separate file."
1371
1372  return write_restrictive_svnserve_conf(repo_dir, anon_access, True)
1373
1374# Warning: because mod_dav_svn uses one shared authz file for all
1375# repositories, you *cannot* use write_authz_file in any test that
1376# might be run in parallel.
1377#
1378# write_authz_file can *only* be used in test suites which disable
1379# parallel execution at the bottom like so
1380#   if __name__ == '__main__':
1381#     svntest.main.run_tests(test_list, serial_only = True)
1382def write_authz_file(sbox, rules, sections=None, prefixed_rules=None):
1383  """Write an authz file to SBOX, appropriate for the RA method used,
1384with authorizations rules RULES mapping paths to strings containing
1385the rules. You can add sections SECTIONS (ex. groups, aliases...) with
1386an appropriate list of mappings.
1387"""
1388  fp = open(sbox.authz_file, 'w')
1389
1390  # When the sandbox repository is read only its name will be different from
1391  # the repository name.
1392  repo_name = os.path.basename(sbox.repo_dir.rstrip('/'))
1393
1394  if sbox.repo_url.startswith("http"):
1395    default_prefix = repo_name + ":"
1396  else:
1397    default_prefix = ""
1398
1399  if sections:
1400    for p, r in sections.items():
1401      fp.write("[%s]\n%s\n" % (p, r))
1402
1403  if not prefixed_rules:
1404    prefixed_rules = dict()
1405
1406  if rules:
1407    for p, r in rules.items():
1408      prefixed_rules[default_prefix + p] = r
1409
1410  for p, r in prefixed_rules.items():
1411    fp.write("[%s]\n%s\n" % (p, r))
1412    if tests_verify_dump_load_cross_check():
1413      # Insert an ACE that lets the dump/load cross-check bypass
1414      # authz restrictions.
1415      fp.write(crosscheck_username + " = rw\n")
1416
1417  if tests_verify_dump_load_cross_check() and '/' not in prefixed_rules:
1418    # We need a repository-root ACE for the dump/load cross-check
1419    fp.write("[/]\n" + crosscheck_username + " = rw\n")
1420
1421  fp.close()
1422
1423# See the warning about parallel test execution in write_authz_file
1424# method description.
1425def write_groups_file(sbox, groups):
1426  """Write a groups file to SBOX, appropriate for the RA method used,
1427with group contents set to GROUPS."""
1428  fp = open(sbox.groups_file, 'w')
1429  fp.write("[groups]\n")
1430  if groups:
1431    for p, r in groups.items():
1432      fp.write("%s = %s\n" % (p, r))
1433  fp.close()
1434
1435def use_editor(func):
1436  os.environ['SVN_EDITOR'] = svneditor_script
1437  os.environ['SVN_MERGE'] = svneditor_script
1438  os.environ['SVNTEST_EDITOR_FUNC'] = func
1439  os.environ['SVN_TEST_PYTHON'] = sys.executable
1440
1441def mergeinfo_notify_line(revstart, revend, target=None):
1442  """Return an expected output line that describes the beginning of a
1443  mergeinfo recording notification on revisions REVSTART through REVEND."""
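  # For example, mergeinfo_notify_line(3, 5) should produce roughly:
  #   "--- Recording mergeinfo for merge of r3 through r5 into '.+':\n"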
1444  if target:
1445    target_re = re.escape(target)
1446  else:
1447    target_re = ".+"
1448  if (revend is None):
1449    if (revstart < 0):
1450      revstart = abs(revstart)
1451      return "--- Recording mergeinfo for reverse merge of r%ld into '%s':\n" \
1452             % (revstart, target_re)
1453    else:
1454      return "--- Recording mergeinfo for merge of r%ld into '%s':\n" \
1455             % (revstart, target_re)
1456  elif (revstart < revend):
1457    return "--- Recording mergeinfo for merge of r%ld through r%ld into '%s':\n" \
1458           % (revstart, revend, target_re)
1459  else:
1460    return "--- Recording mergeinfo for reverse merge of r%ld through " \
1461           "r%ld into '%s':\n" % (revstart, revend, target_re)
1462
1463def merge_notify_line(revstart=None, revend=None, same_URL=True,
1464                      foreign=False, target=None):
1465  """Return an expected output line that describes the beginning of a
1466  merge operation on revisions REVSTART through REVEND.  Omit both
1467  REVSTART and REVEND for the case where the left and right sides of
1468  the merge are from different URLs."""
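  # For example, merge_notify_line(3, 5) should yield roughly:
  #   "--- Merging r3 through r5 into '.+':\n"
  # and calling it with no revisions at all describes a two-URL merge.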
  from_foreign_phrase = foreign and r"\(from foreign repository\) " or ""
1470  if target:
1471    target_re = re.escape(target)
1472  else:
1473    target_re = ".+"
1474  if not same_URL:
1475    return "--- Merging differences between %srepository URLs into '%s':\n" \
1476           % (foreign and "foreign " or "", target_re)
1477  if revend is None:
1478    if revstart is None:
1479      # The left and right sides of the merge are from different URLs.
1480      return "--- Merging differences between %srepository URLs into '%s':\n" \
1481             % (foreign and "foreign " or "", target_re)
1482    elif revstart < 0:
1483      return "--- Reverse-merging %sr%ld into '%s':\n" \
1484             % (from_foreign_phrase, abs(revstart), target_re)
1485    else:
1486      return "--- Merging %sr%ld into '%s':\n" \
1487             % (from_foreign_phrase, revstart, target_re)
1488  else:
1489    if revstart > revend:
1490      return "--- Reverse-merging %sr%ld through r%ld into '%s':\n" \
1491             % (from_foreign_phrase, revstart, revend, target_re)
1492    else:
1493      return "--- Merging %sr%ld through r%ld into '%s':\n" \
1494             % (from_foreign_phrase, revstart, revend, target_re)
1495
1496def summary_of_conflicts(text_conflicts=0,
1497                         prop_conflicts=0,
1498                         tree_conflicts=0,
1499                         text_resolved=0,
1500                         prop_resolved=0,
1501                         tree_resolved=0,
1502                         skipped_paths=0,
1503                         as_regex=False):
1504  """Return a list of lines corresponding to the summary of conflicts and
1505     skipped paths that is printed by merge and update and switch.  If all
1506     parameters are zero, return an empty list.
1507  """
1508  lines = []
1509  if (text_conflicts or prop_conflicts or tree_conflicts
1510      or text_resolved or prop_resolved or tree_resolved
1511      or skipped_paths):
1512    lines.append("Summary of conflicts:\n")
1513    if text_conflicts or text_resolved:
1514      if text_resolved == 0:
1515        lines.append("  Text conflicts: %d\n" % text_conflicts)
1516      else:
1517        lines.append("  Text conflicts: %d remaining (and %d already resolved)\n"
1518                     % (text_conflicts, text_resolved))
1519    if prop_conflicts or prop_resolved:
1520      if prop_resolved == 0:
1521        lines.append("  Property conflicts: %d\n" % prop_conflicts)
1522      else:
1523        lines.append("  Property conflicts: %d remaining (and %d already resolved)\n"
1524                     % (prop_conflicts, prop_resolved))
1525    if tree_conflicts or tree_resolved:
1526      if tree_resolved == 0:
1527        lines.append("  Tree conflicts: %d\n" % tree_conflicts)
1528      else:
1529        lines.append("  Tree conflicts: %d remaining (and %d already resolved)\n"
1530                     % (tree_conflicts, tree_resolved))
1531    if skipped_paths:
1532      lines.append("  Skipped paths: %d\n" % skipped_paths)
1533
1534  if as_regex:
    lines = list(map(re.escape, lines))
1536  return lines
1537
1538
1539def make_log_msg():
1540  "Conjure up a log message based on the calling test."
1541
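  # The returned message resembles a traceback line, e.g. (with purely
  # illustrative file, line and function values):
  #   "File 'basic_tests.py', line 123, in basic_checkout"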
1542  for idx in range(1, 100):
1543    frame = sys._getframe(idx)
1544
1545    # If this frame isn't from a function in *_tests.py, then skip it.
1546    filename = frame.f_code.co_filename
1547    if not filename.endswith('_tests.py'):
1548      continue
1549
1550    # There should be a test_list in this module.
1551    test_list = frame.f_globals.get('test_list')
1552    if test_list is None:
1553      continue
1554
1555    # If the function is not in the test_list, then skip it.
1556    func_name = frame.f_code.co_name
1557    func_ob = frame.f_globals.get(func_name)
1558    if func_ob not in test_list:
1559      continue
1560
1561    # Make the log message look like a line from a traceback.
1562    # Well...close. We use single quotes to avoid interfering with the
1563    # double-quote quoting performed on Windows
1564    return "File '%s', line %d, in %s" % (filename, frame.f_lineno, func_name)
1565
1566
1567######################################################################
1568# Functions which check the test configuration
1569# (useful for conditional XFails)
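#
# These predicates are typically passed to the condition-taking decorators
# in svntest.testcase, for example (test name is illustrative):
#
#   @SkipUnless(svntest.main.server_has_mergeinfo)
#   def my_merge_test(sbox):
#     ...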
1570
1571def tests_use_prepackaged_repository():
1572  return options.fsfs_version is not None
1573
1574def tests_verify_dump_load_cross_check():
1575  return options.dump_load_cross_check
1576
1577def is_ra_type_dav():
1578  return options.test_area_url.startswith('http')
1579
1580def is_ra_type_dav_neon():
1581  """Return True iff running tests over RA-Neon.
1582     CAUTION: Result is only valid if svn was built to support both."""
1583  return options.test_area_url.startswith('http') and \
1584    (options.http_library == "neon")
1585
1586def is_ra_type_dav_serf():
1587  """Return True iff running tests over RA-Serf.
1588     CAUTION: Result is only valid if svn was built to support both."""
1589  return options.test_area_url.startswith('http') and \
1590    (options.http_library == "serf")
1591
1592def is_ra_type_svn():
1593  """Return True iff running tests over RA-svn."""
1594  return options.test_area_url.startswith('svn')
1595
1596def is_ra_type_file():
1597  """Return True iff running tests over RA-local."""
1598  return options.test_area_url.startswith('file')
1599
1600def is_fs_type_fsfs():
1601  # This assumes that fsfs is the default fs implementation.
1602  return options.fs_type == 'fsfs' or options.fs_type is None
1603
1604def is_fs_type_fsx():
1605  return options.fs_type == 'fsx'
1606
1607def is_fs_type_bdb():
1608  return options.fs_type == 'bdb'
1609
1610def is_fs_log_addressing():
1611  return is_fs_type_fsx() or \
1612        (is_fs_type_fsfs() and options.server_minor_version >= 9)
1613
1614def fs_has_sha1():
1615  return fs_has_rep_sharing()
1616
1617def fs_has_rep_sharing():
1618  return options.server_minor_version >= 6
1619
1620def fs_has_pack():
1621  return is_fs_type_fsx() or \
1622        (is_fs_type_fsfs() and options.server_minor_version >= 6)
1623
1624def fs_has_unique_freeze():
1625  return (is_fs_type_fsfs() and options.server_minor_version >= 9
1626          or is_fs_type_bdb())
1627
1628def is_os_windows():
1629  return os.name == 'nt'
1630
1631def is_windows_type_dav():
1632  return is_os_windows() and is_ra_type_dav()
1633
1634def is_posix_os():
1635  return os.name == 'posix'
1636
1637def is_os_darwin():
1638  return sys.platform == 'darwin'
1639
1640def is_fs_case_insensitive():
1641  return (is_os_darwin() or is_os_windows())
1642
1643def is_threaded_python():
1644  return True
1645
1646def server_has_mergeinfo():
1647  return options.server_minor_version >= 5
1648
1649def server_has_revprop_commit():
1650  return options.server_caps.has_revprop_commit
1651
1652def server_authz_has_aliases():
1653  return options.server_caps.authz_has_aliases
1654
1655def server_gets_client_capabilities():
1656  return options.server_caps.gets_client_capabilities
1657
1658def server_has_partial_replay():
1659  return options.server_caps.has_partial_replay
1660
1661def server_enforces_UTF8_fspaths_in_verify():
1662  return options.server_caps.enforces_UTF8_fspaths_in_verify
1663
1664def server_enforces_date_syntax():
1665  return options.server_caps.enforces_date_syntax
1666
1667def server_has_atomic_revprop():
1668  return options.server_caps.has_atomic_revprop
1669
1670def server_has_reverse_get_file_revs():
1671  return options.server_caps.has_reverse_get_file_revs
1672
1673def python_sqlite_can_read_our_wc_db():
1674  """Check if the Python builtin is capable enough to peek into wc.db"""
1675  # Currently enough (1.7-1.9)
1676  return svntest.sqlite3.sqlite_version_info >= (3, 6, 18)
1677
1678def python_sqlite_can_read_without_rowid():
1679  """Check if the Python builtin is capable enough to read new rep-cache"""
1680  return svntest.sqlite3.sqlite_version_info >= (3, 8, 2)
1681
1682def is_plaintext_password_storage_disabled():
1683  try:
1684    predicate = re.compile("^WARNING: Plaintext password storage is enabled!")
1685    code, out, err = run_svn(False, "--version")
1686    for line in out:
1687      if predicate.match(line):
1688        return False
1689  except:
1690    return False
1691  return True
1692
1693# https://issues.apache.org/bugzilla/show_bug.cgi?id=56480
1694# https://issues.apache.org/bugzilla/show_bug.cgi?id=55397
1695__mod_dav_url_quoting_broken_versions = frozenset([
1696    '2.2.27',
1697    '2.2.26',
1698    '2.2.25',
1699    '2.4.9',
1700    '2.4.8',
1701    '2.4.7',
1702    '2.4.6',
1703    '2.4.5',
1704])
def is_mod_dav_url_quoting_broken():
  if is_ra_type_dav() and options.httpd_version != options.httpd_whitelist:
    return (options.httpd_version in __mod_dav_url_quoting_broken_versions)
  return None

def is_httpd_authz_provider_enabled():
  if is_ra_type_dav():
    v = options.httpd_version.split('.')
    return (v[0] == '2' and int(v[1]) >= 3) or int(v[0]) > 2
  return None
1715
1716def is_remote_http_connection_allowed():
1717  return options.allow_remote_http_connection
1718
1719
1720######################################################################
1721
1722
1723class TestSpawningThread(threading.Thread):
1724  """A thread that runs test cases in their own processes.
1725  Receives test numbers to run from the queue, and saves results into
1726  the results field."""
1727  def __init__(self, queue, progress_func, tests_total):
1728    threading.Thread.__init__(self)
1729    self.queue = queue
1730    self.results = []
1731    self.progress_func = progress_func
1732    self.tests_total = tests_total
1733
1734  def run(self):
1735    while True:
1736      try:
1737        next_index = self.queue.get_nowait()
1738      except queue.Empty:
1739        return
1740
1741      self.run_one(next_index)
1742
1743      # signal progress
1744      if self.progress_func:
1745        self.progress_func(self.tests_total - self.queue.qsize(),
1746                           self.tests_total)
1747
1748  def run_one(self, index):
1749    command = os.path.abspath(sys.argv[0])
1750
1751    args = []
1752    args.append(str(index))
1753    args.append('-c')
1754    args.append('--set-log-level=%s' % logger.getEffectiveLevel())
1755    # add some startup arguments from this process
1756    if options.fs_type:
1757      args.append('--fs-type=' + options.fs_type)
1758    if options.test_area_url:
1759      args.append('--url=' + options.test_area_url)
1760    if options.cleanup:
1761      args.append('--cleanup')
1762    if options.enable_sasl:
1763      args.append('--enable-sasl')
1764    if options.http_library:
1765      args.append('--http-library=' + options.http_library)
1766    if options.server_minor_version:
1767      args.append('--server-minor-version=' + str(options.server_minor_version))
1768    if options.mode_filter:
1769      args.append('--mode-filter=' + options.mode_filter)
1770    if options.milestone_filter:
1771      args.append('--milestone-filter=' + options.milestone_filter)
1772    if options.ssl_cert:
1773      args.append('--ssl-cert=' + options.ssl_cert)
1774    if options.http_proxy:
1775      args.append('--http-proxy=' + options.http_proxy)
1776    if options.http_proxy_username:
1777      args.append('--http-proxy-username=' + options.http_proxy_username)
1778    if options.http_proxy_password:
1779      args.append('--http-proxy-password=' + options.http_proxy_password)
1780    if options.httpd_version:
1781      args.append('--httpd-version=' + options.httpd_version)
1782    if options.httpd_whitelist:
1783      args.append('--httpd-whitelist=' + options.httpd_whitelist)
1784    if options.exclusive_wc_locks:
1785      args.append('--exclusive-wc-locks')
1786    if options.memcached_server:
1787      args.append('--memcached-server=' + options.memcached_server)
1788    if options.fsfs_sharding:
1789      args.append('--fsfs-sharding=' + str(options.fsfs_sharding))
1790    if options.fsfs_packing:
1791      args.append('--fsfs-packing')
1792    if options.fsfs_version:
1793      args.append('--fsfs-version=' + str(options.fsfs_version))
1794    if options.dump_load_cross_check:
1795      args.append('--dump-load-cross-check')
1796    if options.fsfs_compression:
1797      args.append('--fsfs-compression=' + options.fsfs_compression)
1798    if options.fsfs_dir_deltification:
1799      args.append('--fsfs-dir-deltification=' + options.fsfs_dir_deltification)
1800    if options.allow_remote_http_connection:
1801      args.append('--allow-remote-http-connection')
1802    if options.svn_bin:
1803      args.append('--bin=' + options.svn_bin)
1804
1805    result, stdout_lines, stderr_lines = spawn_process(command, 0, False, None,
1806                                                       *args)
1807    self.results.append((index, result, stdout_lines, stderr_lines))
1808
1809class TestRunner:
1810  """Encapsulate a single test case (predicate), including logic for
  running the test and test list output."""
1812
1813  def __init__(self, func, index):
1814    self.pred = svntest.testcase.create_test_case(func)
1815    self.index = index
1816
1817  def list(self, milestones_dict=None):
1818    """Print test doc strings.  MILESTONES_DICT is an optional mapping
    of issue numbers to a list containing the target milestone and who
1820    the issue is assigned to."""
1821    if options.mode_filter.upper() == 'ALL' \
1822       or options.mode_filter.upper() == self.pred.list_mode().upper() \
1823       or (options.mode_filter.upper() == 'PASS' \
1824           and self.pred.list_mode() == ''):
1825      issues = []
1826      tail = ''
1827      if self.pred.issues:
1828        if not options.milestone_filter or milestones_dict is None:
1829          issues = self.pred.issues
1830          tail += " [%s]" % ','.join(['#%s' % str(i) for i in issues])
1831        else: # Limit listing by requested target milestone(s).
1832          filter_issues = []
1833          matches_filter = False
1834
1835          # Get the milestones for all the issues associated with this test.
1836          # If any one of them matches the MILESTONE_FILTER then we'll print
1837          # them all.
1838          for issue in self.pred.issues:
1839            # Some safe starting assumptions.
1840            milestone = 'unknown'
1841            assigned_to = 'unknown'
1842            if milestones_dict:
              if str(issue) in milestones_dict:
1844                milestone = milestones_dict[str(issue)][0]
1845                assigned_to = milestones_dict[str(issue)][1]
1846
1847            filter_issues.append(
1848              str(issue) + '(' + milestone + '/' + assigned_to + ')')
1849            pattern = re.compile(options.milestone_filter)
1850            if pattern.match(milestone):
1851              matches_filter = True
1852
1853          # Did at least one of the associated issues meet our filter?
1854          if matches_filter:
1855            issues = filter_issues
1856          # Wrap the issue#/target-milestone/assigned-to string
1857          # to the next line and add a line break to enhance
1858          # readability.
1859          tail += "\n               %s" % '\n               '.join(
1860            ['#%s' % str(i) for i in issues])
1861          tail += '\n'
      # If there is no filter or this test made it through
1863      # the filter then print it!
1864      if options.milestone_filter is None or len(issues):
1865        if self.pred.inprogress:
1866          tail += " [[%s]]" % self.pred.inprogress
1867        else:
1868          print(" %3d    %-5s  %s%s" % (self.index,
1869                                        self.pred.list_mode(),
1870                                        self.pred.description,
1871                                        tail))
1872    sys.stdout.flush()
1873
1874  def get_mode(self):
1875    return self.pred.list_mode()
1876
1877  def get_issues(self):
1878    return self.pred.issues
1879
1880  def get_function_name(self):
1881    return self.pred.get_function_name()
1882
1883  def _print_name(self, prefix):
1884    if self.pred.inprogress:
1885      print("%s %s %s: %s [[WIMP: %s]]" % (prefix,
1886                                           os.path.basename(sys.argv[0]),
1887                                           str(self.index),
1888                                           self.pred.description,
1889                                           self.pred.inprogress))
1890    else:
1891      print("%s %s %s: %s" % (prefix,
1892                              os.path.basename(sys.argv[0]),
1893                              str(self.index),
1894                              self.pred.description))
1895    sys.stdout.flush()
1896
1897  def run(self):
1898    """Run self.pred and return the result.  The return value is
1899        - 0 if the test was successful
1900        - 1 if it errored in a way that indicates test failure
1901        - 2 if the test skipped
1902        """
1903    sbox_name = self.pred.get_sandbox_name()
1904    if sbox_name:
1905      sandbox = svntest.sandbox.Sandbox(sbox_name, self.index)
1906    else:
1907      sandbox = None
1908
1909    # Explicitly set this so that commands that commit but don't supply a
1910    # log message will fail rather than invoke an editor.
1911    # Tests that want to use an editor should invoke svntest.main.use_editor.
1912    os.environ['SVN_EDITOR'] = ''
1913    os.environ['SVNTEST_EDITOR_FUNC'] = ''
1914
1915    if options.use_jsvn:
      # Set this SVNKit-specific variable to the current test (test name plus
      # its index) being run so that the SVNKit daemon can use this test name
      # for its separate log file
      os.environ['SVN_CURRENT_TEST'] = os.path.basename(sys.argv[0]) + "_" + \
                                       str(self.index)
1921
1922    svntest.actions.no_sleep_for_timestamps()
1923    svntest.actions.do_relocate_validation()
1924
1925    saved_dir = os.getcwd()
1926    try:
1927      rc = self.pred.run(sandbox)
1928      if rc is not None:
1929        self._print_name('STYLE ERROR in')
1930        print('Test driver returned a status code.')
1931        sys.exit(255)
1932      result = svntest.testcase.RESULT_OK
1933    except Skip as ex:
1934      result = svntest.testcase.RESULT_SKIP
1935    except Failure as ex:
1936      result = svntest.testcase.RESULT_FAIL
1937      msg = ''
1938      # We captured Failure and its subclasses. We don't want to print
1939      # anything for plain old Failure since that just indicates test
1940      # failure, rather than relevant information. However, if there
1941      # *is* information in the exception's arguments, then print it.
1942      if ex.__class__ != Failure or ex.args:
1943        ex_args = str(ex)
1944        logger.warn('CWD: %s' % os.getcwd())
1945        if ex_args:
1946          msg = 'EXCEPTION: %s: %s' % (ex.__class__.__name__, ex_args)
1947        else:
1948          msg = 'EXCEPTION: %s' % ex.__class__.__name__
1949      logger.warn(msg, exc_info=True)
1950    except KeyboardInterrupt:
1951      logger.error('Interrupted')
1952      sys.exit(0)
1953    except SystemExit as ex:
1954      logger.error('EXCEPTION: SystemExit(%d), skipping cleanup' % ex.code)
1955      self._print_name(ex.code and 'FAIL: ' or 'PASS: ')
1956      raise
1957    except:
1958      result = svntest.testcase.RESULT_FAIL
1959      logger.warn('CWD: %s' % os.getcwd(), exc_info=True)
1960
1961    os.chdir(saved_dir)
1962    exit_code, result_text, result_benignity = self.pred.results(result)
1963    if not (options.quiet and result_benignity):
1964      self._print_name(result_text)
1965    if sandbox is not None and exit_code != 1 and options.cleanup:
1966      sandbox.cleanup_test_paths()
1967    return exit_code
1968
1969######################################################################
1970# Main testing functions
1971
1972# These two functions each take a TEST_LIST as input.  The TEST_LIST
1973# should be a list of test functions; each test function should take
1974# no arguments and return a 0 on success, non-zero on failure.
1975# Ideally, each test should also have a short, one-line docstring (so
1976# it can be displayed by the 'list' command.)
1977
1978# Func to run one test in the list.
1979def run_one_test(n, test_list, finished_tests = None):
1980  """Run the Nth client test in TEST_LIST, return the result.
1981
1982  If we're running the tests in parallel spawn the test in a new process.
1983  """
1984
1985  # allow N to be negative, so './basic_tests.py -- -1' works
1986  num_tests = len(test_list) - 1
1987  if (n == 0) or (abs(n) > num_tests):
1988    print("There is no test %s.\n" % n)
1989    return 1
1990  if n < 0:
1991    n += 1+num_tests
1992
1993  test_mode = TestRunner(test_list[n], n).get_mode().upper()
1994  if options.mode_filter.upper() == 'ALL' \
1995     or options.mode_filter.upper() == test_mode \
1996     or (options.mode_filter.upper() == 'PASS' and test_mode == ''):
1997    # Run the test.
1998    exit_code = TestRunner(test_list[n], n).run()
1999    return exit_code
2000  else:
2001    return 0
2002
2003def _internal_run_tests(test_list, testnums, parallel, srcdir, progress_func):
2004  """Run the tests from TEST_LIST whose indices are listed in TESTNUMS.
2005
  If we're running the tests in parallel, spawn as many parallel processes
  as requested and gather the results in a temporary buffer as each child
  process finishes.
2009  """
2010
2011  exit_code = 0
2012  finished_tests = []
2013  tests_started = 0
2014
2015  # Some of the tests use sys.argv[0] to locate their test data
2016  # directory.  Perhaps we should just be passing srcdir to the tests?
2017  if srcdir:
2018    sys.argv[0] = os.path.join(srcdir, 'subversion', 'tests', 'cmdline',
2019                               sys.argv[0])
2020
2021  if not parallel:
2022    for i, testnum in enumerate(testnums):
2023
2024      if run_one_test(testnum, test_list) == 1:
2025          exit_code = 1
2026      # signal progress
2027      if progress_func:
2028        progress_func(i+1, len(testnums))
2029  else:
2030    number_queue = queue.Queue()
2031    for num in testnums:
2032      number_queue.put(num)
2033
2034    threads = [ TestSpawningThread(number_queue, progress_func,
2035                                   len(testnums)) for i in range(parallel) ]
2036    for t in threads:
2037      t.start()
2038
2039    for t in threads:
2040      t.join()
2041
2042    # list of (index, result, stdout, stderr)
2043    results = []
2044    for t in threads:
2045      results += t.results
2046    results.sort()
2047
2048    # all tests are finished, find out the result and print the logs.
2049    for (index, result, stdout_lines, stderr_lines) in results:
2050      if stdout_lines:
2051        for line in stdout_lines:
2052          sys.stdout.write(line)
2053      if stderr_lines:
2054        for line in stderr_lines:
2055          sys.stdout.write(line)
2056      if result == 1:
2057        exit_code = 1
2058
2059  svntest.sandbox.cleanup_deferred_test_paths()
2060  return exit_code
2061
2062
2063class AbbreviatedFormatter(logging.Formatter):
2064  """A formatter with abbreviated loglevel indicators in the output.
2065
2066  Use %(levelshort)s in the format string to get a single character
  representing the loglevel.
2068  """
2069
2070  _level_short = {
2071    logging.CRITICAL : 'C',
2072    logging.ERROR : 'E',
2073    logging.WARNING : 'W',
2074    logging.INFO : 'I',
2075    logging.DEBUG : 'D',
2076    logging.NOTSET : '-',
2077    }
2078
2079  def format(self, record):
2080    record.levelshort = self._level_short[record.levelno]
2081    return logging.Formatter.format(self, record)
2082
2083
2084class LoggingStdoutHandler(logging.StreamHandler):
2085  """
  A handler that always writes to the sys.stdout in effect at call time,
  rather than the value sys.stdout had at construction time.
2088
2089  Inspired by logging._StderrHandler on Python 3.
2090  """
2091
2092  def __init__(self, level=logging.NOTSET):
2093    logging.Handler.__init__(self, level)
2094
2095  @property
2096  def stream(self):
2097    return sys.stdout
2098
2099
2100def _create_parser(usage=None):
2101  """Return a parser for our test suite."""
2102
2103  global logger
2104
2105  # Initialize the LOGGER global variable so the option parsing can set
2106  # its loglevel, as appropriate.
2107  logger = logging.getLogger()
2108
2109  # Did some chucklehead log something before we configured it? If they
2110  # did, then a default handler/formatter would get installed. We want
2111  # to be the one to install the first (and only) handler.
2112  for handler in logger.handlers:
2113    if not isinstance(handler.formatter, AbbreviatedFormatter):
2114      raise Exception('Logging occurred before configuration. Some code'
2115                      ' path needs to be fixed. Examine the log output'
2116                      ' to find what/where logged something.')
2117
2118  # Set a sane default log level
2119  if logger.getEffectiveLevel() == logging.NOTSET:
2120    logger.setLevel(logging.WARN)
2121
2122  def set_log_level(option, opt, value, parser, level=None):
2123    if level:
2124      # called from --verbose
2125      logger.setLevel(level)
2126    else:
2127      # called from --set-log-level
2128      logger.setLevel(getattr(logging, value, None) or int(value))
2129
2130  # Set up the parser.
2131  # If you add new options, consider adding them in
2132  #
2133  #     .../build/run_tests.py:main()
2134  #
2135  # and handling them in
2136  #
2137  #     .../build/run_tests.py:TestHarness._init_py_tests()
2138  #
2139  _default_http_library = 'serf'
2140  if usage is None:
2141    usage = 'usage: %prog [options] [<test> ...]'
2142  parser = optparse.OptionParser(usage=usage)
2143  parser.add_option('-l', '--list', action='store_true', dest='list_tests',
2144                    help='Print test doc strings instead of running them')
2145  parser.add_option('--milestone-filter', action='store', dest='milestone_filter',
2146                    help='Limit --list to those with target milestone specified')
2147  parser.add_option('-v', '--verbose', action='callback',
2148                    callback=set_log_level, callback_args=(logging.DEBUG, ),
2149                    help='Print binary command-lines (same as ' +
2150                         '"--set-log-level logging.DEBUG")')
2151  parser.add_option('-q', '--quiet', action='store_true',
2152                    help='Print only unexpected results (not with --verbose)')
2153  parser.add_option('-p', '--parallel', action='store_const',
2154                    const=default_num_threads, dest='parallel',
2155                    help='Run the tests in parallel')
2156  parser.add_option('--parallel-instances', action='store',
2157                    type='int', dest='parallel',
2158                    help='Run the given number of tests in parallel')
2159  parser.add_option('-c', action='store_true', dest='is_child_process',
2160                    help='Flag if we are running this python test as a ' +
2161                    'child process; used by build/run_tests.py:334')
2162  parser.add_option('--mode-filter', action='store', dest='mode_filter',
2163                    default='ALL',
2164                    help='Limit tests to those with type specified (e.g. XFAIL)')
2165  parser.add_option('--url', action='store',
2166                    help='Base url to the repos (e.g. svn://localhost)')
2167  parser.add_option('--fs-type', action='store',
2168                    help='Subversion file system type (fsfs, bdb or fsx)')
2169  parser.add_option('--cleanup', action='store_true',
2170                    help='Whether to clean up')
2171  parser.add_option('--enable-sasl', action='store_true',
2172                    help='Whether to enable SASL authentication')
2173  parser.add_option('--bin', action='store', dest='svn_bin',
2174                    help='Use the svn binaries installed in this path')
2175  parser.add_option('--use-jsvn', action='store_true',
2176                    help="Use the jsvn (SVNKit based) binaries. Can be " +
2177                         "combined with --bin to point to a specific path")
2178  parser.add_option('--http-library', action='store',
2179                    help="Make svn use this DAV library (neon or serf) if " +
2180                         "it supports both, else assume it's using this " +
2181                         "one; the default is " + _default_http_library)
2182  parser.add_option('--server-minor-version', type='int', action='store',
2183                    help="Set the minor version for the server ('3'..'%d')."
2184                    % SVN_VER_MINOR)
2185  parser.add_option('--fsfs-packing', action='store_true',
2186                    help="Run 'svnadmin pack' automatically")
2187  parser.add_option('--fsfs-sharding', action='store', type='int',
2188                    help='Default shard size (for fsfs)')
2189  parser.add_option('--fsfs-version', type='int', action='store',
2190                    help='FSFS version (fsfs)')
2191  parser.add_option('--dump-load-cross-check', action='store_true',
2192                    help="After every test, run a series of dump and load " +
2193                         "tests with svnadmin, svnrdump and svndumpfilter " +
                         "on the testcase repositories to cross-check " +
                         "dump file compatibility.")
2196  parser.add_option('--config-file', action='store',
2197                    help="Configuration file for tests.")
2198  parser.add_option('--set-log-level', action='callback', type='str',
2199                    callback=set_log_level,
2200                    help="Set log level (numerically or symbolically). " +
2201                         "Symbolic levels are: CRITICAL, ERROR, WARNING, " +
2202                         "INFO, DEBUG")
2203  parser.add_option('--log-with-timestamps', action='store_true',
2204                    help="Show timestamps in test log.")
2205  parser.add_option('--keep-local-tmp', action='store_true',
2206                    help="Don't remove svn-test-work/local_tmp after test " +
2207                         "run is complete.  Useful for debugging failures.")
2208  parser.add_option('--development', action='store_true',
2209                    help='Test development mode: provides more detailed ' +
2210                         'test output and ignores all exceptions in the ' +
2211                         'run_and_verify* functions. This option is only ' +
2212                         'useful during test development!')
2213  parser.add_option('--srcdir', action='store', dest='srcdir',
2214                    help='Source directory.')
2215  parser.add_option('--ssl-cert', action='store',
2216                    help='Path to SSL server certificate.')
2217  parser.add_option('--http-proxy', action='store',
2218                    help='Use the HTTP Proxy at hostname:port.')
2219  parser.add_option('--http-proxy-username', action='store',
2220                    help='Username for the HTTP Proxy.')
2221  parser.add_option('--http-proxy-password', action='store',
2222                    help='Password for the HTTP Proxy.')
2223  parser.add_option('--httpd-version', action='store',
2224                    help='Assume HTTPD is this version.')
2225  parser.add_option('--httpd-whitelist', action='store',
2226                    help='httpd whitelist version.')
2227  parser.add_option('--tools-bin', action='store', dest='tools_bin',
2228                    help='Use the svn tools installed in this path')
2229  parser.add_option('--exclusive-wc-locks', action='store_true',
2230                    help='Use sqlite exclusive locking for working copies')
2231  parser.add_option('--memcached-server', action='store',
2232                    help='Use memcached server at specified URL (FSFS only)')
2233  parser.add_option('--fsfs-compression', action='store', type='str',
2234                    help='Set compression type (for fsfs)')
2235  parser.add_option('--fsfs-dir-deltification', action='store', type='str',
2236                    help='Set directory deltification option (for fsfs)')
2237  parser.add_option('--allow-remote-http-connection', action='store_true',
2238                    help='Run tests that connect to remote HTTP(S) servers')
2239
2240  # most of the defaults are None, but some are other values, set them here
2241  parser.set_defaults(
2242        server_minor_version=SVN_VER_MINOR,
2243        url=file_scheme_prefix + \
2244                        svntest.wc.svn_uri_quote(
2245                           os.path.abspath(
2246                               os.getcwd()).replace(os.path.sep, '/')),
2247        http_library=_default_http_library)
2248
2249  return parser
2250
2251class ServerCaps():
2252  """A simple struct that contains the actual server capabilities that don't
2253     depend on other settings like FS versions."""
2254
2255  def __init__(self, options):
2256    self.has_revprop_commit = options.server_minor_version >= 5
2257    self.authz_has_aliases = options.server_minor_version >= 5
2258    self.gets_client_capabilities = options.server_minor_version >= 5
2259    self.has_partial_replay = options.server_minor_version >= 5
2260    self.enforces_UTF8_fspaths_in_verify = options.server_minor_version >= 6
2261    self.enforces_date_syntax = options.server_minor_version >= 5
2262    self.has_atomic_revprop = options.server_minor_version >= 7
2263    self.has_reverse_get_file_revs = options.server_minor_version >= 8
2264
2265def parse_options(arglist=sys.argv[1:], usage=None):
2266  """Parse the arguments in arg_list, and set the global options object with
2267     the results"""
2268
2269  global options
2270
2271  parser = _create_parser(usage)
2272  (options, args) = parser.parse_args(arglist)
2273
2274  # Peg the actual server capabilities.
2275  # We tweak the server_minor_version later to accommodate FS restrictions,
2276  # but we don't want them to interfere with expectations towards the "pure"
2277  # server code.
2278  options.server_caps = ServerCaps(options)
2279
2280  # If there are no logging handlers registered yet, then install our
2281  # own with our custom formatter. (anything currently installed *is*
2282  # our handler as tested above, in _create_parser)
2283  if not logger.handlers:
2284    # Now that we have some options, let's get the logger configured before
2285    # doing anything more
2286    if options.log_with_timestamps:
2287      formatter = AbbreviatedFormatter('%(levelshort)s:'
2288                                       ' [%(asctime)s] %(message)s',
2289                                       datefmt='%Y-%m-%d %H:%M:%S')
2290    else:
2291      formatter = AbbreviatedFormatter('%(levelshort)s: %(message)s')
2292    handler = LoggingStdoutHandler()
2293    handler.setFormatter(formatter)
2294    logger.addHandler(handler)
2295
2296  # Normalize url to have no trailing slash
2297  if options.url:
2298    if options.url[-1:] == '/':
2299      options.test_area_url = options.url[:-1]
2300    else:
2301      options.test_area_url = options.url
2302
2303  # Some sanity checking
2304  if options.fsfs_packing and not options.fsfs_sharding:
2305    parser.error("--fsfs-packing requires --fsfs-sharding")
2306
2307  if options.server_minor_version not in range(3, SVN_VER_MINOR+1):
2308    parser.error("test harness only supports server minor versions 3-%d"
2309                 % SVN_VER_MINOR)
2310
2313  return (parser, args)
2314
2315def tweak_options_for_precooked_repos():
2316  """Make sure the server-minor-version matches the fsfs-version parameter
2317     for pre-cooked repositories."""
2318
2319  global options
2320
2321  # Server versions that introduced the respective FSFS formats:
2322  introducing_version = { 1:1, 2:4, 3:5, 4:6, 6:8, 7:9 }
2323  if options.fsfs_version:
2324    if options.fsfs_version in introducing_version:
2325      introduced_in = introducing_version[options.fsfs_version]
2326      if options.server_minor_version \
2327        and options.server_minor_version != introduced_in \
2328        and options.server_minor_version != SVN_VER_MINOR:
2329        parser.error("--fsfs-version=%d requires --server-minor-version=%d" \
2330                     % (options.fsfs_version, introduced_in))
2331      options.server_minor_version = introduced_in
2332    # ### Add more tweaks here if and when we support pre-cooked versions
2333    # ### of FSFS repositories.
2334
2335
2336def run_tests(test_list, serial_only = False):
2337  """Main routine to run all tests in TEST_LIST.
2338
2339  NOTE: this function does not return. It does a sys.exit() with the
2340        appropriate exit code.
2341  """
2342
2343  sys.exit(execute_tests(test_list, serial_only))
2344
2345def get_issue_details(issue_numbers):
2346  """For each issue number in ISSUE_NUMBERS query the issue
2347     tracker and determine what the target milestone is and
2348     who the issue is assigned to.  Return this information
2349     as a dictionary mapping issue numbers to a list
2350     [target_milestone, assigned_to]"""
2351  xml_url = "http://subversion.tigris.org/issues/xml.cgi?id="
2352  issue_dict = {}
2353
2354  if isinstance(issue_numbers, int):
2355    issue_numbers = [str(issue_numbers)]
2356  elif isinstance(issue_numbers, str):
2357    issue_numbers = [issue_numbers]
2358
2359  if issue_numbers is None or len(issue_numbers) == 0:
2360    return issue_dict
2361
2362  for num in issue_numbers:
2363    xml_url += str(num) + ','
2364    issue_dict[str(num)] = 'unknown'
2365
2366  try:
2367    # Parse the xml for ISSUE_NO from the issue tracker into a Document.
2368    issue_xml_f = urllib.urlopen(xml_url)
2369  except:
2370    print("WARNING: Unable to contact issue tracker; " \
2371          "milestones defaulting to 'unknown'.")
2372    return issue_dict
2373
2374  try:
    import xml.dom.minidom  # 'import xml' alone does not load the submodule
    xmldoc = xml.dom.minidom.parse(issue_xml_f)
2376    issue_xml_f.close()
2377
2378    # For each issue: Get the target milestone and who
2379    #                 the issue is assigned to.
2380    issue_element = xmldoc.getElementsByTagName('issue')
2381    for i in issue_element:
2382      issue_id_element = i.getElementsByTagName('issue_id')
2383      issue_id = issue_id_element[0].childNodes[0].nodeValue
2384      milestone_element = i.getElementsByTagName('target_milestone')
2385      milestone = milestone_element[0].childNodes[0].nodeValue
2386      assignment_element = i.getElementsByTagName('assigned_to')
2387      assignment = assignment_element[0].childNodes[0].nodeValue
2388      issue_dict[issue_id] = [milestone, assignment]
2389  except:
2390    print("ERROR: Unable to parse target milestones from issue tracker")
2391    raise
2392
2393  return issue_dict
2394
2395
2396# Main func.  This is the "entry point" that all the test scripts call
2397# to run their list of tests.
2398#
2399# This routine parses sys.argv to decide what to do.
2400def execute_tests(test_list, serial_only = False, test_name = None,
2401                  progress_func = None, test_selection = []):
2402  """Similar to run_tests(), but just returns the exit code, rather than
2403  exiting the process.  This function can be used when a caller doesn't
2404  want the process to die."""
2405
2406  global pristine_url
2407  global pristine_greek_repos_url
2408  global pristine_trojan_repos_url
2409  global other_dav_root_url
2410  global non_dav_root_url
2411  global svn_binary
2412  global svnadmin_binary
2413  global svnlook_binary
2414  global svnrdump_binary
2415  global svnsync_binary
2416  global svndumpfilter_binary
2417  global svnversion_binary
2418  global svnmover_binary
2419  global svnmucc_binary
2420  global svnauthz_binary
2421  global svnauthz_validate_binary
2422  global options
2423
2424  if test_name:
2425    sys.argv[0] = test_name
2426
2427  testnums = []
2428
2429  if not options:
2430    # Override which tests to run from the commandline
2431    (parser, args) = parse_options()
2432    tweak_options_for_precooked_repos()
2433    test_selection = args
2434  else:
2435    parser = _create_parser()
2436
2437  # parse the positional arguments (test nums, names)
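  # Accepted forms are a plain test number (e.g. '7'), a range ('3:9' or
  # '3-9'), or a test function name (e.g. 'basic_checkout'; illustrative).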
2438  for arg in test_selection:
2439    appended = False
2440    try:
2441      testnums.append(int(arg))
2442      appended = True
2443    except ValueError:
2444      # Do nothing for now.
2445      appended = False
2446
2447    if not appended:
2448      try:
2449        # Check if the argument is a range
        numberstrings = arg.split(':')
        if len(numberstrings) != 2:
          numberstrings = arg.split('-')
2453          if len(numberstrings) != 2:
2454            raise ValueError
2455        left = int(numberstrings[0])
2456        right = int(numberstrings[1])
2457        if left > right:
2458          raise ValueError
2459
        for nr in range(left, right+1):
          testnums.append(nr)
        appended = True
2464      except ValueError:
2465        appended = False
2466
2467    if not appended:
2468      try:
2469        # Check if the argument is a function name, and translate
2470        # it to a number if possible
2471        for testnum in list(range(1, len(test_list))):
2472          test_case = TestRunner(test_list[testnum], testnum)
2473          if test_case.get_function_name() == str(arg).rstrip(','):
2474            testnums.append(testnum)
2475            appended = True
2476            break
2477      except ValueError:
2478        appended = False
2479
2480    if not appended:
2481      parser.error("invalid test number, range of numbers, " +
2482                   "or function '%s'\n" % arg)
2483
2484  # Calculate pristine_greek_repos_url from test_area_url.
2485  pristine_greek_repos_url = options.test_area_url + '/' + \
2486                                svntest.wc.svn_uri_quote(
2487                                  pristine_greek_repos_dir.replace(
2488                                      os.path.sep, '/'))
2489
2490  # Calculate pristine_trojan_repos_url from test_area_url.
2491  pristine_trojan_repos_url = options.test_area_url + '/' + \
2492                                svntest.wc.svn_uri_quote(
2493                                  pristine_trojan_repos_dir.replace(
2494                                      os.path.sep, '/'))
2495
2496  other_dav_root_url = options.test_area_url + '/fsdavroot'
2497  non_dav_root_url = options.test_area_url + '/nodavroot'
2498
2499
2500  if options.use_jsvn:
2501    if options.svn_bin is None:
2502      options.svn_bin = ''
2503    svn_binary = os.path.join(options.svn_bin, 'jsvn' + _bat)
2504    svnadmin_binary = os.path.join(options.svn_bin, 'jsvnadmin' + _bat)
2505    svnlook_binary = os.path.join(options.svn_bin, 'jsvnlook' + _bat)
2506    svnsync_binary = os.path.join(options.svn_bin, 'jsvnsync' + _bat)
2507    svndumpfilter_binary = os.path.join(options.svn_bin,
2508                                        'jsvndumpfilter' + _bat)
2509    svnversion_binary = os.path.join(options.svn_bin,
2510                                     'jsvnversion' + _bat)
2511    svnmucc_binary = os.path.join(options.svn_bin, 'jsvnmucc' + _bat)
2512  else:
2513    if options.svn_bin:
2514      svn_binary = os.path.join(options.svn_bin, 'svn' + _exe)
2515      svnadmin_binary = os.path.join(options.svn_bin, 'svnadmin' + _exe)
2516      svnlook_binary = os.path.join(options.svn_bin, 'svnlook' + _exe)
2517      svnrdump_binary = os.path.join(options.svn_bin, 'svnrdump' + _exe)
2518      svnsync_binary = os.path.join(options.svn_bin, 'svnsync' + _exe)
2519      svndumpfilter_binary = os.path.join(options.svn_bin,
2520                                          'svndumpfilter' + _exe)
2521      svnversion_binary = os.path.join(options.svn_bin, 'svnversion' + _exe)
2522      svnmucc_binary = os.path.join(options.svn_bin, 'svnmucc' + _exe)
2523
2524  if options.tools_bin:
2525    svnauthz_binary = os.path.join(options.tools_bin, 'svnauthz' + _exe)
2526    svnauthz_validate_binary = os.path.join(options.tools_bin,
2527                                            'svnauthz-validate' + _exe)
2528    svnmover_binary = os.path.join(options.tools_bin, 'svnmover' + _exe)
2529
2530  ######################################################################
2531
2532  # Cleanup: if a previous run crashed or interrupted the python
2533  # interpreter, then `temp_dir' was never removed.  This can cause wonkiness.
2534  if not options.is_child_process:
2535    safe_rmtree(temp_dir, 1)
2536
2537  if not testnums:
2538    # If no test numbers were listed explicitly, include all of them:
2539    testnums = list(range(1, len(test_list)))
2540
2541  if options.list_tests:
2542
2543    # If we want to list the target milestones, then get all the issues
2544    # associated with all the individual tests.
2545    milestones_dict = None
2546    if options.milestone_filter:
2547      issues_dict = {}
2548      for testnum in testnums:
2549        issues = TestRunner(test_list[testnum], testnum).get_issues()
2550        test_mode = TestRunner(test_list[testnum], testnum).get_mode().upper()
2551        if issues:
2552          for issue in issues:
2553            if (options.mode_filter.upper() == 'ALL' or
2554                options.mode_filter.upper() == test_mode or
2555                (options.mode_filter.upper() == 'PASS' and test_mode == '')):
2556              issues_dict[issue]=issue
2557      milestones_dict = get_issue_details(issues_dict.keys())
2558
2559    header = "Test #  Mode   Test Description\n"
2560    if options.milestone_filter:
      header += "               Issue#(Target Milestone/Assigned To)\n"
2562    header += "------  -----  ----------------"
2563
2564    printed_header = False
2565    for testnum in testnums:
2566      test_mode = TestRunner(test_list[testnum], testnum).get_mode().upper()
2567      if options.mode_filter.upper() == 'ALL' \
2568         or options.mode_filter.upper() == test_mode \
2569         or (options.mode_filter.upper() == 'PASS' and test_mode == ''):
2570        if not printed_header:
2571          print(header)
2572          printed_header = True
2573        TestRunner(test_list[testnum], testnum).list(milestones_dict)
2574    # We are simply listing the tests so always exit with success.
2575    return 0
2576
2577  # don't run tests in parallel when the tests don't support it or
2578  # there are only a few tests to run.
2579  options_parallel = options.parallel
2580  if serial_only or len(testnums) < 2:
2581    options.parallel = 0
2582
2583  try:
2584    if not options.is_child_process:
2585      # Build out the default configuration directory
2586      create_config_dir(default_config_dir,
2587                        ssl_cert=options.ssl_cert,
2588                        ssl_url=options.test_area_url,
2589                        http_proxy=options.http_proxy,
2590                        exclusive_wc_locks=options.exclusive_wc_locks)
2591
2592      # Setup the pristine repositories
2593      svntest.actions.setup_pristine_repositories()
2594
2595    # Run the tests.
2596    exit_code = _internal_run_tests(test_list, testnums, options.parallel,
2597                                    options.srcdir, progress_func)
2598  finally:
2599    options.parallel = options_parallel
2600
2601  # Remove all scratchwork: the 'pristine' repository, greek tree, etc.
2602  # This ensures that an 'import' will happen the next time we run.
2603  if not options.is_child_process and not options.keep_local_tmp:
2604    try:
2605      safe_rmtree(temp_dir, 1)
2606    except:
2607      logger.error("ERROR: cleanup of '%s' directory failed." % temp_dir)
2608      exit_code = 1
2609
2610  # Cleanup after ourselves.
2611  svntest.sandbox.cleanup_deferred_test_paths()
2612
2613  # Return the appropriate exit code from the tests.
2614  return exit_code
2615