1# Copyright (c) 1996, 2020 Oracle and/or its affiliates.  All rights reserved.
2#
3# See the file LICENSE for license information.
4#
5# $Id$
6
# Bootstrap the test suite: pull in the per-build paths, load the
# Berkeley DB Tcl API, sanity-check the working directory, and set the
# global defaults consumed by the test procs below.
source ./include.tcl

# Add the default Windows build sub-directory to the path, so that
# the binaries can be found without copies.
if {[string match Win* $tcl_platform(os)]} {
	global env
	global buildpath
	set env(PATH) "$env(PATH)\;$buildpath"
}

# Load DB's TCL API.
load $tcllib

# Check for existing files that might interfere with testing.
set badfiles [glob -nocomplain DB_CONFIG __db.*]
if { [llength $badfiles] > 0 } {
	error "=====\nPlease move or delete these files from the current\
	    directory: \n$badfiles \nThey can cause test failures.\n====="
}

# Create the scratch directory used by the tests if it does not exist.
if { [file exists $testdir] != 1 } {
	file mkdir $testdir
}

# Debugging knobs consumed by run_method and friends.
global __debug_print
global __debug_on
global __debug_test

# number_of_slices is used to mark that the test environment should be
# sliced, and list how many slices it contains.
global number_of_slices
set number_of_slices 0

#
# Test if utilities work to figure out the path.  Most systems
# use ., but QNX has a problem with execvp of shell scripts which
# causes it to break.
#
set stat [catch {exec ./db_printlog -?} ret]
if { [string first "exec format error" $ret] != -1 } {
	set util_path ./.libs
} else {
	set util_path .
}
set __debug_print 0

# Encryption defaults; secure-mode tests flip encrypt on as needed.
set encrypt 0
set old_encrypt 0
set passwd test_passwd

# Error stream that (should!) always go to the console, even if we're
# redirecting to ALL.OUT.
set consoleerr stderr

# Word list and fixed strings used to generate test data.
set dict $test_path/wordlist
set alphabet "abcdefghijklmnopqrstuvwxyz"
set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"

# Random number seed.
global rand_init
set rand_init 11302005

# Default record length for fixed record length access method(s)
set fixed_len 20

# Recovery-test debug flag, log-record-type tracking, and the list of
# open handles tracked by check_handles.
set recd_debug	0
set log_log_record_types 0
set ohandles {}

# Some hosts are old and slow and need a little extra time
# for various procs, for example the await_condition utilities.
# List them here.
global slow_hosts
set slow_hosts [list scl58090]

# Normally, we're not running an all-tests-in-one-env run.  This matters
# for error stream/error prefix settings in berkdb_open.
global is_envmethod
set is_envmethod 0

#
# Set when we're running a child process in a rep test.
#
global is_repchild
set is_repchild 0

# Set when we want to use replication test messaging that cannot
# share an env -- for example, because the replication processes
# are not all from the same BDB version.
global noenv_messaging
set noenv_messaging 0

# For testing locker id wrap around.
global lock_curid
global lock_maxid
set lock_curid 0
set lock_maxid 2147483647
global txn_curid
global txn_maxid
set txn_curid 2147483648
set txn_maxid 4294967295

# The variable one_test allows us to run all the permutations
# of a test with run_all or run_std.
global one_test
if { [info exists one_test] != 1 } {
	set one_test "ALL"
}
114
# If you call a test with the proc find_valid_methods, it will
# return the list of methods for which it will run, instead of
# actually running.
# Btree and recno are always built, but hash, heap, and queue
# can be disabled, so verify that they are there before adding
# them to the list.
source $test_path/testutils.tcl
global checking_valid_methods
set checking_valid_methods 0
global valid_methods
set valid_methods { btree rbtree recno frecno rrecno }

# Ask the library which optional access methods were compiled in and
# extend valid_methods accordingly.  (A second, redundant source of
# testutils.tcl that used to sit here was removed.)
set conf [berkdb getconfig]
if { [is_substr $conf "queue"] } {
	lappend valid_methods "queue"
	lappend valid_methods "queueext"
}
if { [is_substr $conf "hash"] } {
	lappend valid_methods "hash"
}
if { [is_substr $conf "heap"] } {
	lappend valid_methods "heap"
}
139
# Here we track the official latest release for each major.minor
# version.  This is the version you will find if you go to the
# Oracle download site looking for a historical release.
global valid_releases
array set valid_releases [list 44 "db-4.4.20" 45 "db-4.5.20" 46 "db-4.6.21" \
    47 "db-4.7.25" 48 "db-4.8.30" 50 "db-5.0.32" 51 "db-5.1.29" \
    52 "db-5.2.42" 53 "db-5.3.28" 60 "db-6.0.30" 61 "db-6.1.36" \
    62 "db-6.2.32" 181 "db-18.1.25" ]

# The variable test_recopts controls whether we open envs in
# replication tests with the -recover flag.   The default is
# to test with and without the flag, but to run a meaningful
# subset of rep tests more quickly, rep_subset will randomly
# pick one or the other.
global test_recopts
set test_recopts { "-recover" "" }

# Set up any OS-specific values.  These is_*_test flags let individual
# tests skip or adjust behavior per platform.
global tcl_platform
set is_aix_test [string match AIX $tcl_platform(os)]
set is_freebsd_test [string match FreeBSD $tcl_platform(os)]
set is_hp_test [string match HP-UX $tcl_platform(os)]
set is_linux_test [string match Linux $tcl_platform(os)]
set is_osx_test [string match Darwin $tcl_platform(os)]
set is_qnx_test [string match QNX $tcl_platform(os)]
set is_sunos_test [string match SunOS $tcl_platform(os)]
set is_windows_test [string match Win* $tcl_platform(os)]
set is_windows9x_test [string match "Windows 95" $tcl_platform(osVersion)]
set is_je_test 0
set upgrade_be [big_endian]
global is_fat32
set is_fat32 [string match FAT32 [lindex [file system check] 1]]
# Executable/batch-file suffixes differ on Windows.
global EXE BAT
if { $is_windows_test == 1 } {
	set EXE ".exe"
	set BAT ".bat"
} else {
	set EXE ""
	set BAT ""
}

# On Windows the utilities live in the build sub-directory.
if { $is_windows_test == 1 } {
	set util_path "./$buildpath"
}

# This is where the test numbering and parameters now live.
source $test_path/testparams.tcl
source $test_path/db_reptest.tcl

# Try to open an encrypted database.  If it fails, this release
# doesn't support encryption, and encryption tests should be skipped.
set has_crypto 1
set stat [catch {set db [eval {berkdb_open_noerr \
    -create -btree -encryptaes test_passwd} ] } result ]
if { $stat != 0 } {
	# Make sure it's the right error for a non-crypto release.
	error_check_good non_crypto_release \
	    [expr [is_substr $result "operation not supported"] || \
	    [is_substr $result "did not include support for cryptography"] || \
	    [is_substr $result "invalid argument"]] 1
	set has_crypto 0
} else {
	# It is a crypto release.  Get rid of the db, we don't need it.
	error_check_good close_encrypted_db [$db close] 0
}

# Get the default page size of this system
global default_pagesize
set db [berkdb_open_noerr -create -btree]
error_check_good "db open" [is_valid_db $db] TRUE
set stat [catch {set default_pagesize [$db get_pagesize]} result]
error_check_good "db get_pagesize" $stat 0
error_check_good "db close" [$db close] 0
213
214# From here on out, test.tcl contains the procs that are used to
215# run all or part of the test suite.
216
# run_std --
#	Run the standard test suite (subsystem tests, recovery tests,
#	join tests, and access method tests), logging everything to
#	ALL.OUT and reporting overall pass/fail.  testname may be a
#	single test name or ALL.  Flags extracted from args:
#	    A -- called from run_all; skip run_std-only bookkeeping.
#	    M -- run everything except the access method tests.
#	    m -- run only the access method tests.
#	    n -- display the commands that would run, but don't run them.
proc run_std { { testname ALL } args } {
	global test_names
	global one_test
	global has_crypto
	global valid_methods
	source ./include.tcl

	set one_test $testname
	if { $one_test != "ALL" } {
		# Source testparams again to adjust test_names.
		source $test_path/testparams.tcl
	}

	# Split args into plain arguments and single-letter flags.
	set exflgs [eval extractflags $args]
	set args [lindex $exflgs 0]
	set flags [lindex $exflgs 1]

	set display 1
	set run 1
	set am_only 0
	set no_am 0
	set std_only 1
	set rflags {--}
	foreach f $flags {
		switch $f {
			A {
				set std_only 0
			}
			M {
				set no_am 1
				puts "run_std: all but access method tests."
			}
			m {
				set am_only 1
				puts "run_std: access method tests only."
			}
			n {
				set display 1
				set run 0
				set rflags [linsert $rflags 0 "-n"]
			}
		}
	}

	# Standalone run: start a fresh ALL.OUT with a timestamped banner.
	# (Under run_all, the caller manages the log.)
	if { $std_only == 1 } {
		fileremove -f ALL.OUT

		set o [open ALL.OUT a]
		if { $run == 1 } {
			puts -nonewline "Test suite run started at: "
			puts [clock format [clock seconds] -format "%H:%M %D"]
			puts [berkdb version -string]

			puts -nonewline $o "Test suite run started at: "
			puts $o [clock format [clock seconds] -format "%H:%M %D"]
			puts $o [berkdb version -string]
		}
		close $o
	}

	# Pairs of {human-readable label, "r" subcommand} for the
	# non-access-method test groups.
	set test_list {
	{"environment"		"env"}
	{"archive"		"archive"}
	{"backup"		"backup"}
	{"file operations"	"fop"}
	{"locking"		"lock"}
	{"logging"		"log"}
	{"memory pool"		"memp"}
	{"multiversion"		"multiversion"}
	{"mutex"		"mutex"}
	{"transaction"		"txn"}
	{"deadlock detection"	"dead"}
	{"subdatabase"		"sdb"}
	{"byte-order"		"byte"}
	{"recno backing file"	"rsrc"}
	{"DBM interface"	"dbm"}
	{"NDBM interface"	"ndbm"}
	{"Hsearch interface"	"hsearch"}
	{"secondary index"	"sindex"}
	{"partition"		"partition"}
	{"compression"		"compressed"}
	{"automated repmgr tests" 	"repmgr_auto"}
	{"repmgr multi-process"	"repmgr_multiproc"}
	{"other repmgr tests" 	"repmgr_other"}
	{"expected failures"	"fail"}
	}

	# If this is run_std only, run each rep test for a single
	# access method.  If run_all, run for all access methods.
	if { $std_only == 1 } {
		lappend test_list {"replication"	"rep_subset"}
	} else {
		lappend test_list {"replication"	"rep_complete"}
	}

	# If release supports encryption, run security tests.
	if { $has_crypto == 1 } {
	        lappend test_list {"security"   "sec"}
	}

	# If slices are enabled, run slice tests.
    	if { [berkdb slice_enabled ] } {
	    	lappend test_list {"slices"	"slices_complete"}
	}

	if { $am_only == 0 } {
		# Each group runs in its own tclsh so memory use stays
		# bounded; failures are recorded in ALL.OUT, not fatal.
		foreach pair $test_list {
			set msg [lindex $pair 0]
			set cmd [lindex $pair 1]
			puts "Running $msg tests"
			if [catch {exec $tclsh_path << \
			    "global one_test; set one_test $one_test; \
			    source $test_path/test.tcl; r $rflags $cmd" \
			    >>& ALL.OUT } res] {
				set o [open ALL.OUT a]
				puts $o "FAIL: $cmd test: $res"
				close $o
			}
		}

		# Run recovery tests.
		#
		# XXX These too are broken into separate tclsh instantiations
		# so we don't require so much memory, but I think it's cleaner
		# and more useful to do it down inside proc r than here,
		# since "r recd" gets done a lot and needs to work.
		#
		# Note that we still wrap the test in an exec so that
		# its output goes to ALL.OUT.  run_recd will wrap each test
		# so that both error streams go to stdout (which here goes
		# to ALL.OUT);  information that run_recd wishes to print
		# to the "real" stderr, but outside the wrapping for each test,
		# such as which tests are being skipped, it can still send to
		# stderr.
		puts "Running recovery tests"
		if [catch {
		    exec $tclsh_path << \
		    "global one_test; set one_test $one_test; \
		    source $test_path/test.tcl; r $rflags recd" \
			2>@ stderr >> ALL.OUT
		    } res] {
			set o [open ALL.OUT a]
			puts $o "FAIL: recd tests: $res"
			close $o
		}

		# Run join test
		#
		# XXX
		# Broken up into separate tclsh instantiations so we don't
		# require so much memory.
		if { $one_test == "ALL" } {
			puts "Running join test"
			foreach test "join1 join2 join3 join4 join5 join6" {
				if [catch {exec $tclsh_path << \
				    "source $test_path/test.tcl; r $rflags $test" \
				    >>& ALL.OUT } res] {
					set o [open ALL.OUT a]
					puts $o "FAIL: $test test: $res"
					close $o
				}
			}
		}
	}

	if { $no_am == 0 } {
		# Access method tests.
		#
		# XXX
		# Broken up into separate tclsh instantiations so we don't
		# require so much memory.
		foreach method $valid_methods {
			puts "Running $method tests"
			foreach test $test_names(test) {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					run_method \
					    -$method $test $display $run $o
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    run_method \
					    -$method $test $display $run"\
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL:$test $method: $res"
						close $o
					}
				}
			}
		}
	}

	# If not actually running, no need to check for failure.
	# If running in the context of the larger 'run_all' we don't
	# check for failure here either.
	if { $run == 0 || $std_only == 0 } {
		return
	}

	# Scan the log for unexpected output and report the verdict both
	# to the console and to ALL.OUT.
	set failed [check_output ALL.OUT]

	set o [open ALL.OUT a]
	if { $failed == 0 } {
		puts "Regression Tests Succeeded"
		puts $o "Regression Tests Succeeded"
	} else {
		puts "Regression Tests Failed"
		puts "Check UNEXPECTED OUTPUT lines."
		puts "Review ALL.OUT.x for details."
		puts $o "Regression Tests Failed"
	}

	puts -nonewline "Test suite run completed at: "
	puts [clock format [clock seconds] -format "%H:%M %D"]
	puts -nonewline $o "Test suite run completed at: "
	puts $o [clock format [clock seconds] -format "%H:%M %D"]
	close $o
}
440
# run_ssl --
#	Run the repmgr test groups with SSL testing enabled, logging to
#	ALL.OUT and reporting overall pass/fail.  Mirrors run_std's flag
#	handling; testname may be a single test name or ALL.  Flags
#	extracted from args:
#	    A -- called from a larger run; skip standalone bookkeeping.
#	    M -- all but access method tests (no-op here; kept for
#		 symmetry with run_std).
#	    m -- access method tests only (likewise a no-op here).
#	    n -- display the commands that would run, but don't run them.
proc run_ssl { { testname ALL } args } {
	global test_names
	global one_test
	global has_crypto
	global valid_methods
	source ./include.tcl

	set one_test $testname
	if { $one_test != "ALL" } {
		# Source testparams again to adjust test_names.
		source $test_path/testparams.tcl
	}

	# Split args into plain arguments and single-letter flags.
	set exflgs [eval extractflags $args]
	set args [lindex $exflgs 0]
	set flags [lindex $exflgs 1]

	set display 1
	set run 1
	set am_only 0
	set no_am 0
	set std_only 1
	set rflags {--}
	foreach f $flags {
		switch $f {
			A {
				set std_only 0
			}
			M {
				set no_am 1
				# Fixed: these messages said "run_std:",
				# a copy-paste from run_std.
				puts "run_ssl: all but access method tests."
			}
			m {
				set am_only 1
				puts "run_ssl: access method tests only."
			}
			n {
				set display 1
				set run 0
				set rflags [linsert $rflags 0 "-n"]
			}
		}
	}

	# Standalone run: start a fresh ALL.OUT with a timestamped banner.
	if { $std_only == 1 } {
		fileremove -f ALL.OUT

		set o [open ALL.OUT a]
		if { $run == 1 } {
			puts -nonewline "Test suite run started at: "
			puts [clock format [clock seconds] -format "%H:%M %D"]
			puts [berkdb version -string]

			puts -nonewline $o "Test suite run started at: "
			puts $o [clock format [clock seconds] -format "%H:%M %D"]
			puts $o [berkdb version -string]
		}
		close $o
	}

	# Only the repmgr groups are run under SSL.
	set test_list {
	{"automated repmgr tests" 	"repmgr_auto"}
	{"repmgr multi-process"	"repmgr_multiproc"}
	{"other repmgr tests" 	"repmgr_other"}
	}

	if { $am_only == 0 } {
		# Each group runs in its own tclsh with ssl_test_enabled
		# set; failures are recorded in ALL.OUT, not fatal.
		foreach pair $test_list {
			set msg [lindex $pair 0]
			set cmd [lindex $pair 1]
			puts "Running $msg tests"
			if [catch {exec $tclsh_path << \
			    "global one_test; set one_test $one_test; \
			    global ssl_test_enabled; \
			    set ssl_test_enabled 1; \
			    source $test_path/test.tcl; \
			    r $rflags $cmd" \
			    >>& ALL.OUT } res] {
				set o [open ALL.OUT a]
				puts $o "FAIL: $cmd test: $res"
				close $o
			}
		}
	}

	# If not actually running, no need to check for failure.
	# If running in the context of the larger 'run_all' we don't
	# check for failure here either.
	if { $run == 0 || $std_only == 0 } {
		return
	}

	# Scan the log for unexpected output and report the verdict.
	set failed [check_output ALL.OUT]

	set o [open ALL.OUT a]
	if { $failed == 0 } {
		puts "Regression Tests Succeeded"
		puts $o "Regression Tests Succeeded"
	} else {
		puts "Regression Tests Failed"
		puts "Check UNEXPECTED OUTPUT lines."
		puts "Review ALL.OUT.x for details."
		puts $o "Regression Tests Failed"
	}

	puts -nonewline "Test suite run completed at: "
	puts [clock format [clock seconds] -format "%H:%M %D"]
	puts -nonewline $o "Test suite run completed at: "
	puts $o [clock format [clock seconds] -format "%H:%M %D"]
	close $o
}
552
# check_output --
#	Scan a test-run log ($file) line by line against a whitelist of
#	acceptable output patterns.  Any line that matches none of the
#	patterns is echoed with an "UNEXPECTED OUTPUT:" prefix.  Returns
#	1 if any unexpected line was seen, 0 otherwise.
proc check_output { file } {
	# These are all the acceptable patterns.  The (?x) flag enables
	# expanded regexp syntax, so literal whitespace in the pattern is
	# ignored (\s, \t, etc. are used for real whitespace matches).
	set pattern {(?x)
		^[:space:]*$|
		.*?wrap\.tcl.*|
		.*?dbscript\.tcl.*|
		.*?ddscript\.tcl.*|
		.*?db_replicate.*|
		.*?Freeing\slog\sinformation\s.*|
		.*?Freeing\smutex\s.*|
		.*?Freeing\sread\slocks\s.*|
		.*?lt-db_replicate.*|
		.*?mpoolscript\.tcl.*|
		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)$|
		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\sCrashing$|
		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s[p|P]rocesses\srunning:.*|
		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s5\sprocesses\srunning.*|
		^\d:\sPut\s\d*\sstrings\srandom\soffsets.*|
		^100.*|
		^basic_repmgr_.*\swith:|
		^eval\s.*|
		^exec\s.*|
		^jointest.*$|
		^r\sarchive\s*|
		^r\sbackup\s*|
		^r\sdbm\s*|
		^r\shsearch\s*|
		^r\sndbm\s*|
		^run_ipv4\s.*|
		^run_recd:\s.*|
		^run_reptest\s.*|
		^run_secenv:\s.*|
		^All\sprocesses\shave\sexited.$|
		^Backuptest\s.*|
		^Beginning\scycle\s\d$|
		^Berkeley\sDB\s.*|
		^Byteorder:.*|
		^Child\sruns\scomplete\.\s\sParent\smodifies\sdata\.$|
		^Deadlock\sdetector:\s\d*\sCheckpoint\sdaemon\s\d*$|
		^Ending\srecord.*|
		^Environment\s.*?specified;\s\sskipping\.$|
		^Executing\srecord\s.*|
		^Join\stest:\.*|
		^Method:\s.*|
		^Putting\s.*databases.*|
		^Regression\sTests\sSucceeded.*|
		^Repl:\stest\d\d\d:.*|
		^Repl:\ssdb\d\d\d:.*|
		^Running\stest.*|
		^Running\sall\scases\sof\s.*|
		^run_inmem_db\s.*rep.*|
		^run_inmem_log\s.*rep.*|
		^run_mixedmode_log\s.*rep.*|
		^run_in_sliced_env .*|
		^run_with_slices .*|
		^Script\swatcher\sprocess\s.*|
		^Secondary\sindex\sjoin\s.*|
		^SSL\stesting\s.*|
		^Test\ssuite\srun\s.*|
                ^Test\s.*rep.*|
		^To\sreproduce\sthis\scase:.*|
		^Turning\sSSL\stesting\sON*|
		^Unlinking\slog:\serror\smessage\sOK$|
		^Verifying\s.*|
		^\t*\.\.\.dbc->get.*$|
		^\t*\.\.\.dbc->put.*$|
		^\t*\.\.\.key\s\d.*$|
		^\t*\.\.\.Skipping\sdbc.*|
		^\t*and\s\d*\sduplicate\sduplicates\.$|
		^\t*About\sto\srun\srecovery\s.*complete$|
		^\t*Add\sa\sthird\sversion\s.*|
		^\t*Archive[:\.].*|
		^\t*Backuptest.*|
		^\t*Basic\srepmgr\s.*test.*:.*|
		^\t*Bigfile[0-9][0-9][0-9].*|
		^\t*Building\s.*|
		^\t*bulk\sprocessing.*|
		^\t*closing\ssecondaries\.$|
		^\t*Command\sexecuted\sand\s.*$|
		^\t*DBM.*|
		^\t*[d|D]ead[0-9][0-9][0-9].*|
		^\t*Dump\/load\sof.*|
		^\t*[e|E]nv[0-9][0-9][0-9].*|
		^\t*Executing\scommand$|
		^\t*Executing\stxn_.*|
		^\t*[F|f]ail[0-9][0-9][0-9].*|
		^\t*File\srecd005\.\d\.db\sexecuted\sand\saborted\.$|
		^\t*File\srecd005\.\d\.db\sexecuted\sand\scommitted\.$|
		^\t*[f|F]op[0-9][0-9][0-9].*|
		^\t*HSEARCH.*|
		^\t*in-memory\s.*|
		^\t*Initial\sCheckpoint$|
		^\t*Iteration\s\d*:\sCheckpointing\.$|
		^\t*Joining:\s.*|
		^\t*Kid[1|2]\sabort\.\.\.complete$|
		^\t*Kid[1|2]\scommit\.\.\.complete$|
		^\t*[l|L]ock[0-9][0-9][0-9].*|
		^\t*[l|L]og[0-9][0-9][0-9].*|
		^\t*[m|M]emp[0-9][0-9][0-9].*|
		^\t*[m|M]ut[0-9][0-9][0-9].*|
		^\t*NDBM.*|
		^\t*no\speering|
		^\t*on-disk\s.*|
		^\t*opening\ssecondaries\.$|
		^\t*op_recover_rec:\sRunning\srecovery.*|
		^\t*peering|
		^\t*[r|R]ecd[0-9][0-9][0-9].*|
		^\t*[r|R]ep[0-9][0-9][0-9].*|
		^\t*[r|R]epmgr[0-9][0-9][0-9].*|
		^\t*[r|R]ep_push.*|
		^\t*[r|R]ep_test.*|
		^\t*[r|R]pc[0-9][0-9][0-9].*|
		^\t*[r|R]src[0-9][0-9][0-9].*|
		^\t*Recover\sfrom\sfirst\sdatabase$|
		^\t*Recover\sfrom\ssecond\sdatabase$|
		^\t*regular\sprocessing.*|
		^\t*Remove\ssecond\sdb$|
		^\t*Rep_verify.*|
		^\t*Running\srecovery\son\s.*|
		^\t*[s|S]ec[0-9][0-9][0-9].*|
		^\t*[s|S]i[0-9][0-9][0-9].*|
		^\t*[s|S]ijoin.*|
		^\t*Salvage\stests\sof.*|
		^\t*sdb[0-9][0-9][0-9].*|
		^\t*Skipping\s.*|
		^\t*[s|S]lice[0-9][0-9][0-9].*|
		^\t*Subdb[0-9][0-9][0-9].*|
		^\t*Subdbtest[0-9][0-9][0-9].*|
		^\t*Syncing$|
		^\t*[t|T]est[0-9][0-9][0-9].*|
		^\t*[t|T]xn[0-9][0-9][0-9].*|
		^\t*Txnscript.*|
		^\t*Using\s.*option.*$|
		^\t*Using\s.*?\senvironment\.$|
		^\t*Verification\sof.*|
		^\t*with\stransactions$}

	# Report every non-matching line; one bad line fails the run.
	set failed 0
	set f [open $file r]
	while { [gets $f line] >= 0 } {
		if { [regexp $pattern $line] == 0 } {
			puts -nonewline "UNEXPECTED OUTPUT: "
			puts $line
			set failed 1
		}
	}
	close $f
	return $failed
}
702
# r --
#	Dispatch a test group by name: "r ?-n? group ?starttest? ...".
#	The -n flag prints the commands that would run without running
#	them.  Unknown group names raise an error; any error raised by a
#	test is re-thrown with a FAIL: prefix and timestamp.
#
#	Fix: the explanatory comment for rep_subset used to sit between
#	the switch's pattern/body pairs.  Tcl does not recognize comments
#	there -- each word of the comment is silently parsed as a bogus
#	pattern or body -- so the comment was moved inside the body.
proc r { args } {
	global test_names
	global has_crypto
	global rand_init
	global one_test
	global test_recopts
	global checking_valid_methods
	global run_in_sliced_env_tests
	global run_with_slices_tests

	source ./include.tcl

	# Split args into plain arguments and single-letter flags.
	set exflgs [eval extractflags $args]
	set args [lindex $exflgs 0]
	set flags [lindex $exflgs 1]

	set display 1
	set run 1
	set saveflags "--"
	foreach f $flags {
		switch $f {
			n {
				set display 1
				set run 0
				set saveflags "-n $saveflags"
			}
		}
	}

	if {[catch {
		set sub [ lindex $args 0 ]
		set starttest [lindex $args 1]
		# NOTE: no comments may appear between the pattern/body
		# pairs of the switch below; Tcl would treat their words
		# as patterns.  Commentary belongs inside the bodies.
		switch $sub {
			bigfile -
			dead -
			env -
			fail -
			lock -
			log -
			memp -
			mutex -
			repmgr_auto -
			repmgr_multiproc -
			repmgr_other -
			rsrc -
			sdbtest -
			slice -
			txn {
				if { $display } {
					run_subsystem $sub 1 0 $starttest
				}
				if { $run } {
					run_subsystem $sub 0 1 $starttest
				}
			}
			backup {
				if { $one_test == "ALL" } {
					run_test backup $display $run
				}
			}
			byte {
				if { $one_test == "ALL" } {
					run_test byteorder $display $run
				}
			}
			archive -
			dbm -
			hsearch -
			ndbm -
			run_ipv4_tests -
			shelltest {
				if { $one_test == "ALL" } {
					if { $display } { puts "eval $sub" }
					if { $run } {
						check_handles
						eval $sub
					}
				}
			}
			compact -
			fop -
			inmemdb -
			rep_elect -
			rep_init {
				set tindx [lsearch $test_names($sub) $starttest]
				if { $tindx == -1 } {
					set tindx 0
				}
				set rlist [lrange $test_names($sub) $tindx end]
				foreach test $rlist {
					eval run_test $test $display $run
				}
			}
			compressed {
				set tindex [lsearch $test_names(test) $starttest]
				if { $tindex == -1 } {
					set tindex 0
				}
				set clist [lrange $test_names(test) $tindex end]
				set clist [concat $clist $test_names(sdb)]
				foreach test $clist {
					eval run_compressed\
					     btree $test $display $run
				}
			}
			failchk {
				env012
				env030
				repmgr150
			}
			join {
				eval r $saveflags join1
				eval r $saveflags join2
				eval r $saveflags join3
				eval r $saveflags join4
				eval r $saveflags join5
				eval r $saveflags join6
			}
			join1 {
				if { $display } { puts "eval jointest" }
				if { $run } {
					check_handles
					eval jointest
				}
			}
			joinbench {
				puts "[timestamp]"
				eval r $saveflags join1
				eval r $saveflags join2
				puts "[timestamp]"
			}
			join2 {
				if { $display } { puts "eval jointest 512" }
				if { $run } {
					check_handles
					eval jointest 512
				}
			}
			join3 {
				if { $display } {
					puts "eval jointest 8192 0 -join_item"
				}
				if { $run } {
					check_handles
					eval jointest 8192 0 -join_item
				}
			}
			join4 {
				if { $display } { puts "eval jointest 8192 2" }
				if { $run } {
					check_handles
					eval jointest 8192 2
				}
			}
			join5 {
				if { $display } { puts "eval jointest 8192 3" }
				if { $run } {
					check_handles
					eval jointest 8192 3
				}
			}
			join6 {
				if { $display } { puts "eval jointest 512 3" }
				if { $run } {
					check_handles
					eval jointest 512 3
				}
			}
			multiversion {
				if { $one_test == "ALL" } {
					if { $display } {
						puts "eval rep065 -btree"
						puts "eval repmgr035"
					}
					if { $run } {
						eval rep065 -btree
						eval repmgr035
					}
				}
			}
			partition {
				foreach method { btree hash } {
					foreach test "$test_names(recd)\
					    $test_names(test)" {
						run_range_partition\
						    $test $method $display $run
						run_partition_callback\
						    $test $method $display $run
					}
				}
			}
			recd {
				check_handles
				eval {run_recds all $run $display} [lrange $args 1 end]
			}
			rep {
				run_rep_subset rep $starttest $testdir \
				    $display $run $args
			}
			repmgr {
				r repmgr_other
				foreach test $test_names(repmgr_basic) {
					$test 100 1 1 1 1 1
					$test 100 1 0 0 0 0
					$test 100 0 1 0 0 0
					$test 100 0 0 1 0 0
					$test 100 0 0 0 1 0
					$test 100 0 0 0 0 1
					$test 100 0 0 0 0 0
				}
			}
			rep_commit {
				run_rep_subset rep_commit $starttest $testdir \
				    $display $run $args
				r repmgr
			}
			rep_subset {
				# To run a subset of the complete rep tests,
				# use rep_subset, which randomly picks an
				# access type to use, and randomly picks
				# whether to open envs with the -recover
				# flag.
				run_rep_subset rep $starttest $testdir \
				    $display $run $args
			}
			rep_complete {
				set tindex [lsearch $test_names(rep) $starttest]
				if { $tindex == -1 } {
					set tindex 0
				}
				set rlist [lrange $test_names(rep) $tindex end]
				foreach test $rlist {
					run_test $test $display $run
				}
				if { $one_test == "ALL" } {
					if { $display } {
						#puts "basic_db_reptest"
						#puts "basic_db_reptest 1"
					}
					if { $run } {
						#basic_db_reptest
						#basic_db_reptest 1
					}
				}
			}
			replicate {
				# We seed the random number generator here
				# instead of in run_replicate so that we
				# aren't always reusing the first few
				# responses from random_int.
				#
				berkdb srand $rand_init
				foreach sub { test sdb } {
					foreach test $test_names($sub) {
						eval run_test run_replicate \
						    $display $run $test
					}
				}
			}
			repmethod {
				# We seed the random number generator here
				# instead of in run_repmethod so that we
				# aren't always reusing the first few
				# responses from random_int.
				#
				berkdb srand $rand_init
				foreach sub { test sdb } {
					foreach test $test_names($sub) {
						eval run_test run_repmethod \
						    $display $run $test
					}
				}
			}
			sec {
				# Skip secure mode tests if release
				# does not support encryption.
				if { $has_crypto == 0 } {
					return
				}
				if { $display } {
					run_subsystem $sub 1 0
				}
				if { $run } {
					run_subsystem $sub 0 1
				}
			}
			secmethod {
				# Skip secure mode tests if release
				# does not support encryption.
				if { $has_crypto == 0 } {
					return
				}
				foreach test $test_names(test) {
					eval run_test run_secmethod \
					    $display $run $test
					eval run_test run_secenv \
					    $display $run $test
				}
			}
			sdb {
				if { $one_test == "ALL" } {
					if { $display } {
						run_subsystem sdbtest 1 0
					}
					if { $run } {
						run_subsystem sdbtest 0 1
					}
				}
				foreach test $test_names(sdb) {
					eval run_test $test $display $run
				}
			}
			sindex {
				if { $one_test == "ALL" } {
					if { $display } {
						sindex 1 0
						sijoin 1 0
					}
					if { $run } {
						sindex 0 1
						sijoin 0 1
					}
				}
			}
			slices_complete {
				# Skip sliced tests if slices
				# are not enabled.
				if { ![berkdb slice_enabled ] } {
					return
				}
				if { $one_test == "ALL" } {
					if { $display } {
						run_subsystem slice 1 0
					}
					if { $run } {
						run_subsystem slice 0 1
					}
					foreach method {btree hash} {
						foreach test\
						    $run_in_sliced_env_tests {
							run_in_sliced_env\
							    -$method $test\
							    $display $run
						}
					}
					foreach test $run_with_slices_tests {
						run_with_slices \
						    $test $display $run
					}
				}
			}
			btree -
			rbtree -
			hash -
			iqueue -
			iqueueext -
			queue -
			queueext -
			recno -
			frecno -
			heap -
			rrecno {
				foreach test $test_names(test) {
					eval run_method [lindex $args 0] $test \
					    $display $run stdout [lrange $args 1 end]
				}
			}
			ipv4 {
				if { $one_test == "ALL" } {
					if { $display } {
						run_ipv4_tests 1 0
					}
					if { $run } {
						run_ipv4_tests 0 1
					}
				}
			}

			default {
				error \
				    "FAIL:[timestamp] r: $args: unknown command"
			}
		}
		flush stdout
		flush stderr
	} res] != 0} {
		global errorInfo;
		# Re-throw with a FAIL prefix unless one is already present.
		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp] r: $args: $theError"
		} else {
			error $theError;
		}
	}
}
1098
# run_rep_subset --
#	Run the replication tests in test_names($sub), starting at
#	$starttest (or the beginning if not found), choosing a random
#	access method for each test and randomly deciding whether envs
#	open with -recover.  Each test's directory is verified and
#	salvaged afterward.
#
#	Fix: declare test_recopts as a global.  The proc assigns
#	test_recopts to steer the -recover behavior of the rep tests
#	(which read the global; see the test_recopts comment at the top
#	of this file) and restores the default when done, but without
#	the global declaration those writes only created a proc-local
#	variable and never reached the tests.
proc run_rep_subset { sub starttest testdir display run args } {
	global one_test
	global rand_init
	global test_names
	global test_recopts

	# Partition-callback tests cannot be dumped during verification.
	if  { [is_partition_callback $args] == 1 } {
		set nodump 1
	} else {
		set nodump 0
	}
	berkdb srand $rand_init
	set tindex [lsearch $test_names($sub) $starttest]
	if { $tindex == -1 } {
		set tindex 0
	}
	set rlist [lrange $test_names($sub) $tindex end]
	foreach test $rlist {
		# Coin flip: open envs with -recover for this test or not.
		set random_recopt [berkdb random_int 0 1]
		if { $random_recopt == 1 } {
			set test_recopts "-recover"
		} else {
			set test_recopts {""}
		}

		# Pick one access method at random from those the test
		# supports.
		set method_list [find_valid_methods $test]
		set list_length [expr [llength $method_list] - 1]
		set method_index [berkdb random_int 0 $list_length]
		set rand_method [lindex $method_list $method_index]

		if { $display } {
			puts "eval $test $rand_method; verify_dir \
			    $testdir \"\" 1 0 $nodump; salvage_dir $testdir 1"
		}
		if { $run } {
	 		check_handles
			eval $test $rand_method
			verify_dir $testdir "" 1 0 $nodump
			salvage_dir $testdir 1
		}
	}
	if { $one_test == "ALL" } {
		if { $display } {
			#puts "basic_db_reptest"
			#puts "basic_db_reptest 1"
		}
		if { $run } {
			#basic_db_reptest
			#basic_db_reptest 1
		}
	}
	# Restore the default: test both with and without -recover.
	set test_recopts { "-recover" "" }
}
1151
# run_subsystem --
#	Run every test listed for subsystem $sub in testparams.tcl,
#	optionally starting partway through at $starttest.  With display
#	set, the command for each test is printed; with run set, each
#	test is executed (after checking for leaked handles), and any
#	test error is re-thrown with a FAIL prefix.
proc run_subsystem { sub {display 0} {run 1} {starttest "NULL"} } {
	global test_names
	global databases_in_memory

	if { ![info exists test_names($sub)] } {
		puts stderr "Subsystem $sub has no tests specified in testparams.tcl; skipping."
		return
	}

	# Begin at $starttest when it appears in the list; otherwise run
	# the whole list.
	set first [lsearch $test_names($sub) $starttest]
	if { $first < 0 } {
		set first 0
	}

	foreach t [lrange $test_names($sub) $first end] {
		if { $display } {
			puts "eval $t"
		}
		if { !$run } {
			continue
		}
		check_handles
		if { [catch {eval $t} err] } {
			# Reset the in-memory flag so later tests start
			# from a clean default before failing out.
			set databases_in_memory 0
			error "FAIL: run_subsystem: $sub $t: $err"
		}
	}
}
1182
# Run $test once for every valid access method, verifying and salvaging
# the test directory after each run.
proc run_test { test {display 0} {run 1} args } {
	source ./include.tcl
	global valid_methods

	# Whether verify_dir may dump the databases depends only on the
	# partitioning args, so compute it once rather than once per
	# method inside the loop.
	if  { [is_partition_callback $args] == 1 } {
		set nodump 1
	} else {
		set nodump 0
	}

	foreach method $valid_methods {
		if { $display } {
			puts "eval $test -$method $args; \
			    verify_dir $testdir \"\" 1; \
			    salvage_dir $testdir 1"
		}
		if { $run } {
	 		check_handles
			eval {$test -$method} $args
			verify_dir $testdir "" 1 0 $nodump
			salvage_dir $testdir 1
		}
	}
}
1206
# Run a single test for a single access method.  With $display, print
# the command to $outfile; with $run, execute it with the standard
# parameters from $parms plus any extra $args, then verify and salvage
# the test directory.  On failure, re-raise the first line of
# errorInfo, adding a FAIL prefix if one isn't already present.
proc run_method { method test {display 0} {run 1} \
    { outfile stdout } args } {
	global __debug_on
	global __debug_print
	global __debug_test
	global test_names
	global parms
	source ./include.tcl

	# Partition-callback databases cannot be dumped by verify_dir.
	if  { [is_partition_callback $args] == 1 } {
		set nodump  1
	} else {
		set nodump  0
	}

	if {[catch {
		if { $display } {
			puts -nonewline $outfile "eval \{ $test \} $method"
			puts -nonewline $outfile " $parms($test) { $args }"
			puts -nonewline $outfile " ; verify_dir $testdir \"\" 1 0 $nodump"
			puts $outfile " ; salvage_dir $testdir 1"
		}
		if { $run } {
			check_handles $outfile
			puts $outfile "[timestamp]"
			eval {$test} $method $parms($test) $args
			if { $__debug_print != 0 } {
				puts $outfile ""
			}
			# Verify all databases the test leaves behind
			verify_dir $testdir "" 1 0 $nodump
			if { $__debug_on != 0 } {
				debug $__debug_test
			}
			salvage_dir $testdir 1
		}
		flush stdout
		flush stderr
	} res] != 0} {
		global errorInfo;

		# Keep only the first line of the stack trace in the
		# re-raised message.
		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_method: $method $test: $theError"
		} else {
			error $theError;
		}
	}
}
1258
# Run a testNNN or recdNNN test with range partitioning.
proc run_range_partition { test method {display 0} {run 1}\
    {outfile stdout} args } {

	# Range partitioning is supported only by btree.
	if { [is_btree $method] == 0 } {
		if { $display == 0 } {
			puts "Skipping range partition\
			    tests for method $method"
		}
		return
	}

	# Use any explicit partitioning args handed to us; otherwise
	# supply a default set.  This particular selection hits some
	# interesting cases where we set the key to "key".
	set largs $args
	if { ![is_partitioned $args] } {
		lappend largs  -partition {ab cd key key1 zzz}
	}

	# Dispatch by test-name prefix.
	switch -glob -- $test {
		recd* {
			eval {run_recd $method $test $run $display} $largs
		}
		test* {
			eval {run_method $method $test $display $run $outfile} $largs
		}
		default {
			puts "Skipping test $test with range partitioning."
		}
	}
}
1288
# Run a testNNN or recdNNN test with partition callbacks.
proc run_partition_callback { test method {display 0} {run 1}\
    {outfile stdout} args } {

	# Partition callbacks work only with btree and hash.
	if { ![is_btree $method] && ![is_hash $method] } {
		if { $display == 0 } {
			puts "Skipping partition callback tests\
			    for method $method"
		}
		return
	}

	# Use any explicit partitioning args handed to us; otherwise
	# supply a default callback configuration.
	set largs $args
	if { ![is_partition_callback $args] } {
		lappend largs  -partition_callback 5 part
	}

	# Dispatch by test-name prefix.
	switch -glob -- $test {
		recd* {
			eval {run_recd $method $test $run $display} $largs
		}
		test* {
			eval {run_method $method $test $display $run $outfile} $largs
		}
		default {
			puts "Skipping test $test with partition callbacks."
		}
	}
}
1317
1318#
1319# Run method tests for btree only using compression.
1320#
1321proc run_compressed { method test {display 0} {run 1} \
1322    { outfile stdout } args } {
1323
1324	if { [is_btree $method] == 0 } {
1325		puts "Skipping compression test for method $method."
1326		return
1327	}
1328
1329	set largs $args
1330	append largs " -compress "
1331	eval run_method $method $test $display $run $outfile $largs
1332}
1333
1334#
1335# Run method tests in secure mode.
1336#
1337proc run_secmethod { method test {display 0} {run 1} \
1338    { outfile stdout } args } {
1339	global passwd
1340	global has_crypto
1341
1342	# Skip secure mode tests if release does not support encryption.
1343	if { $has_crypto == 0 } {
1344		return
1345	}
1346
1347	set largs $args
1348	append largs " -encryptaes $passwd "
1349	eval run_method $method $test $display $run $outfile $largs
1350}
1351
1352#
1353# Run method tests each in its own, new secure environment.
1354#
1355proc run_secenv { method test {largs ""} } {
1356	global __debug_on
1357	global __debug_print
1358	global __debug_test
1359	global is_envmethod
1360	global has_crypto
1361	global test_names
1362	global parms
1363	global passwd
1364	source ./include.tcl
1365
1366	# Skip secure mode tests if release does not support encryption.
1367	if { $has_crypto == 0 } {
1368		return
1369	}
1370
1371	puts "run_secenv: $method $test $largs"
1372
1373	set save_largs $largs
1374	env_cleanup $testdir
1375	set is_envmethod 1
1376	set stat [catch {
1377		check_handles
1378		set env [eval {berkdb_env -create -mode 0644 -home $testdir \
1379		    -encryptaes $passwd -pagesize 512 -cachesize {0 4194304 1}}]
1380		error_check_good env_open [is_valid_env $env] TRUE
1381		append largs " -env $env "
1382
1383		puts "[timestamp]"
1384		if { [info exists parms($test)] != 1 } {
1385			puts stderr "$test disabled in\
1386			    testparams.tcl; skipping."
1387			continue
1388		}
1389
1390		#
1391		# Run each test multiple times in the secure env.
1392		# Once with a secure env + clear database
1393		# Once with a secure env + secure database
1394		#
1395		eval $test $method $parms($test) $largs
1396		append largs " -encrypt "
1397		eval $test $method $parms($test) $largs
1398
1399		if { $__debug_print != 0 } {
1400			puts ""
1401		}
1402		if { $__debug_on != 0 } {
1403			debug $__debug_test
1404		}
1405		flush stdout
1406		flush stderr
1407		set largs $save_largs
1408		error_check_good envclose [$env close] 0
1409		error_check_good envremove [berkdb envremove \
1410		    -home $testdir -encryptaes $passwd] 0
1411	} res]
1412	if { $stat != 0} {
1413		global errorInfo;
1414
1415		set fnl [string first "\n" $errorInfo]
1416		set theError [string range $errorInfo 0 [expr $fnl - 1]]
1417		if {[string first FAIL $errorInfo] == -1} {
1418			error "FAIL:[timestamp]\
1419			    run_secenv: $method $test: $theError"
1420		} else {
1421			error $theError;
1422		}
1423	set is_envmethod 0
1424	}
1425
1426}
1427
1428#
1429# Run tests with a sliced configuration.
1430#
1431proc run_with_slices { test {display 0} {run 1} {args ""} } {
1432	global number_of_slices
1433	source ./include.tcl
1434
1435	# Skip of this release is not slice enabled.
1436	if { ![berkdb slice_enabled] } {
1437		return
1438	}
1439
1440	if { $display } {
1441		puts "run_with_slices $test"
1442	}
1443	if {!$run} {
1444		return
1445	}
1446
1447	# Run with just the environment sliced, and with the environment
1448	# and databases sliced
1449	set number_of_slices 2
1450
1451	set sliced_args {" " " -sliced "}
1452	foreach sliced_arg $sliced_args {
1453		set largs $args
1454		append largs $sliced_arg
1455		if { $run } {
1456			if { [string match "*-sliced*" $largs] } {
1457		puts "run_with_slices $test with sliced databases."
1458			} else {
1459		puts "run_with_slices $test with non-sliced databases."
1460			}
1461			slice_db_config $number_of_slices
1462			eval $test $largs
1463			env_cleanup $testdir
1464		}
1465	}
1466	set number_of_slices 0
1467	fileremove $testdir/DB_CONFIG
1468}
1469
1470#
1471# Run method tests each in its own, new sliced environment.
1472#
1473proc run_in_sliced_env { method test {display 0} {run 1} {largs ""} } {
1474	global __debug_on
1475	global __debug_print
1476	global __debug_test
1477	global is_envmethod
1478	global test_names
1479	global parms
1480	global number_of_slices
1481	source ./include.tcl
1482
1483	# Skip of this release is not slice enabled.
1484	if { ![berkdb slice_enabled] } {
1485		return
1486	}
1487
1488	if {$display} {
1489		puts "run_in_sliced_env $method $test $largs"
1490	}
1491
1492	if {!$run} {
1493		return
1494	}
1495
1496	# exec rm -rf $testdir
1497	env_cleanup $testdir
1498	set save_largs $largs
1499	set is_envmethod 1
1500	set number_of_slices 2
1501	set container {"set_cachesize 0 4194304 1"}
1502	set slice_all {"set_cachesize 0 4194304 1"}
1503	#
1504	# Run each test multiple times in the sliced env.
1505	# Once with a sliced env + non-sliced database
1506	# Once with a sliced env + sliced database
1507	#
1508	set sliced_args {" " " -sliced "}
1509	foreach sliced_arg $sliced_args {
1510		set stat [catch {
1511			slice_db_config $number_of_slices $container $slice_all
1512			set env [eval {berkdb_env -create -mode 0644 -home $testdir}]
1513			error_check_good env_open [is_valid_env $env] TRUE
1514			append largs " -env $env "
1515			append largs $sliced_arg
1516
1517			puts "[timestamp]"
1518			if { [info exists parms($test)] != 1 } {
1519				puts stderr "$test disabled in\
1520				    testparams.tcl; skipping."
1521				continue
1522			}
1523			if { [string match "*-sliced*" $largs] } {
1524		puts "run_in_sliced_env $test with sliced databases."
1525			} else {
1526		puts "run_in_sliced_env $test with non-sliced databases."
1527			}
1528			eval $test $method $parms($test) $largs
1529
1530			if { $__debug_print != 0 } {
1531				puts ""
1532			}
1533			if { $__debug_on != 0 } {
1534				debug $__debug_test
1535			}
1536			flush stdout
1537			flush stderr
1538			set largs $save_largs
1539			error_check_good envclose [$env close] 0
1540			set nodump 0
1541			if { $is_hp_test } {
1542				set nodump 1
1543			}
1544			verify_dir $testdir "" 1 0 $nodump
1545			salvage_dir $testdir 1
1546			env_cleanup $testdir
1547		} res]
1548	}
1549	if { $stat != 0} {
1550		global errorInfo;
1551
1552		set fnl [string first "\n" $errorInfo]
1553		set theError [string range $errorInfo 0 [expr $fnl - 1]]
1554		if {[string first FAIL $errorInfo] == -1} {
1555			error "FAIL:[timestamp]\
1556			    run_in_sliced_env: $method $test: $theError"
1557		} else {
1558			error $theError;
1559		}
1560	set is_envmethod 0
1561	}
1562	set number_of_slices 0
1563	fileremove $testdir/DB_CONFIG
1564}
1565
1566#
1567# Run replication method tests in master and client env.
1568# This proc runs a specific test/method using the db_replicate utility.
1569#
1570proc run_replicate_test { method test {nsites 2} {largs "" } } {
1571	source ./include.tcl
1572
1573	global __debug_on
1574	global __debug_print
1575	global __debug_test
1576	global errorInfo
1577	global has_crypto
1578	global is_envmethod
1579	global masterdir
1580	global parms
1581	global passwd
1582	global rep_verbose
1583	global repenv
1584	global verbose_type
1585
1586	puts "run_replicate_test $method $test $nsites $largs"
1587
1588	# Test124 can't be run under reptest because we delete all
1589	# the test files at the end of the test to avoid triggering
1590	# verification failures (it uses a non-standard sort).
1591	if { $test == "test124" } {
1592		puts "Skipping $test under run_replicate"
1593		return
1594	}
1595
1596	set verbargs ""
1597	if { $rep_verbose == 1 } {
1598		set verbargs " -verbose {$verbose_type on}"
1599	}
1600	set do_sec 0
1601	env_cleanup $testdir
1602	set is_envmethod 1
1603
1604	# Some tests that use a small db pagesize need a small
1605	# mpool pagesize as well -- otherwise we'll run out of
1606	# mutexes.   First determine the natural pagesize, so
1607	# that can be used in the normal case, then adjust where
1608	# needed.
1609
1610	set tmpenv [berkdb_env -create -home $testdir]
1611	set pg [$tmpenv get_mp_pagesize]
1612	error_check_good env_close [$tmpenv close] 0
1613	berkdb envremove -home $testdir
1614
1615	set small_pagesize_tests [list test035 test096 test112 test113 test114]
1616	if { [lsearch -exact $small_pagesize_tests $test] != -1  } {
1617		set pg 512
1618	}
1619
1620	#
1621	# Set log smaller than default to force changing files,
1622	# but big enough so that the tests that use binary files
1623	# as keys/data can run.  Increase the size of the log region --
1624	# sdb004 needs this, now that subdatabase names are stored
1625	# in the env region.
1626	#
1627	# All the settings below will be the same for all sites in the group.
1628	#
1629	set logmax [expr 3 * 1024 * 1024]
1630	set lockmax 40000
1631	set logregion 2097152
1632
1633	#
1634	# TODO:  Turn on crypto and test with that.  Off for now.
1635	#
1636	if { $do_sec && $has_crypto } {
1637		set envargs "-encryptaes $passwd"
1638		append largs " -encrypt "
1639	} else {
1640		set envargs ""
1641	}
1642	check_handles
1643	set last_site [expr $nsites - 1]
1644	set winner [berkdb random_int 0 $last_site]
1645	for { set i 0 } { $i < $nsites } { incr i } {
1646		set repdir($i) $testdir/ENV$i
1647		file mkdir $repdir($i)
1648		if { $i == $winner } {
1649			set pri 10
1650		} else {
1651			set pri [berkdb random_int 0 1]
1652		}
1653		replicate_make_config $repdir($i) $nsites $i $pri
1654		set envcmd($i) "berkdb_env_noerr -create -log_max $logmax \
1655		    $envargs -rep -home $repdir($i) -txn -thread -pagesize $pg \
1656		    -log_regionmax $logregion -lock_max_objects $lockmax \
1657		    -lock_max_locks $lockmax -errpfx $repdir($i) $verbargs \
1658		    -log_blob"
1659		set env($i) [eval $envcmd($i)]
1660		error_check_good env_open($i) [is_valid_env $env($i)] TRUE
1661	}
1662
1663	#
1664	# Now that we have all of the envs opened, we can start db_replicate
1665	# in each one too.  Afterward, we check for which site is master.
1666	#
1667	for { set i 0 } { $i < $nsites } { incr i } {
1668		set dpid($i) [eval {exec $util_path/db_replicate -t 3 \
1669		    -h $repdir($i)} -L $testdir/LOG$i &]
1670		puts "Started db_replicate $repdir($i): $dpid($i)"
1671	}
1672
1673	#
1674	# Wait for enough sites to start and elect someone master.
1675	# For now assume that once the master is elected, all sites
1676	# have started up and we don't have any laggards.  If that
1677	# seems to be a problem we could loop checking whether every
1678	# single env knows this master and is at the right LSN.
1679	#
1680	puts "run_replicate_test: Wait for repmgr to elect a master."
1681	await_expected_master $env($winner) 30
1682
1683	set masterdir $repdir($winner)
1684	#
1685	# Set up list of client env handles for later checking
1686	# and verification.  Skip the master env.
1687	#
1688	set j 0
1689	set repenv(master) $env($winner)
1690	for { set i 0 } { $i < $nsites } { incr i } {
1691		if { $winner != $i } {
1692			set repenv($j) $env($i)
1693			incr j
1694		}
1695	}
1696	puts "run_replicate_test: Found master at $repdir($winner)"
1697	#
1698	# Give a few seconds for the clients to sync with the master
1699	# before we begin blasting at them.  If we don't pause here,
1700	# we otherwise will race with the db_replicate process that is
1701	# in rep_start and our test will fail with DB_LOCK_DEADLOCK.
1702	# This pause gives the group a chance to quiesce.
1703	#
1704	tclsleep 5
1705
1706	#
1707	# We went through all that so that we can append '-env masterenv'
1708	# to the largs for the test.  Clobber the 30-second anti-archive
1709	# timer in case the test we're about to run wants to do any log
1710	# archiving, database renaming and/or removal.
1711	#
1712	$env($winner) test force noarchive_timeout
1713	append largs " -env $env($winner) "
1714
1715	#
1716	# Now run the actual test.
1717	#
1718	set stat [catch {
1719		puts "[timestamp]"
1720		if { [info exists parms($test)] != 1 } {
1721			puts stderr "$test disabled in\
1722			    testparams.tcl; skipping."
1723			continue
1724		}
1725
1726		puts -nonewline "Replicate: $test: $nsites sites "
1727		if { $do_sec } {
1728			puts -nonewline " with security;"
1729		} else {
1730			puts -nonewline " no security;"
1731		}
1732		puts ""
1733
1734		eval $test $method $parms($test) $largs
1735
1736		if { $__debug_print != 0 } {
1737			puts ""
1738		}
1739		if { $__debug_on != 0 } {
1740			debug $__debug_test
1741		}
1742		flush stdout
1743		flush stderr
1744	} res]
1745	#
1746	# Test is over.  We must kill the db_replicate processes no matter
1747	# whether there was an error or not.
1748	# And we must close the envs.  We save the original errorInfo
1749	# because it could be overwritten by tclkill.
1750	#
1751	puts "Replicate: $test: Done ($stat).  Wait and kill db_replicate."
1752	set save_errInfo $errorInfo
1753	tclsleep 10
1754	#
1755	# We kill all the clients first then kill the master.  If we
1756	# just kill them in order, and kill the master first, the others
1757	# may complete an election and the processes get killed in the
1758	# middle of recovery, thus leaving the env locked out which is
1759	# a problem in the verify phase.
1760	#
1761	for { set i 0 } { $i < $nsites } { incr i } {
1762		if { $i != $winner } {
1763			tclkill $dpid($i)
1764		}
1765	}
1766	tclsleep 2
1767	tclkill $dpid($winner)
1768	if { $stat != 0} {
1769		for { set i 0 } { $i < $nsites } { incr i } {
1770			catch { $env($i) close } ignore
1771		}
1772
1773		puts "Error result string: $res"
1774		set fnl [string first "\n" $save_errInfo]
1775		set theError [string range $save_errInfo 0 [expr $fnl - 1]]
1776		if {[string first FAIL $save_errInfo] == -1} {
1777			error "FAIL:[timestamp]\
1778			    run_reptest: $method $test: $theError"
1779		} else {
1780			error $theError;
1781		}
1782	} else {
1783		repl_envver0 $test $method [expr $nsites - 1]
1784		for { set i 0 } { $i < $nsites } { incr i } {
1785			catch { $env($i) close } ignore
1786		}
1787	}
1788
1789	set is_envmethod 0
1790}
1791
1792#
1793# Run replication method tests in master and client env.
1794# This proc runs a specific test/method with our own message handling.
1795#
1796proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
1797    {do_sec 0} {do_oob 0} {largs "" } } {
1798	source ./include.tcl
1799
1800	global __debug_on
1801	global __debug_print
1802	global __debug_test
1803	global is_envmethod
1804	global parms
1805	global passwd
1806	global has_crypto
1807
1808	puts "run_reptest \
1809	    $method $test $droppct $nclients $do_del $do_sec $do_oob $largs"
1810
1811	# Test124 can't be run under reptest because we delete all
1812	# the test files at the end of the test to avoid triggering
1813	# verification failures (it uses a non-standard sort).
1814	if { $test == "test124"} {
1815		puts "Skipping $test under run_repmethod"
1816		return
1817	}
1818
1819	env_cleanup $testdir
1820	set is_envmethod 1
1821	set stat [catch {
1822		if { $do_sec && $has_crypto } {
1823			set envargs "-encryptaes $passwd"
1824			append largs " -encrypt "
1825		} else {
1826			set envargs ""
1827		}
1828		check_handles
1829		#
1830		# This will set up the master and client envs
1831		# and will return us the args to pass to the
1832		# test.
1833
1834		set largs [repl_envsetup \
1835		    $envargs $largs $test $nclients $droppct $do_oob]
1836
1837		puts "[timestamp]"
1838		if { [info exists parms($test)] != 1 } {
1839			puts stderr "$test disabled in\
1840			    testparams.tcl; skipping."
1841			continue
1842		}
1843
1844		puts -nonewline \
1845		    "Repl: $test: dropping $droppct%, $nclients clients "
1846		if { $do_del } {
1847			puts -nonewline " with delete verification;"
1848		} else {
1849			puts -nonewline " no delete verification;"
1850		}
1851		if { $do_sec } {
1852			puts -nonewline " with security;"
1853		} else {
1854			puts -nonewline " no security;"
1855		}
1856		if { $do_oob } {
1857			puts -nonewline " with out-of-order msgs;"
1858		} else {
1859			puts -nonewline " no out-of-order msgs;"
1860		}
1861		puts ""
1862
1863		eval $test $method $parms($test) $largs
1864
1865		if { $__debug_print != 0 } {
1866			puts ""
1867		}
1868		if { $__debug_on != 0 } {
1869			debug $__debug_test
1870		}
1871		flush stdout
1872		flush stderr
1873		repl_envprocq $test $nclients $do_oob
1874		repl_envver0 $test $method $nclients
1875		if { $do_del } {
1876			repl_verdel $test $method $nclients
1877		}
1878		repl_envclose $test $envargs
1879	} res]
1880	if { $stat != 0} {
1881		global errorInfo;
1882
1883		set fnl [string first "\n" $errorInfo]
1884		set theError [string range $errorInfo 0 [expr $fnl - 1]]
1885		if {[string first FAIL $errorInfo] == -1} {
1886			error "FAIL:[timestamp]\
1887			    run_reptest: $method $test: $theError"
1888		} else {
1889			error $theError;
1890		}
1891	}
1892	set is_envmethod 0
1893}
1894
1895#
1896# Run replication method tests in master and client env.
1897# Wrapper to run db_replicate utility test.
1898#
1899proc run_replicate { method test {nums 0} {display 0} {run 1} \
1900    {outfile stdout} {largs ""} } {
1901	source ./include.tcl
1902
1903	set save_largs $largs
1904	env_cleanup $testdir
1905
1906	#
1907	# Run 2 sites 40%, 3 sites 40%, 4 sites 10%, 5 sites 10%
1908	set site_list { 2 2 2 2 3 3 3 3 4 5 }
1909	set s_len [expr [llength $site_list] - 1]
1910
1911	if { $nums == 0 } {
1912		set sindex [berkdb random_int 0 $s_len]
1913		set nsites [lindex $site_list $sindex]
1914	} else {
1915		set nsites $nums
1916	}
1917
1918	if { $display == 1 } {
1919		puts $outfile "eval run_replicate_test $method $test \
1920		    $nsites $largs"
1921	}
1922	if { $run == 1 } {
1923		run_replicate_test $method $test $nsites $largs
1924	}
1925}
1926
1927#
1928# Run replication method tests in master and client env.
1929# Wrapper to run a test on a replicated group.
1930#
1931proc run_repmethod { method test {numcl 0} {display 0} {run 1} \
1932    {outfile stdout} {largs ""} } {
1933	source ./include.tcl
1934
1935	global __debug_on
1936	global __debug_print
1937	global __debug_test
1938	global is_envmethod
1939	global test_names
1940	global parms
1941	global has_crypto
1942	global passwd
1943
1944	set save_largs $largs
1945	env_cleanup $testdir
1946
1947	# Use an array for number of clients because we really don't
1948	# want to evenly-weight all numbers of clients.  Favor smaller
1949	# numbers but test more clients occasionally.
1950	set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
1951	set drop_len [expr [llength $drop_list] - 1]
1952	set client_list { 1 1 2 1 1 1 2 2 3 1 }
1953	set cl_len [expr [llength $client_list] - 1]
1954
1955	if { $numcl == 0 } {
1956		set clindex [berkdb random_int 0 $cl_len]
1957		set nclients [lindex $client_list $clindex]
1958	} else {
1959		set nclients $numcl
1960	}
1961	set drindex [berkdb random_int 0 $drop_len]
1962	set droppct [lindex $drop_list $drindex]
1963
1964	# Do not drop messages on Windows.  Since we can't set
1965	# re-request times with less than millisecond precision,
1966	# dropping messages will cause test failures.
1967	if { $is_windows_test == 1 } {
1968		set droppct 0
1969	}
1970
1971 	set do_sec [berkdb random_int 0 1]
1972	set do_oob [berkdb random_int 0 1]
1973
1974	# Test130 cannot run with delete verification. [#18944]
1975	if { $test == "test130" } {
1976		set do_del 0
1977	} else {
1978		set do_del [berkdb random_int 0 1]
1979	}
1980
1981	if { $display == 1 } {
1982		puts $outfile "eval run_reptest $method $test $droppct \
1983		    $nclients $do_del $do_sec $do_oob $largs"
1984	}
1985	if { $run == 1 } {
1986		run_reptest $method $test $droppct $nclients $do_del \
1987		    $do_sec $do_oob $largs
1988	}
1989}
1990
1991#
1992# Run method tests, each in its own, new environment.  (As opposed to
1993# run_envmethod1 which runs all the tests in a single environment.)
1994#
1995proc run_envmethod { method test {display 0} {run 1} {outfile stdout} \
1996    { largs "" } } {
1997	global __debug_on
1998	global __debug_print
1999	global __debug_test
2000	global is_envmethod
2001	global test_names
2002	global parms
2003	source ./include.tcl
2004
2005	set save_largs $largs
2006	set envargs ""
2007
2008	# Enlarge the logging region by default - sdb004 needs this because
2009	# it uses very long subdb names, and the names are stored in the
2010	# env region.
2011	set logargs " -log_regionmax 2057152 "
2012
2013	# Enlarge the cache by default - some compaction tests need it.
2014	set cacheargs "-cachesize {0 4194304 1} -pagesize 512"
2015	env_cleanup $testdir
2016
2017	if { $display == 1 } {
2018		if { $run == 0 } {
2019			puts $outfile "eval run_envmethod $method $test 0 1 \
2020			    stdout $largs; verify_log $testdir"
2021		} else {
2022			puts $outfile "eval run_envmethod $method \
2023			    $test 0 1 stdout $largs"
2024		}
2025	}
2026
2027	# To run a normal test using system memory, call run_envmethod
2028	# with the flag -shm.
2029	set sindex [lsearch -exact $largs "-shm"]
2030	if { $sindex >= 0 } {
2031		set shm_key 20
2032		if { [mem_chk " -system_mem -shm_key $shm_key"] == 1 } {
2033			break
2034		} else {
2035			append envargs " -system_mem -shm_key $shm_key"
2036			set largs [lreplace $largs $sindex $sindex]
2037		}
2038	}
2039
2040	set sindex [lsearch -exact $largs "-log_max"]
2041	if { $sindex >= 0 } {
2042		append envargs " -log_max 100000 "
2043		set largs [lreplace $largs $sindex $sindex]
2044	}
2045
2046	# Test for -thread option and pass to berkdb_env open.  Leave in
2047	# $largs because -thread can also be passed to an individual
2048	# test as an arg.  Double the number of lockers because a threaded
2049	# env requires more than an ordinary env.
2050	if { [lsearch -exact $largs "-thread"] != -1 } {
2051		append envargs " -thread -lock_max_lockers 2000 "
2052	}
2053
2054	# Test for -alloc option and pass to berkdb_env open only.
2055	# Remove from largs because -alloc is not an allowed test arg.
2056	set aindex [lsearch -exact $largs "-alloc"]
2057	if { $aindex >= 0 } {
2058		append envargs " -alloc "
2059		set largs [lreplace $largs $aindex $aindex]
2060	}
2061
2062	# We raise the number of locks and objects - there are a few
2063	# compaction tests that require a large number.
2064	set lockargs " -lock_max_locks 40000 -lock_max_objects 20000 "
2065
2066	if { $run == 1 } {
2067		set is_envmethod 1
2068		set stat [catch {
2069			check_handles
2070			set env [eval {berkdb_env -create -txn -mode 0644 \
2071			    -home $testdir} $logargs $cacheargs $lockargs $envargs]
2072			error_check_good env_open [is_valid_env $env] TRUE
2073			append largs " -env $env "
2074
2075			puts "[timestamp]"
2076			if { [info exists parms($test)] != 1 } {
2077				puts stderr "$test disabled in\
2078				    testparams.tcl; skipping."
2079				continue
2080			}
2081			eval $test $method $parms($test) $largs
2082
2083			if { $__debug_print != 0 } {
2084				puts ""
2085			}
2086			if { $__debug_on != 0 } {
2087				debug $__debug_test
2088			}
2089			flush stdout
2090			flush stderr
2091			set largs $save_largs
2092			error_check_good envclose [$env close] 0
2093#			error_check_good envremove [berkdb envremove \
2094#			    -home $testdir] 0
2095		} res]
2096		if { $stat != 0} {
2097			global errorInfo;
2098
2099			set fnl [string first "\n" $errorInfo]
2100			set theError [string range $errorInfo 0 [expr $fnl - 1]]
2101			if {[string first FAIL $errorInfo] == -1} {
2102				error "FAIL:[timestamp]\
2103				    run_envmethod: $method $test: $theError"
2104			} else {
2105				error $theError;
2106			}
2107		}
2108		set is_envmethod 0
2109	}
2110}
2111
# Run the compaction tests (test111-test115) for one method, each in
# its own environment with a small log size, then exercise fatal
# recovery twice per test: once with the databases present and once
# after removing them.
proc run_compact { method } {
	source ./include.tcl

	foreach tnum {111 112 113 114 115} {
		run_envmethod $method test$tnum 0 1 stdout -log_max

		puts "\tTest$tnum: Test Recovery"
		set recenv [eval  berkdb env -create -txn \
		    -recover_fatal -home $testdir]
		error_check_good env_close [$recenv close] 0
		error_check_good verify_dir \
		    [verify_dir $testdir "" 0 0 1 ] 0

		puts "\tTest$tnum: Remove db and test Recovery"
		exec sh -c "rm -f $testdir/*.db"
		set recenv [eval  berkdb env -create -txn \
		    -recover_fatal -home $testdir]
		error_check_good env_close [$recenv close] 0
		error_check_good verify_dir \
		    [verify_dir $testdir "" 0 0 1 ] 0
	}
}
2132
# Run a single recovery test (recdNNN) for one method.  The test is
# executed in a child tclsh process so that deliberate crashes and
# upgrade-log settings don't disturb the main test driver.
proc run_recd { method test {run 1} {display 0} args } {
	global __debug_on
	global __debug_print
	global __debug_test
	global parms
	global test_names
	global log_log_record_types
	global gen_upgrade_log
	global upgrade_be
	global upgrade_dir
	global upgrade_method
	global upgrade_name
	source ./include.tcl

	if { $run == 1 } {
		puts "run_recd: $method $test $parms($test) $args"
	}
	if {[catch {
		if { $display } {
			puts "eval { $test } $method $parms($test) { $args }"
		}
		if { $run } {
			check_handles
			set upgrade_method $method
			set upgrade_name $test
			puts "[timestamp]"
			# By redirecting stdout to stdout, we make exec
			# print output rather than simply returning it.
			# By redirecting stderr to stdout too, we make
			# sure everything winds up in the ALL.OUT file.
			# The child re-sources test.tcl and inherits the
			# upgrade/logging globals via the here-document.
			set ret [catch { exec $tclsh_path << \
			    "source $test_path/test.tcl; \
			    set log_log_record_types $log_log_record_types;\
			    set gen_upgrade_log $gen_upgrade_log;\
			    set upgrade_be $upgrade_be; \
			    set upgrade_dir $upgrade_dir; \
			    set upgrade_method $upgrade_method; \
			    set upgrade_name $upgrade_name; \
			    eval { $test } $method $parms($test) {$args}" \
			    >&@ stdout
			} res]

			# Don't die if the test failed;  we want
			# to just proceed.
			if { $ret != 0 } {
				puts "FAIL:[timestamp] $res"
			}

			if { $__debug_print != 0 } {
				puts ""
			}
			if { $__debug_on != 0 } {
				debug $__debug_test
			}
			flush stdout
			flush stderr
		}
	} res] != 0} {
		global errorInfo;

		# Re-raise only the first line of the stack trace.
		# NOTE(review): the message omits $test, so failures here
		# report only the method name.
		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_recd: $method: $theError"
		} else {
			error $theError;
		}
	}
}
2203
# Convenience wrapper around run_recds: run (not just display) the
# recovery tests for a single method, forwarding any extra args.
proc recds {method args} {
	eval {run_recds $method 1 0} $args
}
2207
# Run all recovery tests (recdNNN) for the given methods, tracking which
# log record types were exercised.  When run_methods is "all", btree
# additionally reruns a chosen subset with log-file zeroing.
proc run_recds {{run_methods "all"} {run 1} {display 0} args } {
	source ./include.tcl
	global log_log_record_types
	global test_names
	global gen_upgrade_log
	global encrypt
	global valid_methods

	# Record log record types so logtrack_summary below can report
	# any that were never exercised.
	set log_log_record_types 1
	set run_zero 0
	if { $run_methods == "all" } {
		set run_methods  $valid_methods
		set run_zero 1
	}
	logtrack_init

	# Define a small set of tests to run with log file zeroing.
	set zero_log_tests \
	    {recd001 recd002 recd003 recd004 recd005 recd006 recd007}

	foreach method $run_methods {
		check_handles
#set test_names(recd) "recd005 recd017"
		foreach test $test_names(recd) {
			# Skip recd017 for non-crypto upgrade testing.
			# Run only recd017 for crypto upgrade testing.
			if { $gen_upgrade_log == 1 && $test == "recd017" && \
			    $encrypt == 0 } {
				puts "Skipping recd017 for non-crypto run."
				continue
			}
			if { $gen_upgrade_log == 1 && $test != "recd017" && \
			    $encrypt == 1 } {
				puts "Skipping $test for crypto run."
				continue
			}
			# Report failures but keep going with the next test.
			if { [catch {eval {run_recd $method $test $run \
			    $display} $args} ret ] != 0 } {
				puts $ret
			}

			# If it's one of the chosen tests, and btree, run with
			# log file zeroing.
			set zlog_idx [lsearch -exact $zero_log_tests $test]
			if { $run_zero == 1 && \
			    $method == "btree" && $zlog_idx > -1 } {
				if { [catch {eval {run_recd $method $test \
				    $run $display -zero_log} $args} ret ] != 0 } {
					puts $ret
				}
			}

			if { $gen_upgrade_log == 1 } {
				save_upgrade_files $testdir
			}
		}
	}

	# We can skip logtrack_summary during the crypto upgrade run -
	# it doesn't introduce any new log types.
	if { $run } {
		if { $gen_upgrade_log == 0 || $encrypt == 0 } {
			logtrack_summary
		}
	}
	set log_log_record_types 0
}
2275
2276# A small subset of tests to be used in conjunction with the
2277# automated builds.  Ideally these tests will cover a lot of ground
2278# but run in only 15 minutes or so.  You can put any test in the
2279# list of tests and it will be run all the ways that run_all
2280# runs it.
# Run the smoke-test subset: each test in $smoke_tests is run in all
# its permutations via run_all, and the per-test ALL.OUT output is
# concatenated into SMOKE.OUT.
proc run_smoke { } {
	source ./include.tcl
	global valid_methods

	fileremove -f SMOKE.OUT

	# Every element must be a bare test name.  (The previous version
	# of this list ended a line with "\ " -- backslash-space -- which
	# escapes the space instead of continuing the line, injecting a
	# bogus one-space "test" into the list and invoking run_all with
	# an empty test name.)
	set smoke_tests {lock001 log001 test001 test004 sdb001 sec001 \
	    rep001 txn001}

	# Run each test in all its permutations, and
	# concatenate the results in the file SMOKE.OUT.
	foreach test $smoke_tests {
		run_all $test
		set in [open ALL.OUT r]
		set out [open SMOKE.OUT a]
		while { [gets $in str] != -1 } {
			puts $out $str
		}
		close $in
		close $out
	}
}
2303
# Run the replication tests (repXXX) under each in-memory
# configuration: in-memory databases, in-memory logs, mixed-mode
# logs, in-memory replication files, and private region files
# (env -private).  Output accumulates in ALL.OUT.
#	testname: a single test name, or ALL for every rep test.
#	args: may include -n (display only, don't run).
proc run_inmem_tests { { testname ALL } args } {
	global test_names
	global one_test
	global valid_methods
	source ./include.tcl

	fileremove -f ALL.OUT

	set one_test $testname
	# Source testparams again to adjust test_names.
	source $test_path/testparams.tcl

	set exflgs [eval extractflags $args]
	set flags [lindex $exflgs 1]
	set display 1
	set run 1
	foreach f $flags {
		switch $f {
			n {
				# -n: display the commands only, don't run.
				set display 1
				set run 0
			}
		}
	}

	set o [open ALL.OUT a]
	if { $run == 1 } {
		puts -nonewline "Test suite run started at: "
		puts [clock format [clock seconds] -format "%H:%M %D"]
		puts [berkdb version -string]

		puts -nonewline $o "Test suite run started at: "
		puts $o [clock format [clock seconds] -format "%H:%M %D"]
		puts $o [berkdb version -string]
	}
	close $o

	# Run in-memory testing for databases, logs, replication files,
	# and region files (env -private).  It is not necessary to run
	# both run_inmem_log and run_mixedmode_log because run_mixedmode_log
	# includes the pure in-memory case.
	set inmem_procs [list run_inmem_db \
	    run_inmem_log run_mixedmode_log run_inmem_rep run_env_private]

	# The above 3 procs only support tests like repXXX, so we only run
	# these tests here.
	foreach inmem_proc $inmem_procs {
		foreach method $valid_methods {
			foreach test $test_names(rep) {
				# Skip the rep tests that don't support
				# particular kinds of in-memory testing
				# when appropriate.
				if { $inmem_proc == "run_inmem_db" } {
					set indx [lsearch -exact \
					    $test_names(skip_for_inmem_db) $test]
					if { $indx >= 0 } {
						continue
					}
				}
				if { $inmem_proc == "run_inmem_rep" } {
					set indx [lsearch -exact \
					    $test_names(skip_for_inmem_rep) $test]
					if { $indx >= 0 } {
						continue
					}
				}
				if { $inmem_proc == "run_env_private" } {
					set indx [lsearch -exact \
					    $test_names(skip_for_env_private) $test]
					if { $indx >= 0 } {
						continue
					}
				}

				if { $display } {
					set o [open ALL.OUT a]
					puts $o "eval \
					    $inmem_proc $test -$method; \
					    verify_dir $testdir \"\" 1 0 0; \
					    salvage_dir $testdir"
					close $o
				}

				# Each test runs in its own tclsh process so
				# a crash can't take down the whole suite.
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    eval $inmem_proc $test -$method;\
					    verify_dir $testdir \"\" 1 0 0; \
					    salvage_dir $testdir" \
					    >>& ALL.OUT } res ] {
						set o [open ALL.OUT a]
						puts $o "FAIL:$inmem_proc \
						    $test -$method: $res"
						close $o
					}
				}
			}
		}
	}

	if { $run == 0 } {
		return
	}

	set failed [check_output ALL.OUT]

	set o [open ALL.OUT a]
	if { $failed == 0 } {
		puts "Regression Tests Succeeded"
		puts $o "Regression Tests Succeeded"
	} else {
		puts "Regression Tests Failed"
		puts "Check UNEXPECTED OUTPUT lines."
		puts "Review ALL.OUT.x for details."
		puts $o "Regression Tests Failed"
	}

	puts -nonewline "Test suite run completed at: "
	puts [clock format [clock seconds] -format "%H:%M %D"]
	puts -nonewline $o "Test suite run completed at: "
	puts $o [clock format [clock seconds] -format "%H:%M %D"]
	close $o

}
2430
2431
# Run the whole suite in (nearly) every configuration: run_std first,
# then the access-method tests at the non-default page sizes, then at
# the default page size in a shared txn env, a thread-enabled env, and
# an -alloc env, and finally the replication/bigfile/security passes.
# Output accumulates in ALL.OUT; a FAIL line anywhere marks the run
# failed.
#	testname: a single test name, or ALL for the full suite.
#	args: may include -m (access-method tests only) and -n
#	    (display only, don't run).
proc run_all { { testname ALL } args } {
	global test_names
	global one_test
	global has_crypto
	global valid_methods
	source ./include.tcl

	fileremove -f ALL.OUT

	set one_test $testname
	if { $one_test != "ALL" } {
		# Source testparams again to adjust test_names.
		source $test_path/testparams.tcl
	}

	set exflgs [eval extractflags $args]
	set flags [lindex $exflgs 1]
	set display 1
	set run 1
	set am_only 0
	set parallel 0
	set nparalleltests 0
	set rflags {--}
	foreach f $flags {
		switch $f {
			m {
				# -m: access-method tests only.
				set am_only 1
			}
			n {
				# -n: display commands without running them.
				set display 1
				set run 0
				set rflags [linsert $rflags 0 "-n"]
			}
		}
	}

	set o [open ALL.OUT a]
	if { $run == 1 } {
		puts -nonewline "Test suite run started at: "
		puts [clock format [clock seconds] -format "%H:%M %D"]
		puts [berkdb version -string]

		puts -nonewline $o "Test suite run started at: "
		puts $o [clock format [clock seconds] -format "%H:%M %D"]
		puts $o [berkdb version -string]
	}
	close $o
	#
	# First run standard tests.  Send in a -A to let run_std know
	# that it is part of the "run_all" run, so that it doesn't
	# print out start/end times.
	#
	lappend args -A
	eval {run_std} $one_test $args

	set test_pagesizes [get_test_pagesizes]
	set args [lindex $exflgs 0]
	set save_args $args

	foreach pgsz $test_pagesizes {
		set args $save_args
		append args " -pagesize $pgsz -chksum"
		if { $am_only == 0 } {
			# Run recovery tests.
			#
			# XXX These don't actually work at multiple pagesizes;
			# disable them for now.
			#
			# XXX These too are broken into separate tclsh
			# instantiations so we don't require so much
			# memory, but I think it's cleaner
			# and more useful to do it down inside proc r than here,
			# since "r recd" gets done a lot and needs to work.
			#
			# XXX See comment in run_std for why this only directs
			# stdout and not stderr.  Don't worry--the right stuff
			# happens.
			#puts "Running recovery tests with pagesize $pgsz"
			#if [catch {exec $tclsh_path \
			#    << "source $test_path/test.tcl; \
			#    r $rflags recd $args" \
			#    2>@ stderr >> ALL.OUT } res] {
			#	set o [open ALL.OUT a]
			#	puts $o "FAIL: recd test:"
			#	puts $o $res
			#	close $o
			#}
		}

		# Access method tests.
		# Run subdb tests with varying pagesizes too.
		# XXX
		# Broken up into separate tclsh instantiations so
		# we don't require so much memory.
		foreach method $valid_methods {
			puts "Running $method tests with pagesize $pgsz"
			foreach sub {test sdb si} {
				foreach test $test_names($sub) {
					if { $run == 0 } {
						set o [open ALL.OUT a]
						eval {run_method -$method \
						    $test $display $run $o} \
						    $args
						close $o
					}
					if { $run } {
						if [catch {exec $tclsh_path << \
						    "global one_test; \
						    set one_test $one_test; \
						    source $test_path/test.tcl; \
						    eval {run_method -$method \
						    $test $display $run \
						    stdout} $args" \
						    >>& ALL.OUT } res] {
							set o [open ALL.OUT a]
							puts $o "FAIL: \
							    -$method $test: $res"
							close $o
						}
					}
				}
			}
		}
	}
	set args $save_args
	#
	# Run access method tests at default page size in one env.
	#
	foreach method $valid_methods {
		puts "Running $method tests in a txn env"
		foreach sub {test sdb si} {
			foreach test $test_names($sub) {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					run_envmethod -$method $test $display \
					    $run $o $args
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    run_envmethod -$method $test \
				  	    $display $run stdout $args" \
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL: run_envmethod \
						    $method $test: $res"
						close $o
					}
				}
			}
		}
	}
	#
	# Run access method tests at default page size in thread-enabled env.
	# We're not truly running threaded tests, just testing the interface.
	#
	foreach method $valid_methods {
		puts "Running $method tests in a threaded txn env"
		foreach sub {test sdb si} {
			foreach test $test_names($sub) {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					eval {run_envmethod -$method $test \
					    $display $run $o -thread}
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    eval {run_envmethod -$method $test \
				  	    $display $run stdout -thread}" \
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL: run_envmethod \
						    $method $test -thread: $res"
						close $o
					}
				}
			}
		}
	}
	#
	# Run access method tests at default page size with -alloc enabled.
	#
	foreach method $valid_methods {
		puts "Running $method tests in an env with -alloc"
		foreach sub {test sdb si} {
			foreach test $test_names($sub) {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					eval {run_envmethod -$method $test \
					    $display $run $o -alloc}
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    eval {run_envmethod -$method $test \
				  	    $display $run stdout -alloc}" \
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL: run_envmethod \
						    $method $test -alloc: $res"
						close $o
					}
				}
			}
		}
	}

	# Add a few more tests that are suitable for run_all but not run_std.
	set test_list {
		{"testNNN under replication"	"repmethod"}
		{"IPv4"			"ipv4"}}

	# If we're on Windows, Linux, FreeBSD, or Solaris, run the
	# bigfile tests.  These create files larger than 4 GB.
	if { $is_freebsd_test == 1 || $is_linux_test == 1 || \
	    $is_sunos_test == 1 || $is_windows_test == 1 } {
		lappend test_list {"big files"	"bigfile"}
	}

	# If release supports encryption, run security tests.
	#
	if { $has_crypto == 1 } {
		lappend test_list {"testNNN with security"	"secmethod"}
	}

	foreach pair $test_list {
		set msg [lindex $pair 0]
		set cmd [lindex $pair 1]
		puts "Running $msg tests"
		if [catch {exec $tclsh_path << \
		    "global one_test; set one_test $one_test; \
		    source $test_path/test.tcl; \
		    r $rflags $cmd $args" >>& ALL.OUT } res] {
			set o [open ALL.OUT a]
			puts $o "FAIL: $cmd test: $res"
			close $o
		}
	}

	# If not actually running, no need to check for failure.
	if { $run == 0 } {
		return
	}

	# Scan ALL.OUT for any FAIL line to decide the overall verdict.
	set failed 0
	set o [open ALL.OUT r]
	while { [gets $o line] >= 0 } {
		if { [regexp {^FAIL} $line] != 0 } {
			set failed 1
		}
	}
	close $o
	set o [open ALL.OUT a]
	if { $failed == 0 } {
		puts "Regression Tests Succeeded"
		puts $o "Regression Tests Succeeded"
	} else {
		puts "Regression Tests Failed; see ALL.OUT for log"
		puts $o "Regression Tests Failed"
	}

	puts -nonewline "Test suite run completed at: "
	puts [clock format [clock seconds] -format "%H:%M %D"]
	puts -nonewline $o "Test suite run completed at: "
	puts $o [clock format [clock seconds] -format "%H:%M %D"]
	close $o
}
2709
# A trimmed variant of run_all: run_std, then the access-method tests
# at the default page size in a shared txn env, then only test001 in
# the thread-enabled and -alloc envs, and finally the replication/
# bigfile/security passes.  Unlike run_all, it skips the alternate
# page-size sweep.  Output accumulates in ALL.OUT.
#	testname: a single test name, or ALL for the full set.
#	args: may include -m (access-method tests only) and -n
#	    (display only, don't run).
proc run_all_new { { testname ALL } args } {
	global test_names
	global one_test
	global has_crypto
	global valid_methods
	source ./include.tcl

	fileremove -f ALL.OUT

	set one_test $testname
	if { $one_test != "ALL" } {
		# Source testparams again to adjust test_names.
		source $test_path/testparams.tcl
	}

	set exflgs [eval extractflags $args]
	set flags [lindex $exflgs 1]
	set display 1
	set run 1
	set am_only 0
	set parallel 0
	set nparalleltests 0
	set rflags {--}
	foreach f $flags {
		switch $f {
			m {
				# -m: access-method tests only.
				set am_only 1
			}
			n {
				# -n: display commands without running them.
				set display 1
				set run 0
				set rflags [linsert $rflags 0 "-n"]
			}
		}
	}

	set o [open ALL.OUT a]
	if { $run == 1 } {
		puts -nonewline "Test suite run started at: "
		puts [clock format [clock seconds] -format "%H:%M %D"]
		puts [berkdb version -string]

		puts -nonewline $o "Test suite run started at: "
		puts $o [clock format [clock seconds] -format "%H:%M %D"]
		puts $o [berkdb version -string]
	}
	close $o
	#
	# First run standard tests.  Send in a -A to let run_std know
	# that it is part of the "run_all" run, so that it doesn't
	# print out start/end times.
	#
	lappend args -A
	eval {run_std} $one_test $args

	set test_pagesizes [get_test_pagesizes]
	set args [lindex $exflgs 0]
	set save_args $args

	#
	# Run access method tests at default page size in one env.
	#
	foreach method $valid_methods {
		puts "Running $method tests in a txn env"
		foreach sub {test sdb si} {
			foreach test $test_names($sub) {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					run_envmethod -$method $test $display \
					    $run $o $args
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    run_envmethod -$method $test \
				  	    $display $run stdout $args" \
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL: run_envmethod \
						    $method $test: $res"
						close $o
					}
				}
			}
		}
	}
	#
	# Run access method tests at default page size in thread-enabled env.
	# We're not truly running threaded tests, just testing the interface.
	#
	foreach method $valid_methods {
		puts "Running $method tests in a threaded txn env"
		# Only test001 is run here, as an interface check.
		set thread_tests "test001"
		foreach test $thread_tests {
			if { $run == 0 } {
				set o [open ALL.OUT a]
				eval {run_envmethod -$method $test \
				    $display $run $o -thread}
				close $o
			}
			if { $run } {
				if [catch {exec $tclsh_path << \
				    "global one_test; \
				    set one_test $one_test; \
				    source $test_path/test.tcl; \
				    eval {run_envmethod -$method $test \
			  	    $display $run stdout -thread}" \
				    >>& ALL.OUT } res] {
					set o [open ALL.OUT a]
					puts $o "FAIL: run_envmethod \
					    $method $test -thread: $res"
					close $o
				}
			}
		}
	}
	#
	# Run access method tests at default page size with -alloc enabled.
	#
	foreach method $valid_methods {
		puts "Running $method tests in an env with -alloc"
		# Only test001 is run here, as an interface check.
		set alloc_tests "test001"
		foreach test $alloc_tests {
			if { $run == 0 } {
				set o [open ALL.OUT a]
				eval {run_envmethod -$method $test \
				    $display $run $o -alloc}
				close $o
			}
			if { $run } {
				if [catch {exec $tclsh_path << \
				    "global one_test; \
				    set one_test $one_test; \
				    source $test_path/test.tcl; \
				    eval {run_envmethod -$method $test \
			  	    $display $run stdout -alloc}" \
				    >>& ALL.OUT } res] {
					set o [open ALL.OUT a]
					puts $o "FAIL: run_envmethod \
					    $method $test -alloc: $res"
					close $o
				}
			}
		}
	}

	# Run standard access method tests under replication.
	#
	set test_list [list {"testNNN under replication"	"repmethod"}]

	# If we're on Windows, Linux, FreeBSD, or Solaris, run the
	# bigfile tests.  These create files larger than 4 GB.
	if { $is_freebsd_test == 1 || $is_linux_test == 1 || \
	    $is_sunos_test == 1 || $is_windows_test == 1 } {
		lappend test_list {"big files"	"bigfile"}
	}

	# If release supports encryption, run security tests.
	#
	if { $has_crypto == 1 } {
		lappend test_list {"testNNN with security"	"secmethod"}
	}

	foreach pair $test_list {
		set msg [lindex $pair 0]
		set cmd [lindex $pair 1]
		puts "Running $msg tests"
		if [catch {exec $tclsh_path << \
		    "global one_test; set one_test $one_test; \
		    source $test_path/test.tcl; \
		    r $rflags $cmd $args" >>& ALL.OUT } res] {
			set o [open ALL.OUT a]
			puts $o "FAIL: $cmd test: $res"
			close $o
		}
	}

	# If not actually running, no need to check for failure.
	if { $run == 0 } {
		return
	}

	# Scan ALL.OUT for any FAIL line to decide the overall verdict.
	set failed 0
	set o [open ALL.OUT r]
	while { [gets $o line] >= 0 } {
		if { [regexp {^FAIL} $line] != 0 } {
			set failed 1
		}
	}
	close $o
	set o [open ALL.OUT a]
	if { $failed == 0 } {
		puts "Regression Tests Succeeded"
		puts $o "Regression Tests Succeeded"
	} else {
		puts "Regression Tests Failed; see ALL.OUT for log"
		puts $o "Regression Tests Failed"
	}

	puts -nonewline "Test suite run completed at: "
	puts [clock format [clock seconds] -format "%H:%M %D"]
	puts -nonewline $o "Test suite run completed at: "
	puts $o [clock format [clock seconds] -format "%H:%M %D"]
	close $o
}
2918
2919#
2920# Run method tests in one environment.  (As opposed to run_envmethod
2921# which runs each test in its own, new environment.)
2922#
# Run all "test" tests for one method inside a single shared
# environment (10MB cache, 512-byte pages).
#	method: access method (e.g. btree).
#	display: 1 to print the command that would be run.
#	run: 1 to actually execute the tests.
#	outfile: channel for progress/display output.
#	args: extra args passed to berkdb_env when creating the env.
proc run_envmethod1 { method {display 0} {run 1} { outfile stdout } args } {
	global __debug_on
	global __debug_print
	global __debug_test
	global is_envmethod
	global test_names
	global parms
	source ./include.tcl

	if { $run == 1 } {
		puts "run_envmethod1: $method $args"
	}

	set is_envmethod 1
	if { $run == 1 } {
		check_handles
		env_cleanup $testdir
		error_check_good envremove [berkdb envremove -home $testdir] 0
		# One env shared by every test; its handle is appended to
		# the per-test args via $largs.
		set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
		    {-pagesize 512 -mode 0644 -home $testdir} $args ]
		error_check_good env_open [is_valid_env $env] TRUE
		append largs " -env $env "
	}

	if { $display } {
		# The envmethod1 tests can't be split up, since they share
		# an env.
		puts $outfile "eval run_envmethod1 $method $args"
	}

	# NOTE(review): the test loop below appears twice, differing only
	# in the error label ("run_envmethod" vs. "run_envmethod1").
	# This looks like a copy-and-paste duplication that runs every
	# test twice in the shared env -- confirm whether that is
	# intentional before changing it.
	set stat [catch {
		foreach test $test_names(test) {
			if { [info exists parms($test)] != 1 } {
				puts stderr "$test disabled in\
				    testparams.tcl; skipping."
				continue
			}
			if { $run } {
				puts $outfile "[timestamp]"
				eval $test $method $parms($test) $largs
				if { $__debug_print != 0 } {
					puts $outfile ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
			}
			flush stdout
			flush stderr
		}
	} res]
	if { $stat != 0} {
		global errorInfo;

		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_envmethod: $method $test: $theError"
		} else {
			error $theError;
		}
	}
	# Second pass over the same tests (see NOTE above).
	set stat [catch {
		foreach test $test_names(test) {
			if { [info exists parms($test)] != 1 } {
				puts stderr "$test disabled in\
				    testparams.tcl; skipping."
				continue
			}
			if { $run } {
				puts $outfile "[timestamp]"
				eval $test $method $parms($test) $largs
				if { $__debug_print != 0 } {
					puts $outfile ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
			}
			flush stdout
			flush stderr
		}
	} res]
	if { $stat != 0} {
		global errorInfo;

		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_envmethod1: $method $test: $theError"
		} else {
			error $theError;
		}
	}
	if { $run == 1 } {
		error_check_good envclose [$env close] 0
		check_handles $outfile
	}
	set is_envmethod 0

}
3026
3027# Run the secondary index tests.
#	display: 1 to print the commands that would be run.
#	run: 1 to actually execute the tests.
#	outfile: channel for display output.
#	verbose: passed through to verbose_check_secondaries.
proc sindex { {display 0} {run 1} {outfile stdout} {verbose 0} args } {
	global test_names
	global testdir
	global verbose_check_secondaries
	set verbose_check_secondaries $verbose
	# Standard number of secondary indices to create if a single-element
	# list of methods is passed into the secondary index tests.
	global nsecondaries
	set nsecondaries 2

	# Run basic tests with a single secondary index and a small number
	# of keys, then again with a larger number of keys.  (Note that
	# we can't go above 5000, since we use two items from our
	# 10K-word list for each key/data pair.)
	foreach n { 200 5000 } {
		foreach pm { btree hash recno frecno queue queueext } {
			foreach sm { dbtree dhash ddbtree ddhash btree hash } {
				foreach test $test_names(si) {
					if { $display } {
						puts -nonewline $outfile \
						    "eval $test {\[list\
						    $pm $sm $sm\]} $n ;"
						puts -nonewline $outfile \
						    " verify_dir \
						    $testdir \"\" 1; "
						puts $outfile " salvage_dir \
						    $testdir 1"
					}
					if { $run } {
			 			check_handles $outfile
						eval $test \
						    {[list $pm $sm $sm]} $n
						verify_dir $testdir "" 1
						salvage_dir $testdir 1
					}
				}
			}
		}
	}

	# Run tests with 20 secondaries.
	foreach pm { btree hash } {
		set methlist [list $pm]
		for { set j 1 } { $j <= 20 } {incr j} {
			# XXX this should incorporate hash after #3726
			if { $j % 2 == 0 } {
				lappend methlist "dbtree"
			} else {
				lappend methlist "ddbtree"
			}
		}
		foreach test $test_names(si) {
			if { $display } {
				puts "eval $test {\[list $methlist\]} 500"
			}
			if { $run } {
				eval $test {$methlist} 500
			}
		}
	}
}
3089
3090# Run secondary index join test.  (There's no point in running
3091# this with both lengths, the primary is unhappy for now with fixed-
3092# length records (XXX), and we need unsorted dups in the secondaries.)
proc sijoin { {display 0} {run 1} {outfile stdout} } {
	foreach primary { btree hash recno } {
		# Four secondary-method combinations per primary:
		# the two matched pairs, then the two mixed pairs.
		set combos {}
		foreach secondary { btree hash } {
			lappend combos [list $secondary $secondary]
		}
		lappend combos {btree hash} {hash btree}

		if { $display } {
			foreach pair $combos {
				puts $outfile "eval sijointest\
				    {\[list $primary [lindex $pair 0]\
				    [lindex $pair 1]\]} 1000"
			}
		}
		if { $run } {
			foreach pair $combos {
				sijointest [list $primary \
				    [lindex $pair 0] [lindex $pair 1]] 1000
			}
		}
	}
}
3114
# Run a numbered range of tests through one of the run_* drivers.
#	proc_suffix: which driver to use (method, envmethod, recd,
#	    repmethod, reptest, secenv, secmethod) -- the proc called
#	    is run_$proc_suffix.
#	method: access method to pass to the driver.
#	start/stop: inclusive test-number range (default 1-999).
proc run { proc_suffix method {start 1} {stop 999} } {
	global test_names

	switch -exact -- $proc_suffix {
		envmethod -
		method -
		recd -
		repmethod -
		reptest -
		secenv -
		secmethod {
			# Run_recd runs the recd tests, all others
			# run the "testxxx" tests.
			if { $proc_suffix == "recd" } {
				set testtype recd
			} else {
				set testtype test
			}

			for { set i $start } { $i <= $stop } { incr i } {
				set name [format "%s%03d" $testtype $i]
				# If a test number is missing, silently skip
				# to next test; sparse numbering is allowed.
				if { [lsearch -exact $test_names($testtype) \
				    $name] == -1 } {
					continue
				}
				run_$proc_suffix $method $name
			}
		}
		default {
			# Fixed garbled message ("set up with to be used").
			puts "$proc_suffix is not set up to be used with run"
		}
	}
}
3150
3151
3152# We want to test all of 512b, 8Kb, and 64Kb pages, but chances are one
3153# of these is the default pagesize.  We don't want to run all the AM tests
3154# twice, so figure out what the default page size is, then return the
3155# other two.
proc get_test_pagesizes { } {
	# Open a throwaway in-memory btree database and read its stats
	# to discover the build's default page size.
	set db [berkdb_open -create -btree]
	error_check_good gtp_create [is_valid_db $db] TRUE
	set default_size 0
	foreach stat_pair [$db stat] {
		if { [string compare [lindex $stat_pair 0] {Page size}] == 0 } {
			set default_size [lindex $stat_pair 1]
		}
	}
	error_check_good gtp_close [$db close] 0
	error_check_bad gtp_pgsz $default_size 0

	# Return the standard sizes with the default one removed; if the
	# default is something else entirely, return all three.
	set candidates {512 8192 65536}
	set idx [lsearch -exact $candidates $default_size]
	if { $idx == -1 } {
		return $candidates
	}
	return [lreplace $candidates $idx $idx]
}
3180
proc run_timed_once { timedtest args } {
	# Time one run of $timedtest; return elapsed wall-clock seconds,
	# or propagate a FAIL-prefixed error if the test blew up.
	set t_begin [timestamp -r]
	set failed [catch {
		eval $timedtest $args
		flush stdout
		flush stderr
	} res]
	set t_end [timestamp -r]
	if { $failed } {
		global errorInfo

		set nl_pos [string first "\n" $errorInfo]
		set first_line [string range $errorInfo 0 [expr $nl_pos - 1]]
		if { [string first FAIL $errorInfo] != -1 } {
			error $first_line
		}
		error "FAIL:[timestamp]\
		    run_timed: $timedtest: $first_line"
	}
	return [expr $t_end - $t_begin]
}
3203
proc run_timed { niter timedtest args } {
	# Run $timedtest $niter times, printing per-run timings and, for
	# more than one iteration, the average and raw data.
	if { $niter < 1 } {
		error "run_timed: Invalid number of iterations $niter"
	}
	set total 0
	set samples {}
	for { set iter 1 } { $iter <= $niter } { incr iter } {
		set secs [eval run_timed_once $timedtest $args]
		lappend samples $secs
		set total [expr $total + $secs]
		puts "Test $timedtest run $iter completed: $secs seconds"
	}
	if { $niter > 1 } {
		puts "Average $timedtest time: [expr $total / $niter]"
		puts "Raw $timedtest data: $samples"
	}
}
3222