1# -*- Mode: cperl; cperl-indent-level: 4 -*-
2
3package Test::Harness;
4
5require 5.00405;
6use Test::Harness::Straps;
7use Test::Harness::Assert;
8use Exporter;
9use Benchmark;
10use Config;
11use strict;
12
13
14use vars qw(
15    $VERSION
16    @ISA @EXPORT @EXPORT_OK
17    $Verbose $Switches $Debug
18    $verbose $switches $debug
19    $Columns
20    $Timer
21    $ML $Last_ML_Print
22    $Strap
23    $has_time_hires
24);
25
BEGIN {
    # Probe for Time::HiRes so that, when $Timer is on, elapsed times
    # can be reported in milliseconds.  The string eval keeps it an
    # optional dependency: on failure time() keeps 1-second resolution.
    eval "use Time::HiRes 'time'";
    $has_time_hires = !$@;
}
30
31=head1 NAME
32
33Test::Harness - Run Perl standard test scripts with statistics
34
35=head1 VERSION
36
37Version 2.62
38
39=cut
40
$VERSION = '2.62';

# Backwards compatibility for exportable variable names.
*verbose  = *Verbose;
*switches = *Switches;
*debug    = *Debug;

# Let child test scripts detect that they run under the harness (see
# the ENVIRONMENT VARIABLES section of the POD below).
$ENV{HARNESS_ACTIVE} = 1;
$ENV{HARNESS_VERSION} = $VERSION;

END {
    # For VMS.
    delete $ENV{HARNESS_ACTIVE};
    delete $ENV{HARNESS_VERSION};
}

# Directory to watch for leaked files after each test, if any
# (HARNESS_FILELEAK_IN_DIR).
my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};

# The single Test::Harness::Straps instance that runs and parses tests.
$Strap = Test::Harness::Straps->new;

# Accessor for the shared strap object.
sub strap { return $Strap };

@ISA = ('Exporter');
@EXPORT    = qw(&runtests);
@EXPORT_OK = qw(&execute_tests $verbose $switches);

# Runtime configuration, mostly overridable via the environment.
$Verbose  = $ENV{HARNESS_VERBOSE} || 0;
$Debug    = $ENV{HARNESS_DEBUG} || 0;
$Switches = "-w";
$Columns  = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
$Columns--;             # Some shells have trouble with a full line of text.
$Timer    = $ENV{HARNESS_TIMER} || 0;
73
74=head1 SYNOPSIS
75
76  use Test::Harness;
77
78  runtests(@test_files);
79
80=head1 DESCRIPTION
81
82B<STOP!> If all you want to do is write a test script, consider
83using Test::Simple.  Test::Harness is the module that reads the
84output from Test::Simple, Test::More and other modules based on
85Test::Builder.  You don't need to know about Test::Harness to use
86those modules.
87
88Test::Harness runs tests and expects output from the test in a
89certain format.  That format is called TAP, the Test Anything
90Protocol.  It is defined in L<Test::Harness::TAP>.
91
C<Test::Harness::runtests(@tests)> runs all the test scripts named
93as arguments and checks standard output for the expected strings
94in TAP format.
95
96The F<prove> utility is a thin wrapper around Test::Harness.
97
98=head2 Taint mode
99
100Test::Harness will honor the C<-T> or C<-t> in the #! line on your
101test files.  So if you begin a test with:
102
103    #!perl -T
104
105the test will be run with taint mode on.
106
=head2 Configuration variables
108
109These variables can be used to configure the behavior of
110Test::Harness.  They are exported on request.
111
112=over 4
113
114=item C<$Test::Harness::Verbose>
115
116The package variable C<$Test::Harness::Verbose> is exportable and can be
117used to let C<runtests()> display the standard output of the script
118without altering the behavior otherwise.  The F<prove> utility's C<-v>
119flag will set this.
120
121=item C<$Test::Harness::switches>
122
123The package variable C<$Test::Harness::switches> is exportable and can be
124used to set perl command line options used for running the test
125script(s). The default value is C<-w>. It overrides C<HARNESS_PERL_SWITCHES>.
126
127=item C<$Test::Harness::Timer>
128
129If set to true, and C<Time::HiRes> is available, print elapsed seconds
130after each test file.
131
132=back
133
134
135=head2 Failure
136
137When tests fail, analyze the summary report:
138
139  t/base..............ok
140  t/nonumbers.........ok
141  t/ok................ok
142  t/test-harness......ok
143  t/waterloo..........dubious
144          Test returned status 3 (wstat 768, 0x300)
145  DIED. FAILED tests 1, 3, 5, 7, 9, 11, 13, 15, 17, 19
146          Failed 10/20 tests, 50.00% okay
147  Failed Test  Stat Wstat Total Fail  List of Failed
148  ---------------------------------------------------------------
149  t/waterloo.t    3   768    20   10  1 3 5 7 9 11 13 15 17 19
150  Failed 1/5 test scripts, 80.00% okay. 10/44 subtests failed, 77.27% okay.
151
152Everything passed but F<t/waterloo.t>.  It failed 10 of 20 tests and
153exited with non-zero status indicating something dubious happened.
154
155The columns in the summary report mean:
156
157=over 4
158
159=item B<Failed Test>
160
161The test file which failed.
162
163=item B<Stat>
164
165If the test exited with non-zero, this is its exit status.
166
167=item B<Wstat>
168
169The wait status of the test.
170
171=item B<Total>
172
173Total number of tests expected to run.
174
175=item B<Fail>
176
177Number which failed, either from "not ok" or because they never ran.
178
179=item B<List of Failed>
180
181A list of the tests which failed.  Successive failures may be
182abbreviated (ie. 15-20 to indicate that tests 15, 16, 17, 18, 19 and
18320 failed).
184
185=back
186
187
188=head1 FUNCTIONS
189
190The following functions are available.
191
192=head2 runtests( @test_files )
193
194This runs all the given I<@test_files> and divines whether they passed
195or failed based on their output to STDOUT (details above).  It prints
196out each individual test which failed along with a summary report and
how long it all took.
198
199It returns true if everything was ok.  Otherwise it will C<die()> with
200one of the messages in the DIAGNOSTICS section.
201
202=cut
203
sub runtests {
    my @tests = @_;

    # Neutralize any ambient output separators so the report's
    # formatting is not perturbed by the caller's settings.
    local ($\, $,);

    my($tot, $failed, $todo_passed) = execute_tests(tests => \@tests);
    print get_results($tot, $failed, $todo_passed);

    my $all_ok = _all_ok($tot);

    # Sanity check: overall success must agree with an empty failure set.
    assert(($all_ok xor keys %$failed),
           q{ok status jives with $failedtests});

    die("Failed $tot->{bad}/$tot->{tests} test programs. " .
        "@{[$tot->{max} - $tot->{ok}]}/$tot->{max} subtests failed.\n")
        unless $all_ok;

    return $all_ok;
}
224
# my $ok = _all_ok(\%tot);
# Tells you if this test run is overall successful or not.

sub _all_ok {
    my $tot = shift;

    # A run passes when no script failed and at least one test was
    # either executed or explicitly skipped.
    return 0 if $tot->{bad};
    return ($tot->{max} || $tot->{skipped}) ? 1 : 0;
}
233
# Returns all the files in a directory.  This is shorthand for backwards
# compatibility on systems where C<glob()> doesn't work right.
#
# An unreadable or nonexistent directory now yields an empty list
# instead of triggering readdir() warnings on an unopened handle.

sub _globdir {
    local *DIRH;

    opendir DIRH, shift or return;
    my @f = readdir DIRH;
    closedir DIRH;

    return @f;
}
246
247=head2 execute_tests( tests => \@test_files, out => \*FH )
248
249Runs all the given C<@test_files> (just like C<runtests()>) but
250doesn't generate the final report.  During testing, progress
251information will be written to the currently selected output
252filehandle (usually C<STDOUT>), or to the filehandle given by the
253C<out> parameter.  The I<out> is optional.
254
255Returns a list of two values, C<$total> and C<$failed>, describing the
256results.  C<$total> is a hash ref summary of all the tests run.  Its
257keys and values are this:
258
259    bonus           Number of individual todo tests unexpectedly passed
260    max             Number of individual tests ran
261    ok              Number of individual tests passed
262    sub_skipped     Number of individual tests skipped
263    todo            Number of individual todo tests
264
265    files           Number of test files ran
266    good            Number of test files passed
267    bad             Number of test files failed
268    tests           Number of test files originally given
269    skipped         Number of test files skipped
270
271If C<< $total->{bad} == 0 >> and C<< $total->{max} > 0 >>, you've
272got a successful test.
273
274C<$failed> is a hash ref of all the test scripts that failed.  Each key
275is the name of a test script, each value is another hash representing
276how that script failed.  Its keys are these:
277
278    name        Name of the test which failed
279    estat       Script's exit value
280    wstat       Script's wait status
281    max         Number of individual tests
282    failed      Number which failed
283    canon       List of tests which failed (as string).
284
285C<$failed> should be empty if everything passed.
286
287=cut
288
# Run every test file through the strap, print per-file progress to
# $out, and accumulate run-wide totals.  Returns (\%tot, \%failedtests,
# \%todo_passed); see the POD above for the meaning of each key.
sub execute_tests {
    my %args = @_;
    my @tests = @{$args{tests}};
    my $out = $args{out} || select();

    # We allow filehandles that are symbolic refs
    no strict 'refs';
    _autoflush($out);
    _autoflush(\*STDERR);

    my %failedtests;
    my %todo_passed;

    # Test-wide totals.
    my(%tot) = (
                bonus    => 0,
                max      => 0,
                ok       => 0,
                files    => 0,
                bad      => 0,
                good     => 0,
                tests    => scalar @tests,
                sub_skipped  => 0,
                todo     => 0,
                skipped  => 0,
                bench    => 0,
               );

    # Snapshot the watched directory so files leaked by each test can
    # be reported afterwards (HARNESS_FILELEAK_IN_DIR).
    my @dir_files;
    @dir_files = _globdir $Files_In_Dir if defined $Files_In_Dir;
    my $run_start_time = Benchmark->new;

    my $width = _leader_width(@tests);
    foreach my $tfile (@tests) {
        $Last_ML_Print = 0;  # so each test prints at least once
        my($leader, $ml) = _mk_leader($tfile, $width);
        local $ML = $ml;

        print $out $leader;

        $tot{files}++;

        $Strap->{_seen_header} = 0;
        if ( $Test::Harness::Debug ) {
            print $out "# Running: ", $Strap->_command_line($tfile), "\n";
        }
        my $test_start_time = $Timer ? time : 0;
        # analyze_file() runs the script and parses its output; a false
        # return means the script could not even be started.
        my %results = $Strap->analyze_file($tfile) or
          do { warn $Strap->{error}, "\n";  next };
        # Format elapsed time: milliseconds when Time::HiRes is
        # available, otherwise whole seconds (or "<1").
        my $elapsed;
        if ( $Timer ) {
            $elapsed = time - $test_start_time;
            if ( $has_time_hires ) {
                $elapsed = sprintf( " %8d ms", $elapsed*1000 );
            }
            else {
                $elapsed = sprintf( " %8s s", $elapsed ? $elapsed : "<1" );
            }
        }
        else {
            $elapsed = "";
        }

        # state of the current test.
        # Test numbers are 1-based; details are 0-based, hence $_-1.
        my @failed = grep { !$results{details}[$_-1]{ok} }
                     1..@{$results{details}};
        my @todo_pass = grep { $results{details}[$_-1]{actual_ok} &&
                               $results{details}[$_-1]{type} eq 'todo' }
                        1..@{$results{details}};

        my %test = (
                    ok          => $results{ok},
                    'next'      => $Strap->{'next'},
                    max         => $results{max},
                    failed      => \@failed,
                    todo_pass   => \@todo_pass,
                    todo        => $results{todo},
                    bonus       => $results{bonus},
                    skipped     => $results{skip},
                    skip_reason => $results{skip_reason},
                    skip_all    => $Strap->{skip_all},
                    ml          => $ml,
                   );

        $tot{bonus}       += $results{bonus};
        $tot{max}         += $results{max};
        $tot{ok}          += $results{ok};
        $tot{todo}        += $results{todo};
        $tot{sub_skipped} += $results{skip};

        my($estatus, $wstatus) = @results{qw(exit wait)};

        if ($results{passing}) {
            # XXX Combine these first two
            if ($test{max} and $test{skipped} + $test{bonus}) {
                my @msg;
                push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
                    if $test{skipped};
                if ($test{bonus}) {
                    # TODO tests that unexpectedly passed get recorded
                    # for their own summary table in get_results().
                    my ($txt, $canon) = _canondetail($test{todo},0,'TODO passed',
                                                    @{$test{todo_pass}});
                    $todo_passed{$tfile} = {
                        canon   => $canon,
                        max     => $test{todo},
                        failed  => $test{bonus},
                        name    => $tfile,
                        estat   => '',
                        wstat   => '',
                    };

                    push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded\n$txt");
                }
                print $out "$test{ml}ok$elapsed\n        ".join(', ', @msg)."\n";
            }
            elsif ( $test{max} ) {
                print $out "$test{ml}ok$elapsed\n";
            }
            elsif ( defined $test{skip_all} and length $test{skip_all} ) {
                print $out "skipped\n        all skipped: $test{skip_all}\n";
                $tot{skipped}++;
            }
            else {
                print $out "skipped\n        all skipped: no reason given\n";
                $tot{skipped}++;
            }
            $tot{good}++;
        }
        else {
            # List unrun tests as failures.
            if ($test{'next'} <= $test{max}) {
                push @{$test{failed}}, $test{'next'}..$test{max};
            }
            # List overruns as failures.
            else {
                my $details = $results{details};
                foreach my $overrun ($test{max}+1..@$details) {
                    next unless ref $details->[$overrun-1];
                    push @{$test{failed}}, $overrun
                }
            }

            # A non-zero wait status means the script itself blew up.
            if ($wstatus) {
                $failedtests{$tfile} = _dubious_return(\%test, \%tot,
                                                       $estatus, $wstatus);
                $failedtests{$tfile}{name} = $tfile;
            }
            elsif($results{seen}) {
                if (@{$test{failed}} and $test{max}) {
                    my ($txt, $canon) = _canondetail($test{max},$test{skipped},'Failed',
                                                    @{$test{failed}});
                    print $out "$test{ml}$txt";
                    $failedtests{$tfile} = { canon   => $canon,
                                             max     => $test{max},
                                             failed  => scalar @{$test{failed}},
                                             name    => $tfile,
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                else {
                    print $out "Don't know which tests failed: got $test{ok} ok, ".
                          "expected $test{max}\n";
                    $failedtests{$tfile} = { canon   => '??',
                                             max     => $test{max},
                                             failed  => '??',
                                             name    => $tfile,
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                $tot{bad}++;
            }
            else {
                # No test output was seen at all.
                print $out "FAILED before any test output arrived\n";
                $tot{bad}++;
                $failedtests{$tfile} = { canon       => '??',
                                         max         => '??',
                                         failed      => '??',
                                         name        => $tfile,
                                         estat       => '',
                                         wstat       => '',
                                       };
            }
        }

        # Report any files that appeared in the watched directory
        # since the previous test ran.
        if (defined $Files_In_Dir) {
            my @new_dir_files = _globdir $Files_In_Dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print $out "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    } # foreach test
    $tot{bench} = timediff(Benchmark->new, $run_start_time);

    # NOTE(review): presumably undoes PERL5LIB changes the strap made
    # while running the tests -- see Test::Harness::Straps.
    $Strap->_restore_PERL5LIB;

    return(\%tot, \%failedtests, \%todo_passed);
}
492
# Turns on autoflush for the handle passed
sub _autoflush {
    my $fh = shift;

    # select() returns the previously selected handle, which lets us
    # set $| on the target and then restore the caller's default.
    my $prev = select $fh;
    $| = 1;
    select $prev;
}
500
501=for private _mk_leader
502
503    my($leader, $ml) = _mk_leader($test_file, $width);
504
505Generates the 't/foo........' leader for the given C<$test_file> as well
506as a similar version which will overwrite the current line (by use of
507\r and such).  C<$ml> may be empty if Test::Harness doesn't think you're
508on TTY.
509
510The C<$width> is the width of the "yada/blah.." string.
511
512=cut
513
sub _mk_leader {
    my($te, $width) = @_;
    chomp $te;
    # Reduce the file extension to a bare trailing dot: "t/foo.t" -> "t/foo."
    $te =~ s/\.\w+$/./;

    # VMS spells test paths differently.
    $te =~ s/^.*\.t\./\[.t./s if $^O eq 'VMS';

    # Pad with dots out to the common column.
    my $leader = $te . '.' x ($width - length $te);

    # On a TTY (and when not verbose) also build a variant that
    # rewrites the current line via carriage returns.
    my $ml = '';
    $ml = "\r" . (' ' x 77) . "\r$leader"
        if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;

    return($leader, $ml);
}
531
532=for private _leader_width
533
534  my($width) = _leader_width(@test_files);
535
536Calculates how wide the leader should be based on the length of the
537longest test name.
538
539=cut
540
sub _leader_width {
    my $longest_name = 0;
    my $longest_suffix = 0;

    for my $file (@_) {
        my $suffix = $file =~ /\.(\w+)$/ ? $1 : '';
        $longest_name   = length $file   if length($file)   > $longest_name;
        $longest_suffix = length $suffix if length($suffix) > $longest_suffix;
    }

    # + 3 : we want three dots between the test name and the "ok"
    return $longest_name + 3 - $longest_suffix;
}
554
# Render the final summary for a finished run.  Takes the three hash
# refs produced by execute_tests() and returns the report as a string.
# Dies (rather than returning) when no tests were run at all or no
# test output was ever seen.
sub get_results {
    my $tot = shift;
    my $failedtests = shift;
    my $todo_passed = shift;

    my $out = '';

    my $bonusmsg = _bonusmsg($tot);

    if (_all_ok($tot)) {
        $out .= "All tests successful$bonusmsg.\n";
        # TODO tests that unexpectedly passed get their own table.
        if ($tot->{bonus}) {
            my($fmt_top, $fmt) = _create_fmts("Passed TODO",$todo_passed);
            # Now write to formats
            $out .= swrite( $fmt_top );
            for my $script (sort keys %{$todo_passed||{}}) {
                my $Curtest = $todo_passed->{$script};
                $out .= swrite( $fmt, @{ $Curtest }{qw(name estat wstat max failed canon)} );
            }
        }
    }
    elsif (!$tot->{tests}){
        die "FAILED--no tests were run for some reason.\n";
    }
    elsif (!$tot->{max}) {
        my $blurb = $tot->{tests}==1 ? "script" : "scripts";
        die "FAILED--$tot->{tests} test $blurb could be run, ".
            "alas--no output ever seen\n";
    }
    else {
        # At least one script failed: emit the failure table.
        my $subresults = sprintf( " %d/%d subtests failed.",
                              $tot->{max} - $tot->{ok}, $tot->{max} );

        my($fmt_top, $fmt1, $fmt2) = _create_fmts("Failed Test",$failedtests);

        # Now write to formats
        $out .= swrite( $fmt_top );
        for my $script (sort keys %$failedtests) {
            my $Curtest = $failedtests->{$script};
            $out .= swrite( $fmt1, @{ $Curtest }{qw(name estat wstat max failed canon)} );
            # Continuation row for canon lists too long for one line.
            $out .= swrite( $fmt2, $Curtest->{canon} );
        }
        if ($tot->{bad}) {
            $bonusmsg =~ s/^,\s*//;
            $out .= "$bonusmsg.\n" if $bonusmsg;
            $out .= "Failed $tot->{bad}/$tot->{tests} test scripts.$subresults\n";
        }
    }

    # Closing statistics line; bench is a Benchmark timediff object.
    $out .= sprintf("Files=%d, Tests=%d, %s\n",
           $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
    return $out;
}
608
# Run a single record through a format picture and return the
# resulting text: a small wrapper around formline() and $^A that
# leaves the accumulator clean afterwards.
sub swrite {
    my($format, @fields) = @_;
    $^A = '';
    formline($format, @fields);
    my $text = $^A;
    $^A = '';
    return $text;
}
617
618
# Dispatch table mapping strap line types to their display handlers.
my %Handlers = (
    header => \&header_handler,
    test => \&test_handler,
    bailout => \&bailout_handler,
);

# Hook invoked by the strap for every line of test output it parses.
$Strap->{callback} = \&strap_callback;
sub strap_callback {
    my($self, $line, $type, $totals) = @_;
    # In verbose mode echo the raw test output as-is.
    print $line if $Verbose;

    # Unknown line types are silently ignored.
    my $meth = $Handlers{$type};
    $meth->($self, $line, $type, $totals) if $meth;
};
633
634
sub header_handler {
    my($self, $line, $type, $totals) = @_;

    warn "Test header seen more than once!\n" if $self->{_seen_header};
    $self->{_seen_header}++;

    # A plan ("1..M") is only legal before any tests have run or
    # after all of them have.
    warn "1..M can only appear at the beginning or end of tests\n"
        if $totals->{seen} && $totals->{max} < $totals->{seen};
}
646
sub test_handler {
    my($self, $line, $type, $totals) = @_;

    my $seen   = $totals->{seen};
    my $expect = $self->{'next'};
    my $max    = $totals->{max};
    my $detail = $totals->{details}[-1];

    if( $detail->{ok} ) {
        # Throttled update so fast tests don't spam the terminal.
        _print_ml_less("ok $seen/$max");

        if( $detail->{type} eq 'skip' ) {
            # Keep the first skip reason; differing later reasons
            # collapse into 'various reasons'.
            $totals->{skip_reason} = $detail->{reason}
                unless defined $totals->{skip_reason};
            $totals->{skip_reason} = 'various reasons'
                if $totals->{skip_reason} ne $detail->{reason};
        }
    }
    else {
        _print_ml("NOK $seen");
    }

    # Complain when the test's own numbering disagrees with ours.
    if( $seen > $expect ) {
        print "Test output counter mismatch [test $seen]\n";
    }
    elsif( $seen < $expect ) {
        print "Confused test output: test $seen answered after ".
              "test ", $expect - 1, "\n";
    }
}
678
sub bailout_handler {
    my($self, $line, $type, $totals) = @_;

    # A "Bail out!" line aborts the entire run immediately.
    my $reason = $self->{bailout_reason};
    die "FAILED--Further testing stopped" . ($reason ? ": $reason\n" : ".\n");
}
685
686
sub _print_ml {
    # Rewrite the current console line; a no-op when $ML is empty
    # (i.e. we are not on a tty).  join '' keeps the output immune
    # to any ambient $, setting.
    return unless $ML;
    print join '', $ML, @_;
}
690
691
# Print updates only once per second.
sub _print_ml_less {
    my $now = CORE::time;
    return if $Last_ML_Print == $now;
    _print_ml(@_);
    $Last_ML_Print = $now;
}
700
# Build the parenthetical bonus/skip summary appended to the final
# report line.  Returns '' when there is nothing noteworthy.
sub _bonusmsg {
    my($tot) = @_;

    my $msg = '';

    # TODO tests that unexpectedly passed.
    if ($tot->{bonus}) {
        my $s = $tot->{bonus} > 1 ? 's' : '';
        $msg = " ($tot->{bonus} subtest$s UNEXPECTEDLY SUCCEEDED)";
    }

    # Skipped scripts and/or skipped individual subtests.
    if ($tot->{skipped}) {
        my $s = $tot->{skipped} != 1 ? 's' : '';
        $msg .= ", $tot->{skipped} test$s";
        if ($tot->{sub_skipped}) {
            my $ss = $tot->{sub_skipped} != 1 ? 's' : '';
            $msg .= " and $tot->{sub_skipped} subtest$ss";
        }
        $msg .= ' skipped';
    }
    elsif ($tot->{sub_skipped}) {
        my $ss = $tot->{sub_skipped} != 1 ? 's' : '';
        $msg .= ", $tot->{sub_skipped} subtest$ss skipped";
    }

    return $msg;
}
725
# Test program go boom.
# Reports a script that exited abnormally, marks the run as bad, and
# returns the failure-record hash for %failedtests.
sub _dubious_return {
    my($test, $tot, $estatus, $wstatus) = @_;

    my $num_failed = '??';
    my $canon      = '??';

    # NOTE: this reports straight to STDOUT rather than the $out
    # handle used elsewhere; kept as-is for compatibility.
    printf "$test->{ml}dubious\n\tTest returned status $estatus ".
           "(wstat %d, 0x%x)\n", $wstatus, $wstatus;
    print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';

    $tot->{bad}++;

    if ($test->{max}) {
        if ($test->{'next'} == $test->{max} + 1 and not @{$test->{failed}}) {
            print "\tafter all the subtests completed successfully\n";
            $num_failed = 0;    # But we do not set $canon!
        }
        else {
            # Everything the script never got to counts as failed.
            push @{$test->{failed}}, $test->{'next'}..$test->{max};
            $num_failed = @{$test->{failed}};
            (my $txt, $canon) =
                _canondetail($test->{max}, $test->{skipped}, 'Failed',
                             @{$test->{failed}});
            print "DIED. ", $txt;
        }
    }

    return {
        canon  => $canon,
        max    => $test->{max} || '??',
        failed => $num_failed,
        estat  => $estatus,
        wstat  => $wstatus,
    };
}
758
759
# Build the formline pictures for the failure/TODO summary table.
# $failed_str is the first column's heading ("Failed Test" or
# "Passed TODO"); $failedtests supplies the script names used to size
# that column.  Returns ($fmt_top, $fmt1, $fmt2): the heading block,
# the per-row picture, and the continuation-row picture used when the
# list of test numbers overflows one line.
# NOTE: may widen the package-global $Columns when the headings
# cannot otherwise fit.
sub _create_fmts {
    my $failed_str = shift;
    my $failedtests = shift;

    # First word ("Failed"/"Passed") decides the count column heading.
    my ($type) = split /\s/,$failed_str;
    my $short = substr($type,0,4);
    my $total = $short eq 'Pass' ? 'TODOs' : 'Total';
    my $middle_str = " Stat Wstat $total $short  ";
    my $list_str = "List of $type";

    # Figure out our longest name string for formatting purposes.
    my $max_namelen = length($failed_str);
    foreach my $script (keys %$failedtests) {
        my $namelen = length $failedtests->{$script}->{name};
        $max_namelen = $namelen if $namelen > $max_namelen;
    }

    # Give the remaining width to the list column; if even the list
    # heading doesn't fit, shrink the name column back and, as a last
    # resort, widen $Columns itself.
    my $list_len = $Columns - length($middle_str) - $max_namelen;
    if ($list_len < length($list_str)) {
        $list_len = length($list_str);
        $max_namelen = $Columns - length($middle_str) - $list_len;
        if ($max_namelen < length($failed_str)) {
            $max_namelen = length($failed_str);
            $Columns = $max_namelen + length($middle_str) + $list_len;
        }
    }

    my $fmt_top =   sprintf("%-${max_namelen}s", $failed_str)
                  . $middle_str
                  . $list_str . "\n"
                  . "-" x $Columns
                  . "\n";

    # formline pictures: @<<< left-justifies, @>>> right-justifies,
    # ^<<< consumes as much of the variable as fits and leaves the
    # rest for subsequent lines.
    my $fmt1 =  "@" . "<" x ($max_namelen - 1)
              . "  @>> @>>>> @>>>> @>>>  "
              . "^" . "<" x ($list_len - 1) . "\n";
    # ~~ makes formline repeat the line until the ^ field's variable
    # is exhausted; blank lines are suppressed.
    my $fmt2 =  "~~" . " " x ($Columns - $list_len - 2) . "^"
              . "<" x ($list_len - 1) . "\n";

    return($fmt_top, $fmt1, $fmt2);
}
801
# Build the human-readable detail text and the canonical (range
# collapsed) list of failing/TODO-passing test numbers.
# Returns ($txt, $canon).  For TODO details only the test list is
# produced; for failures a pass-percentage line (and optional skip
# note) is appended.
sub _canondetail {
    my($max, $skipped, $type, @tests) = @_;

    # Unique, numerically sorted test numbers.
    my %dup;
    @tests = sort { $a <=> $b } grep { !$dup{$_}++ } @tests;

    my $count   = @tests;
    my $uc_type = uc $type;

    my @out;
    my @ranges;
    my $canon;
    my $start = my $prev = shift @tests;

    if (@tests) {
        # Collapse consecutive runs into "a-b" ranges.  The final
        # element is visited twice so the last run gets flushed.
        for my $n (@tests, $tests[-1]) {
            if ($n > $prev + 1 || $n == $prev) {
                push @ranges, $start == $prev ? $prev : "$start-$prev";
                $start = $n;
            }
            $prev = $n;
        }
        {
            local $" = ", ";
            push @out, "$uc_type tests @ranges\n";
        }
        $canon = join ' ', @ranges;
    }
    else {
        push @out, "$uc_type test $prev\n";
        $canon = $prev;
    }

    # TODO details carry no pass/fail statistics.
    return (join('', @out), $canon) if $type =~ /todo/i;

    push @out, "\t$type $count/$max tests, ";
    push @out, $max
        ? (sprintf("%.2f", 100*(1 - $count/$max)), "% okay")
        : "?% okay";

    if ($skipped) {
        my $s    = 's' x ($skipped > 1);
        my $good = $max - $count - $skipped;
        my $skipmsg = " (less $skipped skipped test$s: $good okay, ";
        $skipmsg .= $max
            ? sprintf("%.2f", 100*($good/$max)) . "%)"
            : "?%)";
        push @out, $skipmsg;
    }

    push @out, "\n";

    return (join('', @out), $canon);
}
859
8601;
861__END__
862
863
864=head1 EXPORT
865
866C<&runtests> is exported by Test::Harness by default.
867
868C<&execute_tests>, C<$verbose>, C<$switches> and C<$debug> are
869exported upon request.
870
871=head1 DIAGNOSTICS
872
873=over 4
874
875=item C<All tests successful.\nFiles=%d,  Tests=%d, %s>
876
877If all tests are successful some statistics about the performance are
878printed.
879
880=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
881
882For any single script that has failing subtests statistics like the
883above are printed.
884
885=item C<Test returned status %d (wstat %d)>
886
887Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
888and C<$?> are printed in a message similar to the above.
889
890=item C<Failed 1 test, %.2f%% okay. %s>
891
892=item C<Failed %d/%d tests, %.2f%% okay. %s>
893
894If not all tests were successful, the script dies with one of the
895above messages.
896
897=item C<FAILED--Further testing stopped: %s>
898
899If a single subtest decides that further testing will not make sense,
900the script dies with this message.
901
902=back
903
904=head1 ENVIRONMENT VARIABLES THAT TEST::HARNESS SETS
905
906Test::Harness sets these before executing the individual tests.
907
908=over 4
909
910=item C<HARNESS_ACTIVE>
911
912This is set to a true value.  It allows the tests to determine if they
913are being executed through the harness or by any other means.
914
915=item C<HARNESS_VERSION>
916
917This is the version of Test::Harness.
918
919=back
920
921=head1 ENVIRONMENT VARIABLES THAT AFFECT TEST::HARNESS
922
923=over 4
924
925=item C<HARNESS_COLUMNS>
926
927This value will be used for the width of the terminal. If it is not
928set then it will default to C<COLUMNS>. If this is not set, it will
929default to 80. Note that users of Bourne-sh based shells will need to
930C<export COLUMNS> for this module to use that variable.
931
932=item C<HARNESS_COMPILE_TEST>
933
934When true it will make harness attempt to compile the test using
935C<perlcc> before running it.
936
937B<NOTE> This currently only works when sitting in the perl source
938directory!
939
940=item C<HARNESS_DEBUG>
941
942If true, Test::Harness will print debugging information about itself as
943it runs the tests.  This is different from C<HARNESS_VERBOSE>, which prints
944the output from the test being run.  Setting C<$Test::Harness::Debug> will
945override this, or you can use the C<-d> switch in the F<prove> utility.
946
947=item C<HARNESS_FILELEAK_IN_DIR>
948
949When set to the name of a directory, harness will check after each
950test whether new files appeared in that directory, and report them as
951
952  LEAKED FILES: scr.tmp 0 my.db
953
954If relative, directory name is with respect to the current directory at
955the moment runtests() was called.  Putting absolute path into
956C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
957
958=item C<HARNESS_NOTTY>
959
960When set to a true value, forces it to behave as though STDOUT were
961not a console.  You may need to set this if you don't want harness to
962output more frequent progress messages using carriage returns.  Some
963consoles may not handle carriage returns properly (which results in a
964somewhat messy output).
965
966=item C<HARNESS_PERL>
967
968Usually your tests will be run by C<$^X>, the currently-executing Perl.
969However, you may want to have it run by a different executable, such as
970a threading perl, or a different version.
971
972If you're using the F<prove> utility, you can use the C<--perl> switch.
973
974=item C<HARNESS_PERL_SWITCHES>
975
976Its value will be prepended to the switches used to invoke perl on
977each test.  For example, setting C<HARNESS_PERL_SWITCHES> to C<-W> will
978run all tests with all warnings enabled.
979
980=item C<HARNESS_TIMER>
981
982Setting this to true will make the harness display the number of
983milliseconds each test took.  You can also use F<prove>'s C<--timer>
984switch.
985
986=item C<HARNESS_VERBOSE>
987
988If true, Test::Harness will output the verbose results of running
989its tests.  Setting C<$Test::Harness::verbose> will override this,
990or you can use the C<-v> switch in the F<prove> utility.
991
992=back
993
994=head1 EXAMPLE
995
996Here's how Test::Harness tests itself
997
998  $ cd ~/src/devel/Test-Harness
999  $ perl -Mblib -e 'use Test::Harness qw(&runtests $verbose);
1000    $verbose=0; runtests @ARGV;' t/*.t
1001  Using /home/schwern/src/devel/Test-Harness/blib
1002  t/base..............ok
1003  t/nonumbers.........ok
1004  t/ok................ok
1005  t/test-harness......ok
1006  All tests successful.
1007  Files=4, Tests=24, 2 wallclock secs ( 0.61 cusr + 0.41 csys = 1.02 CPU)
1008
1009=head1 SEE ALSO
1010
1011The included F<prove> utility for running test scripts from the command line,
1012L<Test> and L<Test::Simple> for writing test scripts, L<Benchmark> for
1013the underlying timing routines, and L<Devel::Cover> for test coverage
1014analysis.
1015
1016=head1 TODO
1017
1018Provide a way of running tests quietly (ie. no printing) for automated
1019validation of tests.  This will probably take the form of a version
1020of runtests() which rather than printing its output returns raw data
1021on the state of the tests.  (Partially done in Test::Harness::Straps)
1022
1023Document the format.
1024
1025Fix HARNESS_COMPILE_TEST without breaking its core usage.
1026
1027Figure a way to report test names in the failure summary.
1028
1029Rework the test summary so long test names are not truncated as badly.
1030(Partially done with new skip test styles)
1031
1032Add option for coverage analysis.
1033
1034Trap STDERR.
1035
1036Implement Straps total_results()
1037
1038Remember exit code
1039
1040Completely redo the print summary code.
1041
1042Implement Straps callbacks.  (experimentally implemented)
1043
1044Straps->analyze_file() not taint clean, don't know if it can be
1045
1046Fix that damned VMS nit.
1047
1048Add a test for verbose.
1049
1050Change internal list of test results to a hash.
1051
1052Fix stats display when there's an overrun.
1053
1054Fix so perls with spaces in the filename work.
1055
1056Keeping whittling away at _run_all_tests()
1057
1058Clean up how the summary is printed.  Get rid of those damned formats.
1059
1060=head1 BUGS
1061
1062Please report any bugs or feature requests to
1063C<bug-test-harness at rt.cpan.org>, or through the web interface at
1064L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-Harness>.
1065I will be notified, and then you'll automatically be notified of progress on
1066your bug as I make changes.
1067
1068=head1 SUPPORT
1069
1070You can find documentation for this module with the F<perldoc> command.
1071
1072    perldoc Test::Harness
1073
1074You can get docs for F<prove> with
1075
1076    prove --man
1077
1078You can also look for information at:
1079
1080=over 4
1081
1082=item * AnnoCPAN: Annotated CPAN documentation
1083
1084L<http://annocpan.org/dist/Test-Harness>
1085
1086=item * CPAN Ratings
1087
1088L<http://cpanratings.perl.org/d/Test-Harness>
1089
1090=item * RT: CPAN's request tracker
1091
1092L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Test-Harness>
1093
1094=item * Search CPAN
1095
1096L<http://search.cpan.org/dist/Test-Harness>
1097
1098=back
1099
1100=head1 SOURCE CODE
1101
1102The source code repository for Test::Harness is at
1103L<http://svn.perl.org/modules/Test-Harness>.
1104
1105=head1 AUTHORS
1106
1107Either Tim Bunce or Andreas Koenig, we don't know. What we know for
1108sure is, that it was inspired by Larry Wall's F<TEST> script that came
1109with perl distributions for ages. Numerous anonymous contributors
1110exist.  Andreas Koenig held the torch for many years, and then
1111Michael G Schwern.
1112
1113Current maintainer is Andy Lester C<< <andy at petdance.com> >>.
1114
1115=head1 COPYRIGHT
1116
1117Copyright 2002-2006
1118by Michael G Schwern C<< <schwern at pobox.com> >>,
1119Andy Lester C<< <andy at petdance.com> >>.
1120
1121This program is free software; you can redistribute it and/or
1122modify it under the same terms as Perl itself.
1123
1124See L<http://www.perl.com/perl/misc/Artistic.html>.
1125
1126=cut
1127