# Copyright 2005 Dave Abrahams
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Copyright 2014-2015 Rene Rivera
# Copyright 2014 Microsoft Corporation
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)

# This module implements a regression testing framework. It declares a number
# of main target rules which perform some action and, if the results are OK,
# create an output file.
#
# The exact list of rules is:
# 'compile'       -- creates a .test file if compilation of the sources was
#                    successful.
# 'compile-fail'  -- creates a .test file if compilation of the sources failed.
# 'run'           -- creates a .test file if running the executable produced
#                    from the sources was successful. Also leaves behind a
#                    .output file with the output from the program run.
# 'run-fail'      -- same as above, but the .test file is created if the run
#                    fails.
#
# In all cases, the presence of the .test file indicates that the test passed.
# For more convenient reporting, you might want to use the C++ Boost regression
# testing utilities (see http://www.boost.org/more/regression.html).
#
# For historical reasons, a 'unit-test' rule is also available; it has the same
# syntax as 'exe' and behaves just like 'run'.
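#
# A minimal sketch of how a project's Jamfile might declare these tests (the
# source file and target names here are hypothetical):
#
#   compile      syntax_only.cpp ;
#   compile-fail must_not_compile.cpp ;
#   run          smoke.cpp : --verbose ;   # extra command line args after sources
#   run-fail     aborts.cpp ;
#   unit-test    quick : quick.cpp ;       # same syntax as 'exe'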

# Things to do:
#  - Teach compiler_status to handle Jamfile.v2.
# Notes:
#  - <no-warn> is not implemented, since it is Como-specific and it is not
#    clear how to implement it.
#  - std::locale support is not implemented (it is used in one test).


import alias ;
import build-system ;
import "class" ;
import common ;
import errors ;
import feature ;
import generators ;
import os ;
import param ;
import path ;
import project ;
import property ;
import property-set ;
import regex ;
import sequence ;
import targets ;
import toolset ;
import type ;
import virtual-target ;


rule init ( )
{
}


# Feature controlling the command used to launch test programs.
feature.feature testing.launcher   : : free optional ;

feature.feature test-info          : : free incidental ;
feature.feature testing.arg        : : free incidental ;
feature.feature testing.input-file : : free dependency ;

feature.feature preserve-test-targets : on off : incidental propagated ;

# Feature to control whether executable binaries are run as part of a test.
# This can be used to just compile the test cases, e.g. in cross-compilation
# situations.
feature.feature testing.execute : on off : incidental propagated ;
feature.set-default testing.execute : on ;
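
# For example, in a cross-compilation setup one might build the tests without
# running them by putting the property on the command line (illustrative
# invocation):
#
#   b2 toolset=gcc testing.execute=off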

# Register target types.
type.register TEST         : test          ;
type.register COMPILE      :        : TEST ;
type.register COMPILE_FAIL :        : TEST ;
type.register RUN_OUTPUT   : run           ;
type.register RUN          :        : TEST ;
type.register RUN_FAIL     :        : TEST ;
type.register LINK_FAIL    :        : TEST ;
type.register LINK         :        : TEST ;
type.register UNIT_TEST    : passed : TEST ;


# Suffix denoting a test target directory.
#
.TEST-DIR-SUFFIX = ".test" ;
if [ os.name ] = VMS
{
    .TEST-DIR-SUFFIX = "$test" ;
}

# Declare the rules which create main targets. While the 'type' module already
# creates rules with the same names for us, we need an extra convenience:
# defaulting the main target name, so we write our own versions.

# Helper rule. Creates a test target, using the basename of the first source if
# no target name is explicitly passed. Remembers the created target in a global
# variable.
#
rule make-test ( target-type : sources + : requirements * : target-name ? )
{
    target-name ?= $(sources[1]:D=:S=) ;

    # Having periods (".") in the target name is problematic because the typed
    # generator will strip the suffix and use the bare name for the file
    # targets. Even though the location-prefix averts problems most of the
    # time, it does not prevent ambiguity issues when referring to the test
    # targets, for example when using the XML log output. So we rename the
    # target to remove the periods and provide an alias for users (see the
    # example after this rule).
    local real-name = [ regex.replace $(target-name) "[.]" "~" ] ;

    local project = [ project.current ] ;
    # The <location-prefix> forces the build system to generate paths in the
    # form '$build_dir/array1$(.TEST-DIR-SUFFIX)/gcc/debug'. This is necessary
    # to allow post-processing tools to work.
    local t = [ targets.create-typed-target [ type.type-from-rule-name
        $(target-type) ] : $(project) : $(real-name) : $(sources) :
        $(requirements) <location-prefix>$(real-name)$(.TEST-DIR-SUFFIX)
        <relevant>toolset ] ;

    # Add an alias to the real target, per the period replacement above.
    if $(real-name) != $(target-name)
    {
        alias $(target-name) : $(t) ;
    }

    # Remember the test (for --dump-tests). A better way would be to collect
    # all tests given a project, but that has some technical problems: e.g. we
    # cannot dump them from a Jamfile since projects referred to via
    # 'build-project' are not available until the whole Jamfile has been
    # loaded.
    .all-tests += $(t) ;
    return $(t) ;
}
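
# A small illustration of the renaming above, with hypothetical names: a test
# declared from a source 'array.special.cpp' gets the real target name
# 'array~special', its file targets live under
# 'array~special$(.TEST-DIR-SUFFIX)', and an 'array.special' alias is declared
# so users can still request the original name:
#
#   run array.special.cpp ;   # usable as both 'array~special' and 'array.special'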


# Note: passing more than one cpp file here is known to fail. Passing a cpp
# file and a library target works.
#
rule compile ( sources + : requirements * : target-name ? )
{
    param.handle-named-params sources requirements target-name ;
    return [ make-test compile : $(sources) : $(requirements) : $(target-name) ]
        ;
}


rule compile-fail ( sources + : requirements * : target-name ? )
{
    param.handle-named-params sources requirements target-name ;
    return [ make-test compile-fail : $(sources) : $(requirements) :
        $(target-name) ] ;
}


rule link ( sources + : requirements * : target-name ? )
{
    param.handle-named-params sources requirements target-name ;
    return [ make-test link : $(sources) : $(requirements) : $(target-name) ] ;
}


rule link-fail ( sources + : requirements * : target-name ? )
{
    param.handle-named-params sources requirements target-name ;
    return [ make-test link-fail : $(sources) : $(requirements) : $(target-name)
        ] ;
}


rule handle-input-files ( input-files * )
{
    if $(input-files[2])
    {
        # Check that the sorting done when creating the property-set instance
        # will not change the ordering.
        if [ sequence.insertion-sort $(input-files) ] != $(input-files)
        {
            errors.user-error "Names of input files must be sorted alphabetically"
                : "due to internal limitations" ;
        }
    }
    return <testing.input-file>$(input-files) ;
}


rule run ( sources + : args * : input-files * : requirements * : target-name ? :
    default-build * )
{
    param.handle-named-params sources args input-files requirements
        target-name default-build ;
    requirements += <testing.arg>$(args:J=" ") ;
    requirements += [ handle-input-files $(input-files) ] ;
    return [ make-test run : $(sources) : $(requirements) : $(target-name) ] ;
}


rule run-fail ( sources + : args * : input-files * : requirements * :
    target-name ? : default-build * )
{
    param.handle-named-params sources args input-files requirements
        target-name default-build ;
    requirements += <testing.arg>$(args:J=" ") ;
    requirements += [ handle-input-files $(input-files) ] ;
    return [ make-test run-fail : $(sources) : $(requirements) : $(target-name)
        ] ;
}
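
# An illustrative 'run' declaration with hypothetical file names. Command line
# arguments go in the second parameter and input files in the third; note that
# multiple input file names must already be sorted alphabetically, as enforced
# by handle-input-files above:
#
#   run parser_test.cpp
#       : --trace              # arguments, passed via <testing.arg>
#       : a.toml b.toml        # input files, in alphabetical order
#       : <define>PARSER_TEST  # extra requirements
#       ;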


# Use 'test-suite' as a synonym for 'alias', for backward compatibility.
IMPORT : alias : : test-suite ;


# For all recorded test main targets, which are typed targets with a type
# derived from 'TEST', produce some interesting information.
#
rule dump-tests
{
    for local t in $(.all-tests)
    {
        dump-test $(t) ;
    }
}

if --dump-tests in [ modules.peek : ARGV ]
{
    IMPORT testing : dump-tests : : testing.dump-tests ;
    build-system.add-pre-build-hook testing.dump-tests ;
}

# Given a project location in normalized form (slashes are forward), compute
# the name of the Boost library.
#
local rule get-library-name ( path )
{
    # Path is in normalized form, so all slashes are forward.
    local match1 = [ MATCH /(tools|libs)/(.*)/(test|example) : $(path) ] ;
    local match2 = [ MATCH /(tools|libs)/(.*)$ : $(path) ] ;
    local match3 = [ MATCH (/status$) : $(path) ] ;

    if $(match1) { return $(match1[2]) ; }
    else if $(match2) { return $(match2[2]) ; }
    else if $(match3) { return "" ; }
    else if --dump-tests in [ modules.peek : ARGV ]
    {
        # The 'run' rule and others might be used outside Boost. In that case,
        # just return the path, since the 'library name' makes no sense.
        return $(path) ;
    }
}
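
# For example (illustrative paths): '/home/user/boost/libs/regex/test' yields
# 'regex', '/home/user/boost/libs/any' yields 'any', and a location ending in
# '/status' yields an empty string.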


# Was an XML dump requested?
.out-xml = [ MATCH --out-xml=(.*) : [ modules.peek : ARGV ] ] ;


# Takes a target (an instance of 'basic-target') and prints
#   - its type
#   - its name
#   - comments specified via the <test-info> property
#   - relative locations of all sources from the project root.
#
rule dump-test ( target )
{
    local type = [ $(target).type ] ;
    local name = [ $(target).name ] ;
    local project = [ $(target).project ] ;

    local project-root = [ $(project).get project-root ] ;
    local library = [ get-library-name [ path.root [ $(project).get location ]
        [ path.pwd ] ] ] ;
    if $(library)
    {
        name = $(library)/$(name) ;
    }

    local sources = [ $(target).sources ] ;
    local source-files ;
    for local s in $(sources)
    {
        if [ class.is-a $(s) : file-reference ]
        {
            local location = [ path.root [ path.root [ $(s).name ]
                [ $(s).location ] ] [ path.pwd ] ] ;

            source-files += [ path.relative-to [ path.root $(project-root)
                [ path.pwd ] ] $(location) ] ;
        }
    }

    local target-name =
        [ $(project).get location ] // [ $(target).name ] $(.TEST-DIR-SUFFIX) ;
    target-name = $(target-name:J=) ;

    local r = [ $(target).requirements ] ;
    # Extract values of the <test-info> feature.
    local test-info = [ $(r).get <test-info> ] ;

    # If the user requested XML output on the command line, add the test info
    # to that XML file rather than dumping it to stdout.
    if $(.out-xml)
    {
        local nl = "
" ;
        .contents on $(.out-xml) +=
            "$(nl)  <test type=\"$(type)\" name=\"$(name)\">"
            "$(nl)    <target><![CDATA[$(target-name)]]></target>"
            "$(nl)    <info><![CDATA[$(test-info)]]></info>"
            "$(nl)    <source><![CDATA[$(source-files)]]></source>"
            "$(nl)  </test>"
            ;
    }
    else
    {
        # Format the test info into a single string of quoted strings.
        test-info = \"$(test-info:J=\"\ \")\" ;

        ECHO boost-test($(type)) \"$(name)\" "[$(test-info)]" ":"
            \"$(source-files)\" ;
    }
}
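
# With the plain console output above, a dumped test ends up looking roughly
# like this (illustrative values):
#
#   boost-test(RUN) "regex/simple" ["checks basics"] : "libs/regex/test/simple.cpp"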

class testing.expect-failure-generator : generator
{
    rule generated-targets ( sources + : property-set : project name ? )
    {
        for local s in $(sources)
        {
            local a = [ $(s).action ] ;
            if $(a)
            {
                for local t in [ $(a).targets ]
                {
                    $(t).fail-expected ;
                }
            }
        }
        return [ generator.generated-targets $(sources)
            : $(property-set) : $(project) $(name) ] ;
    }
}

local rule register-fail-expected ( source-type : test-type )
{
    generators.register [ class.new testing.expect-failure-generator
        testing.expect-failure : $(source-type) : $(test-type) ] ;
}

# Register generators. Depending on the target type, either the
# 'expect-success' or the 'expect-failure' rule will be used.
generators.register-standard testing.expect-success : OBJ        : COMPILE      ;
register-fail-expected                                OBJ        : COMPILE_FAIL ;
generators.register-standard testing.expect-success : RUN_OUTPUT : RUN          ;
register-fail-expected                                RUN_OUTPUT : RUN_FAIL     ;
generators.register-standard testing.expect-success : EXE        : LINK         ;
register-fail-expected                                EXE        : LINK_FAIL    ;

# Generator which runs an EXE and captures its output.
generators.register-standard testing.capture-output : EXE : RUN_OUTPUT ;

# Generator which creates a target if the sources run successfully. Differs
# from RUN in that the run output is not captured. The reason it exists is that
# the 'run' rule is much better for automated testing, but is not user-friendly
# (see http://article.gmane.org/gmane.comp.lib.boost.build/6353).
generators.register-standard testing.unit-test : EXE : UNIT_TEST ;

toolset.uses-features testing.expect-success : <preserve-test-targets> ;
toolset.uses-features testing.expect-failure : <preserve-test-targets> ;

# The action rules called by the generators.

# Causes the 'target' to exist after the bjam invocation if and only if all the
# dependencies were successfully built.
#
rule expect-success ( target : dependency + : requirements * )
{
    **passed** $(target) : $(dependency) : $(requirements) ;
}


# Causes the 'target' to exist after the bjam invocation if and only if some of
# the dependencies were not successfully built.
#
rule expect-failure ( target : dependency + : properties * )
{
    local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ;
    local marker = $(dependency:G=$(grist)*fail) ;
    (failed-as-expected) $(marker) ;
    LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ;
    RMOLD $(marker) ;
    DEPENDS $(marker) : $(dependency) ;
    DEPENDS $(target) : $(marker) ;
    **passed** $(target) : $(marker) : $(properties) ;
}
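
# As a sketch of the marker naming above: a dependency with an actual name
# along the lines of '<pbin/gcc/debug>broken.o' gets a companion marker target
# '<pbin/gcc/debug*fail>broken.o' in the same directory, and it is the marker
# that gets (re)created when the dependency fails as expected.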


# The rule/action combination used to report the successful passing of a test.
#
rule **passed** ( target : sources * : properties * )
{
    if [ feature.get-values preserve-test-targets : $(properties) ] = off
    {
        remove-test-targets $(<) ;
    }
    # Force deletion of the target, in case any dependencies failed to build.
    RMOLD $(<) ;
}


# Used to create test files signifying passed tests.
#
actions **passed**
{
    echo passed > "$(<)"
}

# Used to create replacement object files that do not get created during tests
# that are expected to fail.
#
actions (failed-as-expected)
{
    echo failed as expected > "$(<)"
}


if [ os.name ] = VMS
{
    actions **passed**
    {
        PIPE WRITE SYS$OUTPUT "passed" > $(<:W)
    }

    actions (failed-as-expected)
    {
        PIPE WRITE SYS$OUTPUT "failed as expected" > $(<:W)
    }
}

rule run-path-setup ( target : source : properties * )
{
    # For testing, we need to make sure that all dynamic libraries needed by
    # the test are found. So, we collect all paths from dependency libraries
    # (via the xdll-path property) and add whatever explicit dll-path the user
    # has specified. The resulting paths are added to the environment on each
    # test invocation.
    local target-os = [ feature.get-values <target-os> : $(properties) ] ;
    local dll-paths = [ feature.get-values <dll-path> : $(properties) ] ;
    dll-paths += [ feature.get-values <xdll-path> : $(properties) ] ;
    if $(target-os) != vxworks
    {
         dll-paths += [ on $(source) return $(RUN_PATH) ] ;
    }
    dll-paths = [ sequence.unique $(dll-paths) ] ;
    if $(dll-paths)
    {
        translate-to-os = path.native ;
        if [ os.name ] = VMS
        {
            translate-to-os = path.to-VMS ;
        }
        if $(target-os) = vxworks
        {
            # Map <build-os> paths to <target-os> paths.
            local save-os = [ modules.peek os : .name ] ;
            modules.poke os : .name : VXWORKS ;
            local parent = [ os.environ PKG_SRC_BUILD_DIR ] ;
            local prefix = [ os.environ LAYER_SRC_PATH ] ;
            local target-dll-paths ;
            for local e in $(dll-paths)
            {
                target-dll-paths += [ path.join $(prefix) [ path.relative $(e) $(parent) : noerror ] ] ;
            }
            PATH_SETUP on $(target) = [ common.prepend-path-variable-command
                [ os.shared-library-path-variable ] : $(target-dll-paths) ] ;
            modules.poke os : .name : $(save-os) ;
        }
        else
        {
            dll-paths = [ sequence.transform $(translate-to-os) : $(dll-paths) ] ;
            PATH_SETUP on $(target) = [ common.prepend-path-variable-command
                [ os.shared-library-path-variable ] : $(dll-paths) ] ;
        }
    }
}
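
# The PATH_SETUP command computed above is emitted at the start of the test
# actions below; conceptually, on Linux it prepends the collected directories
# to the shared library search path before the test binary runs, along the
# lines of (illustrative, exact form is up to
# common.prepend-path-variable-command):
#
#   export LD_LIBRARY_PATH="bin/gcc/debug:$LD_LIBRARY_PATH"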


local argv = [ modules.peek : ARGV ] ;

toolset.flags testing.capture-output ARGS <testing.arg> ;
toolset.flags testing.capture-output INPUT_FILES <testing.input-file> ;
toolset.flags testing.capture-output LAUNCHER <testing.launcher> ;

toolset.uses-features testing.capture-output :
    <testing.launcher> <testing.execute> <dll-path> <xdll-path> <target-os> ;

if --remove-test-targets in [ modules.peek : ARGV ]
{
    feature.set-default preserve-test-targets : off ;
}
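
# So a build that should also clean up the intermediate test binaries can
# either pass the option or set the feature explicitly (illustrative
# invocations):
#
#   b2 --remove-test-targets
#   b2 preserve-test-targets=off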


# Runs the executable 'source' and stores its stdout in the file 'target'. The
# intermediate targets (e.g. the executable itself) are removed after a
# successful run when the <preserve-test-targets> feature is 'off'.
#
rule capture-output ( target : source : properties * )
{
    output-file on $(target) = $(target:S=.output) ;
    LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ;

    # The INCLUDES kills a warning about an independent target...
    INCLUDES $(target) : $(target:S=.output) ;
    # ...but it also puts .output into the dependency graph, so we must tell
    # jam it is OK if it cannot find the target or an updating rule for it.
    NOCARE $(target:S=.output) ;

    # This has a two-fold effect. First, it adds the input files to the
    # dependency graph, preventing a warning. Second, it causes the input files
    # to be bound before the target is created. Therefore, they are bound using
    # the SEARCH setting on them and not the LOCATE setting of $(target), as
    # would happen otherwise (due to a jam bug).
    DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ;

    run-path-setup $(target) : $(source) : $(properties) ;

    DISABLE_TEST_EXECUTION on $(target) = 0 ;
    if [ feature.get-values testing.execute : $(properties) ] = off
    {
        DISABLE_TEST_EXECUTION on $(target) = 1 ;
    }

    if ! [ feature.get-values testing.launcher : $(properties) ]
    {
        ## On VMS set the default launcher to MCR.
        if [ os.name ] = VMS { LAUNCHER on $(target) = MCR ; }
    }
}

.types-to-remove = EXE OBJ ;

local rule remove-test-targets ( target )
{
    local action = [ on $(target) return $(.action) ] ;
    local associated-targets = [ virtual-target.traverse [ $(action).targets ] ] ;
    local targets-to-remove ;
    for local t in [ sequence.unique $(associated-targets) ]
    {
        if [ $(t).type ] in $(.types-to-remove)
        {
            targets-to-remove += [ $(t).actual-name ] ;
        }
    }
    rmtemp-sources $(target) : $(targets-to-remove) ;
}

local rule rmtemp-sources ( target : sources * )
{
    if $(sources)
    {
        TEMPORARY $(sources) ;
        # Set a second action on the target that will be executed after the
        # capture-output action. The 'RmTemps' rule has the 'ignore' modifier
        # so it is always considered successful. This is needed for 'run-fail'
        # tests: their targets are marked with FAIL_EXPECTED, and without
        # 'ignore' a successful execution would be negated and reported as a
        # failure. With 'ignore' we do not detect the case where removing the
        # files fails, but that is not likely to happen.
        RmTemps $(target) : $(sources) ;
    }
}


if [ os.name ] = NT
{
    .STATUS        = %status% ;
    .SET_STATUS    = "set status=%ERRORLEVEL%" ;
    .RUN_OUTPUT_NL = "echo." ;
    .THEN          = "(" ;
    .EXIT_SUCCESS  = "0" ;
    .STATUS_0      = "%status% EQU 0 $(.THEN)" ;
    .STATUS_NOT_0  = "%status% NEQ 0 $(.THEN)" ;
    .VERBOSE       = "%verbose% EQU 1 $(.THEN)" ;
    .ENDIF         = ")" ;
    .SHELL_SET     = "set " ;
    .CATENATE      = type ;
    .CP            = copy ;
    .NULLIN        = ;
}
else if [ os.name ] = VMS
{
    local nl = "
" ;

    .STATUS        = "''status'" ;
    .SET_STATUS    = "status=$STATUS" ;
    .SAY           = "pipe write sys$output" ; ## not really echo
    .RUN_OUTPUT_NL = "$(.SAY) \"\"" ;
    .THEN          = "$(nl)then" ;
    .EXIT_SUCCESS  = "1" ;
    .SUCCESS       = "status .eq. $(.EXIT_SUCCESS) $(.THEN)" ;
    .STATUS_0      = "status .eq. 0 $(.THEN)" ;
    .STATUS_NOT_0  = "status .ne. 0 $(.THEN)" ;
    .VERBOSE       = "verbose .eq. 1 $(.THEN)" ;
    .ENDIF         = "endif" ;
    .SHELL_SET     = "" ;
    .CATENATE      = type ;
    .CP            = copy ;
    .NULLIN        = ;
}
else
{
    .STATUS        = "$status" ;
    .SET_STATUS    = "status=$?" ;
    .RUN_OUTPUT_NL = "echo" ;
    .THEN          = "; then" ;
    .EXIT_SUCCESS  = "0" ;
    .STATUS_0      = "test $status -eq 0 $(.THEN)" ;
    .STATUS_NOT_0  = "test $status -ne 0 $(.THEN)" ;
    .VERBOSE       = "test $verbose -eq 1 $(.THEN)" ;
    .ENDIF         = "fi" ;
    .SHELL_SET     = "" ;
    .CATENATE      = cat ;
    .CP            = cp ;
    .NULLIN        = "<" "/dev/null" ;
}
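
# On Linux, after substituting the variables above, the capture-output action
# below boils down to a shell fragment roughly like the following (illustrative
# target names, no launcher, args or input files; verbose handling elided):
#
#   status=0
#   if test $status -ne 0 ; then
#       echo Skipping test execution due to testing.execute=off
#       exit 0
#   fi
#   "bin/gcc/debug/foo" > "bin/gcc/debug/foo.output" 2>&1 < /dev/null
#   status=$?
#   echo >> "bin/gcc/debug/foo.output"
#   echo EXIT STATUS: $status >> "bin/gcc/debug/foo.output"
#   if test $status -eq 0 ; then
#       cp "bin/gcc/debug/foo.output" "bin/gcc/debug/foo.run"
#   fi
#   ...
#   exit $status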


.VERBOSE_TEST = 0 ;
if --verbose-test in [ modules.peek : ARGV ]
{
    .VERBOSE_TEST = 1 ;
}


.RM = [ common.rm-command ] ;


actions capture-output bind INPUT_FILES output-file
{
    $(PATH_SETUP)
    $(.SHELL_SET)status=$(DISABLE_TEST_EXECUTION)
    if $(.STATUS_NOT_0)
        echo Skipping test execution due to testing.execute=off
        exit $(.EXIT_SUCCESS)
    $(.ENDIF)
    $(LAUNCHER) "$(>)" $(ARGS) "$(INPUT_FILES)" > "$(output-file)" 2>&1 $(.NULLIN)
    $(.SET_STATUS)
    $(.RUN_OUTPUT_NL) >> "$(output-file)"
    echo EXIT STATUS: $(.STATUS) >> "$(output-file)"
    if $(.STATUS_0)
        $(.CP) "$(output-file)" "$(<)"
    $(.ENDIF)
    $(.SHELL_SET)verbose=$(.VERBOSE_TEST)
    if $(.STATUS_NOT_0)
        $(.SHELL_SET)verbose=1
    $(.ENDIF)
    if $(.VERBOSE)
        echo ====== BEGIN OUTPUT ======
        $(.CATENATE) "$(output-file)"
        echo ====== END OUTPUT ======
    $(.ENDIF)
    exit $(.STATUS)
}


actions quietly updated ignore piecemeal together RmTemps
{
    $(.RM) "$(>)"
}

if [ os.name ] = VMS
{
    actions capture-output bind INPUT_FILES output-file
    {
        $(PATH_SETUP)
        $(.SHELL_SET)status=$(DISABLE_TEST_EXECUTION)
        if $(.STATUS_NOT_0)
            $(.SAY) "Skipping test execution due to testing.execute=off"
            exit "$(.EXIT_SUCCESS)"
        $(.ENDIF)
        !! Execute twice - first for status, second for output
        set noon
        pipe $(LAUNCHER) $(>:W) $(ARGS) $(INPUT_FILES:W) 2>NL: >NL:
        $(.SET_STATUS)
        pipe $(LAUNCHER) $(>:W) $(ARGS) $(INPUT_FILES:W) | type sys$input /out=$(output-file:W)
        set on
        !! Harmonize VMS success status with POSIX
        if $(.SUCCESS)
            $(.SHELL_SET)status="0"
        $(.ENDIF)
        $(.RUN_OUTPUT_NL) | append /new sys$input $(output-file:W)
        $(.SAY) "EXIT STATUS: $(.STATUS)" | append /new sys$input $(output-file:W)
        if $(.STATUS_0)
            $(.CP) $(output-file:W) $(<:W)
        $(.ENDIF)
        $(.SHELL_SET)verbose=$(.VERBOSE_TEST)
        if $(.STATUS_NOT_0)
            $(.SHELL_SET)verbose=1
        $(.ENDIF)
        if $(.VERBOSE)
            $(.SAY) "====== BEGIN OUTPUT ======"
            $(.CATENATE) $(output-file:W)
            $(.SAY) "====== END OUTPUT ======"
        $(.ENDIF)
        !! Harmonize VMS success status with POSIX on exit
        if $(.STATUS_0)
            $(.SHELL_SET)status="$(.EXIT_SUCCESS)"
        $(.ENDIF)
        exit "$(.STATUS)"
    }

    actions quietly updated ignore piecemeal together RmTemps
    {
        $(.RM) $(>:WJ=;*,);*
    }
}

.MAKE_FILE = [ common.file-creation-command ] ;

toolset.flags testing.unit-test LAUNCHER <testing.launcher> ;
toolset.flags testing.unit-test ARGS <testing.arg> ;


rule unit-test ( target : source : properties * )
{
    run-path-setup $(target) : $(source) : $(properties) ;

    if ! [ feature.get-values testing.launcher : $(properties) ]
    {
        ## On VMS set the default launcher to MCR.
        if [ os.name ] = VMS { LAUNCHER on $(target) = MCR ; }
    }
}


actions unit-test
{
    $(PATH_SETUP)
    $(LAUNCHER) "$(>)" $(ARGS) && $(.MAKE_FILE) "$(<)"
}

if [ os.name ] = VMS
{
    actions unit-test
    {
        $(PATH_SETUP)
        pipe $(LAUNCHER) $(>:W) $(ARGS) && $(.MAKE_FILE) $(<:W)
    }
}

IMPORT $(__name__) : compile compile-fail run run-fail link link-fail
    : : compile compile-fail run run-fail link link-fail ;


# This is a composing generator to support cases where a generator for the
# specified target constructs other targets as well. One such example is
# msvc's exe generator, which constructs both EXE and PDB targets.
type.register TIME : time ;
generators.register-composing testing.time : : TIME ;


# Note that this rule may be called multiple times for a single target in case
# there are multiple actions operating on the same target in sequence. One such
# example is msvc exe targets, first created by a linker action and then
# updated with an embedded manifest file by a separate action.
rule record-time ( target : source : start end user system clock )
{
    local src-string = "[$(source:G=:J=,)] " ;
    USER_TIME on $(target) += $(src-string)$(user) ;
    SYSTEM_TIME on $(target) += $(src-string)$(system) ;
    CLOCK_TIME on $(target) += $(src-string)$(clock) ;

    # We need the following variables because attempting to perform such
    # variable expansion in actions would not work due to quotes getting
    # treated as regular characters.
    USER_TIME_SECONDS on $(target) += $(src-string)$(user)" seconds" ;
    SYSTEM_TIME_SECONDS on $(target) += $(src-string)$(system)" seconds" ;
    CLOCK_TIME_SECONDS on $(target) += $(src-string)$(clock)" seconds" ;
}

# Support for generating timing information for any main target. To use it,
# declare a custom make target that uses the testing.time generator rule
# specified here. For example:
#
# make main.cpp : main_cpp.pro : @do-something ;
# time main.time : main.cpp ;
# actions do-something
# {
#     sleep 2 && echo "$(<)" > "$(<)"
# }
#
# The above will generate a "main.time" file, and echo to the output, timing
# information for the action building the source "main.cpp".


IMPORT testing : record-time : : testing.record-time ;

# Calling this rule requests that Boost.Build time how long it takes to build
# the 'sources' targets and display the results both on the standard output and
# in the 'target' file.
#
rule time ( target : sources + : properties *  )
{
    # Set up the rule for recording timing information.
    local action = [ on $(target) return $(.action) ] ;
    for local action.source in [ $(action).sources ]
    {
        # Yes, this uses the private "actual-name" of the target action. But it
        # is the only way to get at the real name of the sources given the
        # context of header scanners.
        __TIMING_RULE__ on [ $(action.source).actual-name ] = testing.record-time $(target) ;
    }

    # Make sure the sources get rebuilt any time we need to retrieve that
    # information.
    REBUILDS $(target) : $(sources) ;
}


actions time
{
    echo user: $(USER_TIME)
    echo system: $(SYSTEM_TIME)
    echo clock: $(CLOCK_TIME)

    echo user: $(USER_TIME_SECONDS) > "$(<)"
    echo system: $(SYSTEM_TIME_SECONDS) >> "$(<)"
    echo clock: $(CLOCK_TIME_SECONDS) >> "$(<)"
}

if [ os.name ] = VMS
{
    actions time
    {
        WRITE SYS$OUTPUT "user: ", "$(USER_TIME)"
        WRITE SYS$OUTPUT "system: ", "$(SYSTEM_TIME)"
        WRITE SYS$OUTPUT "clock: ", "$(CLOCK_TIME)"

        PIPE WRITE SYS$OUTPUT "user: ", "$(USER_TIME_SECONDS)" | TYPE SYS$INPUT /OUT=$(<:W)
        PIPE WRITE SYS$OUTPUT "system: ", "$(SYSTEM_TIME_SECONDS)" | APPEND /NEW SYS$INPUT $(<:W)
        PIPE WRITE SYS$OUTPUT "clock: ", "$(CLOCK_TIME_SECONDS)" | APPEND /NEW SYS$INPUT $(<:W)
    }
}