# Copyright 2005 Dave Abrahams
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Copyright 2014-2015 Rene Rivera
# Copyright 2014 Microsoft Corporation
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)

# This module implements a regression testing framework. It declares a number
# of main target rules which perform some action and, if the results are OK,
# create an output file.
#
# The exact list of rules is:
# 'compile'      -- creates a .test file if compilation of sources was
#                   successful.
# 'compile-fail' -- creates a .test file if compilation of sources failed.
# 'run'          -- creates a .test file if running of the executable produced
#                   from sources was successful. Also leaves behind a .output
#                   file with the output from the program run.
# 'run-fail'     -- same as above, but the .test file is created if running
#                   fails.
#
# In all cases, presence of the .test file is an indication that the test
# passed. For more convenient reporting, you might want to use C++ Boost
# regression testing utilities (see http://www.boost.org/more/regression.html).
#
# For historical reasons, a 'unit-test' rule is available which has the same
# syntax as 'exe' and behaves just like 'run'.

# Things to do:
#  - Teach compiler_status to handle Jamfile.v2.
# Notes:
#  - <no-warn> is not implemented, since it is Como-specific, and it is not
#    clear how to implement it
#  - std::locale-support is not implemented (it is used in one test).


import alias ;
import "class" ;
import common ;
import errors ;
import feature ;
import generators ;
# 'modules' is used throughout this file (modules.peek on ARGV); import it
# explicitly rather than relying on it being in scope.
import modules ;
import os ;
import path ;
import project ;
import property ;
import property-set ;
import regex ;
import sequence ;
import targets ;
import toolset ;
import type ;
import virtual-target ;


# Module initialization hook invoked by the build system on import. This
# module needs no per-import setup, so the rule is intentionally empty.
rule init ( )
{
}


# Feature controlling the command used to launch test programs.
feature.feature testing.launcher : : free optional ;

# Free-form informational text attached to a test, reported by --dump-tests.
feature.feature test-info : : free incidental ;
# Command line arguments passed to the test executable when it is run.
feature.feature testing.arg : : free incidental ;
# Input files passed to the test executable; dependency so they get built.
feature.feature testing.input-file : : free dependency ;

# Controls whether intermediate test targets (executables, object files) are
# kept after the test has been run.
feature.feature preserve-test-targets : on off : incidental propagated ;

# Feature to control whether executable binaries are run as part of test.
# This can be used to just compile test cases in cross compilation situations.
feature.feature testing.execute : on off : incidental propagated ;
feature.set-default testing.execute : on ;

# Register target types. A ".test" file marks a passed test; types derived
# from TEST represent the different kinds of test main targets.
type.register TEST : test ;
type.register COMPILE : : TEST ;
type.register COMPILE_FAIL : : TEST ;
type.register RUN_OUTPUT : run ;
type.register RUN : : TEST ;
type.register RUN_FAIL : : TEST ;
type.register LINK_FAIL : : TEST ;
type.register LINK : : TEST ;
type.register UNIT_TEST : passed : TEST ;


# Declare the rules which create main targets. While the 'type' module already
# creates rules with the same names for us, we need extra convenience: default
# name of main target, so write our own versions.

# Helper rule. Create a test target, using basename of first source if no
# target name is explicitly passed. Remembers the created target in a global
# variable (.all-tests) so it can later be reported by --dump-tests.
#
rule make-test ( target-type : sources + : requirements * : target-name ? )
{
    target-name ?= $(sources[1]:D=:S=) ;

    # Having periods (".") in the target name is problematic because the typed
    # generator will strip the suffix and use the bare name for the file
    # targets. Even though the location-prefix averts problems most times it
    # does not prevent ambiguity issues when referring to the test targets.
    # For example when using the XML log output. So we rename the target to
    # remove the periods, and provide an alias for users.
    local real-name = [ regex.replace $(target-name) "[.]" "~" ] ;

    local project = [ project.current ] ;
    # The <location-prefix> forces the build system to generate paths in the
    # form '$build_dir/array1.test/gcc/debug'. This is necessary to allow
    # post-processing tools to work.
    local t = [ targets.create-typed-target [ type.type-from-rule-name
        $(target-type) ] : $(project) : $(real-name) : $(sources) :
        $(requirements) <location-prefix>$(real-name).test ] ;

    # The alias to the real target, per period replacement above.
    if $(real-name) != $(target-name)
    {
        alias $(target-name) : $(t) ;
    }

    # Remember the test (for --dump-tests). A good way would be to collect all
    # given a project. This has some technical problems: e.g. we can not call
    # this dump from a Jamfile since projects referred by 'build-project' are
    # not available until the whole Jamfile has been loaded.
    .all-tests += $(t) ;
    return $(t) ;
}


# Note: passing more than one cpp file here is known to fail. Passing a cpp
# file and a library target works.
#
rule compile ( sources + : requirements * : target-name ? )
{
    return [ make-test compile : $(sources) : $(requirements) : $(target-name) ]
        ;
}


rule compile-fail ( sources + : requirements * : target-name ? )
{
    return [ make-test compile-fail : $(sources) : $(requirements) :
        $(target-name) ] ;
}


rule link ( sources + : requirements * : target-name ? )
{
    return [ make-test link : $(sources) : $(requirements) : $(target-name) ] ;
}


rule link-fail ( sources + : requirements * : target-name ? )
{
    return [ make-test link-fail : $(sources) : $(requirements) : $(target-name)
        ] ;
}


# Validates and converts the given input files into <testing.input-file>
# properties. Multiple input files must be given pre-sorted, as property-set
# creation sorts property values and would otherwise reorder them.
#
rule handle-input-files ( input-files * )
{
    if $(input-files[2])
    {
        # Check that sorting made when creating property-set instance will not
        # change the ordering.
        if [ sequence.insertion-sort $(input-files) ] != $(input-files)
        {
            errors.user-error "Names of input files must be sorted alphabetically"
                : "due to internal limitations" ;
        }
    }
    return <testing.input-file>$(input-files) ;
}


rule run ( sources + : args * : input-files * : requirements * : target-name ? :
    default-build * )
{
    requirements += <testing.arg>$(args:J=" ") ;
    requirements += [ handle-input-files $(input-files) ] ;
    return [ make-test run : $(sources) : $(requirements) : $(target-name) ] ;
}


rule run-fail ( sources + : args * : input-files * : requirements * :
    target-name ? : default-build * )
{
    requirements += <testing.arg>$(args:J=" ") ;
    requirements += [ handle-input-files $(input-files) ] ;
    return [ make-test run-fail : $(sources) : $(requirements) : $(target-name)
        ] ;
}


# Use 'test-suite' as a synonym for 'alias', for backward compatibility.
IMPORT : alias : : test-suite ;


# For all main targets in 'project-module', which are typed targets with type
# derived from 'TEST', produce some interesting information.
#
rule dump-tests
{
    for local t in $(.all-tests)
    {
        dump-test $(t) ;
    }
}


# Given a project location in normalized form (slashes are forward), compute
# the name of the Boost library.
#
local rule get-library-name ( path )
{
    # Path is in normalized form, so all slashes are forward.
    local match1 = [ MATCH /(tools|libs)/(.*)/(test|example) : $(path) ] ;
    local match2 = [ MATCH /(tools|libs)/(.*)$ : $(path) ] ;
    local match3 = [ MATCH (/status$) : $(path) ] ;

    if $(match1) { return $(match1[2]) ; }
    else if $(match2) { return $(match2[2]) ; }
    else if $(match3) { return "" ; }
    else if --dump-tests in [ modules.peek : ARGV ]
    {
        # The 'run' rule and others might be used outside boost. In that case,
        # just return the path, since the 'library name' makes no sense.
        return $(path) ;
    }
}


# Was an XML dump requested?
.out-xml = [ MATCH --out-xml=(.*) : [ modules.peek : ARGV ] ] ;


# Takes a target (instance of 'basic-target') and prints
# - its type
# - its name
# - comments specified via the <test-info> property
# - relative location of all source from the project root.
#
rule dump-test ( target )
{
    local type = [ $(target).type ] ;
    local name = [ $(target).name ] ;
    local project = [ $(target).project ] ;

    local project-root = [ $(project).get project-root ] ;
    local library = [ get-library-name [ path.root [ $(project).get location ]
        [ path.pwd ] ] ] ;
    if $(library)
    {
        name = $(library)/$(name) ;
    }

    local sources = [ $(target).sources ] ;
    local source-files ;
    for local s in $(sources)
    {
        if [ class.is-a $(s) : file-reference ]
        {
            local location = [ path.root [ path.root [ $(s).name ]
                [ $(s).location ] ] [ path.pwd ] ] ;

            source-files += [ path.relative-to [ path.root $(project-root)
                [ path.pwd ] ] $(location) ] ;
        }
    }

    local target-name = [ $(project).get location ] // [ $(target).name ] .test
        ;
    target-name = $(target-name:J=) ;

    local r = [ $(target).requirements ] ;
    # Extract values of the <test-info> feature.
    local test-info = [ $(r).get <test-info> ] ;

    # If the user requested XML output on the command-line, add the test info
    # to that XML file rather than dumping them to stdout.
    if $(.out-xml)
    {
        local nl = "
" ;
        .contents on $(.out-xml) +=
            "$(nl)  <test type=\"$(type)\" name=\"$(name)\">"
            "$(nl)    <target><![CDATA[$(target-name)]]></target>"
            "$(nl)    <info><![CDATA[$(test-info)]]></info>"
            "$(nl)    <source><![CDATA[$(source-files)]]></source>"
            "$(nl)  </test>"
            ;
    }
    else
    {
        # Format them into a single string of quoted strings.
        test-info = \"$(test-info:J=\"\ \")\" ;

        ECHO boost-test($(type)) \"$(name)\" [$(test-info)] ":"
            \"$(source-files)\" ;
    }
}


# Register generators. Depending on target type, either 'expect-success' or
# 'expect-failure' rule will be used.
generators.register-standard testing.expect-success : OBJ : COMPILE ;
generators.register-standard testing.expect-failure : OBJ : COMPILE_FAIL ;
generators.register-standard testing.expect-success : RUN_OUTPUT : RUN ;
generators.register-standard testing.expect-failure : RUN_OUTPUT : RUN_FAIL ;
generators.register-standard testing.expect-failure : EXE : LINK_FAIL ;
generators.register-standard testing.expect-success : EXE : LINK ;

# Generator which runs an EXE and captures output.
generators.register-standard testing.capture-output : EXE : RUN_OUTPUT ;

# Generator which creates a target if sources run successfully. Differs from
# RUN in that run output is not captured. The reason why it exists is that the
# 'run' rule is much better for automated testing, but is not user-friendly
# (see http://article.gmane.org/gmane.comp.lib.boost.build/6353).
generators.register-standard testing.unit-test : EXE : UNIT_TEST ;


# The action rules called by generators.

# Causes the 'target' to exist after bjam invocation if and only if all the
# dependencies were successfully built.
#
rule expect-success ( target : dependency + : requirements * )
{
    **passed** $(target) : $(dependency) ;
}


# Causes the 'target' to exist after bjam invocation if and only if some of
# the dependencies were not successfully built.
#
rule expect-failure ( target : dependency + : properties * )
{
    local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ;
    # A separate '*fail' marker target stands in for the dependency that is
    # expected to fail to build.
    local marker = $(dependency:G=$(grist)*fail) ;
    (failed-as-expected) $(marker) ;
    FAIL_EXPECTED $(dependency) ;
    LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ;
    RMOLD $(marker) ;
    DEPENDS $(marker) : $(dependency) ;
    DEPENDS $(target) : $(marker) ;
    **passed** $(target) : $(marker) ;
}


# The rule/action combination used to report successful passing of a test.
#
rule **passed**
{
    # Potentially remove the test's intermediate targets (see
    # remove-test-targets below).
    remove-test-targets $(<) ;

    # Dump all the tests, if needed. We do it here, since dump should happen
    # only after all Jamfiles have been read, and there is no such place
    # currently defined (but there should be).
    if ! $(.dumped-tests) && ( --dump-tests in [ modules.peek : ARGV ] )
    {
        .dumped-tests = true ;
        dump-tests ;
    }

    # Force deletion of the target, in case any dependencies failed to build.
    RMOLD $(<) ;
}


# Used to create test files signifying passed tests.
#
actions **passed**
{
    echo passed > "$(<)"
}


# Used to create replacement object files that do not get created during tests
# that are expected to fail.
#
actions (failed-as-expected)
{
    echo failed as expected > "$(<)"
}


# Collects the shared-library search paths needed to run 'source' and stores a
# shell command setting up the library path variable in the PATH_SETUP
# variable on 'target'.
#
rule run-path-setup ( target : source : properties * )
{
    # For testing, we need to make sure that all dynamic libraries needed by
    # the test are found. So, we collect all paths from dependency libraries
    # (via xdll-path property) and add whatever explicit dll-path user has
    # specified. The resulting paths are added to the environment on each test
    # invocation.
    local dll-paths = [ feature.get-values <dll-path> : $(properties) ] ;
    dll-paths += [ feature.get-values <xdll-path> : $(properties) ] ;
    dll-paths += [ on $(source) return $(RUN_PATH) ] ;
    dll-paths = [ sequence.unique $(dll-paths) ] ;
    if $(dll-paths)
    {
        dll-paths = [ sequence.transform path.native : $(dll-paths) ] ;
        PATH_SETUP on $(target) = [ common.prepend-path-variable-command
            [ os.shared-library-path-variable ] : $(dll-paths) ] ;
    }
}


# NOTE(review): 'argv' appears unused in the remainder of this module; kept
# for backward compatibility in case external code peeks at it.
local argv = [ modules.peek : ARGV ] ;

toolset.flags testing.capture-output ARGS <testing.arg> ;
toolset.flags testing.capture-output INPUT_FILES <testing.input-file> ;
toolset.flags testing.capture-output LAUNCHER <testing.launcher> ;

.preserve-test-targets = on ;
if --remove-test-targets in [ modules.peek : ARGV ]
{
    .preserve-test-targets = off ;
}


# Runs executable 'source' and stores its stdout in file 'target'. Unless test
# targets are preserved (the default; disabled via the
# <preserve-test-targets>off property or the --remove-test-targets command
# line option), also removes intermediate targets. The 'targets-to-remove'
# parameter controls what should be removed:
#   - if 'none', does not remove anything, ever
#   - if empty, removes 'source'
#   - if non-empty and not 'none', contains a list of sources to remove.
#
rule capture-output ( target : source : properties * : targets-to-remove * )
{
    output-file on $(target) = $(target:S=.output) ;
    LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ;

    # The INCLUDES kill a warning about independent target...
    INCLUDES $(target) : $(target:S=.output) ;
    # but it also puts .output into dependency graph, so we must tell jam it
    # is OK if it cannot find the target or updating rule.
    NOCARE $(target:S=.output) ;

    # This has two-fold effect. First it adds input files to the dependency
    # graph, preventing a warning. Second, it causes input files to be bound
    # before target is created. Therefore, they are bound using SEARCH setting
    # on them and not LOCATE setting of $(target), as in other case (due to
    # jam bug).
    DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ;

    if $(targets-to-remove) = none
    {
        targets-to-remove = ;
    }
    else if ! $(targets-to-remove)
    {
        targets-to-remove = $(source) ;
    }

    run-path-setup $(target) : $(source) : $(properties) ;

    DISABLE_TEST_EXECUTION on $(target) = 0 ;
    if [ feature.get-values testing.execute : $(properties) ] = off
    {
        DISABLE_TEST_EXECUTION on $(target) = 1 ;
    }

    if [ feature.get-values preserve-test-targets : $(properties) ] = off
        || $(.preserve-test-targets) = off
    {
        rmtemp-sources $(target) : $(targets-to-remove) ;
        for local to-remove in $(targets-to-remove)
        {
            rmtemp-all-sources $(to-remove) ;
        }
    }
}

# Only targets of these types are removed when cleaning up test intermediates.
.types-to-remove = EXE OBJ ;

# Removes the intermediate sources of the given test targets when test target
# preservation has been turned off globally.
#
local rule remove-test-targets ( targets + )
{
    if $(.preserve-test-targets) = off
    {
        # BUGFIX: this used to dereference the undefined variable $(target)
        # (the parameter is 'targets'), which passed an empty argument to
        # rmtemp-all-sources and triggered its argument check.
        for local t in $(targets)
        {
            rmtemp-all-sources $(t) ;
        }
    }
}

# Recursively marks the EXE/OBJ sources of 'target' as temporary and schedules
# their removal once 'target' has been built.
#
local rule rmtemp-all-sources ( target )
{
    local sources ;
    local action = [ on $(target) return $(.action) ] ;
    if $(action)
    {
        local action-sources = [ $(action).sources ] ;
        for local source in $(action-sources)
        {
            local source-type = [ $(source).type ] ;
            if $(source-type) in $(.types-to-remove)
            {
                sources += [ $(source).actual-name ] ;
            }
            else
            {
                # ECHO IGNORED: $(source) :: $(source-type) ;
            }
        }
        if $(sources)
        {
            rmtemp-sources $(target) : $(sources) ;
            for local source in $(sources)
            {
                rmtemp-all-sources $(source) ;
            }
        }
    }
}

# Marks 'sources' as temporary and attaches a removal action to 'target'.
#
local rule rmtemp-sources ( target : sources * )
{
    if $(sources)
    {
        TEMPORARY $(sources) ;
        # Set a second action on target that will be executed after capture
        # output action. The 'RmTemps' rule has the 'ignore' modifier so it is
        # always considered succeeded. This is needed for 'run-fail' test. For
        # that test the target will be marked with FAIL_EXPECTED, and without
        # 'ignore' successful execution will be negated and be reported as
        # failure. With 'ignore' we do not detect a case where removing files
        # fails, but it is not likely to happen.
        RmTemps $(target) : $(sources) ;
    }
}


# Platform-specific shell fragments used to assemble the capture-output and
# unit-test actions below.
if [ os.name ] = NT
{
    .STATUS = %status% ;
    .SET_STATUS = "set status=%ERRORLEVEL%" ;
    .RUN_OUTPUT_NL = "echo." ;
    .STATUS_0 = "%status% EQU 0 (" ;
    .STATUS_NOT_0 = "%status% NEQ 0 (" ;
    .VERBOSE = "%verbose% EQU 1 (" ;
    .ENDIF = ")" ;
    .SHELL_SET = "set " ;
    .CATENATE = type ;
    .CP = copy ;
    .NULLIN = ;
}
else
{
    .STATUS = "$status" ;
    .SET_STATUS = "status=$?" ;
    .RUN_OUTPUT_NL = "echo" ;
    .STATUS_0 = "test $status -eq 0 ; then" ;
    .STATUS_NOT_0 = "test $status -ne 0 ; then" ;
    .VERBOSE = "test $verbose -eq 1 ; then" ;
    .ENDIF = "fi" ;
    .SHELL_SET = "" ;
    .CATENATE = cat ;
    .CP = cp ;
    .NULLIN = "<" "/dev/null" ;
}


# Print the captured program output when --verbose-test is given (and always
# on failure; see the action below).
.VERBOSE_TEST = 0 ;
if --verbose-test in [ modules.peek : ARGV ]
{
    .VERBOSE_TEST = 1 ;
}


.RM = [ common.rm-command ] ;


actions capture-output bind INPUT_FILES output-file
{
    $(PATH_SETUP)
    $(.SHELL_SET)status=$(DISABLE_TEST_EXECUTION)
    if $(.STATUS_NOT_0)
        echo Skipping test execution due to testing.execute=off
        exit 0
    $(.ENDIF)
    $(LAUNCHER) "$(>)" $(ARGS) "$(INPUT_FILES)" > "$(output-file)" 2>&1 $(.NULLIN)
    $(.SET_STATUS)
    $(.RUN_OUTPUT_NL) >> "$(output-file)"
    echo EXIT STATUS: $(.STATUS) >> "$(output-file)"
    if $(.STATUS_0)
        $(.CP) "$(output-file)" "$(<)"
    $(.ENDIF)
    $(.SHELL_SET)verbose=$(.VERBOSE_TEST)
    if $(.STATUS_NOT_0)
        $(.SHELL_SET)verbose=1
    $(.ENDIF)
    if $(.VERBOSE)
        echo ====== BEGIN OUTPUT ======
        $(.CATENATE) "$(output-file)"
        echo ====== END OUTPUT ======
    $(.ENDIF)
    exit $(.STATUS)
}


actions quietly updated ignore piecemeal together RmTemps
{
    $(.RM) "$(>)"
}


.MAKE_FILE = [ common.file-creation-command ] ;

toolset.flags testing.unit-test LAUNCHER <testing.launcher> ;
toolset.flags testing.unit-test ARGS <testing.arg> ;


rule unit-test ( target : source : properties * )
{
    run-path-setup $(target) : $(source) : $(properties) ;
}


actions unit-test
{
    $(PATH_SETUP)
    $(LAUNCHER) "$(>)" $(ARGS) && $(.MAKE_FILE) "$(<)"
}


IMPORT $(__name__) : compile compile-fail run run-fail link link-fail
    : : compile compile-fail run run-fail link link-fail ;


# This is a composing generator to support cases where a generator for the
# specified target constructs other targets as well. One such example is
# msvc's exe generator that constructs both EXE and PDB targets.
type.register TIME : time ;
generators.register-composing testing.time : : TIME ;


# Note that this rule may be called multiple times for a single target in case
# there are multiple actions operating on the same target in sequence. One
# such example are msvc exe targets first created by a linker action and then
# updated with an embedded manifest file by a separate action.
rule record-time ( target : source : start end user system )
{
    local src-string = [$(source:G=:J=",")"] " ;
    USER_TIME on $(target) += $(src-string)$(user) ;
    SYSTEM_TIME on $(target) += $(src-string)$(system) ;

    # We need the following variables because attempting to perform such
    # variable expansion in actions would not work due to quotes getting
    # treated as regular characters.
    USER_TIME_SECONDS on $(target) += $(src-string)$(user)" seconds" ;
    SYSTEM_TIME_SECONDS on $(target) += $(src-string)$(system)" seconds" ;
}


IMPORT testing : record-time : : testing.record-time ;


# Calling this rule requests that Boost Build time how long it takes to build
# the 'source' target and display the results both on the standard output and
# in the 'target' file.
#
rule time ( target : sources + : properties * )
{
    # Set up rule for recording timing information.
    __TIMING_RULE__ on $(sources) = testing.record-time $(target) ;

    # Make sure the sources get rebuilt any time we need to retrieve that
    # information.
    REBUILDS $(target) : $(sources) ;
}


actions time
{
    echo user: $(USER_TIME)
    echo system: $(SYSTEM_TIME)

    echo user: $(USER_TIME_SECONDS) > "$(<)"
    echo system: $(SYSTEM_TIME_SECONDS) >> "$(<)"
}