#! /bin/sh

# $Id: check_make_unix.sh 633243 2021-06-15 18:16:21Z ivanov $
# Author: Vladimir Ivanov, NCBI
#
###########################################################################
#
# Compile a check script and copy necessary files to run tests in the
# UNIX build tree.
#
# Usage:
#    check_make_unix.sh <test_list> <signature> <build_dir> <top_srcdir> <target_dir> <check_script>
#
#    test_list    - a list of tests (built with "make check_r")
#                   (default: "<build_dir>/check.sh.list")
#    signature    - build signature
#    build_dir    - path to UNIX build tree like".../build/..."
#                   (default: will try determine path from current work
#                   directory -- root of build tree )
#    top_srcdir   - path to the root src directory
#                   (default: will try determine path from current work
#                   directory -- root of build tree )
#    target_dir   - path where the check script and logs will be created
#                   (default: current dir)
#    check_script - name of the check script (without path).
#                   (default: "check.sh" / "<target_dir>/check.sh")
#
#    If any parameter is skipped, a default value will be used for it.
#
# Note:
#    Works with a UNIX build tree only (any configuration).
#
###########################################################################

# Load configuration options
# (check_setup.cfg lives next to this script; x_scripts_dir ends up two
# levels above it -- the root "scripts" directory of the C++ Toolkit tree)
x_check_scripts_dir=`dirname "$0"`
x_scripts_dir=`dirname "$x_check_scripts_dir"`
x_scripts_dir=`dirname "$x_scripts_dir"`
. ${x_check_scripts_dir}/check_setup.cfg


# Parameters

res_out="check.sh"
res_list="$res_out.list"

# Field delimiters in the test list
# (these symbols are used directly in the "sed" commands below)
x_delim=" ____ "
x_delim_internal="~"
x_tmp="/var/tmp"

x_date_format="%m/%d/%Y %H:%M:%S"

x_list=$1
x_signature=$2
x_build_dir=$3
x_top_srcdir=$4
x_target_dir=$5
x_out=$6

# Detect Cygwin
case `uname -s` in
    CYGWIN* ) cygwin=true ;;
    *) cygwin=false ;;
esac

# Check for build dir
if test ! -z "$x_build_dir"; then
    if test ! -d "$x_build_dir"; then
        echo "Build directory \"$x_build_dir\" don't exist."
        exit 1
    fi
    # Normalize to an absolute path without a trailing slash
    x_build_dir=`(cd "$x_build_dir"; pwd | sed -e 's/\/$//g')`
else
    # Get build dir name from the current path
    x_build_dir=`pwd | sed -e 's%/build.*$%%'`
    if test -d "$x_build_dir/build"; then
        x_build_dir="$x_build_dir/build"
    fi
fi

x_conf_dir=`dirname "$x_build_dir"`
x_bin_dir=`(cd "$x_build_dir/../bin"; pwd | sed -e 's/\/$//g')`

# Check for top_srcdir
if test ! -z "$x_top_srcdir"; then
    if test ! -d "$x_top_srcdir"; then
        echo "Top source directory \"$x_top_srcdir\" don't exist."
        exit 1
    fi
    x_root_dir=`(cd "$x_top_srcdir"; pwd | sed -e 's/\/$//g')`
else
    # Get top src dir name from the script directory
    x_root_dir=`dirname "$x_scripts_dir"`
fi

# Check for target dir
if test ! -z "$x_target_dir"; then
    if test ! -d "$x_target_dir"; then
        echo "Target directory \"$x_target_dir\" don't exist."
        exit 1
    fi
    x_target_dir=`(cd "$x_target_dir"; pwd | sed -e 's/\/$//g')`
else
    x_target_dir=`pwd`
fi

# Check for an imported project or an in-tree project.
# The presence of "Makefile.out" in the current directory marks an
# imported project; its "import_root" setting points at the compile tree.
if test -f Makefile.out ; then
    x_import_prj="yes"
    x_import_root=`sed -ne 's/^import_root *= *//p' Makefile`
    # x_compile_dir="`pwd | sed -e 's%/internal/c++/src.*$%%g'`/internal/c++/src"
    x_compile_dir=`cd $x_import_root; pwd`
else
    x_import_prj="no"
    x_compile_dir="$x_build_dir"
fi

if test -z "$x_list"; then
    x_list="$x_target_dir/$res_list"
fi

if test -z "$x_out"; then
    x_out="$x_target_dir/$res_out"
fi

x_script_name=`echo "$x_out" | sed -e 's%^.*/%%'`

# Check for a list file
if test ! -f "$x_list"; then
    echo "Check list file \"$x_list\" not found."
    exit 1
fi

# Features detection: each enabled feature is a "<name>.enabled" marker
# file in the configuration's status/ directory.
x_features=""
for f in `ls $x_conf_dir/status/*.enabled | sort -df`; do
    f=`echo $f | sed 's|^.*/status/\(.*\).enabled$|\1|g'`
    x_features="$x_features$f "
done


#echo ----------------------------------------------------------------------
#echo "Imported project  :" $x_import_prj
#echo "C++ root dir      :" $x_root_dir
#echo "Configuration dir :" $x_conf_dir
#echo "Build dir         :" $x_build_dir
#echo "Compile dir       :" $x_compile_dir
#echo "Target dir        :" $x_target_dir
#echo "Check script      :" $x_out
#echo ----------------------------------------------------------------------

#//////////////////////////////////////////////////////////////////////////

# NOTE(review): everything between "cat > $x_out <<EOF" and the matching
# "EOF" below is the text of the generated check script.  Inside it,
# escaped forms ("\$var", "\`...\`", "\\") are expanded later, when the
# generated script runs; unescaped "$x_..." variables are expanded right
# now, at generation time.  Comments inside the here-documents belong to
# the generated script and are emitted verbatim.

cat > $x_out <<EOF
#! /bin/sh

root_dir="$x_root_dir"
build_dir="$x_build_dir"
conf_dir="$x_conf_dir"
compile_dir="$x_compile_dir"
bin_dir="$x_bin_dir"
script_dir="$x_scripts_dir"
script="$x_out"
cygwin=$cygwin
signature="$x_signature"
sendmail=''
domain='@ncbi.nlm.nih.gov'

res_journal="\$script.journal"
res_log="\$script.log"
res_list="$x_list"
res_concat="\$script.out"
res_concat_err="\$script.out_err"

# Define both senses to accommodate shells lacking !
is_run=false
no_run=true
is_report_err=false
no_report_err=true


# Include COMMON.SH
. \${script_dir}/common/common.sh


# Printout USAGE info and exit

Usage() {
   cat <<EOF_usage

USAGE: $x_script_name {run | clean | concat | concat_err}

 run         Run the tests. Create output file ("*.test_out") for each test, 
             plus journal and log files. 
 clean       Remove all files created during the last "run" and this script 
             itself.
 concat      Concatenate all files created during the last "run" into one big 
             file "\$res_log".
 concat_err  Like previous. But into the file "\$res_concat_err" 
             will be added outputs of failed tests only.

ERROR: \$1
EOF_usage
# Undocumented commands:
#     report_err  Report failed tests directly to developers.

   exit 1
}

if test \$# -ne 1; then
   Usage "Invalid number of arguments."
fi


# Action

method="\$1"

case "\$method" in
#----------------------------------------------------------
   run )
      is_run=true
      no_run=false
      # See RunTest() below
      ;;
#----------------------------------------------------------
   clean )
      x_files=\`cat \$res_journal | sed -e 's/ /%gj_s4%/g'\`
      for x_file in \$x_files; do
         x_file=\`echo "\$x_file" | sed -e 's/%gj_s4%/ /g'\`
         rm -f \$x_file > /dev/null
      done
      rm -f \$res_journal \$res_log \$res_list \$res_concat \$res_concat_err > /dev/null
      rm -f \$script > /dev/null
      exit 0
      ;;
#----------------------------------------------------------
   concat )
      rm -f "\$res_concat"
      (
      cat \$res_log
      x_files=\`cat \$res_journal | sed -e 's/ /%gj_s4%/g'\`
      for x_file in \$x_files; do
         x_file=\`echo "\$x_file" | sed -e 's/%gj_s4%/ /g'\`
         echo
         echo
         cat \$x_file
      done
      ) >> \$res_concat
      exit 0
      ;;
#----------------------------------------------------------
   concat_err )
      rm -f "\$res_concat_err"
      (
      cat \$res_log | egrep 'ERR \[|TO -'
      x_files=\`cat \$res_journal | sed -e 's/ /%gj_s4%/g'\`
      for x_file in \$x_files; do
         x_file=\`echo "\$x_file" | sed -e 's/%gj_s4%/ /g'\`
         x_code=\`cat \$x_file | grep -c '@@@ EXIT CODE:'\`
         test \$x_code -ne 0 || continue
         x_good=\`cat \$x_file | grep -c '@@@ EXIT CODE: 0'\`
         if test \$x_good -ne 1; then
            echo
            echo
            cat \$x_file
         fi
      done
      ) >> \$res_concat_err
      exit 0
      ;;
#----------------------------------------------------------
   report_err )
      # This method works inside NCBI only
      test "\$NCBI_CHECK_MAILTO_AUTHORS." = 'Y.' || exit 0;
      if test -x /usr/sbin/sendmail; then
         sendmail="/usr/sbin/sendmail -oi"
      elif test -x /usr/lib/sendmail; then
         sendmail="/usr/lib/sendmail -oi"
      else
         echo sendmail not found on this platform
         exit 0
      fi
      is_report_err=true
      no_report_err=false
      # See RunTest() below
      ;;
#----------------------------------------------------------
   * )
      Usage "Invalid method name \$method."
      ;;
esac


#//////////////////////////////////////////////////////////////////////////


trap "touch $x_target_dir/check.failed; exit 1" 1 2 15
rm $x_target_dir/check.failed $x_target_dir/check.success > /dev/null 2>&1

# Set log_site for tests
NCBI_APPLOG_SITE=testcxx
export NCBI_APPLOG_SITE

# Include configuration file
. \${build_dir}/check.cfg
if test -z "\$NCBI_CHECK_TOOLS"; then
   NCBI_CHECK_TOOLS="regular"
fi
# Check timeout multiplier (increase default check timeout in x times)
if test -z "\$NCBI_CHECK_TIMEOUT_MULT"; then
   NCBI_CHECK_TIMEOUT_MULT=1
fi

# Path to test data, used by some scripts and applications
if test -z "\$NCBI_TEST_DATA"; then
   if [ \$cygwin = true ]; then
      NCBI_TEST_DATA=//snowman/win-coremake/Scripts/test_data
   elif test -d /Volumes/ncbiapdata/test_data; then
      NCBI_TEST_DATA=/Volumes/ncbiapdata/test_data
   else
      NCBI_TEST_DATA=/am/ncbiapdata/test_data
   fi
   export NCBI_TEST_DATA
fi
# Add synonym for it, see: include/common/test_data_path.h (CXX-9239)
if test -z "\$NCBI_TEST_DATA_PATH"; then
   NCBI_TEST_DATA_PATH=\$NCBI_TEST_DATA
   export NCBI_TEST_DATA_PATH
fi

# Valgrind/Helgrind configurations
VALGRIND_SUP="\${script_dir}/common/check/valgrind.supp"
VALGRIND_CMD="--tool=memcheck --suppressions=\$VALGRIND_SUP"
HELGRIND_CMD="--tool=helgrind --suppressions=\$VALGRIND_SUP"
if (valgrind --ncbi --help) >/dev/null 2>&1; then
   VALGRIND_CMD="--ncbi \$VALGRIND_CMD"  # --ncbi must be the first option!
   HELGRIND_CMD="--ncbi \$HELGRIND_CMD"  # --ncbi must be the first option!
fi

# Leak- and Thread- Sanitizers (GCC 7.3, -fsanitize= flags)
LSAN_OPTIONS="suppressions=\${script_dir}/common/check/lsan.supp:exitcode=0"
export LSAN_OPTIONS
TSAN_OPTIONS="suppressions=\${script_dir}/common/check/tsan.supp"
export TSAN_OPTIONS
# There also ASAN_OPTIONS, that we don't use right now
# https://github.com/google/sanitizers/wiki/AddressSanitizerFlags

# Disable BOOST tests to catch asynchronous system failures
# (signals on *NIX platforms or structured exceptions on Windows)
BOOST_TEST_CATCH_SYSTEM_ERRORS=no
export BOOST_TEST_CATCH_SYSTEM_ERRORS

BOOST_TEST_COLOR_OUTPUT=no
export BOOST_TEST_COLOR_OUTPUT

# Export some global vars
top_srcdir="\$root_dir"
export top_srcdir
FEATURES="$x_features"
export FEATURES

# Redirect output for C++ diagnostic framework to stderr,
# except if using under 'export_project' tool.
if test -z "\$NCBI_EXPORT_PROJECT"; then
   NCBI_CONFIG__LOG__FILE="-"
   export NCBI_CONFIG__LOG__FILE
fi

# Add additional necessary directories to PATH: current, build, scripts, utility and $HOME/bin (for Ubuntu).
PATH="\${script_dir}/common/impl:\$NCBI/bin/_production/CPPCORE:\$HOME/bin:.:\${build_dir}:\${bin_dir}:\${PATH}"
export PATH

# Export bin and lib pathes
CFG_BIN="\${conf_dir}/bin"
CFG_LIB="\${conf_dir}/lib"
export CFG_BIN CFG_LIB

# Define time-guard script to run tests from other scripts
check_exec="\${script_dir}/common/check/check_exec.sh"
CHECK_EXEC="\${script_dir}/common/check/check_exec_test.sh"
CHECK_EXEC_STDIN="\$CHECK_EXEC -stdin"
CHECK_SIGNATURE="\$signature"
export CHECK_EXEC
export CHECK_EXEC_STDIN
export CHECK_SIGNATURE

# Debug tools to get stack/back trace (except running under memory checkers)
NCBI_CHECK_STACK_TRACE=''
NCBI_CHECK_BACK_TRACE=''
if test "\$NCBI_CHECK_TOOLS" = "regular"; then
   if (which gdb) >/dev/null 2>&1; then
      NCBI_CHECK_BACK_TRACE='gdb --batch --quiet -ex "thread apply all bt" -ex "quit"'
   fi
   if (which gstack) >/dev/null 2>&1; then
      NCBI_CHECK_STACK_TRACE='gstack'
   fi
   export NCBI_CHECK_BACK_TRACE
   export NCBI_CHECK_STACK_TRACE
fi

# Use AppLog-style output format in the testsuite by default
if test -z "\$DIAG_OLD_POST_FORMAT"; then
   DIAG_OLD_POST_FORMAT=false
   export DIAG_OLD_POST_FORMAT
fi

# Avoid possible hangs on Mac OS X.
DYLD_BIND_AT_LAUNCH=1
export DYLD_BIND_AT_LAUNCH

case " \$FEATURES " in
   *\ MaxDebug\ * )
      case "\$signature" in
         *-linux* ) MALLOC_DEBUG_=2; export MALLOC_DEBUG_ ;;
      esac
      case "\$signature" in
         GCC* | ICC* ) NCBI_CHECK_TIMEOUT_MULT=20 ;;
      esac
      ;;
esac

# Check on linkerd and set backup
echo test | nc -w 1 linkerd 4142 > /dev/null 2>&1
if test \$? -ne 0; then
   NCBI_CONFIG__ID2SNP__PTIS_NAME="pool.linkerd-proxy.service.bethesda-dev.consul.ncbi.nlm.nih.gov:4142"
   export NCBI_CONFIG__ID2SNP__PTIS_NAME
fi


EOF

# Add a runtime library path to the generated script, if the configuration
# has a lib/ directory.
if test -n "$x_conf_dir" -a -d "$x_conf_dir/lib"; then
    cat >> $x_out <<EOF
# Add a library path for running tests
. \${script_dir}/common/common.sh
COMMON_AddRunpath "\$conf_dir/lib"
EOF
else
    echo "WARNING: Cannot find path to the library dir."
fi
# Add additional path for imported projects to point to local /lib first
if test "$x_import_prj" = "yes"; then
    local_lib=`(cd "$x_compile_dir/../lib"; pwd | sed -e 's/\/$//g')`
    if test -n "$local_lib" -a -d "$local_lib"; then
        cat >> $x_out <<EOF
COMMON_AddRunpath "$local_lib"
EOF
    fi
fi


#//////////////////////////////////////////////////////////////////////////

# Second generated part: run-time environment checks, the RunTest() and
# MailToAuthors() functions of the generated check script.

cat >> $x_out <<EOF

# Check for automated build
is_automated=false
is_db_load=false
if test -n "\$NCBI_AUTOMATED_BUILD"; then
   is_automated=true
   if test -n "\$NCBI_CHECK_DB_LOAD"; then
      is_db_load=true
   fi
fi

# Check for some executables
have_ncbi_applog=false
if (ncbi_applog generate) >/dev/null 2>&1; then
   have_ncbi_applog=true
fi
have_uptime=false
if (which uptime) >/dev/null 2>&1; then
   have_uptime=true
fi


#//////////////////////////////////////////////////////////////////////////


# Run
count_ok=0
count_err=0
count_timeout=0
count_absent=0
count_total=0

if \$is_run; then
   rm -f "\$res_journal"
   rm -f "\$res_log"
   #rm -f "$x_build_dir/test_stat_load.log"
fi

# Set app limits:
# Only if $NCBI_CHECK_SETLIMITS not set to 0 before, or not configured with -with-max-debug.
# Some tools that use this configure flag, like AddressSanitizer, can fail if limited.

is_max_debug=false
if test -f "$x_conf_dir/status/MaxDebug.enabled"; then
   is_max_debug=true
fi
if test "\$NCBI_CHECK_SETLIMITS" != "0" -a ! \$is_max_debug; then
   ulimit -c 1000000
   ulimit -n 8192
   if [ \$cygwin = false ]; then
      if test "\$NCBI_CHECK_TOOLS" = "regular"; then
         ulimit -v 48000000
      else
         # Increase memory limits if run under check tool
         ulimit -v 64000000
      fi
   fi
fi


# Run one test

RunTest()
{
   # Parameters
   x_work_dir_tail="\$1"
   x_work_dir="\$compile_dir/\$x_work_dir_tail"
   x_test="\$2"
   x_app="\$3"
   x_run="\${4:-\$x_app}"
   x_alias="\$5"
   x_name="\${5:-\$x_run}"
   x_ext="\$6"
   x_timeout="\$7"
   x_authors="\$8"

   if test -f "/etc/nologin"; then
      echo "Nologin detected, probably host going to reboot. Skipping test:" \$x_name
      return 0
   fi
   if \$is_report_err; then
      # Authors are not defined for this test
      test -z "\$x_authors" && return 0
   fi

   count_total=\`expr \$count_total + 1\`
   x_log="$x_tmp/\$\$.out\$count_total"


   # Run test under all specified check tools
   for tool in \$NCBI_CHECK_TOOLS; do

      saved_phid=''

      tool_lo=\`echo \$tool | tr '[A-Z]' '[a-z]'\`
      tool_up=\`echo \$tool | tr '[a-z]' '[A-Z]'\`

      case "\$tool_lo" in
         regular | valgrind | helgrind ) ;;
         * ) continue ;;
      esac

      x_cmd="[\$x_work_dir_tail] \$x_name"
      if test \$tool_lo = "regular"; then
         #x_cmd="[\$x_work_dir_tail] \$x_name"
         x_test_out="\$x_work_dir/\$x_test.test_out\$x_ext"
         x_test_rep="\$x_work_dir/\$x_test.test_rep\$x_ext"
         x_boost_rep="\$x_work_dir/\$x_test.boost_rep\$x_ext"
      else
         #x_cmd="[\$x_work_dir_tail] \$tool_up \$x_name"
         x_test_out="\$x_work_dir/\$x_test.test_out\$x_ext.\$tool_lo"
         x_test_rep="\$x_work_dir/\$x_test.test_rep\$x_ext.\$tool_lo"
         x_boost_rep="\$x_work_dir/\$x_test.boost_rep\$x_ext.\$tool_lo"
      fi


      if \$is_run && \$is_automated; then
         echo "\$signature \$NCBI_CHECK_OS_NAME" > "\$x_test_rep"
         echo "\$x_work_dir_tail" >> "\$x_test_rep"
         echo "\$x_run" >> "\$x_test_rep"
         echo "\$x_alias" >> "\$x_test_rep"
         NCBI_BOOST_REPORT_FILE="\$x_boost_rep"
         export NCBI_BOOST_REPORT_FILE
      fi

      # Check existence of the test's application directory
      if test -d "\$x_work_dir"; then

         # Goto the test's directory
         cd "\$x_work_dir"

         # Run test if it exist
         if test -f "\$x_app" -o -f "\$bin_dir/\$x_app"; then

            _RLD_ARGS="-log \$x_log"
            export _RLD_ARGS

            # Fix empty parameters (replace "" to \"\", '' to \'\')
            x_run_fix=\`echo "\$x_run" | sed -e 's/""/\\\\\\\\\\"\\\\\\\\\\"/g' -e "s/''/\\\\\\\\\\'\\\\\\\\\\'/g"\`

            # Define check tool variables
            NCBI_CHECK_TOOL=\`eval echo "\$"NCBI_CHECK_\${tool_up}""\`
            case "\$tool_lo" in
               regular ) ;;
               valgrind | helgrind )
                  if test "\$tool_lo" = "valgrind"; then
                     NCBI_CHECK_TOOL="\$NCBI_CHECK_VALGRIND \$VALGRIND_CMD"
                  else
                     NCBI_CHECK_TOOL="\$NCBI_CHECK_VALGRIND \$HELGRIND_CMD"
                  fi
                  NCBI_CHECK_TIMEOUT_MULT=15
                  NCBI_RUN_UNDER_VALGRIND="yes"
                  export NCBI_RUN_UNDER_VALGRIND
                  NCBI_RUN_UNDER_CHECK_TOOL="yes"
                  export NCBI_RUN_UNDER_CHECK_TOOL
                  ;;
            esac
            export NCBI_CHECK_TOOL
            CHECK_TIMEOUT=\`expr \$x_timeout \* \$NCBI_CHECK_TIMEOUT_MULT\`
            export CHECK_TIMEOUT

            # Just need to report errors to authors?
            if \$is_report_err; then
               test -f "\$x_test_out" || continue
               x_code=\`cat \$x_test_out | grep -c '@@@ EXIT CODE:'\`
               test \$x_code -ne 0 || continue
               x_good=\`cat \$x_test_out | grep -c '@@@ EXIT CODE: 0'\`
               if test \$x_good -eq 1; then
                  continue
               fi
               MailToAuthors "\$x_authors" "\$x_test_out"
               continue
            fi

            echo \$x_run | grep '\.sh' > /dev/null 2>&1
            if test \$? -eq 0; then
               # Run script without any check tools.
               # It will be applied inside script using \$CHECK_EXEC.
               xx_run="\$x_run_fix"
            else
               # Run under check tool
               xx_run="\$NCBI_CHECK_TOOL \$x_run_fix"
            fi

            # Write header to output file
            echo "\$x_test_out" >> \$res_journal
            (
               echo "======================================================================"
               echo "\$x_name"
               echo "======================================================================"
               echo
               if test "\$x_run" != "\$x_name"; then
                  echo "Command line: \$x_run"
                  echo
               fi
               if test -n "\$NCBI_CHECK_ENV_SETTINGS"; then
                  echo "NCBI_CHECK_ENV_SETTINGS:"
                  for env in \$NCBI_CHECK_ENV_SETTINGS; do
                     echo " \$env = \`eval echo '$'\$env\`"
                  done
                  echo
               fi
            ) > \$x_test_out 2>&1

            # Remove old core file if any
            corefile="\$x_work_dir/core"
            rm -f "\$corefile" > /dev/null 2>&1
            rm -f check_exec.pid > /dev/null 2>&1

            # Generate PHID and SID, to use it by any application in the current test,
            # and for loading test statistics later (test_stat_load -> ncbi_applog),
            # to have same values in Applog.
            logfile=\$NCBI_CONFIG__LOG__FILE
            NCBI_CONFIG__LOG__FILE=
            export NCBI_CONFIG__LOG__FILE
            if \$have_ncbi_applog; then
               eval "\`ncbi_applog generate -phid -sid -format=shell-export | tr -d '\r'\`"
               if \$is_run && \$is_db_load; then
                  # Use generated PHID for test statistics, and sub-PHID.1 for test itself
                  saved_phid=\$NCBI_LOG_HIT_ID
                  NCBI_LOG_HIT_ID=\$NCBI_LOG_HIT_ID.1
                  export NCBI_LOG_HIT_ID
                  # Create a file in the cirrent directory with initial sub-PHID
                  # (will be incremented by $CHECK_EXEC, if any)
                  echo "0" > \$NCBI_LOG_HIT_ID
               fi
            fi
            NCBI_CONFIG__LOG__FILE=\$logfile
            export NCBI_CONFIG__LOG__FILE

            # Run check
            start_time="\`date +'$x_date_format'\`"

            # Use separate shell to run test.
            # This will allow to know execution time for applications with timeout.
            # Also, process guard works better if used after "time -p".
            launch_sh="/var/tmp/launch.\$\$.sh"
cat > \$launch_sh <<EOF_launch
#! /bin/sh
exec time -p \$check_exec \`eval echo \$xx_run\`
EOF_launch
            chmod a+x \$launch_sh
            \$launch_sh >\$x_log 2>&1
            result=\$?
            stop_time="\`date +'$x_date_format'\`"
            if \${have_uptime}; then
               load_avg="\`uptime | sed -e 's/.*averages*: *\(.*\) *$/\1/' -e 's/[, ][, ]*/ /g'\`"
            else
               load_avg="unavailable"
            fi
            rm \$launch_sh

            LC_ALL=C sed -e '/ ["][$][@]["].*\$/ {
               s/^.*: //
               s/ ["][$][@]["].*$//
            }' \$x_log >> \$x_test_out

            # RunID
            runpid='?'
            test -f check_exec.pid && runpid="\`cat check_exec.pid\`"
            runid="\`date -u +%y%m%d%H%M%S\`-\$runpid-\`uname -n\`"
            runid="\`echo \$runid | tr -d '\n\r'\`"
            rm -f check_exec.pid > /dev/null 2>&1

            # Get application execution time
            exec_time=\`\$build_dir/sysdep.sh tl 7 \$x_log | tr '\n\r' '%%' | tr -d '\000-\037' | tr -d '\176-\377'\`
            echo \$exec_time | egrep 'real [0-9]|Maximum execution .* is exceeded' > /dev/null 2>&1
            if test \$? -eq 0; then
               exec_time=\`echo \$exec_time | \\
                  sed -e 's/%%/%/g' \\
                      -e 's/%$//' \\
                      -e 's/%/, /g' \\
                      -e 's/[ ] */ /g' \\
                      -e 's/^.*\(Maximum execution [0-9][0-9]* is exceeded\).*$/\1/' \\
                      -e 's/^.*\(real [0-9][0-9]*[.][0-9][0-9]*\)/\1/' \\
                      -e 's/\(sys [0-9][0-9]*[.][0-9][0-9]*\).*/\1/'\`
            else
               exec_time='unparsable timing stats'
            fi

            rm -f \$x_log

            # Analize check tool output
            case "\$tool_lo" in
               valgrind | helgrind )
                  summary_all=\`grep -c 'ERROR SUMMARY:' \$x_test_out\`
                  summary_ok=\`grep -c 'ERROR SUMMARY: 0 ' \$x_test_out\`
                  # The number of given lines can be zero.
                  # In some cases we can lost valgrind's summary.
                  if test \$summary_all -ne \$summary_ok; then
                     result=254
                  fi
                  ;;
               * )
                  # GCC Sanitizer can fails with a 0 exit code
                  if \$is_max_debug; then
                     grep '==ERROR: AddressSanitizer:' \$x_test_out > /dev/null 2>&1
                     if test \$? -eq 0; then
                        result=253
                     fi
                  fi

            esac

            # Write result of the test into the his output file
            echo "Start time : \$start_time" >> \$x_test_out
            echo "Stop time : \$stop_time" >> \$x_test_out
            echo "Load averages: \$load_avg" >> \$x_test_out
            echo >> \$x_test_out
            echo "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" >> \$x_test_out
            echo "@@@ EXIT CODE: \$result" >> \$x_test_out

            if test -f "\$corefile"; then
               echo "@@@ CORE DUMPED" >> \$x_test_out
               if test -d "\$bin_dir" -a -f "\$bin_dir/\$x_test"; then
                  mv "\$corefile" "\$bin_dir/\$x_test.core"
               else
                  rm -f "\$corefile"
               fi
            fi

            # Write result also on the screen and into the log
            if grep NCBI_UNITTEST_DISABLED \$x_test_out >/dev/null; then
               echo "DIS -- \$x_cmd"
               echo "DIS -- \$x_cmd" >> \$res_log
               count_absent=\`expr \$count_absent + 1\`
               \$is_automated && echo "DIS" >> "\$x_test_rep"

            elif grep NCBI_UNITTEST_SKIPPED \$x_test_out >/dev/null; then
               echo "SKP -- \$x_cmd"
               echo "SKP -- \$x_cmd" >> \$res_log
               count_absent=\`expr \$count_absent + 1\`
               \$is_automated && echo "SKP" >> "\$x_test_rep"

            elif grep NCBI_UNITTEST_TIMEOUTS_BUT_NO_ERRORS \$x_test_out >/dev/null; then
               echo "TO -- \$x_cmd"
               echo "TO -- \$x_cmd" >> \$res_log
               count_timeout=\`expr \$count_timeout + 1\`
               \$is_automated && echo "TO" >> "\$x_test_rep"

            elif echo "\$exec_time" | egrep 'Maximum execution .* is exceeded' >/dev/null || egrep "Maximum execution .* is exceeded" \$x_test_out >/dev/null; then
               echo "TO -- \$x_cmd (\$exec_time)"
               echo "TO -- \$x_cmd (\$exec_time)" >> \$res_log
               count_timeout=\`expr \$count_timeout + 1\`
               \$is_automated && echo "TO" >> "\$x_test_rep"

            elif test \$result -eq 0; then
               echo "OK -- \$x_cmd (\$exec_time)"
               echo "OK -- \$x_cmd (\$exec_time)" >> \$res_log
               count_ok=\`expr \$count_ok + 1\`
               \$is_automated && echo "OK" >> "\$x_test_rep"

            else
               echo "ERR [\$result] -- \$x_cmd (\$exec_time)"
               echo "ERR [\$result] -- \$x_cmd (\$exec_time)" >> \$res_log
               count_err=\`expr \$count_err + 1\`
               \$is_automated && echo "ERR" >> "\$x_test_rep"
            fi

            if \$is_automated; then
               echo "\$start_time" >> "\$x_test_rep"
               echo "\$result" >> "\$x_test_rep"
               echo "\$exec_time" >> "\$x_test_rep"
               echo "\$x_authors" >> "\$x_test_rep"
               echo "\$load_avg" >> "\$x_test_rep"
               echo "\$runid" >> "\$x_test_rep"
            fi

         else # Run test if it exist
            if \$is_run; then
               echo "ABS -- \$x_cmd"
               echo "ABS -- \$x_cmd" >> \$res_log
               count_absent=\`expr \$count_absent + 1\`

               if \$is_automated; then
                  echo "ABS" >> "\$x_test_rep"
                  echo "\`date +'$x_date_format'\`" >> "\$x_test_rep"
                  echo "\$x_authors" >> "\$x_test_rep"
               fi
            fi
         fi

      else # Check existence of the test's application directory
         if \$is_run; then
            # Test application is absent
            echo "ABS -- \$x_work_dir - \$x_test"
            echo "ABS -- \$x_work_dir - \$x_test" >> \$res_log
            count_absent=\`expr \$count_absent + 1\`

            if \$is_automated; then
               echo "ABS" >> "\$x_test_rep"
               echo "\`date +'$x_date_format'\`" >> "\$x_test_rep"
               echo "\$x_authors" >> "\$x_test_rep"
            fi
         fi
      fi

      # Load test results to Database and Applog immediately after a test.
      # Always load test results for automated builds on a 'run' command.

      if \$is_run && \$is_db_load; then
         if test -n "\$saved_phid"; then
            NCBI_LOG_HIT_ID=\$saved_phid
            export NCBI_LOG_HIT_ID
         fi
         case \`uname -s\` in
            CYGWIN* )
               test_stat_load "\$(cygpath -w "\$x_test_rep")" "\$(cygpath -w "\$x_test_out")" "\$(cygpath -w "\$x_boost_rep")" "\$(cygpath -w "\$top_srcdir/build_info")" >> "$x_build_dir/test_stat_load.log" 2>&1 ;;
            IRIX* )
               test_stat_load.sh "\$x_test_rep" "\$x_test_out" "\$x_boost_rep" "\$top_srcdir/build_info" >> "$x_build_dir/test_stat_load.log" 2>&1 ;;
            * )
               test_stat_load "\$x_test_rep" "\$x_test_out" "\$x_boost_rep" "\$top_srcdir/build_info" >> "$x_build_dir/test_stat_load.log" 2>&1 ;;
         esac
         echo >> "$x_build_dir/test_stat_load.log" 2>&1
      fi
      if test \$is_run -a -n "\$saved_phid"; then
         rm -f \$saved_phid* > /dev/null 2>&1
      fi

   done # Run test under all specified check tools
}

MailToAuthors()
{
   # The limit on the sending email size in Kbytes
   mail_limit=1024
   tmp="./check_mailtoauthors.tmp.\$\$"

   test -z "\$sendmail" && return 0
   test -z "\$1" && return 0
   x_authors=""
   for author in \$1; do
      x_authors="\$x_authors \$author\$domain"
   done
   x_logfile="\$2"

   echo '-----------------------'
   echo "Send results of the test \$x_app to \$x_authors"
   echo '-----------------------'
   echo "To: \$x_authors"
   echo "Subject: [WATCHERS] \$x_app | \$signature"
   echo
   echo \$x_cmd
   echo

   echo "cmd = \$sendmail \$x_authors"

   COMMON_LimitTextFileSize \$x_logfile \$tmp \$mail_limit
   {
      echo "To: \$x_authors"
      echo "Subject: [WATCHERS] \$x_app | \$signature"
      echo
      echo \$x_cmd
      echo
      cat \$tmp
      echo
      cat \$top_srcdir/build_info
   } | \$sendmail \$x_authors
   echo '-----------------------'
   rm -f \$tmp > /dev/null
}

EOF

#//////////////////////////////////////////////////////////////////////////


# Read list with tests.
# Each row's fields are separated by " ____ " (x_delim); based on the sed
# splits below the field order appears to be: build-dir tail, test, app,
# command, name, files-to-copy, timeout, requires, authors -- confirm
# against the "make check_r" list writer.  Spaces are temporarily encoded
# as %gj_s4% so rows can be iterated with a plain "for".
x_tests=`cat "$x_list" | sed -e 's/ /%gj_s4%/g'`
x_test_prev=""

# For all tests
for x_row in $x_tests; do
    # Get one row from list
    x_row=`echo "$x_row" | sed -e 's/%gj_s4%/ /g' -e 's/^ *//' -e 's/ ____ /~/g'`

    # Split it to parts
    x_src_dir="$x_root_dir/src/`echo \"$x_row\" | sed -e 's/~.*$//'`"
    x_test=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/~.*$//'`
    x_app=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'`
    x_cmd=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'`
    x_name=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'`
    x_files=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'`
    x_timeout=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'`
    x_requires=" `echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'` "
    x_authors=`echo "$x_row" | sed -e 's/.*~//'`

    # Default timeout
    test -z "$x_timeout" && x_timeout=$NCBI_CHECK_TIMEOUT_DEFAULT

    # Application base build directory
    x_work_dir_tail="`echo \"$x_row\" | sed -e 's/~.*$//'`"
    x_work_dir="$x_compile_dir/$x_work_dir_tail"

    # Check application requirements ($CHECK_REQUIRES)
    for x_req in $x_requires; do
        if test ! -f "$x_conf_dir/status/$x_req.enabled" ; then
            echo "SKIP -- $x_work_dir_tail/$x_app (unmet CHECK_REQUIRES)"
            continue 2
        fi
    done

    # Copy specified files to the build directory

    if test "$x_import_prj" = "no"; then
        # Automatically copy .ini file if exists
        x_copy="$x_src_dir/$x_app.ini"
        test -f $x_copy && cp -pf "$x_copy" "$x_work_dir"
        # Copy specified CHECK_COPY files/dirs
        if test ! -z "$x_files"; then
            for i in $x_files ; do
                x_copy="$x_src_dir/$i"
                if test -f "$x_copy" -o -d "$x_copy"; then
                    cp -prf "$x_copy" "$x_work_dir"
                    test -d "$x_work_dir/$i" && find "$x_work_dir/$i" -name .svn -print | xargs rm -rf
                else
                    echo "Warning: The copied object \"$x_copy\" should be a file or directory!"
                    continue 1
                fi
            done
        fi
    fi

    # Generate extension for tests output file
    # (repeated runs of the same test get numbered suffixes 2, 3, ...)
    if test "$x_test" != "$x_test_prev"; then
        x_cnt=1
        x_test_ext=""
    else
        x_cnt=`expr $x_cnt + 1`
        x_test_ext="$x_cnt"
    fi
    x_test_prev="$x_test"

#//////////////////////////////////////////////////////////////////////////

    # Write test commands for current test into a shell script file
    cat >> $x_out <<EOF
######################################################################
RunTest "$x_work_dir_tail" \\
        "$x_test" \\
        "$x_app" \\
        "$x_cmd" \\
        "$x_name" \\
        "$x_test_ext" \\
        "$x_timeout" \\
        "$x_authors"
EOF

#//////////////////////////////////////////////////////////////////////////

done # for x_row in x_tests


# Write ending code into the script
cat >> $x_out <<EOF

if \$is_run; then
   # Write result of the tests execution
   echo
   echo "Succeeded : \$count_ok"
   echo "Timeout : \$count_timeout"
   echo "Failed : \$count_err"
   echo "Absent : \$count_absent"
   echo
   if test \$count_err -eq 0; then
      echo
      echo "******** ALL TESTS COMPLETED SUCCESSFULLY ********"
      echo
   fi
fi

if test \$count_err -eq 0; then
   touch $x_target_dir/check.success
else
   touch $x_target_dir/check.failed
fi

exit \$count_err
EOF

# Set execute mode to script
chmod a+x "$x_out"

exit 0