#! /bin/sh

# $Id: check_make_unix.sh 630684 2021-05-06 15:49:59Z ivanov $
# Author:  Vladimir Ivanov, NCBI
#
###########################################################################
#
# Compile a check script and copy necessary files to run tests in the
# UNIX build tree.
#
# Usage:
#    check_make_unix.sh <test_list> <signature> <build_dir> <top_srcdir> <target_dir> <check_script>
#
#    test_list       - a list of tests (built with "make check_r")
#                      (default: "<build_dir>/check.sh.list")
#    signature       - build signature
#    build_dir       - path to UNIX build tree like ".../build/..."
#                      (default: will try to determine the path from the current
#                      working directory -- the root of the build tree)
#    top_srcdir      - path to the root src directory
#                      (default: will try to determine the path from the current
#                      working directory -- the root of the build tree)
#    target_dir      - path where the check script and logs will be created
#                      (default: current dir)
#    check_script    - name of the check script (without path).
#                      (default: "check.sh" / "<target_dir>/check.sh")
#
#    If any parameter is skipped, its default value will be used.
#
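# Example (all paths and the signature below are illustrative only):
#    check_make_unix.sh check.sh.list "GCC_730-Debug--x86_64-linux" \
#        /home/user/c++/GCC-Debug/build /home/user/c++ \
#        /home/user/c++/GCC-Debug/build check.sh
#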
# Note:
#    Works with a UNIX build tree only (any configuration).
#
###########################################################################

# Load configuration options
x_check_scripts_dir=`dirname "$0"`
x_scripts_dir=`dirname "$x_check_scripts_dir"`
x_scripts_dir=`dirname "$x_scripts_dir"`
. ${x_check_scripts_dir}/check_setup.cfg


# Parameters

res_out="check.sh"
res_list="$res_out.list"

# Field delimiters in the list
# (these symbols are used directly in the "sed" commands)
x_delim=" ____ "
x_delim_internal="~"
x_tmp="/var/tmp"

x_date_format="%m/%d/%Y %H:%M:%S"

x_list=$1
x_signature=$2
x_build_dir=$3
x_top_srcdir=$4
x_target_dir=$5
x_out=$6

# Detect Cygwin
case `uname -s` in
   CYGWIN* ) cygwin=true  ;;
   *)        cygwin=false ;;
esac

# Check for build dir
if test ! -z "$x_build_dir"; then
   if test ! -d "$x_build_dir"; then
      echo "Build directory \"$x_build_dir\" does not exist."
      exit 1
   fi
   x_build_dir=`(cd "$x_build_dir"; pwd | sed -e 's/\/$//g')`
else
   # Get build dir name from the current path
   x_build_dir=`pwd | sed -e 's%/build.*$%%'`
   if test -d "$x_build_dir/build"; then
      x_build_dir="$x_build_dir/build"
   fi
fi

x_conf_dir=`dirname "$x_build_dir"`
x_bin_dir=`(cd "$x_build_dir/../bin"; pwd | sed -e 's/\/$//g')`

# Check for top_srcdir
if test ! -z "$x_top_srcdir"; then
   if test ! -d "$x_top_srcdir"; then
      echo "Top source directory \"$x_top_srcdir\" does not exist."
      exit 1
   fi
   x_root_dir=`(cd "$x_top_srcdir"; pwd | sed -e 's/\/$//g')`
else
   # Get top src dir name from the script directory
   x_root_dir=`dirname "$x_scripts_dir"`
fi

# Check for target dir
if test ! -z "$x_target_dir"; then
   if test ! -d "$x_target_dir"; then
      echo "Target directory \"$x_target_dir\" does not exist."
      exit 1
   fi
   x_target_dir=`(cd "$x_target_dir"; pwd | sed -e 's/\/$//g')`
else
   x_target_dir=`pwd`
fi

# Check for an imported or in-tree project
if test -f Makefile.out ; then
   x_import_prj="yes"
   x_import_root=`sed -ne 's/^import_root *= *//p' Makefile`
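   # (the Makefile of an imported project is expected to contain a line like
   #  "import_root = ../../.."; the value shown here is illustrative)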
   # x_compile_dir="`pwd | sed -e 's%/internal/c++/src.*$%%g'`/internal/c++/src"
   x_compile_dir=`cd $x_import_root; pwd`
else
   x_import_prj="no"
   x_compile_dir="$x_build_dir"
fi

if test -z "$x_list"; then
   x_list="$x_target_dir/$res_list"
fi

if test -z "$x_out"; then
   x_out="$x_target_dir/$res_out"
fi

x_script_name=`echo "$x_out" | sed -e 's%^.*/%%'`

# Check for a list file
if test ! -f "$x_list"; then
   echo "Check list file \"$x_list\" not found."
   exit 1
fi

# Feature detection
x_features=""
for f in `ls $x_conf_dir/status/*.enabled | sort -df`; do
   f=`echo $f | sed 's|^.*/status/\(.*\).enabled$|\1|g'`
   x_features="$x_features$f "
done
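# Example: with status files "Boost.enabled" and "MT.enabled" present
# (hypothetical feature names), x_features becomes "Boost MT ".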


#echo ----------------------------------------------------------------------
#echo "Imported project  :" $x_import_prj
#echo "C++ root dir      :" $x_root_dir
#echo "Configuration dir :" $x_conf_dir
#echo "Build dir         :" $x_build_dir
#echo "Compile dir       :" $x_compile_dir
#echo "Target dir        :" $x_target_dir
#echo "Check script      :" $x_out
#echo ----------------------------------------------------------------------

#//////////////////////////////////////////////////////////////////////////

cat > $x_out <<EOF
#! /bin/sh

root_dir="$x_root_dir"
build_dir="$x_build_dir"
conf_dir="$x_conf_dir"
compile_dir="$x_compile_dir"
bin_dir="$x_bin_dir"
script_dir="$x_scripts_dir"
script="$x_out"
cygwin=$cygwin
signature="$x_signature"
sendmail=''
domain='@ncbi.nlm.nih.gov'

res_journal="\$script.journal"
res_log="\$script.log"
res_list="$x_list"
res_concat="\$script.out"
res_concat_err="\$script.out_err"

# Define both senses to accommodate shells lacking !
is_run=false
no_run=true
is_report_err=false
no_report_err=true


# Include COMMON.SH
. \${script_dir}/common/common.sh


# Print USAGE info and exit

Usage() {
   cat <<EOF_usage

USAGE:  $x_script_name {run | clean | concat | concat_err}

 run         Run the tests. Create an output file ("*.test_out") for each test,
             plus journal and log files.
 clean       Remove all files created during the last "run", including this script
             itself.
 concat      Concatenate all files created during the last "run" into one big
             file "\$res_concat".
 concat_err  Like "concat", but only the error/timeout log lines and the outputs
             of failed tests are written to the file "\$res_concat_err".

ERROR:  \$1
EOF_usage
# Undocumented commands:
#     report_err  Report failed tests directly to developers.

    exit 1
}

if test \$# -ne 1; then
   Usage "Invalid number of arguments."
fi


# Action

method="\$1"

case "\$method" in
#----------------------------------------------------------
   run )
      is_run=true
      no_run=false
      # See RunTest() below
      ;;
#----------------------------------------------------------
   clean )
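      # (Spaces inside journal entries are temporarily encoded as "%gj_s4%"
      #  so that paths with spaces survive word splitting in the for-loop.)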
      x_files=\`cat \$res_journal | sed -e 's/ /%gj_s4%/g'\`
      for x_file in \$x_files; do
         x_file=\`echo "\$x_file" | sed -e 's/%gj_s4%/ /g'\`
         rm -f \$x_file > /dev/null
      done
      rm -f \$res_journal \$res_log \$res_list \$res_concat \$res_concat_err > /dev/null
      rm -f \$script > /dev/null
      exit 0
      ;;
#----------------------------------------------------------
   concat )
      rm -f "\$res_concat"
      (
      cat \$res_log
      x_files=\`cat \$res_journal | sed -e 's/ /%gj_s4%/g'\`
      for x_file in \$x_files; do
         x_file=\`echo "\$x_file" | sed -e 's/%gj_s4%/ /g'\`
         echo
         echo
         cat \$x_file
      done
      ) >> \$res_concat
      exit 0
      ;;
#----------------------------------------------------------
   concat_err )
      rm -f "\$res_concat_err"
      (
      cat \$res_log | egrep 'ERR \[|TO  -'
      x_files=\`cat \$res_journal | sed -e 's/ /%gj_s4%/g'\`
      for x_file in \$x_files; do
         x_file=\`echo "\$x_file" | sed -e 's/%gj_s4%/ /g'\`
         x_code=\`cat \$x_file | grep -c '@@@ EXIT CODE:'\`
         test \$x_code -ne 0 || continue
         x_good=\`cat \$x_file | grep -c '@@@ EXIT CODE: 0'\`
         if test \$x_good -ne 1; then
            echo
            echo
            cat \$x_file
         fi
      done
      ) >> \$res_concat_err
      exit 0
      ;;
#----------------------------------------------------------
   report_err )
      # This method works inside NCBI only
      test "\$NCBI_CHECK_MAILTO_AUTHORS." = 'Y.'  ||  exit 0;
      if test -x /usr/sbin/sendmail; then
         sendmail="/usr/sbin/sendmail -oi"
      elif test -x /usr/lib/sendmail; then
         sendmail="/usr/lib/sendmail -oi"
      else
         echo sendmail not found on this platform
         exit 0
      fi
      is_report_err=true
      no_report_err=false
      # See RunTest() below
      ;;
#----------------------------------------------------------
   * )
      Usage "Invalid method name \$method."
      ;;
esac


#//////////////////////////////////////////////////////////////////////////


trap "touch $x_target_dir/check.failed; exit 1"  1 2 15
rm $x_target_dir/check.failed $x_target_dir/check.success > /dev/null 2>&1

# Set log_site for tests
NCBI_APPLOG_SITE=testcxx
export NCBI_APPLOG_SITE

# Include configuration file
. \${build_dir}/check.cfg
if test -z "\$NCBI_CHECK_TOOLS"; then
   NCBI_CHECK_TOOLS="regular"
fi
# Check timeout multiplier (increases the default check timeout by the given factor)
if test -z "\$NCBI_CHECK_TIMEOUT_MULT"; then
   NCBI_CHECK_TIMEOUT_MULT=1
fi

# Path to test data, used by some scripts and applications
if test -z "\$NCBI_TEST_DATA"; then
    if [ \$cygwin = true ]; then
       NCBI_TEST_DATA=//snowman/win-coremake/Scripts/test_data
    else
       NCBI_TEST_DATA=/am/ncbiapdata/test_data
    fi
    export NCBI_TEST_DATA
fi
# Add synonym for it, see: include/common/test_data_path.h (CXX-9239)
if test -z "\$NCBI_TEST_DATA_PATH"; then
    NCBI_TEST_DATA_PATH=\$NCBI_TEST_DATA
    export NCBI_TEST_DATA_PATH
fi

# Valgrind/Helgrind configurations
VALGRIND_SUP="\${script_dir}/common/check/valgrind.supp"
VALGRIND_CMD="--tool=memcheck --suppressions=\$VALGRIND_SUP"
HELGRIND_CMD="--tool=helgrind --suppressions=\$VALGRIND_SUP"
if (valgrind --ncbi --help) >/dev/null 2>&1; then
    VALGRIND_CMD="--ncbi \$VALGRIND_CMD" # --ncbi must be the first option!
    HELGRIND_CMD="--ncbi \$HELGRIND_CMD" # --ncbi must be the first option!
fi
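# (With NCBI_CHECK_VALGRIND set to, e.g., "valgrind", a test is effectively run as
#  "valgrind [--ncbi] --tool=memcheck --suppressions=.../valgrind.supp <test command>";
#  see how NCBI_CHECK_TOOL is assembled in RunTest() below.)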

# Leak- and Thread- Sanitizers (GCC 7.3, -fsanitize= flags)
LSAN_OPTIONS="suppressions=\${script_dir}/common/check/lsan.supp:exitcode=0"
export LSAN_OPTIONS
TSAN_OPTIONS="suppressions=\${script_dir}/common/check/tsan.supp"
export TSAN_OPTIONS
# There is also ASAN_OPTIONS, which we don't use right now:
# https://github.com/google/sanitizers/wiki/AddressSanitizerFlags

# Disable catching of asynchronous system failures in BOOST tests
# (signals on *NIX platforms or structured exceptions on Windows)
BOOST_TEST_CATCH_SYSTEM_ERRORS=no
export BOOST_TEST_CATCH_SYSTEM_ERRORS

BOOST_TEST_COLOR_OUTPUT=no
export BOOST_TEST_COLOR_OUTPUT

# Export some global vars
top_srcdir="\$root_dir"
export top_srcdir
FEATURES="$x_features"
export FEATURES

# Redirect output of the C++ diagnostic framework to stderr,
# except when used under the 'export_project' tool.
if test -z "\$NCBI_EXPORT_PROJECT"; then
    NCBI_CONFIG__LOG__FILE="-"
    export NCBI_CONFIG__LOG__FILE
fi

# Add additional necessary directories to PATH: current, build, scripts, utility and \$HOME/bin (for Ubuntu).
PATH="\${script_dir}/common/impl:\$NCBI/bin/_production/CPPCORE:\$HOME/bin:.:\${build_dir}:\${bin_dir}:\${PATH}"
export PATH

# Export bin and lib paths
CFG_BIN="\${conf_dir}/bin"
CFG_LIB="\${conf_dir}/lib"
export CFG_BIN CFG_LIB

# Define time-guard scripts to run tests from other scripts
check_exec="\${script_dir}/common/check/check_exec.sh"
CHECK_EXEC="\${script_dir}/common/check/check_exec_test.sh"
CHECK_EXEC_STDIN="\$CHECK_EXEC -stdin"
CHECK_SIGNATURE="\$signature"
export CHECK_EXEC
export CHECK_EXEC_STDIN
export CHECK_SIGNATURE

# Debug tools to get stack/back traces (not used when running under memory checkers)
NCBI_CHECK_STACK_TRACE=''
NCBI_CHECK_BACK_TRACE=''
if test "\$NCBI_CHECK_TOOLS" = "regular"; then
   if (which gdb) >/dev/null 2>&1; then
       NCBI_CHECK_BACK_TRACE='gdb --batch --quiet -ex "thread apply all bt" -ex "quit"'
   fi
   if (which gstack) >/dev/null 2>&1; then
       NCBI_CHECK_STACK_TRACE='gstack'
   fi
   export NCBI_CHECK_BACK_TRACE
   export NCBI_CHECK_STACK_TRACE
fi

# Use AppLog-style output format in the testsuite by default
if test -z "\$DIAG_OLD_POST_FORMAT"; then
    DIAG_OLD_POST_FORMAT=false
    export DIAG_OLD_POST_FORMAT
fi

# Avoid possible hangs on Mac OS X.
DYLD_BIND_AT_LAUNCH=1
export DYLD_BIND_AT_LAUNCH

case " \$FEATURES " in
    *\ MaxDebug\ * )
         case "\$signature" in
             *-linux* ) MALLOC_DEBUG_=2; export MALLOC_DEBUG_ ;;
         esac
         case "\$signature" in
             GCC* | ICC* ) NCBI_CHECK_TIMEOUT_MULT=20 ;;
         esac
         ;;
esac

# Check linkerd availability and set a backup route if it is not reachable
echo test | nc -w 1 linkerd 4142 > /dev/null 2>&1
if test \$? -ne 0;  then
   NCBI_CONFIG__ID2SNP__PTIS_NAME="pool.linkerd-proxy.service.bethesda-dev.consul.ncbi.nlm.nih.gov:4142"
   export NCBI_CONFIG__ID2SNP__PTIS_NAME
fi


EOF

if test -n "$x_conf_dir"  -a  -d "$x_conf_dir/lib";  then
   cat >> $x_out <<EOF
# Add a library path for running tests
. \${script_dir}/common/common.sh
COMMON_AddRunpath "\$conf_dir/lib"
EOF
else
   echo "WARNING:  Cannot find path to the library dir."
fi
# Add additional path for imported projects to point to local /lib first
if test "$x_import_prj" = "yes"; then
    local_lib=`(cd "$x_compile_dir/../lib"; pwd | sed -e 's/\/$//g')`
    if test -n "$local_lib"  -a  -d "$local_lib";  then
   cat >> $x_out <<EOF
COMMON_AddRunpath "$local_lib"
EOF
    fi
fi


#//////////////////////////////////////////////////////////////////////////

cat >> $x_out <<EOF

# Check for automated build
is_automated=false
is_db_load=false
if test -n "\$NCBI_AUTOMATED_BUILD"; then
   is_automated=true
   if test -n "\$NCBI_CHECK_DB_LOAD"; then
      is_db_load=true
   fi
fi

# Check for some executables
have_ncbi_applog=false
if (ncbi_applog generate) >/dev/null 2>&1; then
   have_ncbi_applog=true
fi
have_uptime=false
if (which uptime) >/dev/null 2>&1; then
   have_uptime=true
fi


#//////////////////////////////////////////////////////////////////////////


# Run
count_ok=0
count_err=0
count_timeout=0
count_absent=0
count_total=0

if \$is_run; then
   rm -f "\$res_journal"
   rm -f "\$res_log"
   #rm -f "$x_build_dir/test_stat_load.log"
fi

# Set app limits,
# unless \$NCBI_CHECK_SETLIMITS was set to 0 beforehand, or the tree is configured --with-max-debug.
# Some tools enabled by that configure flag, like AddressSanitizer, can fail if limited.

is_max_debug=false
if test -f "$x_conf_dir/status/MaxDebug.enabled"; then
   is_max_debug=true
fi
if test "\$NCBI_CHECK_SETLIMITS" != "0"  -a  "\$is_max_debug" != "true"; then
   ulimit -c 1000000
   ulimit -n 8192
   if [ \$cygwin = false ]; then
       if test "\$NCBI_CHECK_TOOLS" = "regular"; then
          ulimit -v 48000000
       else
          # Increase memory limits if run under a check tool
          ulimit -v 64000000
       fi
   fi
fi
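# (ulimit -v takes kilobytes, so the virtual-memory limits above are roughly 48 GB and 64 GB.)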


# Run one test

RunTest()
{
    # Parameters
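    # (a summary of the positional parameters, as assigned below):
    #   \$1 - test directory, relative to the compile tree
    #   \$2 - test (output file base) name
    #   \$3 - application name
    #   \$4 - command line to run (defaults to the application name)
    #   \$5 - test alias/name used in reports (defaults to the command line)
    #   \$6 - extension for the output files (distinguishes repeated tests)
    #   \$7 - timeout, in seconds
    #   \$8 - authors to notify about failures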
    x_work_dir_tail="\$1"
    x_work_dir="\$compile_dir/\$x_work_dir_tail"
    x_test="\$2"
    x_app="\$3"
    x_run="\${4:-\$x_app}"
    x_alias="\$5"
    x_name="\${5:-\$x_run}"
    x_ext="\$6"
    x_timeout="\$7"
    x_authors="\$8"

    if test -f "/etc/nologin"; then
        echo "Nologin detected; the host is probably going to reboot. Skipping test:" \$x_name
        return 0
    fi
    if \$is_report_err; then
        # Skip if no authors are defined for this test
        test -z "\$x_authors"  &&  return 0
    fi

    count_total=\`expr \$count_total + 1\`
    x_log="$x_tmp/\$\$.out\$count_total"


    # Run the test under all specified check tools
    for tool in \$NCBI_CHECK_TOOLS; do

        saved_phid=''

        tool_lo=\`echo \$tool | tr '[A-Z]' '[a-z]'\`
        tool_up=\`echo \$tool | tr '[a-z]' '[A-Z]'\`

        case "\$tool_lo" in
            regular | valgrind | helgrind ) ;;
                             * ) continue ;;
        esac

        x_cmd="[\$x_work_dir_tail] \$x_name"
        if test \$tool_lo = "regular"; then
           #x_cmd="[\$x_work_dir_tail] \$x_name"
           x_test_out="\$x_work_dir/\$x_test.test_out\$x_ext"
           x_test_rep="\$x_work_dir/\$x_test.test_rep\$x_ext"
           x_boost_rep="\$x_work_dir/\$x_test.boost_rep\$x_ext"
        else
           #x_cmd="[\$x_work_dir_tail] \$tool_up \$x_name"
           x_test_out="\$x_work_dir/\$x_test.test_out\$x_ext.\$tool_lo"
           x_test_rep="\$x_work_dir/\$x_test.test_rep\$x_ext.\$tool_lo"
           x_boost_rep="\$x_work_dir/\$x_test.boost_rep\$x_ext.\$tool_lo"
        fi


        if \$is_run && \$is_automated; then
           echo "\$signature \$NCBI_CHECK_OS_NAME" > "\$x_test_rep"
           echo "\$x_work_dir_tail" >> "\$x_test_rep"
           echo "\$x_run" >> "\$x_test_rep"
           echo "\$x_alias" >> "\$x_test_rep"
           NCBI_BOOST_REPORT_FILE="\$x_boost_rep"
           export NCBI_BOOST_REPORT_FILE
        fi

        # Check existence of the test's application directory
        if test -d "\$x_work_dir"; then

            # Go to the test's directory
            cd "\$x_work_dir"

            # Run the test if it exists
            if test -f "\$x_app" -o -f "\$bin_dir/\$x_app"; then

                _RLD_ARGS="-log \$x_log"
                export _RLD_ARGS

                # Fix empty parameters (replace "" with \"\", and '' with \'\')
                x_run_fix=\`echo "\$x_run" | sed -e 's/""/\\\\\\\\\\"\\\\\\\\\\"/g' -e "s/''/\\\\\\\\\\'\\\\\\\\\\'/g"\`
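                # (Without this, the "eval" in the launch wrapper below would drop
                #  empty "" / '' arguments from the test command line.)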

                # Define check tool variables
                NCBI_CHECK_TOOL=\`eval echo "\$"NCBI_CHECK_\${tool_up}""\`
                case "\$tool_lo" in
                regular  ) ;;
                valgrind | helgrind )
                           if test "\$tool_lo" = "valgrind"; then
                              NCBI_CHECK_TOOL="\$NCBI_CHECK_VALGRIND \$VALGRIND_CMD"
                           else
                              NCBI_CHECK_TOOL="\$NCBI_CHECK_VALGRIND \$HELGRIND_CMD"
                           fi
                           NCBI_CHECK_TIMEOUT_MULT=15
                           NCBI_RUN_UNDER_VALGRIND="yes"
                           export NCBI_RUN_UNDER_VALGRIND
                           NCBI_RUN_UNDER_CHECK_TOOL="yes"
                           export NCBI_RUN_UNDER_CHECK_TOOL
                           ;;
                esac
                export NCBI_CHECK_TOOL
                CHECK_TIMEOUT=\`expr \$x_timeout \* \$NCBI_CHECK_TIMEOUT_MULT\`
                export CHECK_TIMEOUT

                # Just need to report errors to authors?
                if \$is_report_err; then
                    test -f "\$x_test_out" || continue
                    x_code=\`cat \$x_test_out | grep -c '@@@ EXIT CODE:'\`
                    test \$x_code -ne 0 || continue
                    x_good=\`cat \$x_test_out | grep -c '@@@ EXIT CODE: 0'\`
                    if test \$x_good -eq 1; then
                        continue
                    fi
                    MailToAuthors "\$x_authors" "\$x_test_out"
                    continue
                fi

                echo \$x_run | grep '\.sh' > /dev/null 2>&1
                if test \$? -eq 0;  then
                    # Run scripts without any check tools;
                    # the tool will be applied inside the script via \$CHECK_EXEC.
                    xx_run="\$x_run_fix"
                else
                    # Run under the check tool
                    xx_run="\$NCBI_CHECK_TOOL \$x_run_fix"
                fi

                # Write header to the output file
                echo "\$x_test_out" >> \$res_journal
                (
                    echo "======================================================================"
                    echo "\$x_name"
                    echo "======================================================================"
                    echo
                    if test "\$x_run" != "\$x_name"; then
                       echo "Command line: \$x_run"
                       echo
                    fi
                    if test -n "\$NCBI_CHECK_ENV_SETTINGS"; then
                       echo "NCBI_CHECK_ENV_SETTINGS:"
                       for env in \$NCBI_CHECK_ENV_SETTINGS; do
                           echo "    \$env = \`eval echo '$'\$env\`"
                       done
                       echo
                    fi
                ) > \$x_test_out 2>&1

                # Remove old core file if any
                corefile="\$x_work_dir/core"
                rm -f "\$corefile" > /dev/null 2>&1
                rm -f check_exec.pid > /dev/null 2>&1

                # Generate PHID and SID to be used by any application in the current test,
                # and for loading test statistics later (test_stat_load -> ncbi_applog),
                # so that AppLog gets the same values.
                logfile=\$NCBI_CONFIG__LOG__FILE
                NCBI_CONFIG__LOG__FILE=
                export NCBI_CONFIG__LOG__FILE
                if \$have_ncbi_applog; then
                   eval "\`ncbi_applog generate -phid -sid -format=shell-export | tr -d '\r'\`"
                   if \$is_run && \$is_db_load; then
                      # Use the generated PHID for test statistics, and sub-PHID.1 for the test itself
                      saved_phid=\$NCBI_LOG_HIT_ID
                      NCBI_LOG_HIT_ID=\$NCBI_LOG_HIT_ID.1
                      export NCBI_LOG_HIT_ID
                      # Create a file in the current directory with the initial sub-PHID
                      # (will be incremented by \$CHECK_EXEC, if any)
                      echo "0" > \$NCBI_LOG_HIT_ID
                   fi
                fi
                NCBI_CONFIG__LOG__FILE=\$logfile
                export NCBI_CONFIG__LOG__FILE

                # Run check
                start_time="\`date +'$x_date_format'\`"

                # Use a separate shell to run the test.
                # This allows measuring execution time for applications that hit the timeout.
                # Also, the process guard works better when used after "time -p".
                launch_sh="/var/tmp/launch.\$\$.sh"
cat > \$launch_sh <<EOF_launch
#! /bin/sh
exec time -p \$check_exec \`eval echo \$xx_run\`
EOF_launch
                chmod a+x \$launch_sh
                \$launch_sh >\$x_log 2>&1
                result=\$?
                stop_time="\`date +'$x_date_format'\`"
                if \${have_uptime}; then
                    load_avg="\`uptime | sed -e 's/.*averages*: *\(.*\) *$/\1/' -e 's/[, ][, ]*/ /g'\`"
                else
                    load_avg="unavailable"
                fi
                rm \$launch_sh

                LC_ALL=C sed -e '/ ["][$][@]["].*\$/ {
                        s/^.*: //
                        s/ ["][$][@]["].*$//
                }' \$x_log >> \$x_test_out

                # RunID
                runpid='?'
                test -f check_exec.pid  &&  runpid="\`cat check_exec.pid\`"
                runid="\`date -u +%y%m%d%H%M%S\`-\$runpid-\`uname -n\`"
                runid="\`echo \$runid | tr -d '\n\r'\`"
                rm -f check_exec.pid > /dev/null 2>&1

                # Get application execution time
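                # ("sysdep.sh tl 7" is assumed to print the last 7 lines of the log,
                #  i.e. the timeout-guard message and the "time -p" summary; newlines
                #  are folded into '%' and non-printable bytes are stripped before parsing.)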
                exec_time=\`\$build_dir/sysdep.sh tl 7 \$x_log | tr '\n\r' '%%' | tr -d '\000-\037' | tr  -d '\176-\377'\`
                echo \$exec_time | egrep 'real [0-9]|Maximum execution .* is exceeded' > /dev/null 2>&1
                if test \$? -eq 0;  then
                    exec_time=\`echo \$exec_time |   \\
                                sed -e 's/%%/%/g'    \\
                                    -e 's/%$//'      \\
                                    -e 's/%/, /g'    \\
                                    -e 's/[ ] */ /g' \\
                                    -e 's/^.*\(Maximum execution [0-9][0-9]* is exceeded\).*$/\1/' \\
                                    -e 's/^.*\(real [0-9][0-9]*[.][0-9][0-9]*\)/\1/' \\
                                    -e 's/\(sys [0-9][0-9]*[.][0-9][0-9]*\).*/\1/'\`
                else
                    exec_time='unparsable timing stats'
                fi

                rm -f \$x_log

                # Analyze check tool output
                case "\$tool_lo" in
                    valgrind | helgrind )
                               summary_all=\`grep -c 'ERROR SUMMARY:' \$x_test_out\`
                               summary_ok=\`grep -c 'ERROR SUMMARY: 0 ' \$x_test_out\`
                               # The number of such lines can be zero;
                               # in some cases we can lose valgrind's summary.
                               if test \$summary_all -ne \$summary_ok; then
                                   result=254
                               fi
                               ;;
                    * )
                               # GCC sanitizers can fail with a 0 exit code
                               if \$is_max_debug; then
                                   grep '==ERROR: AddressSanitizer:' \$x_test_out > /dev/null 2>&1
                                   if test \$? -eq 0;  then
                                      result=253
                                   fi
                               fi
                               ;;
                esac

                # Write the result of the test into its output file
                echo "Start time   : \$start_time"   >> \$x_test_out
                echo "Stop time    : \$stop_time"    >> \$x_test_out
                echo "Load averages: \$load_avg"     >> \$x_test_out
                echo >> \$x_test_out
                echo "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" >> \$x_test_out
                echo "@@@ EXIT CODE: \$result" >> \$x_test_out

                if test -f "\$corefile"; then
                    echo "@@@ CORE DUMPED" >> \$x_test_out
                    if test -d "\$bin_dir" -a -f "\$bin_dir/\$x_test"; then
                        mv "\$corefile" "\$bin_dir/\$x_test.core"
                    else
                        rm -f "\$corefile"
                    fi
                fi

                # Also write the result to the screen and the log
                if grep NCBI_UNITTEST_DISABLED \$x_test_out >/dev/null; then
                    echo "DIS --  \$x_cmd"
                    echo "DIS --  \$x_cmd" >> \$res_log
                    count_absent=\`expr \$count_absent + 1\`
                    \$is_automated && echo "DIS" >> "\$x_test_rep"

                elif grep NCBI_UNITTEST_SKIPPED \$x_test_out >/dev/null; then
                    echo "SKP --  \$x_cmd"
                    echo "SKP --  \$x_cmd" >> \$res_log
                    count_absent=\`expr \$count_absent + 1\`
                    \$is_automated && echo "SKP" >> "\$x_test_rep"

                elif grep NCBI_UNITTEST_TIMEOUTS_BUT_NO_ERRORS \$x_test_out >/dev/null; then
                    echo "TO  --  \$x_cmd"
                    echo "TO  --  \$x_cmd" >> \$res_log
                    count_timeout=\`expr \$count_timeout + 1\`
                    \$is_automated && echo "TO" >> "\$x_test_rep"

                elif echo "\$exec_time" | egrep 'Maximum execution .* is exceeded' >/dev/null || egrep "Maximum execution .* is exceeded" \$x_test_out >/dev/null; then
                    echo "TO  --  \$x_cmd     (\$exec_time)"
                    echo "TO  --  \$x_cmd     (\$exec_time)" >> \$res_log
                    count_timeout=\`expr \$count_timeout + 1\`
                    \$is_automated && echo "TO" >> "\$x_test_rep"

                elif test \$result -eq 0; then
                    echo "OK  --  \$x_cmd     (\$exec_time)"
                    echo "OK  --  \$x_cmd     (\$exec_time)" >> \$res_log
                    count_ok=\`expr \$count_ok + 1\`
                    \$is_automated && echo "OK" >> "\$x_test_rep"

                else
                    echo "ERR [\$result] --  \$x_cmd     (\$exec_time)"
                    echo "ERR [\$result] --  \$x_cmd     (\$exec_time)" >> \$res_log
                    count_err=\`expr \$count_err + 1\`
                    \$is_automated && echo "ERR" >> "\$x_test_rep"
                fi

                if \$is_automated; then
                    echo "\$start_time" >> "\$x_test_rep"
                    echo "\$result"     >> "\$x_test_rep"
                    echo "\$exec_time"  >> "\$x_test_rep"
                    echo "\$x_authors"  >> "\$x_test_rep"
                    echo "\$load_avg"   >> "\$x_test_rep"
                    echo "\$runid"      >> "\$x_test_rep"
                fi

            else  # Run the test if it exists
                if \$is_run; then
                    echo "ABS --  \$x_cmd"
                    echo "ABS --  \$x_cmd" >> \$res_log
                    count_absent=\`expr \$count_absent + 1\`

                    if \$is_automated; then
                        echo "ABS"         >> "\$x_test_rep"
                        echo "\`date +'$x_date_format'\`" >> "\$x_test_rep"
                        echo "\$x_authors" >> "\$x_test_rep"
                    fi
                fi
            fi

        else  # Check existence of the test's application directory
            if \$is_run; then
                # Test application is absent
                echo "ABS -- \$x_work_dir - \$x_test"
                echo "ABS -- \$x_work_dir - \$x_test" >> \$res_log
                count_absent=\`expr \$count_absent + 1\`

                if \$is_automated; then
                    echo "ABS"         >> "\$x_test_rep"
                    echo "\`date +'$x_date_format'\`" >> "\$x_test_rep"
                    echo "\$x_authors" >> "\$x_test_rep"
                fi
            fi
        fi

        # Load test results into the database and AppLog immediately after the test.
        # Test results are always loaded for automated builds on a 'run' command.

        if \$is_run && \$is_db_load; then
           if test -n "\$saved_phid";  then
              NCBI_LOG_HIT_ID=\$saved_phid
              export NCBI_LOG_HIT_ID
           fi
           case \`uname -s\` in
              CYGWIN* )
                test_stat_load "\$(cygpath -w "\$x_test_rep")" "\$(cygpath -w "\$x_test_out")" "\$(cygpath -w "\$x_boost_rep")" "\$(cygpath -w "\$top_srcdir/build_info")" >> "$x_build_dir/test_stat_load.log" 2>&1 ;;
              IRIX* )
                test_stat_load.sh "\$x_test_rep" "\$x_test_out" "\$x_boost_rep" "\$top_srcdir/build_info" >> "$x_build_dir/test_stat_load.log" 2>&1 ;;
              * )
                test_stat_load "\$x_test_rep" "\$x_test_out" "\$x_boost_rep" "\$top_srcdir/build_info" >> "$x_build_dir/test_stat_load.log" 2>&1 ;;
           esac
           echo >> "$x_build_dir/test_stat_load.log" 2>&1
        fi
        if \$is_run  &&  test -n "\$saved_phid"; then
            rm -f \$saved_phid* > /dev/null 2>&1
        fi

    done  # Run the test under all specified check tools
}

MailToAuthors()
{
   # The limit on the sending email size in kilobytes
   mail_limit=1024
   tmp="./check_mailtoauthors.tmp.\$\$"

   test -z "\$sendmail"  &&  return 0
   test -z "\$1"  &&  return 0
   x_authors=""
   for author in \$1; do
       x_authors="\$x_authors \$author\$domain"
   done
   x_logfile="\$2"

   echo '-----------------------'
   echo "Sending results of the test \$x_app to \$x_authors"
   echo '-----------------------'
        echo "To: \$x_authors"
        echo "Subject: [WATCHERS] \$x_app | \$signature"
        echo
        echo \$x_cmd
        echo
   echo "cmd = \$sendmail \$x_authors"

   COMMON_LimitTextFileSize \$x_logfile \$tmp \$mail_limit
   {
        echo "To: \$x_authors"
        echo "Subject: [WATCHERS] \$x_app | \$signature"
        echo
        echo \$x_cmd
        echo
        cat \$tmp
        echo
        cat \$top_srcdir/build_info
   } | \$sendmail \$x_authors
   echo '-----------------------'
   rm -f \$tmp > /dev/null
}

EOF

#//////////////////////////////////////////////////////////////////////////


# Read the list of tests
x_tests=`cat "$x_list" | sed -e 's/ /%gj_s4%/g'`
x_test_prev=""
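# Each row of the list has 9 fields separated by " ____ " (x_delim), as parsed below:
#   <src dir (relative to src/)> ____ <test name> ____ <application> ____ <command line>
#   ____ <alias> ____ <files to copy> ____ <timeout> ____ <requires> ____ <authors>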

# For all tests
for x_row in $x_tests; do
   # Get one row from the list
   x_row=`echo "$x_row" | sed -e 's/%gj_s4%/ /g' -e 's/^ *//' -e 's/ ____ /~/g'`

   # Split it into parts
   x_src_dir="$x_root_dir/src/`echo \"$x_row\" | sed -e 's/~.*$//'`"
   x_test=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/~.*$//'`
   x_app=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'`
   x_cmd=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'`
   x_name=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'`
   x_files=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'`
   x_timeout=`echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//'  -e 's/^[^~]*~//' -e 's/~.*$//'`
   x_requires=" `echo "$x_row" | sed -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/^[^~]*~//'  -e 's/^[^~]*~//' -e 's/^[^~]*~//' -e 's/~.*$//'` "
   x_authors=`echo "$x_row" | sed -e 's/.*~//'`

   # Default timeout
   test -z "$x_timeout"  &&  x_timeout=$NCBI_CHECK_TIMEOUT_DEFAULT

   # Application base build directory
   x_work_dir_tail="`echo \"$x_row\" | sed -e 's/~.*$//'`"
   x_work_dir="$x_compile_dir/$x_work_dir_tail"

   # Check application requirements ($CHECK_REQUIRES)
   for x_req in $x_requires; do
      if test ! -f "$x_conf_dir/status/$x_req.enabled" ; then
         echo "SKIP -- $x_work_dir_tail/$x_app (unmet CHECK_REQUIRES)"
         continue 2
      fi
   done

   # Copy specified files to the build directory

   if test "$x_import_prj" = "no"; then
      # Automatically copy .ini file if it exists
      x_copy="$x_src_dir/$x_app.ini"
      test -f $x_copy  &&  cp -pf "$x_copy" "$x_work_dir"
      # Copy specified CHECK_COPY files/dirs
      if test ! -z "$x_files"; then
         for i in $x_files ; do
            x_copy="$x_src_dir/$i"
            if test -f "$x_copy"  -o  -d "$x_copy"; then
               cp -prf "$x_copy" "$x_work_dir"
               test -d "$x_work_dir/$i" &&  find "$x_work_dir/$i" -name .svn -print | xargs rm -rf
            else
               echo "Warning:  The copied object \"$x_copy\" should be a file or directory!"
               continue 1
            fi
         done
      fi
   fi

   # Generate an extension for the test's output files
   if test "$x_test" != "$x_test_prev"; then
      x_cnt=1
      x_test_ext=""
   else
      x_cnt=`expr $x_cnt + 1`
      x_test_ext="$x_cnt"
   fi
   x_test_prev="$x_test"
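   # (So the 2nd, 3rd, ... occurrence of the same test writes to *.test_out2, *.test_out3, etc.)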

#//////////////////////////////////////////////////////////////////////////

   # Write test commands for the current test into the check script
   cat >> $x_out <<EOF
######################################################################
RunTest "$x_work_dir_tail" \\
        "$x_test" \\
        "$x_app" \\
        "$x_cmd" \\
        "$x_name" \\
        "$x_test_ext" \\
        "$x_timeout" \\
        "$x_authors"
EOF

#//////////////////////////////////////////////////////////////////////////

done # for x_row in x_tests


# Write ending code into the script
cat >> $x_out <<EOF

if \$is_run; then
   # Write the summary of the test execution
   echo
   echo "Succeeded : \$count_ok"
   echo "Timeout   : \$count_timeout"
   echo "Failed    : \$count_err"
   echo "Absent    : \$count_absent"
   echo
   if test \$count_err -eq 0; then
      echo
      echo "******** ALL TESTS COMPLETED SUCCESSFULLY ********"
      echo
   fi
fi

if test \$count_err -eq 0; then
   touch $x_target_dir/check.success
else
   touch $x_target_dir/check.failed
fi

exit \$count_err
EOF

# Make the generated script executable
chmod a+x "$x_out"

exit 0