1#! /bin/sh
2
3# $Id: check_make_unix_cmake.sh 630684 2021-05-06 15:49:59Z ivanov $
4# Author:  Vladimir Ivanov, NCBI
5#
6###########################################################################
7#
# Generate a check script and copy the files necessary to run tests in the
# UNIX build tree.
10#
11# Usage:
#    check_make_unix_cmake.sh <test_list> <signature> <build_dir> <top_srcdir> <target_dir> <check_script> [build_cfg]
13#
#    test_list       - a list of tests (built with "make check_r")
#                      (default: "<target_dir>/check.sh.list")
#    signature       - build signature
#    build_dir       - path to the UNIX build tree, like ".../build/..."
#                      (default: the path is derived from the current working
#                      directory -- the root of the build tree)
#    top_srcdir      - path to the root src directory
#                      (default: the path is derived from the location of
#                      this script)
#    target_dir      - path where the check script and logs will be created
#                      (default: the current directory)
#    check_script    - name of the check script (without path)
#                      (default: "check.sh" / "<target_dir>/check.sh")
#    build_cfg       - optional build configuration name; if given, it is used
#                      as a subdirectory of <target_dir> for the generated
#                      script and logs
27#
#    If any parameter is skipped, its default value will be used.
29#
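#    A hypothetical invocation (the signature and all paths below are purely
#    illustrative, not real defaults):
#
#        ./check_make_unix_cmake.sh check.sh.list "GCC_730-Debug64--x86_64" \
#            $HOME/c++/build $HOME/c++ $HOME/c++/build check.sh
#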
30# Note:
#    Works with a UNIX build tree only (any configuration).
32#
33###########################################################################
34
35# Load configuration options
36x_check_scripts_dir=`dirname "$0"`
37x_scripts_dir=`dirname "$x_check_scripts_dir"`
38x_scripts_dir=`dirname "$x_scripts_dir"`
39. ${x_check_scripts_dir}/check_setup.cfg
40
41
42# Parameters
43
44res_out="check.sh"
45res_list="$res_out.list"
46
# Field delimiters in the list
# (these symbols are used directly in the "sed" commands)
49x_delim=" ____ "
50x_delim_internal="~"
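# Each row of the test list is expected to carry the fields parsed by RunTest()
# in the generated script (a sketch; the field order is inferred from that
# parsing code):
#   <work_dir> ____ <test> ____ <app> ____ <run> ____ <name> ____ <files>
#   ____ <timeout> ____ <requires> ____ <authors> ____ <resources> ____ <alias>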
51x_tmp="/var/tmp"
52
53x_date_format="%m/%d/%Y %H:%M:%S"
54
55x_list=$1
56x_signature=$2
57x_build_dir=$3
58x_top_srcdir=$4
59x_target_dir=$5
60x_out=$6
61x_out_name=$x_out
62x_buildcfg=$7
63
64if test "$x_buildcfg" != ""; then
65  x_buildcfg="/$x_buildcfg"
66fi
67
68# Detect Cygwin
69case `uname -s` in
70   CYGWIN* ) cygwin=true  ;;
71   *)        cygwin=false ;;
72esac
73
74# Check for build dir
75if test ! -z "$x_build_dir"; then
76   if test ! -d "$x_build_dir"; then
77      echo "Build directory \"$x_build_dir\" don't exist."
78      exit 1
79   fi
80   x_build_dir=`(cd "$x_build_dir"; pwd | sed -e 's/\/$//g')`
81else
82   # Get build dir name from the current path
83   x_build_dir=`pwd | sed -e 's%/build.*$%%'`
84   if test -d "$x_build_dir/build"; then
85      x_build_dir="$x_build_dir/build"
86   fi
87fi
88
89x_conf_dir=`dirname "$x_build_dir"`
90x_bin_dir=`(cd "$x_build_dir/../bin"; pwd | sed -e 's/\/$//g')`
91
92# Check for top_srcdir
93if test ! -z "$x_top_srcdir"; then
94   if test ! -d "$x_top_srcdir"; then
95      echo "Top source directory \"$x_top_srcdir\" don't exist."
96      exit 1
97   fi
98   x_root_dir=`(cd "$x_top_srcdir"; pwd | sed -e 's/\/$//g')`
99else
100   # Get top src dir name from the script directory
101   x_root_dir=`dirname "$x_scripts_dir"`
102fi
103
104# Check for target dir
105if test ! -z "$x_target_dir"; then
106   if test ! -d "$x_target_dir"; then
107      echo "Target directory \"$x_target_dir\" don't exist."
108      exit 1
109   fi
   x_target_dir=`(cd "$x_target_dir"; pwd | sed -e 's/\/$//g')`
111else
112   x_target_dir=`pwd`
113fi
114
# Check for an imported or in-tree project
116if test -f Makefile.out ; then
117   x_import_prj="yes"
118   x_import_root=`sed -ne 's/^import_root *= *//p' Makefile`
119   # x_compile_dir="`pwd | sed -e 's%/internal/c++/src.*$%%g'`/internal/c++/src"
120   x_compile_dir=`cd $x_import_root; pwd`
121else
122   x_import_prj="no"
123#   x_compile_dir="$x_build_dir"
124   x_compile_dir="$x_target_dir$x_buildcfg"
125fi
126
127if test -z "$x_list"; then
128   x_list="$x_target_dir/$res_list"
129fi
130
131if test -z "$x_out"; then
132   x_out="$x_target_dir$x_buildcfg/$res_out"
133else
134   x_out="$x_target_dir$x_buildcfg/$x_out_name"
135fi
136
137x_script_name=`echo "$x_out" | sed -e 's%^.*/%%'`
138
139# Check for a list file
140if test ! -f "$x_list"; then
141   echo "Check list file \"$x_list\" not found."
142   exit 1
143fi
144
# Feature detection
146x_features=""
147for f in `ls $x_conf_dir/status/*.enabled | sort -df`; do
148   f=`echo $f | sed 's|^.*/status/\(.*\).enabled$|\1|g'`
149   x_features="$x_features$f "
150done
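# The collected feature names are passed into the generated script below and
# exported to the tests through the FEATURES environment variable.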
151
152
153#echo ----------------------------------------------------------------------
154#echo "Imported project  :" $x_import_prj
155#echo "C++ root dir      :" $x_root_dir
156#echo "Configuration dir :" $x_conf_dir
157#echo "Build dir         :" $x_build_dir
158#echo "Compile dir       :" $x_compile_dir
159#echo "Target dir        :" $x_target_dir
160#echo "Check script      :" $x_out
161#echo ----------------------------------------------------------------------
162
163#//////////////////////////////////////////////////////////////////////////
164if test ! -d "${x_target_dir}${x_buildcfg}"; then
165  mkdir -p "${x_target_dir}${x_buildcfg}"
166fi
167cat > $x_out <<EOF
168#! /bin/sh
169
170buildcfg="${x_buildcfg}"
171checkroot="$x_target_dir"
172checkdir="\${checkroot}\${buildcfg}"
173
174root_dir="$x_root_dir"
175#build_dir="$x_build_dir"
176build_dir="\${checkdir}"
177conf_dir="$x_conf_dir"
178#compile_dir="$x_compile_dir"
179compile_dir="\${checkdir}"
180bin_dir="$x_bin_dir\${buildcfg}"
181script_dir="$x_scripts_dir"
182script="\${checkroot}/$x_out_name"
183cygwin=$cygwin
184signature="$x_signature"
185sendmail=''
186domain='@ncbi.nlm.nih.gov'
187
188test -d "\${checkdir}" || mkdir -p "\${checkdir}"
189#res_journal="\$script.journal"
190#res_log="\$script.log"
191#res_list="$x_list"
192#res_concat="\$script.out"
193#res_concat_err="\$script.out_err"
194res_journal="\${checkdir}/${x_out_name}.journal"
195res_log="\${checkdir}/${x_out_name}.log"
196res_list="\${checkroot}/${x_out_name}.list"
res_concat="\${checkdir}/${x_out_name}.out"
198res_concat_err="\${checkdir}/${x_out_name}.out_err"
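# Note: the journal file lists the test_out files produced by each test run;
# it is what the "clean", "concat", and "concat_err" commands iterate over.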
199
200# Define both senses to accommodate shells lacking !
201is_run=false
202no_run=true
203is_report_err=false
204no_report_err=true
205
206
207# Include COMMON.SH
208. \${script_dir}/common/common.sh
209
210
# Print usage information and exit
212
213Usage() {
214   cat <<EOF_usage
215
216USAGE:  $x_script_name {run | clean | concat | concat_err}
217
218 run         Run the tests. Create output file ("*.test_out") for each test, 
219             plus journal and log files. 
220 clean       Remove all files created during the last "run" and this script 
221             itself.
 concat      Concatenate all files created during the last "run" into one big 
             file "\$res_concat".
 concat_err  Like the previous one, but only the outputs of failed tests 
             are added to the file "\$res_concat_err".
226
227ERROR:  \$1
228EOF_usage
229# Undocumented commands:
230#     report_err  Report failed tests directly to developers.
231
232    exit 1
233}
234
235if test \$# -ne 1; then
236   Usage "Invalid number of arguments."
237fi
238
239
240# Action
241
242method="\$1"
243
244case "\$method" in
245#----------------------------------------------------------
246   run )
247      is_run=true
248      no_run=false
249      # See RunTest() below
250      ;;
251#----------------------------------------------------------
252   clean )
253      x_files=\`cat \$res_journal | sed -e 's/ /%gj_s4%/g'\`
254      for x_file in \$x_files; do
255         x_file=\`echo "\$x_file" | sed -e 's/%gj_s4%/ /g'\`
256         rm -f \$x_file > /dev/null
257      done
258      rm -f \$res_journal \$res_log \$res_list \$res_concat \$res_concat_err > /dev/null
259      rm -f \$script > /dev/null
260      exit 0
261      ;;
262#----------------------------------------------------------
263   concat )
264      rm -f "\$res_concat"
265      ( 
266      cat \$res_log
267      x_files=\`cat \$res_journal | sed -e 's/ /%gj_s4%/g'\`
268      for x_file in \$x_files; do
269         x_file=\`echo "\$x_file" | sed -e 's/%gj_s4%/ /g'\`
270         echo 
271         echo 
272         cat \$x_file
273      done
274      ) >> \$res_concat
275      exit 0
276      ;;
277#----------------------------------------------------------
278   concat_err )
279      rm -f "\$res_concat_err"
280      ( 
281      cat \$res_log | egrep 'ERR \[|TO  -'
282      x_files=\`cat \$res_journal | sed -e 's/ /%gj_s4%/g'\`
283      for x_file in \$x_files; do
284         x_file=\`echo "\$x_file" | sed -e 's/%gj_s4%/ /g'\`
285         x_code=\`cat \$x_file | grep -c '@@@ EXIT CODE:'\`
286         test \$x_code -ne 0 || continue
287         x_good=\`cat \$x_file | grep -c '@@@ EXIT CODE: 0'\`
288         if test \$x_good -ne 1; then
289            echo 
290            echo 
291            cat \$x_file
292         fi
293      done
294      ) >> \$res_concat_err
295      exit 0
296      ;;
297#----------------------------------------------------------
298   report_err )
299      # This method works inside NCBI only 
300      test "\$NCBI_CHECK_MAILTO_AUTHORS." = 'Y.'  ||  exit 0;
301      if test -x /usr/sbin/sendmail; then
302         sendmail="/usr/sbin/sendmail -oi"
303      elif test -x /usr/lib/sendmail; then
304         sendmail="/usr/lib/sendmail -oi"
305      else
306         echo sendmail not found on this platform
307         exit 0
308      fi
309      is_report_err=true
310      no_report_err=false
311      # See RunTest() below
312      ;;
313#----------------------------------------------------------
314   * )
315      Usage "Invalid method name \$method."
316      ;;
317esac
318
319
320#//////////////////////////////////////////////////////////////////////////
321
322
323trap "touch \${checkdir}/check.failed; exit 1"  1 2 15
324rm \${checkdir}/check.failed \${checkdir}/check.success > /dev/null 2>&1 
325
326# Set log_site for tests
327NCBI_APPLOG_SITE=testcxx
328export NCBI_APPLOG_SITE
329
330# Include configuration file
331. \${checkroot}/check.cfg
332if test -z "\$NCBI_CHECK_TOOLS"; then
333   NCBI_CHECK_TOOLS="regular"
334fi
# Check timeout multiplier (increases the default check timeout that many times)
336if test -z "\$NCBI_CHECK_TIMEOUT_MULT"; then
337   NCBI_CHECK_TIMEOUT_MULT=1
338fi
339export NCBI_CHECK_TIMEOUT_DEFAULT=$NCBI_CHECK_TIMEOUT_DEFAULT
340
341# Path to test data, used by some scripts and applications
342if test -z "\$NCBI_TEST_DATA"; then
343    if [ \$cygwin = true ]; then
344       NCBI_TEST_DATA=//snowman/win-coremake/Scripts/test_data
345    else
346       NCBI_TEST_DATA=/am/ncbiapdata/test_data
347    fi
348    export NCBI_TEST_DATA
349fi
350# Add synonym for it, see: include/common/test_data_path.h (CXX-9239)
351if test -z "\$NCBI_TEST_DATA_PATH"; then
352    NCBI_TEST_DATA_PATH=\$NCBI_TEST_DATA
353    export NCBI_TEST_DATA_PATH
354fi
355
356# Valgrind/Helgrind configurations
357VALGRIND_SUP="\${script_dir}/common/check/valgrind.supp"
358VALGRIND_CMD="--tool=memcheck --suppressions=\$VALGRIND_SUP"
359HELGRIND_CMD="--tool=helgrind --suppressions=\$VALGRIND_SUP"
360if (valgrind --ncbi --help) >/dev/null 2>&1; then
361    VALGRIND_CMD="--ncbi \$VALGRIND_CMD" # --ncbi must be the first option!
362    HELGRIND_CMD="--ncbi \$HELGRIND_CMD" # --ncbi must be the first option!
363fi
364
365# Leak- and Thread- Sanitizers (GCC 7.3, -fsanitize= flags)
366LSAN_OPTIONS="suppressions=\${script_dir}/common/check/lsan.supp:exitcode=0"
367export LSAN_OPTIONS
368TSAN_OPTIONS="suppressions=\${script_dir}/common/check/tsan.supp"
369export TSAN_OPTIONS
370
# Tell BOOST tests not to catch asynchronous system failures
372# (signals on *NIX platforms or structured exceptions on Windows)
373BOOST_TEST_CATCH_SYSTEM_ERRORS=no
374export BOOST_TEST_CATCH_SYSTEM_ERRORS
375
376BOOST_TEST_COLOR_OUTPUT=no
377export BOOST_TEST_COLOR_OUTPUT
378
379# Export some global vars
380top_srcdir="\$root_dir"
381export top_srcdir
382FEATURES="$x_features"
383export FEATURES
384
385# Redirect output for C++ diagnostic framework to stderr,
# except when running under the 'export_project' tool.
387if test -z "\$NCBI_EXPORT_PROJECT"; then
388    NCBI_CONFIG__LOG__FILE="-"
389    export NCBI_CONFIG__LOG__FILE
390fi
391
392# Add additional necessary directories to PATH: current, build, scripts, utility and $HOME/bin (for Ubuntu).
393PATH="\${script_dir}/common/impl:\$NCBI/bin/_production/CPPCORE:\$HOME/bin:.:\${build_dir}:\${bin_dir}:\${PATH}"
394export PATH
395
# Export bin and lib paths
397CFG_BIN="\${conf_dir}/bin\${buildcfg}"
398CFG_LIB="\${conf_dir}/lib\${buildcfg}"
399export CFG_BIN CFG_LIB
400
401# Define time-guard script to run tests from other scripts
402check_exec="\${script_dir}/common/check/check_exec.sh"
403CHECK_EXEC="\${script_dir}/common/check/check_exec_test.sh"
404CHECK_EXEC_STDIN="\$CHECK_EXEC -stdin"
405CHECK_SIGNATURE="\$signature"
406export CHECK_EXEC
407export CHECK_EXEC_STDIN
408export CHECK_SIGNATURE
409
# Debug tools to get stack/back traces (not used when running under memory checkers)
411NCBI_CHECK_STACK_TRACE=''
412NCBI_CHECK_BACK_TRACE=''
413if test "\$NCBI_CHECK_TOOLS" = "regular"; then
414   if (which gdb) >/dev/null 2>&1; then
415       NCBI_CHECK_BACK_TRACE='gdb --batch --quiet -ex "thread apply all bt" -ex "quit"'
416   fi
417   if (which gstack) >/dev/null 2>&1; then
418       NCBI_CHECK_STACK_TRACE='gstack'
419   fi
420   export NCBI_CHECK_BACK_TRACE
421   export NCBI_CHECK_STACK_TRACE
422fi
423
424# Use AppLog-style output format in the testsuite by default
425if test -z "\$DIAG_OLD_POST_FORMAT"; then
426    DIAG_OLD_POST_FORMAT=false
427    export DIAG_OLD_POST_FORMAT
428fi
429
430# Avoid possible hangs on Mac OS X.
431DYLD_BIND_AT_LAUNCH=1
432export DYLD_BIND_AT_LAUNCH
433
434case " \$FEATURES " in
435    *\ MaxDebug\ * )
436         case "\$signature" in
437	     *-linux* ) MALLOC_DEBUG_=2; export MALLOC_DEBUG_ ;;
438         esac
439         case "\$signature" in
440             GCC* | ICC* ) NCBI_CHECK_TIMEOUT_MULT=20 ;;
441         esac
442         ;;
443esac
444
# Check that linkerd is reachable and set a backup address if it is not
446echo test | nc -w 1 linkerd 4142 > /dev/null 2>&1
447if test \$? -ne 0;  then
448   NCBI_CONFIG__ID2SNP__PTIS_NAME="pool.linkerd-proxy.service.bethesda-dev.consul.ncbi.nlm.nih.gov:4142"
449   export NCBI_CONFIG__ID2SNP__PTIS_NAME
450fi
451
452EOF
453
454if test -n "$x_conf_dir"  -a  -d "$x_conf_dir/lib";  then
455   cat >> $x_out <<EOF
456# Add a library path for running tests
457. \${script_dir}/common/common.sh
458COMMON_AddRunpath "\$conf_dir/lib\${buildcfg}"
459EOF
460else
461   echo "WARNING:  Cannot find path to the library dir."
462fi
463# Add additional path for imported projects to point to local /lib first
464if test "$x_import_prj" = "yes"; then
465    local_lib=`(cd "$x_compile_dir/../lib"; pwd | sed -e 's/\/$//g')`
466    if test -n "$local_lib"  -a  -d "$local_lib";  then
467   cat >> $x_out <<EOF
468COMMON_AddRunpath "$local_lib"
469EOF
470    fi
471fi
472
473
474#//////////////////////////////////////////////////////////////////////////
475
476cat >> $x_out <<EOF
477
478# Check for automated build
479is_automated=false
480is_db_load=false
481if test -n "\$NCBI_AUTOMATED_BUILD"; then
482   is_automated=true
483   if test -n "\$NCBI_CHECK_DB_LOAD"; then
484      is_db_load=true
485   fi
486fi
487
488# Check for some executables
489have_ncbi_applog=false
490if (ncbi_applog generate) >/dev/null 2>&1; then
491   have_ncbi_applog=true
492fi
493have_uptime=false
494if (which uptime) >/dev/null 2>&1; then
495   have_uptime=true
496fi
497
498
499#//////////////////////////////////////////////////////////////////////////
500
501
502# Run
503count_ok=0
504count_err=0
505count_timeout=0
506count_absent=0
507count_total=0
508
509if \$is_run; then
510   rm -f "\$res_journal"
511   rm -f "\$res_log"
512   #rm -f "$x_build_dir/test_stat_load.log"
513fi
514
# Set application limits:
# only if \$NCBI_CHECK_SETLIMITS was not set to 0 beforehand, and the build was
# not configured with --with-max-debug.
# Some tools enabled by that configure flag, like AddressSanitizer, can fail if limited.
518
519is_max_debug=false
520if test -f "\${conf_dir}/status/MaxDebug.enabled"; then
521   is_max_debug=true
522fi
523if test "\$NCBI_CHECK_SETLIMITS" != "0"  -a  ! \$is_max_debug; then
524   ulimit -c 1000000
525   ulimit -n 8192
526   if [ \$cygwin = false ]; then
527       if test "\$NCBI_CHECK_TOOLS" = "regular"; then
528          ulimit -v 48000000
529       else
530          # Increase memory limits if run under check tool
531          ulimit -v 64000000
532       fi
533   fi
534fi
535
536
537# Run one test
538
539RunTest()
540{
541    # Parameters
542    IFS=';'; rargs=(\$1); unset IFS;
543    x_work_dir_tail=\${rargs[0]};
544    x_test=\${rargs[1]};
545    x_app=\${rargs[2]};
546    x_run=\${rargs[3]};
547    x_alias=\${rargs[10]};
548    x_name=\${rargs[4]};
549    x_files=\${rargs[5]};
550    x_timeout=\${rargs[6]};
551    x_requires=\${rargs[7]};
552    x_authors=\${rargs[8]};
553    x_resources=\${rargs[9]};
554    r_id=\$2
555
556    test -z "\$x_timeout"  &&  x_timeout=\$NCBI_CHECK_TIMEOUT_DEFAULT
557    x_work_dir="\$checkdir/\$x_work_dir_tail"
558    x_done="\$checkdir/~\$x_name"
559    x_wlog_dir="\$x_work_dir"
560    x_work_dir="\$x_work_dir/~\$x_name"
561    x_log="$x_tmp/\$\$.~\$x_name.out"
562    x_info="$x_tmp/\$\$.~\$x_name.info"
563
564    if test -f "/etc/nologin"; then
565        echo "Nologin detected, probably host going to reboot. Skipping test:" \$x_name
566        touch "\$checkdir/~RUN_CHECKS.next"
567        return 0
568    fi
569    if \$is_report_err; then
570        # Authors are not defined for this test
571        if test -z "\$x_authors"; then
572            touch "\$checkdir/~RUN_CHECKS.next"
573            return 0
574        fi
575    fi
576    test -d \${x_work_dir} || mkdir -p \${x_work_dir}
577
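    # Check CHECK_REQUIRES entries against <conf_dir>/status/*.enabled files.
    # A plain name requires the corresponding feature to be enabled; a name
    # prefixed with "-" requires it to be disabled (e.g., a hypothetical
    # "unix -Cygwin" needs unix.enabled to exist and Cygwin.enabled to be absent).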
578    for x_req in \$x_requires; do
579      t_sub=\${x_req::1}
580      t_res="yes"
581      if test \$t_sub = "-"; then
582          t_sub=\${x_req:1}
583          if test -f "\$conf_dir/status/\$t_sub.enabled" ; then
584            t_res="no"
585          fi
586      else
587          t_sub=\${x_req}
588          if test ! -f "\$conf_dir/status/\$t_sub.enabled" ; then
589            t_res="no"
590          fi
591      fi
592      if test "\$t_res" = "no"; then
593           x_test_out="\$x_wlog_dir/\$x_name.test_out\$x_ext"
594           echo NCBI_UNITTEST_SKIPPED > \$x_test_out
595           echo "t_cmd=\\"[\$r_id/\$x_TestsTotal \$x_work_dir_tail] \$x_name (unmet CHECK_REQUIRES=\$x_req)\\"" > \$x_info
596           echo "t_test_out=\\"\$x_test_out\\"" >> \$x_info
597           mv \$x_info "\${x_done}.done"
598           touch "\$checkdir/~RUN_CHECKS.next"
599           rm -rf "\${x_work_dir}"
600           return 0
601      fi
602    done
603
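    # Honor CHECK_RESOURCES: the special resource SERIAL makes the test wait
    # until no other test is in progress; every other resource name is turned
    # into a "~<resource>.lock" directory so tests sharing it run one at a time.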
604    if test -n "\$x_resources"; then
605        for r in \$x_resources ; do
606            if test \$r = "SERIAL"; then
607                while ! mkdir "\$checkdir/~\$r.lock" 2>/dev/null
608                do
609	                sleep 1
610                done
611                r_count=\`ls \${checkdir}/*.in_progress 2>/dev/null | wc -w | sed -e 's/ //g'\`
612                while test \$r_count -gt 0; do
613                    sleep 1
614                    r_count=\`ls \${checkdir}/*.in_progress 2>/dev/null | wc -w | sed -e 's/ //g'\`
615                done
616            fi
617        done
618    fi
619
620    touch "\${x_done}.in_progress"
621    if test -f "\${x_done}.done"; then 
622        rm -f "\${x_done}.done"
623    fi
624    touch "\$checkdir/~RUN_CHECKS.next"
625
626    if test -n "\$x_resources"; then
627        for r in \$x_resources ; do
628            if test \$r != "SERIAL"; then
629                while ! mkdir "\$checkdir/~\$r.lock" 2>/dev/null
630                do
631	                sleep 1
632                done
633            fi
634        done
635    fi
636
637    if test -n "\$x_files"; then
638        for i in \$x_files ; do
639            x_copy="\$root_dir/src/\$x_work_dir_tail/\$i"
640            if test -f "\$x_copy"  -o  -d "\$x_copy"; then
641                cp -rf "\$x_copy" "\$x_work_dir"
642                test -d "\$x_work_dir/\$i" &&  find "\$x_work_dir/\$i" -name .svn -print | xargs rm -rf
643            else
644                echo "[\$x_work_dir_tail] \$x_name: Warning:  The copied object \"\$x_copy\" should be a file or directory!"
645                continue
646            fi
647        done
648    fi
649
650    # Run test under all specified check tools   
651    for tool in \$NCBI_CHECK_TOOLS; do
652
653        saved_phid=''
654
655        tool_lo=\`echo \$tool | tr '[A-Z]' '[a-z]'\`
656        tool_up=\`echo \$tool | tr '[a-z]' '[A-Z]'\`
657        
658        case "\$tool_lo" in
659            regular | valgrind | helgrind ) ;;
660                             * ) continue ;;
661        esac
662        
663        x_cmd="[\$r_id/\$x_TestsTotal \$x_work_dir_tail] \$x_name"
664        if test \$tool_lo = "regular"; then
665           #x_cmd="[\$x_work_dir_tail] \$x_name"
666           x_test_out="\$x_wlog_dir/\$x_name.test_out\$x_ext"
667           x_test_rep="\$x_wlog_dir/\$x_name.test_rep\$x_ext"
668           x_boost_rep="\$x_wlog_dir/\$x_name.boost_rep\$x_ext"
669        else
670           #x_cmd="[\$x_work_dir_tail] \$tool_up \$x_name"
671           x_test_out="\$x_wlog_dir/\$x_name.test_out\$x_ext.\$tool_lo"
672           x_test_rep="\$x_wlog_dir/\$x_name.test_rep\$x_ext.\$tool_lo"
673           x_boost_rep="\$x_wlog_dir/\$x_name.boost_rep\$x_ext.\$tool_lo"
674        fi
675
676   
677        if \$is_run && \$is_automated; then
678           echo "\$signature \$NCBI_CHECK_OS_NAME" > "\$x_test_rep"
679           echo "\$x_work_dir_tail" >> "\$x_test_rep"
680           echo "\$x_run" >> "\$x_test_rep"
681           echo "\$x_alias" >> "\$x_test_rep"
682           NCBI_BOOST_REPORT_FILE="\$x_boost_rep"
683           if $cygwin; then
684               export NCBI_BOOST_REPORT_FILE="\$(cygpath -w "\$x_boost_rep")"
685           else
686               export NCBI_BOOST_REPORT_FILE
687           fi
688        fi
689
690        # Check existence of the test's application directory
691        if test -d "\$x_work_dir"; then
692
            # Go to the test's directory 
694            cd "\$x_work_dir"
695
            # Run the test if it exists
697            if test -f "\$x_app" -o -f "\$bin_dir/\$x_app"; then
698
699                _RLD_ARGS="-log \$x_log"
700                export _RLD_ARGS
701
                # Fix empty parameters (replace "" with \"\", and '' with \'\')
703                x_run_fix=\`echo "\$x_run" | sed -e 's/""/\\\\\\\\\\"\\\\\\\\\\"/g' -e "s/''/\\\\\\\\\\'\\\\\\\\\\'/g"\`
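                # (empty arguments would otherwise disappear when the command
                # line is re-evaluated through eval/exec in the launch script below)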
704
705                # Define check tool variables
706                NCBI_CHECK_TOOL=\`eval echo "\$"NCBI_CHECK_\${tool_up}""\`
707                case "\$tool_lo" in
708                regular  ) ;;
709                valgrind | helgrind ) 
710                           if test "\$tool_lo" = "valgrind"; then
711                              NCBI_CHECK_TOOL="\$NCBI_CHECK_VALGRIND \$VALGRIND_CMD" 
712                           else
713                              NCBI_CHECK_TOOL="\$NCBI_CHECK_VALGRIND \$HELGRIND_CMD" 
714                           fi
715                           NCBI_CHECK_TIMEOUT_MULT=15
716                           NCBI_RUN_UNDER_VALGRIND="yes"
717                           export NCBI_RUN_UNDER_VALGRIND
718                           NCBI_RUN_UNDER_CHECK_TOOL="yes"
719                           export NCBI_RUN_UNDER_CHECK_TOOL
720                           ;;
721                esac
722                export NCBI_CHECK_TOOL
723                CHECK_TIMEOUT=\`expr \$x_timeout \* \$NCBI_CHECK_TIMEOUT_MULT\`
724                export CHECK_TIMEOUT
725
726                # Just need to report errors to authors?
727                if \$is_report_err; then
728                    test -f "\$x_test_out" || continue
729                    x_code=\`cat \$x_test_out | grep -c '@@@ EXIT CODE:'\`
730                    test \$x_code -ne 0 || continue
731                    x_good=\`cat \$x_test_out | grep -c '@@@ EXIT CODE: 0'\`
732                    if test \$x_good -eq 1; then
733                        continue
734                    fi
735                    MailToAuthors "\$x_authors" "\$x_test_out"
736                    continue
737                fi
738         
739                echo \$x_run | grep '\.sh' > /dev/null 2>&1 
740                if test \$? -eq 0;  then
                    # Run the script without any check tools;
                    # they will be applied inside the script via \$CHECK_EXEC.
743                    xx_run="\$x_run_fix"
744                else
745                    # Run under check tool
746                    xx_run="\$NCBI_CHECK_TOOL \$x_run_fix"
747                fi
748
749                # Write header to output file 
750                (
751                    echo "======================================================================"
752                    echo "\$x_name"
753                    echo "======================================================================"
754                    echo 
755                    if test "\$x_run" != "\$x_name"; then
756                       echo "Command line: \$x_run"
757                       echo 
758                    fi
759                    if test -n "\$NCBI_CHECK_ENV_SETTINGS"; then
760                       echo "NCBI_CHECK_ENV_SETTINGS:"
761                       for env in \$NCBI_CHECK_ENV_SETTINGS; do
762                           echo "    \$env = \`eval echo '$'\$env\`"
763                       done
764                       echo
765                    fi
766                ) > \$x_test_out 2>&1
767
768                # Remove old core file if any
769                corefile="\$x_work_dir/core"
770                rm -f "\$corefile" > /dev/null 2>&1
771                rm -f check_exec.pid > /dev/null 2>&1
772
                # Generate PHID and SID so that they can be used by any application
                # in the current test, and later when loading test statistics
                # (test_stat_load -> ncbi_applog), so that AppLog gets the same values.
776                logfile=\$NCBI_CONFIG__LOG__FILE
777                NCBI_CONFIG__LOG__FILE=
778                export NCBI_CONFIG__LOG__FILE
779                if \$have_ncbi_applog; then
780                   eval "\`ncbi_applog generate -phid -sid -format=shell-export | tr -d '\r'\`"
781                   if \$is_run && \$is_db_load; then
782                      # Use generated PHID for test statistics, and sub-PHID.1 for test itself
783                      saved_phid=\$NCBI_LOG_HIT_ID
784                      NCBI_LOG_HIT_ID=\$NCBI_LOG_HIT_ID.1
785                      export NCBI_LOG_HIT_ID
                      # Create a file in the current directory with the initial sub-PHID
                      # (it will be incremented by \$CHECK_EXEC, if used)
788                      echo "0" > \$NCBI_LOG_HIT_ID
789                   fi
790                fi
791                NCBI_CONFIG__LOG__FILE=\$logfile
792                export NCBI_CONFIG__LOG__FILE
793
794                # Run check
795                start_time="\`date +'$x_date_format'\`"
796                        
                # Use a separate shell to run the test.
                # This makes it possible to get the execution time even for
                # applications that are terminated on timeout.
                # Also, the process guard works better when used after "time -p".
800                launch_sh="$x_tmp/launch.\$\$.~\$x_name.sh"
801cat > \$launch_sh <<EOF_launch
802#! /bin/sh
803exec time -p \$check_exec \`eval echo \$xx_run\`
804EOF_launch
805                chmod a+x \$launch_sh
806                \$launch_sh >\$x_log 2>&1
807                result=\$?
808                stop_time="\`date +'$x_date_format'\`"
809                if \${have_uptime}; then
810                    load_avg="\`uptime | sed -e 's/.*averages*: *\(.*\) *$/\1/' -e 's/[, ][, ]*/ /g'\`"
811                else
812                    load_avg="unavailable"
813                fi
814                rm \$launch_sh
815
816                LC_ALL=C sed -e '/ ["][$][@]["].*\$/ {
817                        s/^.*: //
818                        s/ ["][$][@]["].*$//
819                }' \$x_log >> \$x_test_out
820
821                # RunID
822                runpid='?'
823                test -f check_exec.pid  &&  runpid="\`cat check_exec.pid\`"
824                runid="\`date -u +%y%m%d%H%M%S\`-\$runpid-\`uname -n\`"
825                runid="\`echo \$runid | tr -d '\n\r'\`"
826                rm -f check_exec.pid > /dev/null 2>&1
827                
828                # Get application execution time
829                exec_time=\`\${checkroot}/sysdep.sh tl 7 \$x_log | tr '\n\r' '%%' | tr -d '\000-\037' | tr  -d '\176-\377'\`
830                echo \$exec_time | egrep 'real [0-9]|Maximum execution .* is exceeded' > /dev/null 2>&1 
831                if test \$? -eq 0;  then
832                    exec_time=\`echo \$exec_time |   \\
833                                sed -e 's/%%/%/g'    \\
834                                    -e 's/%$//'      \\
835                                    -e 's/%/, /g'    \\
836                                    -e 's/[ ] */ /g' \\
837                                    -e 's/^.*\(Maximum execution [0-9][0-9]* is exceeded\).*$/\1/' \\
838                                    -e 's/^.*\(real [0-9][0-9]*[.][0-9][0-9]*\)/\1/' \\
839                                    -e 's/\(sys [0-9][0-9]*[.][0-9][0-9]*\).*/\1/'\`
840                else
841                    exec_time='unparsable timing stats'
842                fi
843               
844                rm -f \$x_log
845
                # Analyze check tool output
847                case "\$tool_lo" in
848                    valgrind | helgrind ) 
849                               summary_all=\`grep -c 'ERROR SUMMARY:' \$x_test_out\`
850                               summary_ok=\`grep -c 'ERROR SUMMARY: 0 ' \$x_test_out\`
                               # The number of such lines can be zero;
                               # in some cases Valgrind's summary can be lost.
853                               if test \$summary_all -ne \$summary_ok; then
854                                   result=254
855                               fi
856                               ;;
857                    * )
                               # GCC sanitizers can fail with a 0 exit code
859                               if \$is_max_debug; then
860                                   grep '==ERROR: AddressSanitizer:' \$x_test_out > /dev/null 2>&1 
861                                   if test \$? -eq 0;  then
862                                      result=253
863                                   fi
864                               fi
865                    
866                esac
867
                # Write the result of the test into its output file
869                echo "Start time   : \$start_time"   >> \$x_test_out
870                echo "Stop time    : \$stop_time"    >> \$x_test_out
871                echo "Load averages: \$load_avg"     >> \$x_test_out
872                echo >> \$x_test_out
873                echo "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" >> \$x_test_out
874                echo "@@@ EXIT CODE: \$result" >> \$x_test_out
875
876                if test -f "\$corefile"; then
877                    echo "@@@ CORE DUMPED" >> \$x_test_out
878                    if test -d "\$bin_dir" -a -f "\$bin_dir/\$x_test"; then
879                        mv "\$corefile" "\$bin_dir/\$x_test.core"
880                    else
881                        rm -f "\$corefile"
882                    fi
883                fi
884
885                echo "t_cmd=\\"\$x_cmd\\"" > \$x_info
886                echo "t_test_out=\\"\$x_test_out\\"" >> \$x_info
887                echo "t_exec_time=\\"\$exec_time\\"" >> \$x_info
888                echo "t_result=\\"\$result\\"" >> \$x_info
889
890                # Write results
891                if \$is_automated; then
892                    if grep NCBI_UNITTEST_DISABLED \$x_test_out >/dev/null; then
893                        echo "DIS" >> "\$x_test_rep"
894
895                    elif grep NCBI_UNITTEST_SKIPPED \$x_test_out >/dev/null; then
896                        echo "SKP" >> "\$x_test_rep"
897
898                    elif grep NCBI_UNITTEST_TIMEOUTS_BUT_NO_ERRORS \$x_test_out >/dev/null; then
899                        echo "TO" >> "\$x_test_rep"
900
901                    elif echo "\$exec_time" | egrep 'Maximum execution .* is exceeded' >/dev/null || egrep "Maximum execution .* is exceeded" \$x_test_out >/dev/null; then
902                        echo "TO" >> "\$x_test_rep"
903
904                    elif test \$result -eq 0; then
905                        echo "OK" >> "\$x_test_rep"
906
907                    else
908                        echo "ERR" >> "\$x_test_rep"
909                    fi
910                    echo "\$start_time" >> "\$x_test_rep"
911                    echo "\$result"     >> "\$x_test_rep"
912                    echo "\$exec_time"  >> "\$x_test_rep"
913                    echo "\$x_authors"  >> "\$x_test_rep"
914                    echo "\$load_avg"   >> "\$x_test_rep"
915                    echo "\$runid"      >> "\$x_test_rep"
916                fi
917
            else  # Run the test if it exists
919                if \$is_run; then
920                    echo "t_cmd=\\"\$x_cmd\\"" > \$x_info
921                    echo "t_test_out=\\"\$x_test_out\\"" >> \$x_info
922                    if \$is_automated; then
923                        echo "ABS"         >> "\$x_test_rep"
924                        echo "\`date +'$x_date_format'\`" >> "\$x_test_rep"
925                        echo "\$x_authors" >> "\$x_test_rep"
926                    fi
927                fi
928            fi
929
930        else  # Check existence of the test's application directory
931            if \$is_run; then
932                # Test application is absent
933                echo "t_cmd=\\"\$x_cmd\\"" > \$x_info
934                echo "t_test_out=\\"\$x_test_out\\"" >> \$x_info
935                if \$is_automated; then
936                    echo "ABS"         >> "\$x_test_rep"
937                    echo "\`date +'$x_date_format'\`" >> "\$x_test_rep"
938                    echo "\$x_authors" >> "\$x_test_rep"
939                fi
940            fi
941        fi
942
        # Load test results into the database and AppLog immediately after a test.
        # Test results are always loaded for automated builds on a 'run' command.
945        
946        if \$is_run && \$is_db_load; then
947            while ! mkdir "\$checkdir/~test_stat_load.lock" 2>/dev/null
948            do
949	            sleep 1
950            done
951           if test -n "\$saved_phid";  then
952              NCBI_LOG_HIT_ID=\$saved_phid
953              export NCBI_LOG_HIT_ID
954           fi
955           case \`uname -s\` in
956              CYGWIN* )
957                test_stat_load "\$(cygpath -w "\$x_test_rep")" "\$(cygpath -w "\$x_test_out")" "\$(cygpath -w "\$x_boost_rep")" "\$(cygpath -w "\$top_srcdir/build_info")" >> "\${checkdir}/test_stat_load.log" 2>&1 ;;
958              IRIX* )
959                test_stat_load.sh "\$x_test_rep" "\$x_test_out" "\$x_boost_rep" "\$top_srcdir/build_info" >> "\${checkdir}/test_stat_load.log" 2>&1 ;;
960              * )
961                test_stat_load "\$x_test_rep" "\$x_test_out" "\$x_boost_rep" "\$top_srcdir/build_info" >> "\${checkdir}/test_stat_load.log" 2>&1 ;;
962            esac
963            echo >> "\${checkdir}/test_stat_load.log" 2>&1
964            rm -rf "\$checkdir/~test_stat_load.lock"
965        fi
        if \$is_run  &&  test -n "\$saved_phid"; then
967            rm -f \$saved_phid* > /dev/null 2>&1
968        fi
969        
970    done  # Run test under all specified check tools   
971
972    if test -n "\$x_resources"; then
973        rev_resources=""
974        for r in \$x_resources ; do
975            rev_resources="\$r \${rev_resources}"
976        done
977        for r in \$rev_resources ; do
978            rm -rf "\$checkdir/~\$r.lock"
979        done
980    fi
981    if test -f \$x_info; then 
982        mv \$x_info "\${x_done}.done"
983    else
984        touch "\${x_done}.done"
985    fi
986    rm "\${x_done}.in_progress"
987    rm -rf "\${x_work_dir}" 2>/dev/null
988    if test \$? -ne 0;  then
989        sleep 1
990        rm -rf "\${x_work_dir}" 2>/dev/null
991    fi
992}
993
994MailToAuthors()
995{
   # The limit on the size of the email to send, in kilobytes
997   mail_limit=1024
998
999   test -z "\$sendmail"  &&  return 0
1000   test -z "\$1"  &&  return 0
1001   x_authors=""
1002   for author in \$1; do
1003       x_authors="\$x_authors \$author\$domain"
1004   done
1005   x_logfile="\$2"
1006   tmp="./check_mailtoauthors.tmp.\$\$.\`basename \$x_logfile\`"
1007   
1008   echo '-----------------------'
1009   echo "Send results of the test \$x_app to \$x_authors"
1010   echo '-----------------------'
1011        echo "To: \$x_authors"
1012        echo "Subject: [WATCHERS] \$x_app | \$signature"
1013        echo
1014        echo \$x_cmd
1015        echo
1016   echo "cmd = \$sendmail \$x_authors"
1017   
1018   COMMON_LimitTextFileSize \$x_logfile \$tmp \$mail_limit
1019   {
1020        echo "To: \$x_authors"
1021        echo "Subject: [WATCHERS] \$x_app | \$signature"
1022        echo
1023        echo \$x_cmd
1024        echo
1025        cat \$tmp
1026        echo 
1027        cat \$top_srcdir/build_info
1028   } | \$sendmail \$x_authors
1029   echo '-----------------------'
1030   rm -f \$tmp > /dev/null
1031}
1032
1033ProcessDone()
1034{
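    # Watch the check directory for per-test "*.started" and "*.done" marker files,
    # print each test's status line (OK, ERR, TO, DIS, SKP, or ABS), append it to
    # the log, update the counters, and finish once "~DONE" appears.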
1035    while true; do
1036        sleep 2
1037        p_done=\`ls \${checkdir}/*.started 2>/dev/null\`
1038        if test -n "\$p_done"; then
1039            for p_file in \$p_done; do
1040                cat \$p_file
1041                rm \$p_file
1042            done
1043        fi
1044        p_done=\`ls \${checkdir}/*.done 2>/dev/null\`
1045        if test -n "\$p_done"; then
1046            for p_file in \$p_done; do
1047                source \$p_file
1048                if test ! -e "\$t_test_out"; then
1049                    echo "ABS --  \$t_cmd"
1050                    echo "ABS --  \$t_cmd" >> \$res_log
1051                    count_absent=\`expr \$count_absent + 1\`
1052                    rm -f \$p_file
1053                    continue
1054                fi
1055                echo "\$t_test_out" >> \$res_journal
1056                count_total=\`expr \$count_total + 1\`
                # Write the result to the screen and to the log
1058                if grep NCBI_UNITTEST_DISABLED \$t_test_out >/dev/null; then
1059                    echo "DIS --  \$t_cmd"
1060                    echo "DIS --  \$t_cmd" >> \$res_log
1061                    count_absent=\`expr \$count_absent + 1\`
1062
1063                elif grep NCBI_UNITTEST_SKIPPED \$t_test_out >/dev/null; then
1064                    echo "SKP --  \$t_cmd"
1065                    echo "SKP --  \$t_cmd" >> \$res_log
1066                    count_absent=\`expr \$count_absent + 1\`
1067
1068                elif grep NCBI_UNITTEST_TIMEOUTS_BUT_NO_ERRORS \$t_test_out >/dev/null; then
1069                    echo "TO  --  \$t_cmd"
1070                    echo "TO  --  \$t_cmd" >> \$res_log
1071                    count_timeout=\`expr \$count_timeout + 1\`
1072
1073                elif echo "\$t_exec_time" | egrep 'Maximum execution .* is exceeded' >/dev/null || egrep "Maximum execution .* is exceeded" \$t_test_out >/dev/null; then
1074                    echo "TO  --  \$t_cmd     (\$t_exec_time)"
1075                    echo "TO  --  \$t_cmd     (\$t_exec_time)" >> \$res_log
1076                    count_timeout=\`expr \$count_timeout + 1\`
1077
1078                elif test \$t_result -eq 0; then
1079                    echo "OK  --  \$t_cmd     (\$t_exec_time)"
1080                    echo "OK  --  \$t_cmd     (\$t_exec_time)" >> \$res_log
1081                    count_ok=\`expr \$count_ok + 1\`
1082
1083                else
1084                    echo "ERR [\$t_result] --  \$t_cmd     (\$t_exec_time)"
1085                    echo "ERR [\$t_result] --  \$t_cmd     (\$t_exec_time)" >> \$res_log
1086                    count_err=\`expr \$count_err + 1\`
1087                fi
1088                rm -f \$p_file
1089            done
1090        else
1091            if test -e "\$checkdir/~DONE"; then
1092                break
1093            fi
1094        fi
1095    done
1096    rm "\$checkdir/~DONE"
1097}
1098
1099AddJob()
1100{
1101    a_pid="\$1"
1102    a_name="\$2"
1103    a_id="\$3"
1104
1105    if test "\${a_pid}" -gt 0; then
1106        echo "        Start \$a_id: \${a_name} (\$a_pid)" > "\$checkdir/~\$a_name.started"
1107        while test ! -e "\$checkdir/~RUN_CHECKS.next"; do
1108            if test -e "\$checkdir/~SERIAL.lock"; then
1109                sleep 2
1110            else
1111                sleep .1
1112            fi
1113        done
1114        rm "\$checkdir/~RUN_CHECKS.next"
1115    fi
1116    while test -e "\$checkdir/~SERIAL.lock"; do
1117        sleep 2
1118    done
1119
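    # Cap the number of concurrently running tests: NTEST_PARALLEL_LEVEL takes
    # precedence over CTEST_PARALLEL_LEVEL, then NUMBER_OF_PROCESSORS; the
    # fallback is 4. A zero cap (the final AddJob call) just signals completion.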
1120    if test "\${a_pid}" -gt 0; then
1121        if test -n "\$NTEST_PARALLEL_LEVEL"; then
1122            a_maxjob=\$NTEST_PARALLEL_LEVEL
1123        elif test -n "\$CTEST_PARALLEL_LEVEL"; then
1124            a_maxjob=\$CTEST_PARALLEL_LEVEL
1125        elif test -n "\$NUMBER_OF_PROCESSORS"; then
1126            a_maxjob=\$NUMBER_OF_PROCESSORS
1127        else
1128            a_maxjob=4
1129        fi
1130    else
1131        a_maxjob=0
1132    fi
1133
1134    a_run=\`ls \${checkdir}/*.in_progress 2>/dev/null\`
1135    a_run=\`echo \$a_run | wc -w | sed -e 's/ //g'\`
1136    if test "\${a_run}" -ne "\${a_run}"; then
1137echo "error:  1 a_run = \$a_run"
1138        a_run=0
1139    fi
1140
1141    while test "\$a_run" -ge "\$a_maxjob"; do
1142        sleep 1
1143        a_run=\`ls \${checkdir}/*.in_progress 2>/dev/null\`
1144        a_run=\`echo \$a_run | wc -w | sed -e 's/ //g'\`
1145        if test "\${a_run}" -ne "\${a_run}"; then
1146echo "error:  2 a_run = \$a_run"
1147            break
1148        fi
1149        if test "\${a_run}" -le 0; then
1150            break
1151        fi
1152    done
1153    if test "\${a_maxjob}" -le 0; then
1154        touch "\$checkdir/~DONE"
1155    fi
1156}
1157
1158#//////////////////////////////////////////////////////////////////////////
1159# Run tests
1160
1161RunJobs()
1162{
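    # Read the test list row by row, convert the " ____ " field delimiters to ";",
    # start each test with RunTest in the background, and throttle the number of
    # simultaneous jobs through AddJob.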
1163    res_list=\$1
1164    x_i=0
1165    while read x_row; do
1166        x_row=\`echo "\$x_row" | sed -e 's/ ____ /;/g' | sed -e 's/ ____/;/g' | sed -e 's/ ;/;/g'\`
1167        IFS=';'; arrIN=(\$x_row); unset IFS;
1168        x_name=\${arrIN[4]};
1169        x_i=\`expr \$x_i + 1\`
1170
1171        RunTest "\$x_row" "\$x_i" &
1172        AddJob "\$!" "\$x_name" "\$x_i"
1173    done < "\$res_list"
1174    AddJob "0" "" "" ""
1175}
1176
rm -rf \${checkdir}/~* 2>/dev/null
1178locks=\`ls -d \${checkdir}/~*.lock 2>/dev/null | wc -w | sed -e 's/ //g'\`
1179if test \$locks -ne 0; then
1180  echo "ERROR: there are locks in \${checkdir}" 1>&2
1181  exit 1
1182fi
1183x_test=""
1184x_TestsTotal=\`cat "\$res_list" | wc -l | sed -e 's/ //g'\`
1185x_START=\$SECONDS
1186RunJobs "\$res_list" &
1187ProcessDone
1188x_DURATION=\`expr \$SECONDS - \$x_START\`
1189echo
1190echo "Total Test time (real) = \${x_DURATION} sec"
1191
1192if \$is_run; then
   # Write a summary of the test run
1194   echo
1195   echo "Succeeded : \$count_ok"
1196   echo "Timeout   : \$count_timeout"
1197   echo "Failed    : \$count_err"
1198   echo "Absent    : \$count_absent"
1199   echo
1200   if test \$count_err -eq 0; then
1201      echo
1202      echo "******** ALL TESTS COMPLETED SUCCESSFULLY ********"
1203      echo
1204   fi
1205fi
1206
1207if test \$count_err -eq 0; then
1208   touch \${checkdir}/check.success
1209else 
1210   touch \${checkdir}/check.failed
1211fi
1212
1213exit \$count_err
1214EOF
1215
# Make the generated check script executable
1217chmod a+x "$x_out"
1218
1219exit 0
1220