1 # Copyright (c) 2012 OPEN CASCADE SAS
3 # The content of this file is subject to the Open CASCADE Technology Public
4 # License Version 6.5 (the "License"). You may not use the content of this file
5 # except in compliance with the License. Please obtain a copy of the License
6 # at http://www.opencascade.org and read it completely before using this file.
8 # The Initial Developer of the Original Code is Open CASCADE S.A.S., having its
9 # main offices at: 1, place des Freres Montgolfier, 78280 Guyancourt, France.
11 # The Original Code and all software distributed under the License is
12 # distributed on an "AS IS" basis, without warranty of any kind, and the
13 # Initial Developer hereby disclaims all such warranties, including without
14 # limitation, any warranties of merchantability, fitness for a particular
15 # purpose or non-infringement. Please see the License for the specific terms
16 # and conditions governing the rights and limitations under the License.
18 ############################################################################
19 # This file defines scripts for execution of OCCT tests.
20 # It should be loaded automatically when DRAW is started, and provides
21 # three top-level commands: 'test', 'testgrid', and 'testdiff'.
22 # See OCCT Tests User Guide for description of the test system.
24 # Note: procedures with names starting with underscore are for internal use
25 # inside the test system.
26 ############################################################################
# Default verbose level for command _run_test; guard against resetting a value
# possibly set by the user before this file is sourced
if { ! [info exists _tests_verbose] } { set _tests_verbose 0 }

# regexp for parsing test case results in summary log;
# matches lines of the form "CASE <group> <grid> <case>: <status> <comment>"
set _test_case_regexp {^CASE\s+([\w.-]+)\s+([\w.-]+)\s+([\w.-]+)\s*:\s*([\w]+)(.*)}
34 # Basic command to run indicated test case in DRAW
36 Run specified test case
37 Use: test group grid casename [echo=0]
38 - If echo is set to 0 (default), log is stored in memory and only summary
39 is output (the log can be obtained with command "dlog get")
40 - If echo is set to 1 or "-echo", all commands and results are echoed
41 immediately, but log is not saved and summary is not produced
# Run a single test case in the current DRAW session.
# Arguments: group, grid, casename identify the test; echo (0/1/-echo) controls
# whether output is echoed immediately (no log kept) or collected via dlog.
proc test {group grid casename {echo 0}} {
    # get test case paths (will raise error if input is invalid)
    _get_test $group $grid $casename dir gridname casefile

    # if echo specified as "-echo", convert it to bool
    if { "$echo" == "-echo" } { set echo t }

    # run test
    uplevel _run_test $dir $group $gridname $casefile $echo

    # check log; when echo mode is on, the log is not collected,
    # so checking is only possible in non-echo mode
    if { ! $echo } {
        _check_log $dir $group $gridname $casename [dlog get]
    }

    return
}
61 # Basic command to run indicated test case in DRAW
63 Run all tests, or specified group, or one grid
64 Use: testgrid [group [grid]] [options...]
66 -parallel N: run N parallel processes (default is number of CPUs, 0 to disable)
67 -refresh N: save summary logs every N seconds (default 60, minimal 1, 0 to disable)
68 -outdir dirname: set log directory (should be empty or non-existing)
69 -overwrite: force writing logs in existing non-empty directory
70 -xml filename: write XML report for Jenkins (in JUnit-like format)
# Run all tests, or all tests of one group, or one grid, each test case in a
# separate DRAWEXE process (optionally in parallel via the Thread package).
# Results are collected under $logdir; a summary (and optional JUnit-like XML
# report) is produced at the end.
proc testgrid {args} {
    global env tcl_platform _tests_verbose

    ######################################################
    # check arguments
    ######################################################

    # check that environment variable defining paths to test scripts is defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"
    }

    # treat options; defaults first
    set parallel [_get_nb_cpus]
    set refresh 60
    set logdir ""
    set overwrite 0
    set xmlfile ""
    for {set narg 0} {$narg < [llength $args]} {incr narg} {
        set arg [lindex $args $narg]

        # number of parallel processes
        if { $arg == "-parallel" } {
            incr narg
            if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
                set parallel [expr [lindex $args $narg]]
            } else {
                error "Option -parallel requires argument"
            }
            continue
        }

        # refresh period for summary logs
        if { $arg == "-refresh" } {
            incr narg
            if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
                set refresh [expr [lindex $args $narg]]
            } else {
                error "Option -refresh requires argument"
            }
            continue
        }

        # output directory
        if { $arg == "-outdir" } {
            incr narg
            if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
                set logdir [lindex $args $narg]
            } else {
                error "Option -outdir requires argument"
            }
            continue
        }

        # allow overwrite logs
        if { $arg == "-overwrite" } {
            set overwrite 1
            continue
        }

        # XML report file name (optional argument; default name if omitted)
        if { $arg == "-xml" } {
            incr narg
            if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
                set xmlfile [lindex $args $narg]
            } else {
                incr narg -1
                set xmlfile ""
            }
            if { $xmlfile == "" } {
                set xmlfile TESTS-summary.xml
            }
            continue
        }

        # unsupported option
        if { [regexp {^-} $arg] } {
            error "Error: unsupported option \"$arg\""
        }

        # treat arguments not recognized as options as group and grid names
        if { ! [info exists groupname] } {
            set groupname $arg
        } elseif { ! [info exists gridname] } {
            set gridname $arg
        } else {
            error "Error: cannot interpret argument $narg ($arg): both group and grid names are already defined by previous args!"
        }
    }

    # check that target log directory is empty or does not exist
    set logdir [file normalize [string trim $logdir]]
    if { $logdir == "" } {
        # if specified logdir is empty string, generate unique name like
        # results_<branch>_<timestamp>
        set prefix "results"
        if { ! [catch {exec git branch} gitout] &&
             [regexp {[*] ([\w]+)} $gitout res branch] } {
            set prefix "${prefix}_$branch"
        }
        set logdir "${prefix}_[clock format [clock seconds] -format {%Y-%m-%dT%H%M}]"
        set logdir [file normalize $logdir]
    }
    if { [file isdirectory $logdir] && ! $overwrite && ! [catch {glob -directory $logdir *}] } {
        error "Error: Specified log directory \"$logdir\" is not empty; please clean it before running tests"
    }
    if { [catch {file mkdir $logdir}] || ! [file writable $logdir] } {
        error "Error: Cannot create directory \"$logdir\", or it is not writable"
    }

    ######################################################
    # prepare list of tests to be performed
    ######################################################

    # list of tests, each defined by a list of:
    # test scripts directory
    # group (subfolder) name
    # grid (subfolder) name
    # test case name
    # path to test case file
    set tests_list {}

    # log of the scanning phase (overwritten when the run starts)
    set log ""

    # iterate by all script paths
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # protection against empty paths
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        if { $_tests_verbose > 0 } { _log_and_puts log "Examining tests directory $dir" }

        # check that directory exists
        if { ! [file isdirectory $dir] } {
            _log_and_puts log "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
            continue
        }

        # if test group is specified, check that directory with given name exists in this dir
        # if not, continue to the next test dir
        if { [info exists groupname] && $groupname != "" } {
            if { [file isdirectory $dir/$groupname] } {
                set groups $groupname
            } else {
                continue
            }
        } else {
            # else search all directories in the current dir
            if [catch {glob -directory $dir -tail -types d *} groups] { continue }
        }

        # iterate by groups
        if { $_tests_verbose > 0 } { _log_and_puts log "Groups to be executed: $groups" }
        foreach group [lsort -dictionary $groups] {
            if { $_tests_verbose > 0 } { _log_and_puts log "Examining group directory $group" }

            # file grids.list must exist: it defines sequence of grids in the group
            if { ! [file exists $dir/$group/grids.list] } {
                _log_and_puts log "Warning: directory $dir/$group does not contain file grids.list, skipped"
                continue
            }

            # read grids.list file and make a list of grids to be executed
            set gridlist {}
            set nline 0
            set fd [open $dir/$group/grids.list]
            while { [gets $fd line] >= 0 } {
                incr nline

                # skip comments and empty lines
                if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                if { [string trim $line] == "" } { continue }

                # get grid id and name
                if { ! [regexp "^\(\[0-9\]+\)\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridid grid] } {
                    _log_and_puts log "Warning: cannot recognize line $nline in file $dir/$group/grids.list as \"gridid gridname\"; ignored"
                    continue
                }

                # if specific grid is requested, check that it is present; otherwise make complete list
                if { ! [info exists gridname] || $gridname == "" || $gridname == $gridid || $gridname == $grid } {
                    lappend gridlist $grid
                }
            }
            close $fd

            # iterate by all grids
            foreach grid $gridlist {

                # check if this grid is aliased to another one
                set griddir $dir/$group/$grid
                if { [file exists $griddir/cases.list] } {
                    set fd [open $griddir/cases.list]
                    if { [gets $fd line] >= 0 } {
                        set griddir [file normalize $dir/$group/$grid/[string trim $line]]
                    }
                    close $fd
                }

                # check if grid directory actually exists
                if { ! [file isdirectory $griddir] } {
                    _log_and_puts log "Error: tests directory for grid $grid ($griddir) is missing; skipped"
                    continue
                }

                # create directory for logging test results
                if { $logdir != "" } { file mkdir $logdir/$group/$grid }

                # iterate by all tests in the grid directory
                if { [catch {glob -directory $griddir -type f *} testfiles] } { continue }
                foreach casefile [lsort -dictionary $testfiles] {
                    # filter out begin and end files
                    set casename [file tail $casefile]
                    if { $casename == "begin" || $casename == "end" } { continue }

                    lappend tests_list [list $dir $group $grid $casename $casefile]
                }
            }
        }
    }
    if { [llength $tests_list] < 1 } {
        error "Error: no tests are found, check you input arguments and variable CSF_TestScriptsPath!"
    }

    ######################################################
    # run tests
    ######################################################

    # log command arguments and environment
    set log "Command: testgrid $args\nHost: [info hostname]\nStarted on: [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}]\n"
    set log "$log\nEnvironment:\n"
    foreach envar [array names env] {
        set log "$log$envar=\"$env($envar)\"\n"
    }
    set log "$log\n"

    set refresh_timer [clock seconds]
    uplevel dchrono _timer reset
    uplevel dchrono _timer start

    # if parallel execution is requested, allocate thread pool
    if { $parallel > 0 } {
        if { ! [info exists tcl_platform(threaded)] || [catch {package require Thread}] } {
            _log_and_puts log "Warning: Tcl package Thread is not available, running in sequential mode"
            set parallel 0
        } else {
            set worker [tpool::create -minworkers $parallel -maxworkers $parallel]
            # suspend the pool until all jobs are posted, to prevent blocking of the process
            # of starting / processing jobs by running threads
            catch {tpool::suspend $worker}
            if { $_tests_verbose > 0 } { _log_and_puts log "Executing tests in (up to) $parallel threads" }
        }
    }

    # start test cases
    foreach test_def $tests_list {
        set dir      [lindex $test_def 0]
        set group    [lindex $test_def 1]
        set grid     [lindex $test_def 2]
        set casename [lindex $test_def 3]
        set casefile [lindex $test_def 4]

        # command to set tests for generation of image in results directory
        set imgdir_cmd ""
        if { $logdir != "" } { set imgdir_cmd "set imagedir $logdir/$group/$grid" }

        # prepare command file for running test case in separate instance of DRAW
        set fd_cmd [open $logdir/$group/$grid/${casename}.tcl w]
        puts $fd_cmd "$imgdir_cmd"
        puts $fd_cmd "set test_image $casename"
        puts $fd_cmd "_run_test $dir $group $grid $casefile t"

        # use dlog command to obtain complete output of the test when it is absent (i.e. since OCCT 6.6.0)
        # note: this is not needed if echo is set to 1 in call to _run_test above
        if { ! [catch {dlog get}] } {
            puts $fd_cmd "puts \[dlog get\]"
        } else {
            # else try to use old-style QA_ variables to get more output...
            set env(QA_print_command) 1
        }

        # final 'exit' is needed when running on Linux under VirtualGl
        puts $fd_cmd "exit"
        close $fd_cmd

        # command to run DRAW with a command file;
        # note that empty string is passed as standard input to avoid possible
        # hang-ups due to waiting for stdin of the launching process
        set command "exec <<{} DRAWEXE -f $logdir/$group/$grid/${casename}.tcl"

        # alternative method to run without temporary file; disabled as it needs too many backslashes
        # set command "exec <<\"\" DRAWEXE -c $imgdir_cmd\\\; set test_image $casename\\\; \
        #              _run_test $dir $group $grid $casefile\\\; \
        #              puts \\\[dlog get\\\]\\\; exit"

        # run test case, either in parallel or sequentially
        if { $parallel > 0 } {
            # parallel execution: post job and remember its identification
            set job [tpool::post -nowait $worker "catch \"$command\" output; return \$output"]
            set job_def($job) [list $logdir $dir $group $grid $casename]
        } else {
            # sequential execution
            catch {eval $command} output
            _log_test_case $output $logdir $dir $group $grid $casename log

            # update summary log with requested period
            if { $logdir != "" && $refresh > 0 && [expr [clock seconds] - $refresh_timer > $refresh] } {
                # update and dump summary
                _log_summarize $logdir $log
                set refresh_timer [clock seconds]
            }
        }
    }

    # get results of started threads
    if { $parallel > 0 } {
        catch {tpool::resume $worker}
        while { [llength [array names job_def]] > 0 } {
            foreach job [tpool::wait $worker [array names job_def]] {
                eval _log_test_case \[tpool::get $worker $job\] $job_def($job) log
                unset job_def($job)
            }

            # update summary log with requested period
            if { $logdir != "" && $refresh > 0 && [clock seconds] > $refresh_timer + $refresh } {
                _log_summarize $logdir $log
                set refresh_timer [clock seconds]
            }
        }
        # release thread pool
        tpool::release $worker
    }

    uplevel dchrono _timer stop
    set time [lindex [split [uplevel dchrono _timer show] "\n"] 0]

    ######################################################
    # output summary logs and exit
    ######################################################

    _log_summarize $logdir $log $time
    if { $logdir != "" } {
        puts "Detailed logs are saved in $logdir"
    }
    if { $logdir != "" && $xmlfile != "" } {
        # XML output file is assumed relative to log dir unless it is absolute
        if { [file pathtype $xmlfile] == "relative" } {
            set xmlfile [file normalize $logdir/$xmlfile]
        }
        _log_xml_summary $logdir $xmlfile $log 0
        puts "XML summary is saved to $xmlfile"
    }

    return
}
428 # Procedure to compare results of two runs of test cases
430 Compare results of two executions of tests (CPU times, ...)
431 Use: testdiff dir1 dir2 [options...]
432 Where dir1 and dir2 are directories containing logs of two test runs.
434 -save filename: save resulting log in specified file
435 -subdir name: compare only specified subdirectory (can be nested)
436 -status {same|ok|all}: filter cases for comparing by their status:
437 same - only cases with same status are compared (default)
438 ok - only cases with OK status in both logs are compared
439 all - results are compared regardless of status
441 1 - output only differences
442 2 - output list of logs and directories present in one of dirs only
443 3 - (default) output progress messages
# Compare results of two executions of tests (CPU times, ...).
# dir1 and dir2 are directories containing logs of two test runs; options
# control saving of the result (-save), subtree selection (-subdir),
# status filtering (-status) and verbosity (-verbose).
proc testdiff {dir1 dir2 args} {
    if { "$dir1" == "$dir2" } {
        error "Input directories are the same"
    }

    ######################################################
    # check arguments
    ######################################################

    # treat options; defaults first
    set logfile ""
    set basename ""
    set status "same"
    set verbose 3
    for {set narg 0} {$narg < [llength $args]} {incr narg} {
        set arg [lindex $args $narg]

        # log file name
        if { $arg == "-save" } {
            incr narg
            if { $narg < [llength $args] } {
                set logfile [lindex $args $narg]
            } else {
                error "Error: Option -save must be followed by log file name"
            }
            continue
        }

        # subdirectory to compare
        if { $arg == "-subdir" } {
            incr narg
            if { $narg < [llength $args] } {
                set basename [lindex $args $narg]
            } else {
                error "Error: Option -subdir must be followed by subdirectory path"
            }
            continue
        }

        # status filter
        if { $arg == "-status" } {
            incr narg
            if { $narg < [llength $args] } {
                set status [lindex $args $narg]
            } else { set status "" }
            if { "$status" != "same" && "$status" != "all" && "$status" != "ok" } {
                error "Error: Option -status must be followed by one of \"same\", \"all\", or \"ok\""
            }
            continue
        }

        # verbose level
        if { $arg == "-verbose" } {
            incr narg
            if { $narg < [llength $args] } {
                set verbose [expr [lindex $args $narg]]
            }
            continue
        }

        # any other argument (option-like or not) is not supported
        # if { [regexp {^-} $arg] } {
        error "Error: unsupported option \"$arg\""
    }

    # run diff procedure (recursive)
    set log ""
    _test_diff $dir1 $dir2 $basename $status $verbose log

    # save result to log file
    if { "$logfile" != "" } {
        _log_save $logfile $log
    }

    return
}
521 # Internal procedure to find test case indicated by group, grid, and test case names;
523 # - dir: path to the base directory of the tests group
524 # - gridname: actual name of the grid
525 # - casefile: path to the test case script
526 # if no such test is found, raises error with appropriate message
# Internal procedure to find the test case indicated by group, grid and case
# names; on success fills (via upvar) in the caller's frame:
# - dir: path to the base directory of the tests group
# - gridname: actual name of the grid (resolving a possible alias)
# - casefile: path to the test case script
# If no such test is found, raises an error with an appropriate message.
proc _get_test {group grid casename _dir _gridname _casefile} {
    upvar $_dir dir
    upvar $_gridname gridname
    upvar $_casefile casefile

    global env

    # check that environment variable defining paths to test scripts is defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"
    }

    # iterate by all script paths
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # protection against empty paths
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        # check that directory exists
        if { ! [file isdirectory $dir] } {
            puts "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
            continue
        }

        # check if test group with given name exists in this dir
        # if not, continue to the next test dir
        if { ! [file isdirectory $dir/$group] } { continue }

        # check that grid with given name (possibly alias) exists; stop otherwise
        set gridname $grid
        if { ! [file isdirectory $dir/$group/$gridname] } {
            # check if grid is named by alias rather than by actual name
            if { [file exists $dir/$group/grids.list] } {
                set fd [open $dir/$group/grids.list]
                while { [gets $fd line] >= 0 } {
                    if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                    if { [regexp "^$grid\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridname] } {
                        break
                    }
                }
                close $fd
            }
        }
        if { ! [file isdirectory $dir/$group/$gridname] } { continue }

        # get actual file name of the script; stop if it cannot be found
        set casefile $dir/$group/$gridname/$casename
        if { ! [file exists $casefile] } {
            # check if this grid is aliased to another one
            if { [file exists $dir/$group/$gridname/cases.list] } {
                set fd [open $dir/$group/$gridname/cases.list]
                if { [gets $fd line] >= 0 } {
                    set casefile [file normalize $dir/$group/$gridname/[string trim $line]/$casename]
                }
                close $fd
            }
        }
        if { [file exists $casefile] } {
            # found everything; upvar-ed variables carry the result
            return
        }
    }

    # coming here means specified test is not found; report error
    error [join [list "Error: test case $group / $grid / $casename is not found in paths listed in variable" \
                      "CSF_TestScriptsPath (current value is \"$env(CSF_TestScriptsPath)\")"] "\n"]
}
596 # Internal procedure to run test case indicated by base directory,
597 # grid and grid names, and test case file path.
598 # The log can be obtained by command "dlog get".
# Internal procedure to run a test case indicated by base directory,
# group and grid names, and test case file path.
# When echo is false, all output is collected via dlog (temporarily replacing
# the standard puts command); the log can then be obtained by "dlog get".
proc _run_test {scriptsdir group gridname casefile echo} {
    global env

    # start timer
    uplevel dchrono _timer reset
    uplevel dchrono _timer start

    # enable commands logging; switch to old-style mode if dlog command is not present
    set dlog_exists 1
    if { [catch {dlog reset}] } {
        set dlog_exists 0
    } elseif { $echo } {
        decho on
    } else {
        dlog reset
        dlog on
        rename puts puts-saved
        proc puts args {
            global _tests_verbose

            # log only output to stdout and stderr, not to file!
            if {[llength $args] > 1} {
                set optarg [lindex $args end-1]
                if { $optarg == "stdout" || $optarg == "stderr" || $optarg == "-newline" } {
                    dlog add [lindex $args end]
                }
            } else {
                dlog add [lindex $args end]
            }

            # reproduce original puts
            if { $_tests_verbose } {
                eval puts-saved $args
            }
        }
    }

    # evaluate test case; the group/grid begin and end scripts are sourced
    # around the case script, all in the caller's variable scope
    if [catch {
        uplevel set casename [file tail $casefile]
        uplevel set groupname $group
        uplevel set gridname $gridname

        if { [file exists $scriptsdir/$group/begin] } {
            puts "Executing $scriptsdir/$group/begin..."; flush stdout
            uplevel source $scriptsdir/$group/begin
        }
        if { [file exists $scriptsdir/$group/$gridname/begin] } {
            puts "Executing $scriptsdir/$group/$gridname/begin..."; flush stdout
            uplevel source $scriptsdir/$group/$gridname/begin
        }

        puts "Executing $casefile..."; flush stdout
        uplevel source $casefile

        if { [file exists $scriptsdir/$group/$gridname/end] } {
            puts "Executing $scriptsdir/$group/$gridname/end..."; flush stdout
            uplevel source $scriptsdir/$group/$gridname/end
        }
        if { [file exists $scriptsdir/$group/end] } {
            puts "Executing $scriptsdir/$group/end..."; flush stdout
            uplevel source $scriptsdir/$group/end
        }
    } res] {
        puts "Tcl Exception: $res"
    }

    # stop logging and restore the original puts
    if { $dlog_exists } {
        if { $echo } {
            decho off
        } else {
            rename puts {}
            rename puts-saved puts
            dlog off
        }
    }

    # stop cpulimit killer if armed by the test
    cpulimit

    # add timing info to the log
    uplevel dchrono _timer stop
    set time [uplevel dchrono _timer show]
    if [regexp -nocase {CPU user time:[ \t]*([0-9.e-]+)} $time res cpu] {
        if { $dlog_exists && ! $echo } {
            dlog add "TOTAL CPU TIME: $cpu sec"
        } else {
            puts "TOTAL CPU TIME: $cpu sec"
        }
    }
}
692 # Internal procedure to check log of test execution and decide if it passed or failed
# Internal procedure to check the log of a test execution and decide whether it
# passed or failed. Loads 'bad word' patterns from parse.rules files, honours
# TODO statements (known problems, possibly platform-specific or unstable),
# and reports the final status line "CASE group grid case: status" via
# _log_and_puts into the optional summary variable; the optional html_log
# variable receives an HTML-highlighted copy of the log.
proc _check_log {dir group gridname casename log {_summary {}} {_html_log {}}} {
    global env
    if { $_summary != "" } { upvar $_summary summary }
    if { $_html_log != "" } { upvar $_html_log html_log }
    set summary ""
    set html_log ""

    if [catch {

        # load definition of 'bad words' indicating test failure
        # note that rules are loaded in the order of decreasing priority (grid - group - common),
        # thus grid rules will override group ones
        set badwords {}
        foreach rulesfile [list $dir/$group/$gridname/parse.rules $dir/$group/parse.rules $dir/parse.rules] {
            if [catch {set fd [open $rulesfile r]}] { continue }
            while { [gets $fd line] >= 0 } {
                # skip comments and empty lines
                if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                if { [string trim $line] == "" } { continue }
                # each rule is "status / regexp / comment"
                if { ! [regexp {^([^/]*)/([^/]*)/(.*)$} $line res status rexp comment] } {
                    puts "Warning: cannot recognize parsing rule \"$line\" in file $rulesfile"
                    continue
                }
                set status [string trim $status]
                if { $comment != "" } { set status "$status ([string trim $comment])" }
                set rexp [regsub -all {\\b} $rexp {\\y}] ;# convert regexp from Perl to Tcl style
                lappend badwords [list $status $rexp]
            }
            close $fd
        }
        if { [llength $badwords] <= 0 } {
            puts "Warning: no definition of error indicators found (check files parse.rules)"
        }

        # analyse log line-by-line
        set todos {}
        set status ""
        foreach line [split $log "\n"] {
            # check if line defines specific treatment of some messages (TODO statement)
            if [regexp -nocase {^[ \t]*TODO ([^:]*):(.*)$} $line res platforms pattern] {
                if { ! [regexp -nocase {\mAll\M} $platforms] &&
                     ! [regexp -nocase "\\m$env(os_type)\\M" $platforms] } {
                    set html_log "$html_log\n$line"
                    continue ;# TODO statement is for another platform
                }

                # record TODOs that mark unstable cases (platform list contains '?')
                if { [regexp {[\?]} $platforms] } {
                    set todos_unstable([llength $todos]) 1
                }

                lappend todos [regsub -all {\\b} [string trim $pattern] {\\y}] ;# convert regexp from Perl to Tcl style
                set html_log "$html_log\n[_html_highlight BAD $line]"
                continue
            }

            # check for presence of messages indicating test result
            set ismarked 0
            foreach bw $badwords {
                if { [regexp [lindex $bw 1] $line] } {
                    # check if this is known bad case (marked by a TODO)
                    set is_known 0
                    for {set i 0} {$i < [llength $todos]} {incr i} {
                        if { [regexp [lindex $todos $i] $line] } {
                            set is_known 1
                            incr todo_count($i)
                            set html_log "$html_log\n[_html_highlight BAD $line]"
                            break
                        }
                    }

                    # if it is not in todo, define status
                    if { ! $is_known } {
                        set stat [lindex $bw 0 0]
                        set html_log "$html_log\n[_html_highlight $stat $line]"
                        if { $status == "" && $stat != "OK" && ! [regexp -nocase {^IGNOR} $stat] } {
                            set status [lindex $bw 0]
                        }
                    }
                    set ismarked 1
                    break
                }
            }
            if { ! $ismarked } {
                set html_log "$html_log\n$line"
            }
        }

        # check for presence of TEST COMPLETED statement
        if { $status == "" && ! [regexp {TEST COMPLETED} $log] } {
            # check whether absence of TEST COMPLETED is known problem
            set i [lsearch $todos "TEST INCOMPLETE"]
            if { $i >= 0 } {
                incr todo_count($i)
            } else {
                set status "FAILED (no final message is found)"
            }
        }

        # check declared bad cases and diagnose possible improvement
        # (bad case declared but not detected).
        # Note that absence of the problem marked by TODO with question mark
        # (unstable) is not reported as improvement.
        if { $status == "" } {
            for {set i 0} {$i < [llength $todos]} {incr i} {
                if { ! [info exists todos_unstable($i)] &&
                     (! [info exists todo_count($i)] || $todo_count($i) <= 0) } {
                    set status "IMPROVEMENT (expected problem TODO no. [expr $i + 1] is not detected)"
                    break
                }
            }
        }

        # report test as known bad if at least one of expected problems is found
        if { $status == "" && [llength [array names todo_count]] > 0 } {
            set status "BAD (known problem)"
        }

        # all checks passed
        if { $status == "" } {set status "OK" }
    } res] {
        set status "FAILED ($res)"
    }

    # put final message
    _log_and_puts summary "CASE $group $gridname $casename: $status"
    set html_log "[_html_highlight [lindex $status 0] $summary]\n$html_log"
}
824 # Auxiliary procedure putting message to both cout and log variable (list)
# Auxiliary procedure putting message to both cout and log variable.
# logvar is the NAME of a variable in the caller's frame (may be "" to skip
# logging); message is appended to it with a trailing newline.
proc _log_and_puts {logvar message} {
    if { $logvar != "" } {
        upvar $logvar log
        if [info exists log] {
            set log "$log$message\n"
        } else {
            set log "$message\n"
        }
    }
    puts $message
}
837 # Auxiliary procedure to log result on single test case
# Auxiliary procedure to log the result of a single test case: checks the raw
# output via _check_log, appends the summary line to the caller's log variable
# (named by logvar), and, if logdir is non-empty, saves the HTML and plain-text
# logs of the case under $logdir/$group/$grid.
proc _log_test_case {output logdir dir group grid casename logvar} {
    upvar $logvar log

    # check result and make HTML log
    _check_log $dir $group $grid $casename $output summary html_log
    set log "$log$summary"

    # save log to files
    if { $logdir != "" } {
        _log_html $logdir/$group/$grid/$casename.html $html_log "Test $group $grid $casename"
        _log_save $logdir/$group/$grid/$casename.log "$output\n$summary" "Test $group $grid $casename"
    }
}
852 # Auxiliary procedure to save log to file
# Auxiliary procedure to save a plain-text log to a file, with an optional
# title line; missing directories on the path are created as needed.
proc _log_save {file log {title {}}} {
    # create missing directories as needed
    catch {file mkdir [file dirname $file]}

    # try to open a file
    if [catch {set fd [open $file w]} res] {
        error "Error saving log file $file: $res"
    }

    # dump title, log body, and close
    puts $fd "$title\n"
    puts $fd $log
    close $fd
    return
}
869 # Auxiliary procedure to save log to file
# Auxiliary procedure to save a log to an HTML file: writes a header with the
# given title, embeds images matching the file's base name, then dumps the log
# in <pre>, converting "Executing <filename>..." lines into hyperlinks.
proc _log_html {file log {title {}}} {
    # create missing directories as needed
    catch {file mkdir [file dirname $file]}

    # try to open a file
    if [catch {set fd [open $file w]} res] {
        error "Error saving log file $file: $res"
    }

    # print header
    puts $fd "<html><head><title>$title</title><head><body><h1>$title</h1>"

    # add images if present
    set imgbasename [file rootname [file tail $file]]
    foreach img [lsort [glob -nocomplain -directory [file dirname $file] -tails ${imgbasename}*.gif ${imgbasename}*.png ${imgbasename}*.jpg]] {
        puts $fd "<p><img src=\"$img\"/><p>"
    }

    # print log body, trying to add HTML links to script files on lines like
    # "Executing <filename>..."
    puts $fd "<pre>"
    set logpath [file split [file normalize $file]]
    foreach line [split $log "\n"] {
        if { [regexp {Executing[ \t]+([a-zA-Z0-9._/:-]+[^.])} $line res script] &&
             [file exists $script] } {

            # generate relative path to the script file
            set url "file://[file normalize $script]"
            set scriptpath [file split [file normalize $script]]
            for {set i 0} {$i < [llength $logpath]} {incr i} {
                # note: was "[lindex $scriptpath $i]]" with a stray ']' appended,
                # which made matching path components compare as unequal
                if { "[lindex $logpath $i]" != "[lindex $scriptpath $i]" } {
                    if { $i == 0 } { break }
                    set url "[string repeat "../" [expr [llength $logpath] - $i - 1]]/[file join [lrange $scriptpath $i end]]"
                    break
                }
            }

            set line [regsub $script $line "<a href=\"$url\">$script</a>"]
        }
        puts $fd $line
    }
    puts $fd "</pre></body></html>"

    close $fd
    return
}
917 # Auxiliary method to make text with HTML highlighting according to status
# Auxiliary method returning an HTML color name for a cell according to the
# test status; unknown statuses are reported and rendered red as if FAILED.
proc _html_color {status} {
    # choose a color for the cell according to result
    if { $status == "OK" } {
        return lightgreen
    } elseif { [regexp -nocase {^FAIL} $status] } {
        return red
    } elseif { [regexp -nocase {^BAD} $status] } {
        return yellow
    } elseif { [regexp -nocase {^IMP} $status] } {
        return orange
    } elseif { [regexp -nocase {^SKIP} $status] } {
        return gray
    } elseif { [regexp -nocase {^IGNOR} $status] } {
        return lightgray
    } else {
        puts "Warning: no color defined for status $status, using red as if FAILED"
        return red
    }
}
938 # Format text line in HTML to be colored according to the status
# Format a text line as an HTML one-cell table colored according to the status
# (color chosen by _html_color).
proc _html_highlight {status line} {
    return "<table><tr><td bgcolor=\"[_html_color $status]\">$line</td></tr></table>"
}
943 # Internal procedure to generate HTML page presenting log of the tests
944 # execution in tabular form, with links to reports on individual cases
# Internal procedure to generate an HTML page presenting the log of the tests
# execution in tabular form, with links to reports on individual cases.
# totals is a list of {count status} pairs; regressions and improvements are
# lists whose first element is a heading ("Failed:", "Improvements:").
# An empty total_time means the run is still in progress (page auto-refreshes).
proc _log_html_summary {logdir log totals regressions improvements total_time} {
    global _test_case_regexp

    # create missing directories as needed
    catch {file mkdir $logdir}

    # try to open a file and start HTML
    if [catch {set fd [open $logdir/summary.html w]} res] {
        error "Error creating log file: $res"
    }

    # write HTML header, including command to refresh log if still in progress
    puts $fd "<html><head>"
    puts $fd "<title>Tests summary</title>"
    if { $total_time == "" } {
        puts $fd "<meta http-equiv=\"refresh\" content=\"10\">"
    }
    puts $fd "<meta http-equiv=\"pragma\" content=\"NO-CACHE\">"
    puts $fd "</head><body>"

    # put summary table with well-known statuses explained
    set legend(OK) "Test passed OK"
    set legend(FAILED) "Test failed (regression)"
    set legend(BAD) "Known problem"
    set legend(IMPROVEMENT) "Possible improvement (expected problem not detected)"
    set legend(SKIPPED) "Test skipped due to lack of data file"
    puts $fd "<h1>Summary</h1><table>"
    foreach nbstat $totals {
        set status [lindex $nbstat 1]
        if { [info exists legend($status)] } {
            set comment $legend($status)
        } else {
            set comment "User-defined status"
        }
        puts $fd "<tr><td align=\"right\">[lindex $nbstat 0]</td><td bgcolor=\"[_html_color $status]\">$status</td><td>$comment</td></tr>"
    }
    puts $fd "</table>"

    # time stamp and elapsed time info
    if { $total_time != "" } {
        puts $fd "<p>Generated on [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}] on [info hostname] <p> $total_time"
    } else {
        puts $fd "<p>NOTE: This is intermediate summary; the tests are still running! This page will refresh automatically until tests are finished."
    }

    # print regressions and improvements
    foreach featured [list $regressions $improvements] {
        if { [llength $featured] <= 1 } { continue }
        set status [string trim [lindex $featured 0] { :}]
        puts $fd "<h2>$status</h2>"
        puts $fd "<table>"
        set groupgrid ""
        foreach test [lrange $featured 1 end] {
            if { ! [regexp {^(.*)\s+([\w.]+)$} $test res gg name] } {
                set gg UNKNOWN
                set name "Error building short list; check details"
            }
            if { $gg != $groupgrid } {
                if { $groupgrid != "" } { puts $fd "</tr>" }
                set groupgrid $gg
                puts $fd "<tr><td>$gg</td>"
            }
            puts $fd "<td bgcolor=\"[_html_color $status]\"><a href=\"[regsub -all { } $gg /]/${name}.html\">$name</a></td>"
        }
        if { $groupgrid != "" } { puts $fd "</tr>" }
        puts $fd "</table>"
    }

    # put details in tables per group/grid
    puts $fd "<h1>Details</h1>"

    # process log line-by-line
    set group {}
    set grid {}
    set letter {}
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
            continue
        }

        # start new group
        if { $grp != $group } {
            if { $letter != "" } { puts $fd "</tr></table>" }
            set letter {}
            set group $grp
            set grid {}
            puts $fd "<h2>Group $group</h2>"
        }

        # start new grid
        if { $grd != $grid } {
            if { $letter != "" } { puts $fd "</tr></table>" }
            set letter {}
            set grid $grd
            puts $fd "<h3>Grid $grid</h3>"
        }

        # check if test case name is <letter><digit>;
        # if not, set alnum to period "." to recognize non-standard test name
        if { ! [regexp {([A-Za-z]+)([0-9]+)} $casename res alnum number] } {
            set alnum .
        }

        # start new row when letter changes or for non-standard names
        if { $alnum != $letter || $alnum == "." } {
            if { $letter != "" } {
                puts $fd "</tr><tr>"
            } else {
                puts $fd "<table><tr>"
            }
            set letter $alnum
        }

        puts $fd "<td bgcolor=\"[_html_color $result]\"><a href=\"$group/$grid/${casename}.html\">$casename</a></td>"
    }
    if { $letter != "" } { puts $fd "</tr></table>" }

    # add remaining lines of log as plain text
    puts $fd "<h2>Plain text messages</h2>\n<pre>"
    foreach line [split $log "\n"] {
        if { ! [regexp $_test_case_regexp $line] } {
            puts $fd $line
        }
    }
    puts $fd "</pre>"

    # close file and exit
    puts $fd "</body></html>"
    close $fd
    return
}
1077 # Procedure to dump summary logs of tests
# Procedure to dump summary logs of tests: classifies CASE lines by status,
# appends totals / regression / improvement summaries to the log when
# total_time is given (i.e. when the run is complete), and, when logdir is
# non-empty, writes summary.html and tests.log there.
proc _log_summarize {logdir log {total_time {}}} {

    # sort log records alphabetically to have the same behavior on Linux and Windows
    # (also needed if tests are run in parallel)
    set loglist [lsort -dictionary [split $log "\n"]]

    # classify test cases by status
    foreach line $loglist {
        if { [regexp {^CASE ([^:]*): ([[:alnum:]]+).*$} $line res caseid status] } {
            lappend stat($status) $caseid
        }
    }
    set totals {}
    set improvements {Improvements:}
    set regressions {Failed:}
    if { [info exists stat] } {
        foreach status [lsort [array names stat]] {
            lappend totals [list [llength $stat($status)] $status]

            # separately count improvements (status starting with IMP) and regressions (all except IMP, OK, BAD, and SKIP)
            if { [regexp -nocase {^IMP} $status] } {
                eval lappend improvements $stat($status)
            } elseif { $status != "OK" && ! [regexp -nocase {^BAD} $status] && ! [regexp -nocase {^SKIP} $status] } {
                eval lappend regressions $stat($status)
            }
        }
    }

    # if time is specified, add totals
    if { $total_time != "" } {
        if { [llength $improvements] > 1 } {
            _log_and_puts log [join $improvements "\n  "]
        }
        if { [llength $regressions] > 1 } {
            _log_and_puts log [join $regressions "\n  "]
        }
        if { [llength $improvements] == 1 && [llength $regressions] == 1 } {
            _log_and_puts log "No regressions"
        }
        _log_and_puts log "Total cases: [join $totals {, }]"
        _log_and_puts log $total_time
    }

    # save log files
    if { $logdir != "" } {
        _log_html_summary $logdir $log $totals $regressions $improvements $total_time
        _log_save $logdir/tests.log $log "Tests summary"
    }

    return
}
# Internal procedure to generate XML log in JUnit style, for further
# consumption by Jenkins or similar systems.
#
# The output is intended to conform to XML schema supported by Jenkins found at
# https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd
#
# The mapping of the fields is inspired by annotated schema of Apache Ant JUnit XML format found at
# http://windyroad.org/dl/Open%20Source/JUnit.xsd
#
# logdir       - directory holding per-case logs at $logdir/group/grid/case.log
# filename     - path of the XML file to create (parent dirs created as needed)
# log          - summary log text, one "CASE group grid name: result" per line
# include_cout - when true, the whole test log is embedded into <system-out>;
#                otherwise only the summary line is included
proc _log_xml_summary {logdir filename log include_cout} {
    global _test_case_regexp

    catch {file mkdir [file dirname $filename]}

    # try to open a file and start XML
    if [catch {set fd [open $filename w]} res] {
        error "Error creating XML summary file $filename: $res"
    puts $fd "<?xml version='1.0' encoding='utf-8'?>"
    puts $fd "<testsuites>"

    # prototype for command to generate test suite tag;
    # evaluated later with group/grid/counters defined in this scope
    set time_and_host "timestamp=\"[clock format [clock seconds] -format {%Y-%m-%dT%H:%M:%S}]\" hostname=\"[info hostname]\""
    set cmd_testsuite {puts $fd "<testsuite name=\"$group $grid\" tests=\"$nbtests\" failures=\"$nbfail\" errors=\"$nberr\" time=\"$time\" skipped=\"$nbskip\" $time_and_host>\n$testcases\n</testsuite>\n"}

    # sort log and process it line-by-line
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
        # strip surrounding whitespace and parentheses from the explanation
        set message [string trim $message " \t\r\n()"]

        # start new testsuite for each grid
        if { $grp != $group || $grd != $grid } {

            # write previous test suite
            if [info exists testcases] { eval $cmd_testsuite }

        # parse test log and get its CPU time
        if { [catch {set fdlog [open $logdir/$group/$grid/${casename}.log r]} ret] } {
            puts "Error: cannot open $logdir/$group/$grid/${casename}.log: $ret"
        while { [gets $fdlog logline] >= 0 } {
            if { $include_cout } {
                set testout "$testout$logline\n"
            if [regexp -nocase {TOTAL CPU TIME:\s*([\d.]+)\s*sec} $logline res cpu] {
                set add_cpu " time=\"$cpu\""
                set time [expr $time + $cpu]

        # without -echo of full log, keep only the summary line as output
        if { ! $include_cout } {
            set testout "$line\n"

        # record test case with its output and status
        # Mapping is: SKIPPED, BAD, and OK to OK, all other to failure
        # (in the emitted XML: SKIP* results become <error>, BAD* become <skipped>)
        set testcases "$testcases\n <testcase name=\"$casename\"$add_cpu status=\"$result\">\n"
        set testcases "$testcases\n <system-out>\n$testout </system-out>"
        if { $result != "OK" } {
            if { [regexp -nocase {^SKIP} $result] } {
                set testcases "$testcases\n <error name=\"$result\" message=\"$message\"/>"
            } elseif { [regexp -nocase {^BAD} $result] } {
                set testcases "$testcases\n <skipped>$message</skipped>"
                set testcases "$testcases\n <failure name=\"$result\" message=\"$message\"/>"
        set testcases "$testcases\n </testcase>"

    # write last test suite
    if [info exists testcases] { eval $cmd_testsuite }

    puts $fd "</testsuites>"
# define custom platform name
# Initializes env(os_type) unless it is already set: base value is
# $tcl_platform(platform), refined on Linux from /etc/issue, with a "-64"
# suffix appended when uname reports x86_64.
proc _tests_platform_def {} {
    global env tcl_platform

    # respect an explicitly preset platform name
    if [info exists env(os_type)] { return }

    set env(os_type) $tcl_platform(platform)

    # use detailed mapping for various versions of Linux
    # (note that mapping is rather non-uniform, for historical reasons)
    if { $env(os_type) == "unix" && ! [catch {exec cat /etc/issue} issue] } {
        if { [regexp {Mandriva[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) Mandriva$num
        } elseif { [regexp {Red Hat[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) RedHat$num
        } elseif { [regexp {Debian[ \tA-Za-z/]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) Debian$num$subnum
        } elseif { [regexp {CentOS[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) CentOS$num$subnum
        } elseif { [regexp {Scientific[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) SL$num$subnum
        } elseif { [regexp {Fedora Core[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) FedoraCore$num

        # distinguish 64-bit platforms
        if { [exec uname -m] == "x86_64" } {
            set env(os_type) "$env(os_type)-64"
# Auxiliary procedure to split path specification (usually defined by
# environment variable) into list of directories or files
proc _split_path {pathspec} {
    # normalize Windows-style backslashes to forward slashes so that the
    # separator character cannot be confused with an escape
    regsub -all "\\\\" $pathspec "/" pathspec

    # break the specification at the platform-specific list separator
    set items [split $pathspec [_path_separator]]
    return $items
}
# Auxiliary procedure to define platform-specific separator for directories in
# path specification
proc _path_separator {} {
    # return the list-separator character used in path specifications on this
    # platform (presumably ";" on Windows and ":" elsewhere — TODO confirm,
    # the branch bodies are defined below)
    if { $tcl_platform(platform) == "windows" } {
# Procedure to locate data file for test given its name.
# The search is performed assuming that the function is called
# from the test case script; the search order is:
# - subdirectory "data" of the directory containing the test script
# - subdirectories (recursively) of paths listed in environment variable
#   CSF_TestDataPath
# - directory set by the "datadir" command in the caller's context
# Returns the normalized absolute path of the file.
# If file is not found, raises Tcl error.
proc locate_data_file {filename} {
    global env groupname gridname casename

    # check if the file is located in the subdirectory data of the script dir
    set scriptfile [info script]
    if { $scriptfile != "" } {
        set path [file join [file dirname $scriptfile] data $filename]
        if { [file exists $path] } {
            return [file normalize $path]
        }
    }

    # check sub-directories in paths indicated by CSF_TestDataPath;
    # each path is expanded breadth-first: sub-directories of the current
    # candidate are appended to the queue before it is discarded
    if { [info exists env(CSF_TestDataPath)] } {
        foreach dir [_split_path $env(CSF_TestDataPath)] {
            while {[llength $dir] != 0} {
                set name [lindex $dir 0]
                set dir [lrange $dir 1 end]
                eval lappend dir [glob -nocomplain -directory $name -type d *]
                if { [file exists $name/$filename] } {
                    return [file normalize $name/$filename]
                }
            }
        }
    }

    # check current datadir (datadir is evaluated in the caller's context)
    if { [file exists [uplevel datadir]/$filename] } {
        return [file normalize [uplevel datadir]/$filename]
    }

    error [join [list "Error: file $filename could not be found" \
                 "(should be in paths indicated by CSF_TestDataPath environment variable, " \
                 "or in subfolder data in the script directory)"] "\n"]
}
# Procedure to make a diff and common of two lists
# list1, list2 - input lists
# _in1, _in2   - names of caller's variables receiving items unique to
#                list1 / list2 (presumably bound via upvar like _common —
#                TODO confirm; the bindings are above this excerpt's view)
# _common      - name of caller's variable receiving items present in both
proc _list_diff {list1 list2 _in1 _in2 _common} {
    upvar $_common common

    # an item of list1 also found in list2 is common
    foreach item $list1 {
        if { [lsearch -exact $list2 $item] >= 0 } {
            lappend common $item

    # an item of list2 absent from common is unique to list2
    foreach item $list2 {
        if { [lsearch -exact $common $item] < 0 } {
# procedure to load a file to Tcl string
# filename - path of the file to read
# Returns the file content with the trailing newline (if any) stripped.
proc _read_file {filename} {
    set fd [open $filename r]
    set result [read -nonewline $fd]
    close $fd
    return $result
}
# Procedure to compare results of two runs of test cases
# dir1, dir2 - root directories of the two test runs to compare
# basename   - relative sub-path currently being compared (used for recursion)
# status     - "all" to keep comparing cases whose statuses differ;
#              "ok" to compare only cases that are OK in the first run
# verbose    - verbosity level: >1 reports entries present in only one run,
#              >2 traces each directory visited
# _logvar    - name of caller's variable accumulating the textual report
# _statvar   - optional name of caller's array for accumulating CPU time
#              statistics; empty at the top-level call, in which case the
#              grand total is printed at the end
proc _test_diff {dir1 dir2 basename status verbose _logvar {_statvar ""}} {

    # prepare variable (array) for collecting statistics
    if { "$_statvar" != "" } {
        upvar $_statvar stat

    # first check subdirectories
    set path1 [file join $dir1 $basename]
    set path2 [file join $dir2 $basename]
    set list1 [glob -directory $path1 -types d -tails -nocomplain *]
    set list2 [glob -directory $path2 -types d -tails -nocomplain *]
    if { [llength $list1] >0 || [llength $list2] > 0 } {
        _list_diff $list1 $list2 in1 in2 common
        if { "$verbose" > 1 } {
            if { [llength $in1] > 0 } { _log_and_puts log "Only in $path1: $in1" }
            if { [llength $in2] > 0 } { _log_and_puts log "Only in $path2: $in2" }
        # recurse into sub-directories present in both runs
        foreach subdir $common {
            if { "$verbose" > 2 } {
                _log_and_puts log "Checking [file join $basename $subdir]"
            _test_diff $dir1 $dir2 [file join $basename $subdir] $status $verbose log stat

        # check log files (only if directory has no subdirs)
        set list1 [glob -directory $path1 -types f -tails -nocomplain *.log]
        set list2 [glob -directory $path2 -types f -tails -nocomplain *.log]
        _list_diff $list1 $list2 in1 in2 common
        if { "$verbose" > 1 } {
            if { [llength $in1] > 0 } { _log_and_puts log "Only in $path1: $in1" }
            if { [llength $in2] > 0 } { _log_and_puts log "Only in $path2: $in2" }
        foreach logfile $common {
            # read both versions of the case log
            set log1 [_read_file [file join $dir1 $basename $logfile]]
            set log2 [_read_file [file join $dir2 $basename $logfile]]

            # check execution statuses
            set status1 UNDEFINED
            set status2 UNDEFINED
            if { ! [regexp {CASE [^:]*:\s*([\w]+)} $log1 res1 status1] ||
                 ! [regexp {CASE [^:]*:\s*([\w]+)} $log2 res2 status2] ||
                 "$status1" != "$status2" } {
                _log_and_puts log "STATUS [split $basename /] [file rootname $logfile]: $status1 / $status2"

                # if test statuses are different, further comparison makes
                # no sense unless explicitly requested
                if { "$status" != "all" } {

            # in "ok" mode, only cases OK in the first run are compared
            if { "$status" == "ok" && "$status1" != "OK" } {

            # accumulate and compare CPU times of the two runs
            if { [regexp {TOTAL CPU TIME:\s*([\d.]+)} $log1 res1 cpu1] &&
                 [regexp {TOTAL CPU TIME:\s*([\d.]+)} $log2 res1 cpu2] } {
                set stat(cpu1) [expr $stat(cpu1) + $cpu1]
                set stat(cpu2) [expr $stat(cpu2) + $cpu2]

                # compare CPU times with 10% precision (but not less 0.5 sec)
                if { [expr abs ($cpu1 - $cpu2) > 0.5 + 0.05 * abs ($cpu1 + $cpu2)] } {
                    _log_and_puts log "CPU [split $basename /] [file rootname $logfile]: $cpu1 / $cpu2"

    # at the top-level call, report the grand total of CPU time difference
    if { "$_statvar" == "" } {
        _log_and_puts log "Total CPU difference: $stat(cpu1) / $stat(cpu2)"
1444 # get number of CPUs on the system
1445 proc _get_nb_cpus {} {
1446 global tcl_platform env
1448 if { "$tcl_platform(platform)" == "windows" } {
1449 # on Windows, take the value of the environment variable
1450 if { [info exists env(NUMBER_OF_PROCESSORS)] &&
1451 ! [catch {expr $env(NUMBER_OF_PROCESSORS) > 0} res] && $res >= 0 } {
1452 return $env(NUMBER_OF_PROCESSORS)
1454 } elseif { "$tcl_platform(os)" == "Linux" } {
1455 # on Linux, take number of logical processors listed in /proc/cpuinfo
1456 if { [catch {open "/proc/cpuinfo" r} fd] } {
1457 return 0 ;# should never happen, but...
1460 while { [gets $fd line] >= 0 } {
1461 if { [regexp {^processor[ \t]*:} $line] } {
1467 } elseif { "$tcl_platform(os)" == "Darwin" } {
1468 # on MacOS X, call sysctl command
1469 if { ! [catch {exec sysctl hw.ncpu} ret] &&
1470 [regexp {^hw[.]ncpu[ \t]*:[ \t]*([0-9]+)} $ret res nb] } {
1475 # if cannot get good value, return 0 as default