# Copyright (c) 2012 OPEN CASCADE SAS
#
# The content of this file is subject to the Open CASCADE Technology Public
# License Version 6.5 (the "License"). You may not use the content of this file
# except in compliance with the License. Please obtain a copy of the License
# at http://www.opencascade.org and read it completely before using this file.
#
# The Initial Developer of the Original Code is Open CASCADE S.A.S., having its
# main offices at: 1, place des Freres Montgolfier, 78280 Guyancourt, France.
#
# The Original Code and all software distributed under the License is
# distributed on an "AS IS" basis, without warranty of any kind, and the
# Initial Developer hereby disclaims all such warranties, including without
# limitation, any warranties of merchantability, fitness for a particular
# purpose or non-infringement. Please see the License for the specific terms
# and conditions governing the rights and limitations under the License.
############################################################################
# This file defines scripts for execution of OCCT tests.
# It should be loaded automatically when DRAW is started, and provides
# two top-level commands: 'test' and 'testgrid'.
# See the OCCT Tests User Guide for a description of the test system.
#
# Note: procedures with names starting with underscore are for internal use
# inside the test system.
############################################################################
# Default verbose level for command _run_test
# regexp for parsing test case results in summary log
set _test_case_regexp {^CASE\s+([\w.-]+)\s+([\w.-]+)\s+([\w.-]+)\s*:\s*([\w]+)(.*)}
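# For illustration, a summary-log line matched by this regexp looks like
# (hypothetical case name):
#   CASE bugs fclasses bug100: OK
# capturing the group, grid, and case names, the status word, and the rest
# of the line (explanation, if any).
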
# Basic command to run indicated test case in DRAW
help test {Run specified test case
Use: test group grid casename [verbose_level]
Verbose level is 0 by default; can be set to 1 or 2}
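# Typical invocations from the DRAW prompt (hypothetical case name):
#   test bugs fclasses bug100
#   test bugs fclasses bug100 2   ;# more verbose output
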
proc test {group grid casename {verbose {}}} {
    if { $verbose != "" } {
        set _tests_verbose $verbose

    # get test case paths (will raise error if input is invalid)
    _get_test $group $grid $casename dir gridname casefile

    uplevel _run_test $dir $group $gridname $casefile

    _check_log $dir $group $gridname $casename [dlog get]
# Basic command to run all tests, a specified group, or a single grid in DRAW
help testgrid {Run all tests, or specified group, or one grid
Use: testgrid logdir [group [grid]] [options...]
Log directory should be empty (or non-existing)
-parallel N: run in parallel mode with up to N processes (default 0)
-refresh N: save summary logs every N seconds (default 60, minimal 1, 0 to disable)
-overwrite: force writing logs in existing non-empty directory
-xml filename: write XML report for Jenkins (in JUnit-like format)
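# Typical invocations (hypothetical log directory and selection):
#   testgrid results/nightly
#   testgrid results/bugs bugs -parallel 4 -xml summary.xml
#   testgrid "" -overwrite   ;# an empty logdir generates a timestamped name
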
proc testgrid {logdir args} {
    global env tcl_platform _tests_verbose

    ######################################################
    ######################################################

    # check that environment variable defining paths to test scripts is defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"
    for {set narg 0} {$narg < [llength $args]} {incr narg} {
        set arg [lindex $args $narg]

        if { $arg == "-parallel" } {
            if { $narg < [llength $args] } {
                set parallel [expr [lindex $args $narg]]

        if { $arg == "-refresh" } {
            if { $narg < [llength $args] } {
                set refresh [expr [lindex $args $narg]]

        # allow overwrite logs
        if { $arg == "-overwrite" } {

        if { $arg == "-xml" } {
            if { $narg < [llength $args] } {
                set xmlfile [lindex $args $narg]
            if { $xmlfile == "" } {
                set xmlfile TESTS-summary.xml

        if { [regexp {^-} $arg] } {
            error "Error: unsupported option \"$arg\""

        # treat arguments not recognized as options as group and grid names
        if { ! [info exists groupname] } {
        } elseif { ! [info exists gridname] } {
            error "Error: cannot interpret argument $narg ($arg): both group and grid names are already defined by previous args!"
    # check that target log directory is empty or does not exist
    set logdir [file normalize [string trim $logdir]]
    if { $logdir == "" } {
        # if logdir is an empty string, generate a unique name like "results_2010-12-31T2359"
        set logdir "results_[clock format [clock seconds] -format {%Y-%m-%dT%H%M}]"
        set logdir [file normalize $logdir]

    if { [file isdirectory $logdir] && ! $overwrite && ! [catch {glob -directory $logdir *}] } {
        error "Error: Specified log directory \"$logdir\" is not empty; please clean it before running tests"

    if { [catch {file mkdir $logdir}] || ! [file writable $logdir] } {
        error "Error: Cannot create directory \"$logdir\", or it is not writable"
    ######################################################
    # prepare list of tests to be performed
    ######################################################

    # list of tests, each defined by a list of:
    # - test scripts directory
    # - group (subfolder) name
    # - grid (subfolder) name
    # - path to test case file
    # iterate by all script paths
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # protection against empty paths
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        if { $_tests_verbose > 0 } { _log_and_puts log "Examining tests directory $dir" }

        # check that directory exists
        if { ! [file isdirectory $dir] } {
            _log_and_puts log "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"

        # if a test group is specified, check that a directory with that name exists here;
        # if not, continue to the next test dir
        if { [info exists groupname] && $groupname != "" } {
            if { [file isdirectory $dir/$groupname] } {
                set groups $groupname

            # else search all directories in the current dir
            if [catch {glob -directory $dir -tail -types d *} groups] { continue }
        if { $_tests_verbose > 0 } { _log_and_puts log "Groups to be executed: $groups" }
        foreach group [lsort -dictionary $groups] {
            if { $_tests_verbose > 0 } { _log_and_puts log "Examining group directory $group" }

            # file grids.list must exist: it defines the sequence of grids in the group
            if { ! [file exists $dir/$group/grids.list] } {
                _log_and_puts log "Warning: directory $dir/$group does not contain file grids.list, skipped"

            # read the grids.list file and make a list of grids to be executed
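            # Each non-comment line of grids.list is expected to be of the form
            # "<gridid> <gridname>", e.g. (hypothetical content):
            #   001 basic
            #   002 advanced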
            set fd [open $dir/$group/grids.list]
            while { [gets $fd line] >= 0 } {
                # skip comments and empty lines
                if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                if { [string trim $line] == "" } { continue }

                # get grid id and name
                if { ! [regexp "^\(\[0-9\]+\)\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridid grid] } {
                    _log_and_puts log "Warning: cannot recognize line $nline in file $dir/$group/grids.list as \"gridid gridname\"; ignored"

                # if a specific grid is requested, check that it is present; otherwise make a complete list
                if { ! [info exists gridname] || $gridname == "" || $gridname == $gridid || $gridname == $grid } {
                    lappend gridlist $grid
        # iterate by all grids
        foreach grid $gridlist {

            # check if this grid is aliased to another one
            set griddir $dir/$group/$grid
            if { [file exists $griddir/cases.list] } {
                set fd [open $griddir/cases.list]
                if { [gets $fd line] >= 0 } {
                    set griddir [file normalize $dir/$group/$grid/[string trim $line]]

            # check if the grid directory actually exists
            if { ! [file isdirectory $griddir] } {
                _log_and_puts log "Error: tests directory for grid $grid ($griddir) is missing; skipped"

            # create directory for logging test results
            if { $logdir != "" } { file mkdir $logdir/$group/$grid }

            # iterate by all tests in the grid directory
            if { [catch {glob -directory $griddir -types f *} testfiles] } { continue }
            foreach casefile [lsort -dictionary $testfiles] {
                # filter out begin and end files
                set casename [file tail $casefile]
                if { $casename == "begin" || $casename == "end" } { continue }

                lappend tests_list [list $dir $group $grid $casename $casefile]
    if { [llength $tests_list] < 1 } {
        error "Error: no tests are found; check your input arguments and the variable CSF_TestScriptsPath!"

    ######################################################
    ######################################################

    # log command arguments and environment
    set log "Command: testgrid $args\nHost: [info hostname]\nStarted on: [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}]\n"
    set log "$log\nEnvironment:\n"
    foreach envar [array names env] {
        set log "$log$envar=\"$env($envar)\"\n"

    set refresh_timer [clock seconds]
    uplevel dchrono _timer reset
    uplevel dchrono _timer start
    # if parallel execution is requested, allocate a thread pool
    if { $parallel > 0 } {
        if { ! [info exists tcl_platform(threaded)] || [catch {package require Thread}] } {
            _log_and_puts log "Warning: Tcl package Thread is not available, running in sequential mode"

            set worker [tpool::create -minworkers $parallel -maxworkers $parallel]
            # suspend the pool until all jobs are posted, to prevent blocking of the process
            # of starting / processing jobs by running threads
            tpool::suspend $worker
            if { $_tests_verbose > 0 } { _log_and_puts log "Executing tests in (up to) $parallel threads" }
    foreach test_def $tests_list {
        set dir [lindex $test_def 0]
        set group [lindex $test_def 1]
        set grid [lindex $test_def 2]
        set casename [lindex $test_def 3]
        set casefile [lindex $test_def 4]

        # command to make the test generate its images in the results directory
        if { $logdir != "" } { set imgdir_cmd "set imagedir $logdir/$group/$grid" }

        # prepare command file for running the test case in a separate instance of DRAW
        set fd_cmd [open $logdir/$group/$grid/${casename}.tcl w]
        puts $fd_cmd "$imgdir_cmd"
        puts $fd_cmd "set test_image $casename"
        puts $fd_cmd "_run_test $dir $group $grid $casefile"

        # use dlog command to obtain complete output of the test when it is available (i.e. since OCCT 6.6.0)
        if { ! [catch {dlog get}] } {
            puts $fd_cmd "puts \[dlog get\]"

        # else try to use old-style QA_ variables to get more output...
            set env(QA_print_command) 1

        # final 'exit' is needed when running on Linux under VirtualGl

        # command to run DRAW with a command file;
        # note that an empty string is passed as standard input to avoid possible
        # hang-ups due to waiting for stdin of the launching process
        set command "exec <<{} DRAWEXE -f $logdir/$group/$grid/${casename}.tcl"

        # alternative method to run without a temporary file; disabled as it needs too many backslashes
        # set command "exec <<\"\" DRAWEXE -c $imgdir_cmd\\\; set test_image $casename\\\; \
        #              _run_test $dir $group $grid $casefile\\\; \
        #              puts \\\[dlog get\\\]\\\; exit"
        # run the test case, either in parallel or sequentially
        if { $parallel > 0 } {
            set job [tpool::post -nowait $worker "catch \"$command\" output; return \$output"]
            set job_def($job) [list $logdir $dir $group $grid $casename]

            # sequential execution
            catch {eval $command} output
            _log_test_case $output $logdir $dir $group $grid $casename log

            # update summary log with the requested period
            if { $logdir != "" && $refresh > 0 && [expr [clock seconds] - $refresh_timer > $refresh] } {
                # update and dump summary
                _log_summarize $logdir $log
                set refresh_timer [clock seconds]
    # get results of the started threads
    if { $parallel > 0 } {
        tpool::resume $worker
        while { [llength [array names job_def]] > 0 } {
            foreach job [tpool::wait $worker [array names job_def]] {
                eval _log_test_case \[tpool::get $worker $job\] $job_def($job) log

                # update summary log with the requested period
                if { $logdir != "" && $refresh > 0 && [clock seconds] > $refresh_timer + $refresh } {
                    _log_summarize $logdir $log
                    set refresh_timer [clock seconds]

        # release the thread pool
        tpool::release $worker

    uplevel dchrono _timer stop
    set time [lindex [split [uplevel dchrono _timer show] "\n"] 0]
    ######################################################
    # output summary logs and exit
    ######################################################

    _log_summarize $logdir $log $time
    if { $logdir != "" } {
        puts "Detailed logs are saved in $logdir"

    if { $logdir != "" && $xmlfile != "" } {
        # the XML output file is assumed to be relative to the log dir unless it is absolute
        if { [file pathtype $xmlfile] == "relative" } {
            set xmlfile [file normalize $logdir/$xmlfile]

        _log_xml_summary $logdir $xmlfile $log 0
        puts "XML summary is saved to $xmlfile"
# Internal procedure to find the test case indicated by group, grid, and test case names;
# returns:
# - dir: path to the base directory of the tests group
# - gridname: actual name of the grid
# - casefile: path to the test case script
# If no such test is found, raises an error with an appropriate message.
proc _get_test {group grid casename _dir _gridname _casefile} {
    upvar $_gridname gridname
    upvar $_casefile casefile
    # check that the environment variable defining paths to test scripts is defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"

    # iterate by all script paths
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # protection against empty paths
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        # check that the directory exists
        if { ! [file isdirectory $dir] } {
            puts "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
        # check if a test group with the given name exists in this dir;
        # if not, continue to the next test dir
        if { ! [file isdirectory $dir/$group] } { continue }

        # check that a grid with the given name (possibly an alias) exists; stop otherwise
        if { ! [file isdirectory $dir/$group/$gridname] } {
            # check if the grid is named by alias rather than by actual name
            if { [file exists $dir/$group/grids.list] } {
                set fd [open $dir/$group/grids.list]
                while { [gets $fd line] >= 0 } {
                    if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                    if { [regexp "^$grid\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridname] } {

        if { ! [file isdirectory $dir/$group/$gridname] } { continue }

        # get the actual file name of the script; stop if it cannot be found
        set casefile $dir/$group/$gridname/$casename
        if { ! [file exists $casefile] } {
            # check if this grid is aliased to another one
            if { [file exists $dir/$group/$gridname/cases.list] } {
                set fd [open $dir/$group/$gridname/cases.list]
                if { [gets $fd line] >= 0 } {
                    set casefile [file normalize $dir/$group/$gridname/[string trim $line]/$casename]

        if { [file exists $casefile] } {

    # coming here means the specified test was not found; report an error
    error [join [list "Error: test case $group / $grid / $casename is not found in paths listed in variable" \
                      "CSF_TestScriptsPath (current value is \"$env(CSF_TestScriptsPath)\")"] "\n"]
# Internal procedure to run the test case indicated by base directory,
# group and grid names, and test case file path.
# The log can be obtained by command "dlog get".
proc _run_test {scriptsdir group gridname casefile} {

    uplevel dchrono _timer reset
    uplevel dchrono _timer start

    # enable commands logging; switch to old-style mode if the dlog command is not present
    if { [catch {dlog reset}] } {

    rename puts puts-saved
        global _tests_verbose

        # log only output to stdout and stderr, not to a file!
        if {[llength $args] > 1} {
            set optarg [lindex $args end-1]
            if { $optarg == "stdout" || $optarg == "stderr" || $optarg == "-newline" } {
                dlog add [lindex $args end]

            dlog add [lindex $args end]

        # reproduce original puts
        if { $_tests_verbose } {
            eval puts-saved $args
    uplevel set casename [file tail $casefile]
    uplevel set groupname $group
    uplevel set gridname $gridname

    if { [file exists $scriptsdir/$group/begin] } {
        puts "Executing $scriptsdir/$group/begin..."; flush stdout
        uplevel source $scriptsdir/$group/begin

    if { [file exists $scriptsdir/$group/$gridname/begin] } {
        puts "Executing $scriptsdir/$group/$gridname/begin..."; flush stdout
        uplevel source $scriptsdir/$group/$gridname/begin

    puts "Executing $casefile..."; flush stdout
    uplevel source $casefile

    if { [file exists $scriptsdir/$group/$gridname/end] } {
        puts "Executing $scriptsdir/$group/$gridname/end..."; flush stdout
        uplevel source $scriptsdir/$group/$gridname/end

    if { [file exists $scriptsdir/$group/end] } {
        puts "Executing $scriptsdir/$group/end..."; flush stdout
        uplevel source $scriptsdir/$group/end

    puts "Tcl Exception: $res"
    if { $dlog_exists } {
        rename puts-saved puts

    # stop cpulimit killer if armed by the test

    uplevel dchrono _timer stop
    set time [uplevel dchrono _timer show]
    if [regexp -nocase {CPU user time:[ \t]*([0-9.e-]+)} $time res cpu] {
        if { $dlog_exists } {
            dlog add "TOTAL CPU TIME: $cpu sec"

            puts "TOTAL CPU TIME: $cpu sec"
# Internal procedure to check the log of test execution and decide if the test passed or failed
proc _check_log {dir group gridname casename log {_summary {}} {_html_log {}}} {
    if { $_summary != "" } { upvar $_summary summary }
    if { $_html_log != "" } { upvar $_html_log html_log }

    # load definitions of 'bad words' indicating test failure;
    # note that rules are loaded in order of decreasing priority (grid - group - common),
    # thus grid rules will override group ones
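    # Each line of a parse.rules file has the form "status/regexp/comment",
    # with the regexp in Perl style (\b for a word boundary); hypothetical
    # examples:
    #   FAILED /\bFaulty\b/ bad shape
    #   IGNORE /^Error [23]d = [\d.-]+/ debug output of blend command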
    foreach rulesfile [list $dir/$group/$gridname/parse.rules $dir/$group/parse.rules $dir/parse.rules] {
        if [catch {set fd [open $rulesfile r]}] { continue }
        while { [gets $fd line] >= 0 } {
            # skip comments and empty lines
            if { [regexp "\[ \t\]*\#.*" $line] } { continue }
            if { [string trim $line] == "" } { continue }

            if { ! [regexp {^([^/]*)/([^/]*)/(.*)$} $line res status rexp comment] } {
                puts "Warning: cannot recognize parsing rule \"$line\" in file $rulesfile"

            set status [string trim $status]
            if { $comment != "" } { set status "$status ([string trim $comment])" }
            set rexp [regsub -all {\\b} $rexp {\\y}] ;# convert regexp from Perl to Tcl style
            lappend badwords [list $status $rexp]
    if { [llength $badwords] <= 0 } {
        puts "Warning: no definitions of error indicators found (check the parse.rules files)"

    # analyse the log line-by-line
    foreach line [split $log "\n"] {
        # check if the line defines specific treatment of some messages
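        # A TODO statement declares a known problem; it has the form
        # "TODO platforms: regexp", e.g. (hypothetical):
        #   TODO Linux: \*\*\* Error in computation
        # A question mark among the platforms marks the case as unstable.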
        if [regexp -nocase {^[ \t]*TODO ([^:]*):(.*)$} $line res platforms pattern] {
            if { ! [regexp -nocase {\mAll\M} $platforms] &&
                 ! [regexp -nocase "\\m$env(os_type)\\M" $platforms] } {
                set html_log "$html_log\n$line"
                continue ;# TODO statement is for another platform

            # record TODOs that mark unstable cases
            if { [regexp {[\?]} $platforms] } {
                set todos_unstable([llength $todos]) 1

            lappend todos [regsub -all {\\b} [string trim $pattern] {\\y}] ;# convert regexp from Perl to Tcl style
            set html_log "$html_log\n[_html_highlight BAD $line]"
        # check for presence of messages indicating the test result
        foreach bw $badwords {
            if { [regexp [lindex $bw 1] $line] } {
                # check if this is a known bad case
                for {set i 0} {$i < [llength $todos]} {incr i} {
                    if { [regexp [lindex $todos $i] $line] } {
                        set html_log "$html_log\n[_html_highlight BAD $line]"

                # if it is not in todo, define the status
                set stat [lindex $bw 0 0]
                set html_log "$html_log\n[_html_highlight $stat $line]"
                if { $status == "" && $stat != "OK" && ! [regexp -nocase {^IGNOR} $stat] } {
                    set status [lindex $bw 0]

        set html_log "$html_log\n$line"
    # check for presence of the TEST COMPLETED statement
    if { $status == "" && ! [regexp {TEST COMPLETED} $log] } {
        # check whether the absence of TEST COMPLETED is a known problem
        set i [lsearch $todos "TEST INCOMPLETE"]

            set status "FAILED (no final message is found)"

    # check declared bad cases and diagnose possible improvement
    # (bad case declared but not detected).
    # Note that the absence of a problem marked by TODO with a question mark
    # (unstable) is not reported as an improvement.
    if { $status == "" } {
        for {set i 0} {$i < [llength $todos]} {incr i} {
            if { ! [info exists todos_unstable($i)] &&
                 (! [info exists todo_count($i)] || $todo_count($i) <= 0) } {
                set status "IMPROVEMENT (expected problem TODO no. [expr $i + 1] is not detected)"

    # report the test as known bad if at least one of the expected problems is found
    if { $status == "" && [llength [array names todo_count]] > 0 } {
        set status "BAD (known problem)"
    if { $status == "" } { set status "OK" }

        set status "FAILED ($res)"

    _log_and_puts summary "CASE $group $gridname $casename: $status"
    set html_log "[_html_highlight [lindex $status 0] $summary]\n$html_log"
# Auxiliary procedure to put a message to both cout and a log variable (list)
proc _log_and_puts {logvar message} {
    if { $logvar != "" } {

        if [info exists log] {
            set log "$log$message\n"
# Auxiliary procedure to log the result of a single test case
proc _log_test_case {output logdir dir group grid casename logvar} {

    # check the result and make an HTML log
    _check_log $dir $group $grid $casename $output summary html_log
    set log "$log$summary"

    if { $logdir != "" } {
        _log_html $logdir/$group/$grid/$casename.html $html_log "Test $group $grid $casename"
        _log_save $logdir/$group/$grid/$casename.log "$output\n$summary" "Test $group $grid $casename"
# Auxiliary procedure to save a log to a file
proc _log_save {file log {title {}}} {
    # create missing directories as needed
    catch {file mkdir [file dirname $file]}

    if [catch {set fd [open $file w]} res] {
        error "Error saving log file $file: $res"
# Auxiliary procedure to save a log to a file in HTML format
proc _log_html {file log {title {}}} {
    # create missing directories as needed
    catch {file mkdir [file dirname $file]}

    if [catch {set fd [open $file w]} res] {
        error "Error saving log file $file: $res"

    puts $fd "<html><head><title>$title</title></head><body><h1>$title</h1>"

    # add images if present
    set imgbasename [file rootname [file tail $file]]
    foreach img [lsort [glob -nocomplain -directory [file dirname $file] -tails ${imgbasename}*.gif ${imgbasename}*.png ${imgbasename}*.jpg]] {
        puts $fd "<p><img src=\"$img\"/></p>"

    # print the body, then the footer, and close
    puts $fd "</pre></body></html>"
# Auxiliary procedure to make text with HTML highlighting according to status
proc _html_color {status} {
    # choose a color for the cell according to the result
    if { $status == "OK" } {
    } elseif { [regexp -nocase {^FAIL} $status] } {
    } elseif { [regexp -nocase {^BAD} $status] } {
    } elseif { [regexp -nocase {^IMP} $status] } {
    } elseif { [regexp -nocase {^SKIP} $status] } {
    } elseif { [regexp -nocase {^IGNOR} $status] } {

        puts "Warning: no color defined for status $status, using red as if FAILED"
# Format a text line in HTML to be colored according to the status
proc _html_highlight {status line} {
    return "<table><tr><td bgcolor=\"[_html_color $status]\">$line</td></tr></table>"
# Internal procedure to generate an HTML page presenting the log of the tests
# execution in tabular form, with links to reports on individual cases
proc _log_html_summary {logdir log totals regressions improvements total_time} {
    global _test_case_regexp

    # create missing directories as needed
    catch {file mkdir $logdir}

    # try to open a file and start HTML
    if [catch {set fd [open $logdir/summary.html w]} res] {
        error "Error creating log file: $res"

    # write HTML header, including a command to refresh the log if still in progress
    puts $fd "<html><head>"
    puts $fd "<title>Tests summary</title>"
    if { $total_time == "" } {
        puts $fd "<meta http-equiv=\"refresh\" content=\"10\">"

    puts $fd "<meta http-equiv=\"pragma\" content=\"NO-CACHE\">"
    puts $fd "</head><body>"
    set legend(OK)          "Test passed OK"
    set legend(FAILED)      "Test failed (regression)"
    set legend(BAD)         "Known problem"
    set legend(IMPROVEMENT) "Possible improvement (expected problem not detected)"
    set legend(SKIPPED)     "Test skipped due to lack of data file"
    puts $fd "<h1>Summary</h1><table>"
    foreach nbstat $totals {
        set status [lindex $nbstat 1]
        if { [info exists legend($status)] } {
            set comment $legend($status)

            set comment "User-defined status"

        puts $fd "<tr><td align=\"right\">[lindex $nbstat 0]</td><td bgcolor=\"[_html_color $status]\">$status</td><td>$comment</td></tr>"
    # time stamp and elapsed time info
    if { $total_time != "" } {
        puts $fd "<p>Generated on [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}] on [info hostname] <p> $total_time"

        puts $fd "<p>NOTE: This is an intermediate summary; the tests are still running! This page will refresh automatically until the tests are finished."
    # print regressions and improvements
    foreach featured [list $regressions $improvements] {
        if { [llength $featured] <= 1 } { continue }
        set status [string trim [lindex $featured 0] { :}]
        puts $fd "<h2>$status</h2>"

        foreach test [lrange $featured 1 end] {
            if { ! [regexp {^(.*)\s+([\w.]+)$} $test res gg name] } {

                set name "Error building short list; check details"

            if { $gg != $groupgrid } {
                if { $groupgrid != "" } { puts $fd "</tr>" }

                puts $fd "<tr><td>$gg</td>"

            puts $fd "<td bgcolor=\"[_html_color $status]\"><a href=\"[regsub -all { } $gg /]/${name}.html\">$name</a></td>"

        if { $groupgrid != "" } { puts $fd "</tr>" }

    puts $fd "<h1>Details</h1>"
    # process the log line-by-line
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is a case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {

        if { $grp != $group } {
            if { $letter != "" } { puts $fd "</tr></table>" }

            puts $fd "<h2>Group $group</h2>"

        if { $grd != $grid } {
            if { $letter != "" } { puts $fd "</tr></table>" }

            puts $fd "<h3>Grid $grid</h3>"

        # check if the test case name is <letter><digit>;
        # if not, set alnum to period "." to recognize a non-standard test name
        if { ! [regexp {([A-Za-z]+)([0-9]+)} $casename res alnum number] } {

        # start a new row when the letter changes or for non-standard names
        if { $alnum != $letter || $alnum == "." } {
            if { $letter != "" } {

            puts $fd "<table><tr>"

        puts $fd "<td bgcolor=\"[_html_color $result]\"><a href=\"$group/$grid/${casename}.html\">$casename</a></td>"

    puts $fd "</tr></table>"
    # add remaining lines of the log as plain text
    puts $fd "<h2>Plain text messages</h2>\n<pre>"
    foreach line [split $log "\n"] {
        if { ! [regexp $_test_case_regexp $line] } {

    # close the file and exit
# Procedure to dump summary logs of tests
proc _log_summarize {logdir log {total_time {}}} {

    # sort log records alphabetically to have the same behavior on Linux and Windows
    # (also needed if tests are run in parallel)
    set loglist [lsort -dictionary [split $log "\n"]]

    # classify test cases by status
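    # e.g. (hypothetical) the record "CASE bugs fclasses bug100: OK" yields
    # caseid "bugs fclasses bug100" and status "OK"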
    foreach line $loglist {
        if { [regexp {^CASE ([^:]*): ([[:alnum:]]+).*$} $line res caseid status] } {
            lappend stat($status) $caseid

    set improvements {Improvements:}
    set regressions {Failed:}
    if { [info exists stat] } {
        foreach status [lsort [array names stat]] {
            lappend totals [list [llength $stat($status)] $status]

            # separately count improvements (status starting with IMP) and regressions (all except IMP, OK, BAD, and SKIP)
            if { [regexp -nocase {^IMP} $status] } {
                eval lappend improvements $stat($status)
            } elseif { $status != "OK" && ! [regexp -nocase {^BAD} $status] && ! [regexp -nocase {^SKIP} $status] } {
                eval lappend regressions $stat($status)
    # if time is specified, add totals
    if { $total_time != "" } {
        if { [llength $improvements] > 1 } {
            _log_and_puts log [join $improvements "\n "]

        if { [llength $regressions] > 1 } {
            _log_and_puts log [join $regressions "\n "]

        if { [llength $improvements] == 1 && [llength $regressions] == 1 } {
            _log_and_puts log "No regressions"

        _log_and_puts log "Total cases: [join $totals {, }]"
        _log_and_puts log $total_time

    if { $logdir != "" } {
        _log_html_summary $logdir $log $totals $regressions $improvements $total_time
        _log_save $logdir/tests.log $log "Tests summary"
# Internal procedure to generate an XML log in JUnit style, for further
# consumption by Jenkins or similar systems.
#
# The output is intended to conform to the XML schema supported by Jenkins, found at
# https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd
#
# The mapping of the fields is inspired by the annotated schema of the Apache Ant JUnit XML format found at
# http://windyroad.org/dl/Open%20Source/JUnit.xsd
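#
# For illustration, the generated report has roughly this shape
# (hypothetical content):
#   <testsuites>
#     <testsuite name="bugs fclasses" tests="2" failures="1" errors="0" ...>
#       <testcase name="bug100" time="0.5" status="OK">
#         <system-out>...</system-out>
#       </testcase>
#     </testsuite>
#   </testsuites>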
proc _log_xml_summary {logdir filename log include_cout} {
    global _test_case_regexp

    catch {file mkdir [file dirname $filename]}

    # try to open a file and start XML
    if [catch {set fd [open $filename w]} res] {
        error "Error creating XML summary file $filename: $res"

    puts $fd "<?xml version='1.0' encoding='utf-8'?>"
    puts $fd "<testsuites>"

    # prototype for the command to generate a test suite tag
    set time_and_host "timestamp=\"[clock format [clock seconds] -format {%Y-%m-%dT%H:%M:%S}]\" hostname=\"[info hostname]\""
    set cmd_testsuite {puts $fd "<testsuite name=\"$group $grid\" tests=\"$nbtests\" failures=\"$nbfail\" errors=\"$nberr\" time=\"$time\" skipped=\"$nbskip\" $time_and_host>\n$testcases\n</testsuite>\n"}
    # sort the log and process it line-by-line
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is a case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {

        set message [string trim $message " \t\r\n()"]

        # start a new testsuite for each grid
        if { $grp != $group || $grd != $grid } {

            # write the previous test suite
            if [info exists testcases] { eval $cmd_testsuite }
        # parse the test log and get its CPU time
        if { [catch {set fdlog [open $logdir/$group/$grid/${casename}.log r]} ret] } {
            puts "Error: cannot open $logdir/$group/$grid/${casename}.log: $ret"

            while { [gets $fdlog logline] >= 0 } {
                if { $include_cout } {
                    set testout "$testout$logline\n"

                if [regexp -nocase {TOTAL CPU TIME:\s*([\d.]+)\s*sec} $logline res cpu] {
                    set add_cpu " time=\"$cpu\""
                    set time [expr $time + $cpu]

        if { ! $include_cout } {
            set testout "$line\n"
        # record the test case with its output and status;
        # mapping: SKIP* is reported as <error>, BAD* as <skipped>, and any other non-OK status as <failure>
        set testcases "$testcases\n <testcase name=\"$casename\"$add_cpu status=\"$result\">\n"
        set testcases "$testcases\n <system-out>\n$testout </system-out>"
        if { $result != "OK" } {
            if { [regexp -nocase {^SKIP} $result] } {
                set testcases "$testcases\n <error name=\"$result\" message=\"$message\"/>"
            } elseif { [regexp -nocase {^BAD} $result] } {
                set testcases "$testcases\n <skipped>$message</skipped>"

                set testcases "$testcases\n <failure name=\"$result\" message=\"$message\"/>"

        set testcases "$testcases\n </testcase>"
    # write the last test suite
    if [info exists testcases] { eval $cmd_testsuite }

    puts $fd "</testsuites>"
# define custom platform name
proc _tests_platform_def {} {
    global env tcl_platform

    if [info exists env(os_type)] { return }

    set env(os_type) $tcl_platform(platform)

    # use detailed mapping for various versions of Linux
    # (note that the mapping is rather non-uniform, for historical reasons)
    if { $env(os_type) == "unix" && ! [catch {exec cat /etc/issue} issue] } {
        if { [regexp {Mandriva[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) Mandriva$num
        } elseif { [regexp {Red Hat[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) RedHat$num
        } elseif { [regexp {Debian[ \tA-Za-z/]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) Debian$num$subnum
        } elseif { [regexp {CentOS[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) CentOS$num$subnum
        } elseif { [regexp {Scientific[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) SL$num$subnum
        } elseif { [regexp {Fedora Core[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) FedoraCore$num

        if { [exec uname -m] == "x86_64" } {
            set env(os_type) "$env(os_type)-64"
# Auxiliary procedure to split a path specification (usually defined by an
# environment variable) into a list of directories or files
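# For instance (hypothetical value), on Windows
#   _split_path {C:\occt\tests;D:\extra\tests}
# returns the list {C:/occt/tests D:/extra/tests}.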
proc _split_path {pathspec} {

    # first replace all \ (which might occur on Windows) by /
    regsub -all "\\\\" $pathspec "/" pathspec

    # split the path by the platform-specific separator
    return [split $pathspec [_path_separator]]
# Auxiliary procedure to define the platform-specific separator for directories in
# a path specification
proc _path_separator {} {

    # return the separator according to the platform
    if { $tcl_platform(platform) == "windows" } {
# Procedure to locate the data file for a test, given its name.
# The search is performed assuming that the function is called
# from a test case script; the search order is:
# - the directory where the test script is located
# - directories ../data and ../../data relative to the script dir
# - subdirectories <group>/<grid> and <group> of the directories listed in
#   the environment variable CSF_TestDataPath
# If the file is not found, a Tcl error is raised.
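# Example use in a test script (hypothetical file name):
#   set datafile [locate_data_file case1.brep]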
proc locate_data_file {filename} {
    global env groupname gridname casename

    set scriptfile [info script]
    if { $scriptfile == "" } {
        error "Error: This procedure (locate_data_file) is for use only in test scripts!"

    # check the sub-directories "data" of the grid and group directories of
    # the current test case in the paths indicated by CSF_TestScriptsPath
    if { [info exists groupname] && [info exists gridname] &&
         [info exists env(CSF_TestScriptsPath)] } {
        foreach dir [_split_path $env(CSF_TestScriptsPath)] {
            if { [file exists $dir/$groupname/$gridname/data/$filename] } {
                return [file normalize $dir/$groupname/$gridname/data/$filename]

            if { [file exists $dir/$groupname/data/$filename] } {
                return [file normalize $dir/$groupname/data/$filename]
    # check the sub-directories corresponding to the group and grid of
    # the current test case in the paths indicated by CSF_TestDataPath
    if { [info exists groupname] && [info exists env(CSF_TestDataPath)] } {
        foreach dir [_split_path $env(CSF_TestDataPath)] {
            if { [info exists gridname] && [file exists $dir/$groupname/$gridname/$filename] } {
                return [file normalize $dir/$groupname/$gridname/$filename]

            if { [file exists $dir/$groupname/$filename] } {
                return [file normalize $dir/$groupname/$filename]

    if { [file exists [uplevel datadir]/$filename] } {
        return [uplevel datadir]/$filename

    error [join [list "Error: file $filename could not be found either in the script" \
                      "directories or in the paths indicated by the CSF_TestDataPath environment variable"] "\n"]