1 # Copyright (c) 2012 OPEN CASCADE SAS
3 # The content of this file is subject to the Open CASCADE Technology Public
4 # License Version 6.5 (the "License"). You may not use the content of this file
5 # except in compliance with the License. Please obtain a copy of the License
6 # at http://www.opencascade.org and read it completely before using this file.
8 # The Initial Developer of the Original Code is Open CASCADE S.A.S., having its
9 # main offices at: 1, place des Freres Montgolfier, 78280 Guyancourt, France.
11 # The Original Code and all software distributed under the License is
12 # distributed on an "AS IS" basis, without warranty of any kind, and the
13 # Initial Developer hereby disclaims all such warranties, including without
14 # limitation, any warranties of merchantability, fitness for a particular
15 # purpose or non-infringement. Please see the License for the specific terms
16 # and conditions governing the rights and limitations under the License.
18 ############################################################################
19 # This file defines scripts for execution of OCCT tests.
20 # It should be loaded automatically when DRAW is started, and provides
21 # top-level commands starting with 'test'. Type 'help test' to get their synopsis.
23 # See OCCT Tests User Guide for description of the test system.
25 # Note: procedures with names starting with underscore are for internal use
26 # inside the test system.
27 ############################################################################
29 # Default verbose level for command _run_test
# NOTE(review): the assignment defining _tests_verbose itself (the line after
# this comment in the original) is absent from this capture; testgrid and
# _run_test read that global, so confirm it is defined earlier in the file.
32 # regexp for parsing test case results in summary log
# Capture groups: 1 = group, 2 = grid, 3 = case name, 4 = status word,
# 5 = trailing message text (used by testsummarize and _log_* helpers).
33 set _test_case_regexp {^CASE\s+([\w.-]+)\s+([\w.-]+)\s+([\w.-]+)\s*:\s*([\w]+)(.*)}
35 # Basic command to run indicated test case in DRAW
37 Run specified test case
38 Use: test group grid casename [echo=0]
39 - If echo is set to 0 (default), log is stored in memory and only summary
40 is output (the log can be obtained with command "dlog get")
41 - If echo is set to 1 or "-echo", all commands and results are echoed
42 immediately, but log is not saved and summary is not produced
proc test {group grid casename {echo 0}} {
    ## Run a single test case in the current DRAW session.
    ##
    ## group, grid, casename : identify the test script; resolved via
    ##     _get_test, which raises an error for invalid input.
    ## echo : 0 (default) - commands and output are collected via dlog and a
    ##     summary is produced by _check_log (full log available afterwards
    ##     with "dlog get"); 1 or "-echo" - everything is echoed immediately,
    ##     no log is saved and no summary is produced.

    # get test case paths (will raise error if input is invalid)
    _get_test $group $grid $casename dir gridname casefile

    # if echo specified as "-echo", convert it to bool
    if { "$echo" == "-echo" } { set echo t }

    # run the test case; uplevel keeps variables set by the test script
    # visible at the caller's level (as DRAW test scripts expect)
    uplevel _run_test $dir $group $gridname $casefile $echo

    # check the collected log and report the result; this is only possible
    # when echo is disabled, since otherwise no log was accumulated
    if { ! $echo } {
        _check_log $dir $group $gridname $casename [dlog get]
    }

    return
}
62 # Basic command to run indicated test case in DRAW
64 Run all tests, or specified group, or one grid
65 Use: testgrid [group [grid]] [options...]
67 -parallel N: run N parallel processes (default is number of CPUs, 0 to disable)
68 -refresh N: save summary logs every N seconds (default 60, minimal 1, 0 to disable)
69 -outdir dirname: set log directory (should be empty or non-existing)
70 -overwrite: force writing logs in existing non-empty directory
71 -xml filename: write XML report for Jenkins (in JUnit-like format)
73 proc testgrid {args} {
# testgrid: run all tests, one group, or one grid, each case in a separate
# DRAWEXE process (optionally in parallel via the Thread package), collecting
# per-case logs under $logdir and writing summary (and optional XML) reports.
# NOTE(review): this capture is an elided listing — stray original line
# numbers, stripped indentation, and gaps where lines such as closing braces,
# "incr narg", and the default assignments for refresh/logdir/overwrite/
# xmlfile were dropped. Comments below annotate only the visible logic.
74 global env tcl_platform _tests_verbose
76 ######################################################
78 ######################################################
80 # check that environment variable defining paths to test scripts is defined
81 if { ! [info exists env(CSF_TestScriptsPath)] ||
82 [llength $env(CSF_TestScriptsPath)] <= 0 } {
83 error "Error: Environment variable CSF_TestScriptsPath is not defined"
# default parallelism = number of CPUs; other option defaults are elided here
87 set parallel [_get_nb_cpus]
# option parsing: each "-opt value" branch consumes the following argument
# (the "incr narg" lines advancing past the value are elided in this capture)
92 for {set narg 0} {$narg < [llength $args]} {incr narg} {
93 set arg [lindex $args $narg]
96 if { $arg == "-parallel" } {
98 if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
99 set parallel [expr [lindex $args $narg]]
101 error "Option -parallel requires argument"
107 if { $arg == "-refresh" } {
109 if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
110 set refresh [expr [lindex $args $narg]]
112 error "Option -refresh requires argument"
118 if { $arg == "-outdir" } {
120 if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
121 set logdir [lindex $args $narg]
123 error "Option -outdir requires argument"
128 # allow overwrite logs
129 if { $arg == "-overwrite" } {
135 if { $arg == "-xml" } {
137 if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
138 set xmlfile [lindex $args $narg]
140 if { $xmlfile == "" } {
141 set xmlfile TESTS-summary.xml
147 if { [regexp {^-} $arg] } {
148 error "Error: unsupported option \"$arg\""
151 # treat arguments not recognized as options as group and grid names
152 if { ! [info exists groupname] } {
154 } elseif { ! [info exists gridname] } {
157 error "Error: cannot interpret argument $narg ($arg): both group and grid names are already defined by previous args!"
161 # check that target log directory is empty or does not exist
162 set logdir [file normalize [string trim $logdir]]
163 if { $logdir == "" } {
164 # if specified logdir is empty string, generate unique name like
165 # results_<branch>_<timestamp>
# "git branch" output marks the current branch with "*"; used only to make
# the default log directory name more informative (best-effort, catch-guarded)
167 if { ! [catch {exec git branch} gitout] &&
168 [regexp {[*] ([\w]+)} $gitout res branch] } {
169 set prefix "${prefix}_$branch"
171 set logdir "${prefix}_[clock format [clock seconds] -format {%Y-%m-%dT%H%M}]"
172 set logdir [file normalize $logdir]
174 if { [file isdirectory $logdir] && ! $overwrite && ! [catch {glob -directory $logdir *}] } {
175 error "Error: Specified log directory \"$logdir\" is not empty; please clean it before running tests"
177 if { [catch {file mkdir $logdir}] || ! [file writable $logdir] } {
178 error "Error: Cannot create directory \"$logdir\", or it is not writable"
181 ######################################################
182 # prepare list of tests to be performed
183 ######################################################
185 # list of tests, each defined by a list of:
186 # test scripts directory
187 # group (subfolder) name
188 # grid (subfolder) name
190 # path to test case file
193 # iterate by all script paths
194 foreach dir [_split_path $env(CSF_TestScriptsPath)] {
195 # protection against empty paths
196 set dir [string trim $dir]
197 if { $dir == "" } { continue }
199 if { $_tests_verbose > 0 } { _log_and_puts log "Examining tests directory $dir" }
201 # check that directory exists
202 if { ! [file isdirectory $dir] } {
203 _log_and_puts log "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
207 # if test group is specified, check that directory with given name exists in this dir
208 # if not, continue to the next test dir
209 if { [info exists groupname] && $groupname != "" } {
210 if { [file isdirectory $dir/$groupname] } {
211 set groups $groupname
216 # else search all directories in the current dir
217 if [catch {glob -directory $dir -tail -types d *} groups] { continue }
221 if { $_tests_verbose > 0 } { _log_and_puts log "Groups to be executed: $groups" }
222 foreach group [lsort -dictionary $groups] {
223 if { $_tests_verbose > 0 } { _log_and_puts log "Examining group directory $group" }
225 # file grids.list must exist: it defines sequence of grids in the group
226 if { ! [file exists $dir/$group/grids.list] } {
227 _log_and_puts log "Warning: directory $dir/$group does not contain file grids.list, skipped"
231 # read grids.list file and make a list of grids to be executed
233 set fd [open $dir/$group/grids.list]
235 while { [gets $fd line] >= 0 } {
238 # skip comments and empty lines
239 if { [regexp "\[ \t\]*\#.*" $line] } { continue }
240 if { [string trim $line] == "" } { continue }
242 # get grid id and name
# NOTE(review): $nline in the warning below is a line counter whose
# "incr nline" statement is elided in this capture — confirm upstream.
243 if { ! [regexp "^\(\[0-9\]+\)\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridid grid] } {
244 _log_and_puts log "Warning: cannot recognize line $nline in file $dir/$group/grids.list as \"gridid gridname\"; ignored"
248 # if specific grid is requested, check that it is present; otherwise make complete list
249 if { ! [info exists gridname] || $gridname == "" || $gridname == $gridid || $gridname == $grid } {
250 lappend gridlist $grid
255 # iterate by all grids
256 foreach grid $gridlist {
258 # check if this grid is aliased to another one
259 set griddir $dir/$group/$grid
260 if { [file exists $griddir/cases.list] } {
261 set fd [open $griddir/cases.list]
262 if { [gets $fd line] >= 0 } {
263 set griddir [file normalize $dir/$group/$grid/[string trim $line]]
268 # check if grid directory actually exists
269 if { ! [file isdirectory $griddir] } {
270 _log_and_puts log "Error: tests directory for grid $grid ($griddir) is missing; skipped"
274 # create directory for logging test results
275 if { $logdir != "" } { file mkdir $logdir/$group/$grid }
277 # iterate by all tests in the grid directory
278 if { [catch {glob -directory $griddir -type f *} testfiles] } { continue }
279 foreach casefile [lsort -dictionary $testfiles] {
280 # filter out begin and end files
281 set casename [file tail $casefile]
282 if { $casename == "begin" || $casename == "end" } { continue }
284 lappend tests_list [list $dir $group $grid $casename $casefile]
289 if { [llength $tests_list] < 1 } {
290 error "Error: no tests are found, check you input arguments and variable CSF_TestScriptsPath!"
293 ######################################################
295 ######################################################
297 # log command arguments and environment
298 set log "Command: testgrid $args\nHost: [info hostname]\nStarted on: [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}]\n"
299 set log "$log\nEnvironment:\n"
300 foreach envar [array names env] {
301 set log "$log$envar=\"$env($envar)\"\n"
305 set refresh_timer [clock seconds]
306 uplevel dchrono _timer reset
307 uplevel dchrono _timer start
309 # if parallel execution is requested, allocate thread pool
310 if { $parallel > 0 } {
311 if { ! [info exists tcl_platform(threaded)] || [catch {package require Thread}] } {
312 _log_and_puts log "Warning: Tcl package Thread is not available, running in sequential mode"
315 set worker [tpool::create -minworkers $parallel -maxworkers $parallel]
316 # suspend the pool until all jobs are posted, to prevent blocking of the process
317 # of starting / processing jobs by running threads
318 catch {tpool::suspend $worker}
319 if { $_tests_verbose > 0 } { _log_and_puts log "Executing tests in (up to) $parallel threads" }
# main dispatch loop: write a one-shot .tcl driver per case, then launch it
# in a fresh DRAWEXE instance (posted to the pool, or run synchronously)
324 foreach test_def $tests_list {
325 set dir [lindex $test_def 0]
326 set group [lindex $test_def 1]
327 set grid [lindex $test_def 2]
328 set casename [lindex $test_def 3]
329 set casefile [lindex $test_def 4]
331 # command to set tests for generation of image in results directory
333 if { $logdir != "" } { set imgdir_cmd "set imagedir $logdir/$group/$grid" }
335 # prepare command file for running test case in separate instance of DRAW
336 set fd_cmd [open $logdir/$group/$grid/${casename}.tcl w]
337 puts $fd_cmd "$imgdir_cmd"
338 puts $fd_cmd "set test_image $casename"
339 puts $fd_cmd "_run_test $dir $group $grid $casefile t"
341 # use dlog command to obtain complete output of the test when it is absent (i.e. since OCCT 6.6.0)
342 # note: this is not needed if echo is set to 1 in call to _run_test above
343 if { ! [catch {dlog get}] } {
344 puts $fd_cmd "puts \[dlog get\]"
346 # else try to use old-style QA_ variables to get more output...
349 set env(QA_print_command) 1
352 # final 'exit' is needed when running on Linux under VirtualGl
356 # commant to run DRAW with a command file;
357 # note that empty string is passed as standard input to avoid possible
358 # hang-ups due to waiting for stdin of the launching process
359 set command "exec <<{} DRAWEXE -f $logdir/$group/$grid/${casename}.tcl"
361 # alternative method to run without temporary file; disabled as it needs too many backslashes
363 # set command "exec <<\"\" DRAWEXE -c $imgdir_cmd\\\; set test_image $casename\\\; \
364 # _run_test $dir $group $grid $casefile\\\; \
365 # puts \\\[dlog get\\\]\\\; exit"
368 # run test case, either in parallel or sequentially
369 if { $parallel > 0 } {
371 set job [tpool::post -nowait $worker "catch \"$command\" output; return \$output"]
372 set job_def($job) [list $logdir $dir $group $grid $casename]
374 # sequential execution
375 catch {eval $command} output
376 _log_test_case $output $logdir $dir $group $grid $casename log
378 # update summary log with requested period
379 if { $logdir != "" && $refresh > 0 && [expr [clock seconds] - $refresh_timer > $refresh] } {
380 # update and dump summary
381 _log_summarize $logdir $log
382 set refresh_timer [clock seconds]
387 # get results of started threads
388 if { $parallel > 0 } {
389 catch {tpool::resume $worker}
# drain the pool: wait for any finished job, fold its output into the log,
# and forget it (job_def entries are unset in lines elided from this capture)
390 while { [llength [array names job_def]] > 0 } {
391 foreach job [tpool::wait $worker [array names job_def]] {
392 eval _log_test_case \[tpool::get $worker $job\] $job_def($job) log
396 # update summary log with requested period
397 if { $logdir != "" && $refresh > 0 && [clock seconds] > $refresh_timer + $refresh } {
398 _log_summarize $logdir $log
399 set refresh_timer [clock seconds]
402 # release thread pool
403 tpool::release $worker
406 uplevel dchrono _timer stop
407 set time [lindex [split [uplevel dchrono _timer show] "\n"] 0]
409 ######################################################
410 # output summary logs and exit
411 ######################################################
413 _log_summarize $logdir $log $time
414 if { $logdir != "" } {
415 puts "Detailed logs are saved in $logdir"
417 if { $logdir != "" && $xmlfile != "" } {
418 # XML output file is assumed relative to log dir unless it is absolute
419 if { [ file pathtype $xmlfile] == "relative" } {
420 set xmlfile [file normalize $logdir/$xmlfile]
422 _log_xml_summary $logdir $xmlfile $log 0
423 puts "XML summary is saved to $xmlfile"
429 # Procedure to regenerate summary log from logs of test cases
431 Regenerate summary log in the test directory from logs of test cases.
432 This can be necessary if test grids are executed separately (e.g. on
433 different stations) or some grids have been re-executed.
434 Use: testsummarize dir
436 proc testsummarize {dir} {
# testsummarize: rebuild the summary log of a finished (possibly partial)
# test run by re-reading every per-case *.log file under $dir/<group>/<grid>.
# NOTE(review): this capture elides several lines (initializers such as
# "set log \"\"" / "set nbfound 0", the "close $fd", and closing braces);
# comments annotate only the visible logic.
437 global _test_case_regexp
439 if { ! [file isdirectory $dir] } {
440 error "Error: \"$dir\" is not a directory"
443 # get summary statements from all test cases in one log
446 # to avoid huge listing of logs, first find all subdirectories and iterate
447 # by them, parsing log files in each subdirectory independently
448 foreach grid [glob -directory $dir -types d -tails */*] {
449 foreach caselog [glob -nocomplain -directory [file join $dir $grid] -types f -tails *.log] {
450 set file [file join $dir $grid $caselog]
452 set fd [open $file r]
453 while { [gets $fd line] >= 0 } {
# a CASE line must name the very test case whose log file it lives in;
# mismatch indicates a stale or misplaced log
454 if { [regexp $_test_case_regexp $line res grp grd cas status message] } {
455 if { "[file join $grid $caselog]" != "[file join $grp $grd ${cas}.log]" } {
456 puts "Error: $file contains status line for another test case ($line)"
458 set log "$log$line\n"
# each case log is expected to contribute exactly one CASE status line
464 if { $nbfound != 1 } {
465 puts "Error: $file contains $nbfound status lines, expected 1"
470 _log_summarize $dir $log "Summary regenerated from logs at [clock format [clock seconds]]"
474 # Procedure to compare results of two runs of test cases
476 Compare results of two executions of tests (CPU times, ...)
477 Use: testdiff dir1 dir2 [groupname [gridname]] [options...]
478 Where dir1 and dir2 are directories containing logs of two test runs.
480 -save filename: save resulting log in specified file (default name is
481 \$dir1/diff-\$dir2.log); HTML log is saved with same name
483 -status {same|ok|all}: filter cases for comparing by their status:
484 same - only cases with same status are compared (default)
485 ok - only cases with OK status in both logs are compared
486 all - results are compared regardless of status
488 1 - output only differences
489 2 - output also list of logs and directories present in one of dirs only
490 3 - (default) output also progress messages
492 proc testdiff {dir1 dir2 args} {
# testdiff: compare two test-run log directories (CPU times etc.), writing a
# text + HTML diff report. Non-option args narrow the comparison to a
# group/grid subpath.
# NOTE(review): elided capture — default assignments for status ("same"),
# verbose (3), basename (""), the "incr narg" lines consuming option values,
# and closing braces are missing here; comments annotate visible logic only.
493 if { "$dir1" == "$dir2" } {
494 error "Input directories are the same"
497 ######################################################
499 ######################################################
# default report path: inside dir1, named after dir2
502 set logfile [file join $dir1 "diff-[file tail $dir2].log"]
506 for {set narg 0} {$narg < [llength $args]} {incr narg} {
507 set arg [lindex $args $narg]
510 if { $arg == "-save" } {
512 if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
513 set logfile [lindex $args $narg]
515 error "Error: Option -save must be followed by log file name"
521 if { $arg == "-status" } {
523 if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
524 set status [lindex $args $narg]
525 } else { set status "" }
526 if { "$status" != "same" && "$status" != "all" && "$status" != "ok" } {
527 error "Error: Option -status must be followed by one of \"same\", \"all\", or \"ok\""
533 if { $arg == "-verbose" } {
535 if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
536 set verbose [expr [lindex $args $narg]]
538 error "Error: Option -verbose must be followed by integer verbose level"
543 if { [regexp {^-} $arg] } {
544 error "Error: unsupported option \"$arg\""
547 # non-option arguments form a subdirectory path
548 set basename [file join $basename $arg]
551 # run diff procedure (recursive)
552 _test_diff $dir1 $dir2 $basename $status $verbose log
554 # save result to log file
555 if { "$logfile" != "" } {
556 _log_save $logfile $log
557 _log_html_diff "[file rootname $logfile].html" $log $dir1 $dir2
558 puts "Log is saved to $logfile (and .html)"
564 # Procedure to check data file before adding it to repository
566 Check data file and prepare it for putting to test data files repository.
567 Use: testfile [filelist]
570 - data file (non-binary) is in DOS encoding (CR/LF)
571 - same data file (with same or another name) already exists in the repository
572 - another file with the same name already exists
573 Note that names are assumed to be case-insensitive (for Windows).
575 Unless the file is already in the repository, tries to load it, reports
576 the recognized file format, file size, number of faces and edges in the
577 loaded shape (if any), and makes snapshot (in the subdirectory tmp).
578 Finally it advises whether the file should be put to public section of the
581 proc testfile {filelist} {
# testfile: vet candidate data files before adding them to the test data
# repository — builds a registry of existing files (by lowercased name and by
# size), then checks each new file for DOS encoding, name clashes, duplicate
# content, and readability, and suggests a target subdirectory.
# NOTE(review): elided capture — "global env", the "found" flag handling,
# snapshot/classification branches, and closing braces are missing here;
# comments annotate visible logic only.
584 # check that CSF_TestDataPath is defined
585 if { ! [info exists env(CSF_TestDataPath)] } {
586 error "Environment variable CSF_TestDataPath must be defined!"
589 # build registry of existing data files (name -> path) and (size -> path)
590 puts "Checking available test data files..."
# breadth-first walk: $dir is used as a work queue of directories to visit
591 foreach dir [_split_path $env(CSF_TestDataPath)] {
592 while {[llength $dir] != 0} {
593 set curr [lindex $dir 0]
594 set dir [lrange $dir 1 end]
595 eval lappend dir [glob -nocomplain -directory $curr -type d *]
596 foreach file [glob -nocomplain -directory $curr -type f *] {
597 set name [file tail $file]
# names are compared case-insensitively (Windows file systems)
598 set name_lower [string tolower $name]
600 # check that the file is not in DOS encoding
601 if { [_check_dos_encoding $file] } {
602 puts "Warning: file $file is in DOS encoding; was this intended?"
604 _check_file_format $file
606 # check if file with the same name is present twice or more
607 if { [info exists names($name_lower)] } {
608 puts "Error: more than one file with name $name is present in the repository:"
609 if { [_diff_files $file $names($name_lower)] } {
610 puts "(files are different by content)"
612 puts "(files are same by content)"
615 puts "--> $names($name_lower)"
619 # check if file with the same content exists
# size match is used as a cheap pre-filter before byte-wise comparison
620 set size [file size $file]
621 if { [info exists sizes($size)] } {
622 foreach other $sizes($size) {
623 if { ! [_diff_files $file $other] } {
624 puts "Warning: two files with the same content found:"
631 # add the file to the registry
632 set names($name_lower) $file
633 lappend sizes($size) $file
637 if { [llength $filelist] <= 0 } { return }
639 # check the new files
641 puts "Checking new file(s)..."
642 foreach file $filelist {
643 # check for DOS encoding
644 if { [_check_dos_encoding $file] } {
645 puts "$file: Warning: DOS encoding detected"
648 set name [file tail $file]
649 set name_lower [string tolower $name]
651 # check for presence of the file with same name
652 if { [info exists names($name_lower)] } {
653 if { [_diff_files $file $names($name_lower)] } {
654 puts "$file: Error: name is already used by existing file\n--> $names($name_lower)"
656 puts "$file: OK: already in the repository \n--> $names($name_lower)"
661 # check if file with the same content exists
662 set size [file size $file]
663 if { [info exists sizes($size)] } {
665 foreach other $sizes($size) {
666 if { ! [_diff_files $file $other] } {
667 puts "$file: OK: the same file is already present under name [file tail $other]\n--> $other"
672 if { $found } { continue }
675 # try to read the file
# load_data_file is run at caller level so the shape "a" lands in DRAW scope
676 set format [_check_file_format $file]
677 if { [catch {uplevel load_data_file $file $format a}] } {
678 puts "$file: Error: Cannot read as $format file"
682 # get number of faces and edges
685 set nbs [uplevel nbshapes a]
686 regexp {EDGE[ \t:]*([0-9]+)} $nbs res edges
687 regexp {FACE[ \t:]*([0-9]+)} $nbs res faces
689 # classify; first check file size and number of faces and edges
# thresholds (95000 bytes, 20 faces, 100 edges) separate "small/public"
# candidates from larger models; the assignment of $dir per branch is elided
690 if { $size < 95000 && $faces < 20 && $edges < 100 } {
694 # check if one of names of that file corresponds to typical name for
695 # MDTV bugs or has extension .rle, this should be old model
696 if { [regexp -nocase {.*(cts|ats|pro|buc|ger|fra|usa|uki)[0-9]+.*} $name] ||
697 [regexp -nocase {[.]rle\y} $name] } {
703 puts "$file: $format size=[expr $size / 1024] KiB, nbfaces=$faces, nbedges=$edges -> $dir"
712 uplevel vdump tmp/$dir/[file rootname [file tail $file]].png
716 puts "Snapshots are saved in subdirectory tmp"
720 # Procedure to locate data file for test given its name.
721 # The search is performed assuming that the function is called
722 # from the test case script; the search order is:
723 # - subdirectory "data" of the test script (grid) folder
724 # - subdirectories in environment variable CSF_TestDataPath
725 # - subdirectory set by datadir command
726 # If file is not found, raises Tcl error.
proc locate_data_file {filename} {
    ## Locate a data file for a test case and return its normalized path.
    ## Search order:
    ##   1. subdirectory "data" of the directory of the running test script;
    ##   2. directories listed in CSF_TestDataPath (recursively);
    ##   3. the directory reported by the DRAW "datadir" command.
    ## Raises a Tcl error if the file cannot be found.
    ## (The argument list was garbled to "(unknown)" in the captured source;
    ## the body consistently reads $filename, so the parameter is restored.)
    global env groupname gridname casename

    # check if the file is located in the subdirectory data of the script dir
    set scriptfile [info script]
    if { $scriptfile != "" } {
        set path [file join [file dirname $scriptfile] data $filename]
        if { [file exists $path] } {
            return [file normalize $path]
        }
    }

    # check sub-directories in paths indicated by CSF_TestDataPath;
    # $dir acts as a work queue so nested subdirectories are visited too
    if { [info exists env(CSF_TestDataPath)] } {
        foreach dir [_split_path $env(CSF_TestDataPath)] {
            while {[llength $dir] != 0} {
                set name [lindex $dir 0]
                set dir [lrange $dir 1 end]
                eval lappend dir [glob -nocomplain -directory $name -type d *]
                if { [file exists $name/$filename] } {
                    return [file normalize $name/$filename]
                }
            }
        }
    }

    # check current datadir
    if { [file exists [uplevel datadir]/$filename] } {
        return [file normalize [uplevel datadir]/$filename]
    }

    # not found anywhere; report error
    error [join [list "Error: file $filename could not be found" \
                      "(should be in paths indicated by CSF_TestDataPath environment variable, " \
                      "or in subfolder data in the script directory)"] "\n"]
}
764 # Internal procedure to find test case indicated by group, grid, and test case names;
766 # - dir: path to the base directory of the tests group
767 # - gridname: actual name of the grid
768 # - casefile: path to the test case script
769 # if no such test is found, raises error with appropriate message
770 proc _get_test {group grid casename _dir _gridname _casefile} {
# _get_test: resolve a test case given group/grid/case names, writing the
# base directory, actual grid name, and script path into the caller's
# variables named by _dir/_gridname/_casefile; raises an error if not found.
# NOTE(review): elided capture — "global env", "upvar $_dir dir", loop
# "break"s, "close $fd", and closing braces are missing here; comments
# annotate only the visible logic.
772 upvar $_gridname gridname
773 upvar $_casefile casefile
777 # check that environment variable defining paths to test scripts is defined
778 if { ! [info exists env(CSF_TestScriptsPath)] ||
779 [llength $env(CSF_TestScriptsPath)] <= 0 } {
780 error "Error: Environment variable CSF_TestScriptsPath is not defined"
783 # iterate by all script paths
784 foreach dir [_split_path $env(CSF_TestScriptsPath)] {
785 # protection against empty paths
786 set dir [string trim $dir]
787 if { $dir == "" } { continue }
789 # check that directory exists
790 if { ! [file isdirectory $dir] } {
791 puts "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
795 # check if test group with given name exists in this dir
796 # if not, continue to the next test dir
797 if { ! [file isdirectory $dir/$group] } { continue }
799 # check that grid with given name (possibly alias) exists; stop otherwise
801 if { ! [file isdirectory $dir/$group/$gridname] } {
802 # check if grid is named by alias rather than by actual name
803 if { [file exists $dir/$group/grids.list] } {
804 set fd [open $dir/$group/grids.list]
805 while { [gets $fd line] >= 0 } {
806 if { [regexp "\[ \t\]*\#.*" $line] } { continue }
# grids.list maps "id name" pairs; this resolves a numeric alias to a name
807 if { [regexp "^$grid\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridname] } {
814 if { ! [file isdirectory $dir/$group/$gridname] } { continue }
816 # get actual file name of the script; stop if it cannot be found
817 set casefile $dir/$group/$gridname/$casename
818 if { ! [file exists $casefile] } {
819 # check if this grid is aliased to another one
820 if { [file exists $dir/$group/$gridname/cases.list] } {
821 set fd [open $dir/$group/$gridname/cases.list]
822 if { [gets $fd line] >= 0 } {
# first line of cases.list is a relative path to the aliased grid folder
823 set casefile [file normalize $dir/$group/$gridname/[string trim $line]/$casename]
828 if { [file exists $casefile] } {
834 # coming here means specified test is not found; report error
835 error [join [list "Error: test case $group / $grid / $casename is not found in paths listed in variable" \
836 "CSF_TestScriptsPath (current value is \"$env(CSF_TestScriptsPath)\")"] "\n"]
839 # Internal procedure to run test case indicated by base directory,
840 # grid and grid names, and test case file path.
841 # The log can be obtained by command "dlog get".
842 proc _run_test {scriptsdir group gridname casefile echo} {
# _run_test: execute one test case in the current interpreter — start the
# timer, snapshot memory, capture output via dlog (temporarily overriding
# "puts"), source the group/grid begin scripts, the case script, and the end
# scripts, then restore "puts" and append MEMORY/CPU statistics to the log.
# NOTE(review): elided capture — the "catch { ... } res" wrapper around
# script execution, the dlog enabling lines, the body of the puts override
# ("proc puts {args} { ... }"), and closing braces are missing here; comments
# annotate only the visible logic.
846 uplevel dchrono _timer reset
847 uplevel dchrono _timer start
# memory snapshot before the test; meminfo may be absent, hence catch
848 catch {uplevel meminfo w} membase
850 # enable commands logging; switch to old-style mode if dlog command is not present
852 if { [catch {dlog reset}] } {
# override puts so that test output is mirrored into the dlog buffer;
# the original command is kept under the name puts-saved
859 rename puts puts-saved
861 global _tests_verbose
863 # log only output to stdout and stderr, not to file!
864 if {[llength $args] > 1} {
865 set optarg [lindex $args end-1]
866 if { $optarg == "stdout" || $optarg == "stderr" || $optarg == "-newline" } {
867 dlog add [lindex $args end]
870 dlog add [lindex $args end]
873 # reproduce original puts
874 if { $_tests_verbose } {
875 eval puts-saved $args
# export case identity to the caller's scope for use by begin/end scripts
882 uplevel set casename [file tail $casefile]
883 uplevel set groupname $group
884 uplevel set gridname $gridname
886 if { [file exists $scriptsdir/$group/begin] } {
887 puts "Executing $scriptsdir/$group/begin..."; flush stdout
888 uplevel source $scriptsdir/$group/begin
890 if { [file exists $scriptsdir/$group/$gridname/begin] } {
891 puts "Executing $scriptsdir/$group/$gridname/begin..."; flush stdout
892 uplevel source $scriptsdir/$group/$gridname/begin
895 puts "Executing $casefile..."; flush stdout
896 uplevel source $casefile
898 if { [file exists $scriptsdir/$group/$gridname/end] } {
899 puts "Executing $scriptsdir/$group/$gridname/end..."; flush stdout
900 uplevel source $scriptsdir/$group/$gridname/end
902 if { [file exists $scriptsdir/$group/end] } {
903 puts "Executing $scriptsdir/$group/end..."; flush stdout
904 uplevel source $scriptsdir/$group/end
# $res presumably holds the message of a caught error from the elided
# catch wrapper around the sourcing above — TODO confirm against full file
907 puts "Tcl Exception: $res"
911 if { $dlog_exists } {
# restore the original puts command
916 rename puts-saved puts
921 # stop cpulimit killer if armed by the test
924 # add memory and timing info
926 if { ! [catch {uplevel meminfo w} memuse] } {
927 set stats "MEMORY DELTA: [expr ($memuse - $membase) / 1024] KiB\n"
929 uplevel dchrono _timer stop
930 set time [uplevel dchrono _timer show]
931 if [regexp -nocase {CPU user time:[ \t]*([0-9.e-]+)} $time res cpu] {
932 set stats "${stats}TOTAL CPU TIME: $cpu sec\n"
934 if { $dlog_exists && ! $echo } {
941 # Internal procedure to check log of test execution and decide if it passed or failed
942 proc _check_log {dir group gridname casename log {_summary {}} {_html_log {}}} {
# _check_log: scan a test-case log against the parse.rules "bad word"
# patterns and TODO declarations, decide the case status (OK / FAILED /
# IMPROVEMENT / BAD ...), emit a one-line CASE summary, and build an
# HTML-highlighted copy of the log. Results are returned through the
# optional _summary/_html_log upvar names.
# NOTE(review): elided capture — initializers (status/html_log/todos/
# todo_count), "close $fd", the catch wrapper producing $res, loop breaks,
# and closing braces are missing here; comments annotate visible logic only.
944 if { $_summary != "" } { upvar $_summary summary }
945 if { $_html_log != "" } { upvar $_html_log html_log }
951 # load definition of 'bad words' indicating test failure
952 # note that rules are loaded in the order of decreasing priority (grid - group - common),
953 # thus grid rules will override group ones
955 foreach rulesfile [list $dir/$group/$gridname/parse.rules $dir/$group/parse.rules $dir/parse.rules] {
956 if [catch {set fd [open $rulesfile r]}] { continue }
957 while { [gets $fd line] >= 0 } {
958 # skip comments and empty lines
959 if { [regexp "\[ \t\]*\#.*" $line] } { continue }
960 if { [string trim $line] == "" } { continue }
# each rule line has the form: status / regexp / optional comment
962 if { ! [regexp {^([^/]*)/([^/]*)/(.*)$} $line res status rexp comment] } {
963 puts "Warning: cannot recognize parsing rule \"$line\" in file $rulesfile"
966 set status [string trim $status]
967 if { $comment != "" } { set status "$status ([string trim $comment])" }
968 set rexp [regsub -all {\\b} $rexp {\\y}] ;# convert regexp from Perl to Tcl style
969 lappend badwords [list $status $rexp]
973 if { [llength $badwords] <= 0 } {
974 puts "Warning: no definition of error indicators found (check files parse.rules)"
977 # analyse log line-by-line
980 foreach line [split $log "\n"] {
981 # check if line defines specific treatment of some messages
982 if [regexp -nocase {^[ \t]*TODO ([^:]*):(.*)$} $line res platforms pattern] {
983 if { ! [regexp -nocase {\mAll\M} $platforms] &&
984 ! [regexp -nocase "\\m$env(os_type)\\M" $platforms] } {
985 set html_log "$html_log\n$line"
986 continue ;# TODO statement is for another platform
989 # record TODOs that mark unstable cases
# a "?" in the platforms field marks the TODO as unstable (may not occur)
990 if { [regexp {[\?]} $platforms] } {
991 set todos_unstable([llength $todos]) 1
994 lappend todos [regsub -all {\\b} [string trim $pattern] {\\y}] ;# convert regexp from Perl to Tcl style
995 set html_log "$html_log\n[_html_highlight BAD $line]"
999 # check for presence of messages indicating test result
1001 foreach bw $badwords {
1002 if { [regexp [lindex $bw 1] $line] } {
1003 # check if this is known bad case
1005 for {set i 0} {$i < [llength $todos]} {incr i} {
1006 if { [regexp [lindex $todos $i] $line] } {
1009 set html_log "$html_log\n[_html_highlight BAD $line]"
1014 # if it is not in todo, define status
1015 if { ! $is_known } {
1016 set stat [lindex $bw 0 0]
1017 set html_log "$html_log\n[_html_highlight $stat $line]"
# first non-OK, non-IGNORE match fixes the case status
1018 if { $status == "" && $stat != "OK" && ! [regexp -nocase {^IGNOR} $stat] } {
1019 set status [lindex $bw 0]
1026 if { ! $ismarked } {
1027 set html_log "$html_log\n$line"
1031 # check for presence of TEST COMPLETED statement
1032 if { $status == "" && ! [regexp {TEST COMPLETED} $log] } {
1033 # check whether absence of TEST COMPLETED is known problem
1034 set i [lsearch $todos "TEST INCOMPLETE"]
1038 set status "FAILED (no final message is found)"
1042 # check declared bad cases and diagnose possible improvement
1043 # (bad case declared but not detected).
1044 # Note that absence of the problem marked by TODO with question mark
1045 # (unstable) is not reported as improvement.
1046 if { $status == "" } {
1047 for {set i 0} {$i < [llength $todos]} {incr i} {
1048 if { ! [info exists todos_unstable($i)] &&
1049 (! [info exists todo_count($i)] || $todo_count($i) <= 0) } {
1050 set status "IMPROVEMENT (expected problem TODO no. [expr $i + 1] is not detected)"
1056 # report test as known bad if at least one of expected problems is found
1057 if { $status == "" && [llength [array names todo_count]] > 0 } {
1058 set status "BAD (known problem)"
1062 if { $status == "" } {set status "OK" }
# $res presumably carries the message of a caught error from an elided
# catch wrapper around the analysis above — TODO confirm against full file
1065 set status "FAILED ($res)"
1069 _log_and_puts summary "CASE $group $gridname $casename: $status"
1070 set html_log "[_html_highlight [lindex $status 0] $summary]\n$html_log"
1073 # Auxiliary procedure putting message to both cout and log variable (list)
1074 proc _log_and_puts {logvar message} {
# Appends "$message\n" to the variable named by $logvar (when non-empty) and
# echoes it to stdout.
# NOTE(review): several lines of this proc are elided in this chunk; the
# binding of local "log" to the caller's $logvar (presumably via upvar) and
# the final puts are not visible here - confirm against the full file.
1075 if { $logvar != "" } {
# either extend an already-existing log, or start a fresh one
1077 if [info exists log] {
1078 set log "$log$message\n"
1080 set log "$message\n"
1086 # Auxiliary procedure to log result on single test case
1087 proc _log_test_case {output logdir dir group grid casename logvar} {
# Records the result of a single test case: runs _check_log to obtain a
# summary line and an HTML-highlighted log, appends the summary to the
# caller's log variable, and (if $logdir is set) writes per-case .html and
# .log files under $logdir/$group/$grid.
# NOTE(review): the upvar binding of "log" to $logvar is elided in this
# chunk - confirm against the full file.
1090 # check result and make HTML log
1091 _check_log $dir $group $grid $casename $output summary html_log
1092 set log "$log$summary"
# dump log files only when a log directory is configured
1095 if { $logdir != "" } {
1096 _log_html $logdir/$group/$grid/$casename.html $html_log "Test $group $grid $casename"
1097 _log_save $logdir/$group/$grid/$casename.log "$output\n$summary" "Test $group $grid $casename"
1101 # Auxiliary procedure to save log to file
1102 proc _log_save {file log {title {}}} {
# Saves plain-text $log to $file, creating parent directories as needed.
# Raises an error if the file cannot be opened for writing; $title is
# accepted for interface symmetry with _log_html.
1103 # create missing directories as needed
1104 catch {file mkdir [file dirname $file]}
1106 # try to open a file
1107 if [catch {set fd [open $file w]} res] {
1108 error "Error saving log file $file: $res"
1111 # dump log and close
1118 # Auxiliary procedure to make a (relative if possible) URL to a file for
1119 # inclusion as a reference in HTML log
1120 proc _make_url {htmldir file} {
# Builds a URL referencing $file from an HTML page located in $htmldir:
# relative ("../"-based) when the two paths share a common prefix, otherwise
# an absolute file:// URL.
1121 set htmlpath [file split [file normalize $htmldir]]
1122 set filepath [file split [file normalize $file]]
# walk both normalized paths until the first differing component
1123 for {set i 0} {$i < [llength $htmlpath]} {incr i} {
1124 if { "[lindex $htmlpath $i]" != "[lindex $filepath $i]" } {
# no common root at all (e.g. different drives) - fall through to file://
1125 if { $i == 0 } { break }
# climb out of the html dir, then descend into the file's remainder
1126 return "[string repeat "../" [expr [llength $htmlpath] - $i - 1]][eval file join [lrange $filepath $i end]]"
1130 # if relative path could not be made, return full file URL
1131 return "file://[file normalize $file]"
1134 # Auxiliary procedure to save log to file in HTML format
1135 proc _log_html {file log {title {}}} {
# Saves $log to $file as an HTML page titled $title. Images matching the
# file's base name (gif/png/jpg, same directory) are embedded, and lines of
# the form "Executing <script>" are turned into hyperlinks to the script.
1136 # create missing directories as needed
1137 catch {file mkdir [file dirname $file]}
1139 # try to open a file
1140 if [catch {set fd [open $file w]} res] {
1141 error "Error saving log file $file: $res"
1145 puts $fd "<html><head><title>$title</title></head><body><h1>$title</h1>"
1147 # add images if present
1148 set imgbasename [file rootname [file tail $file]]
1149 foreach img [lsort [glob -nocomplain -directory [file dirname $file] -tails ${imgbasename}*.gif ${imgbasename}*.png ${imgbasename}*.jpg]] {
1150 puts $fd "<p><img src=\"$img\"/><p>"
1153 # print log body, trying to add HTML links to script files on lines like
1154 # "Executing <filename>..."
1156 foreach line [split $log "\n"] {
# only link when the referenced script actually exists on disk
1157 if { [regexp {Executing[ \t]+([a-zA-Z0-9._/:-]+[^.])} $line res script] &&
1158 [file exists $script] } {
1159 set line [regsub $script $line "<a href=\"[_make_url $file $script]\">$script</a>"]
1163 puts $fd "</pre></body></html>"
1169 # Auxiliary method to make text with HTML highlighting according to status
1170 proc _html_color {status} {
# Maps a test status string to an HTML background color. Recognized statuses
# (case-insensitive prefixes): OK, FAIL*, BAD*, IMP*, SKIP*, IGNOR*;
# anything else warns and is colored as FAILED.
# NOTE(review): the actual color values returned by each branch are elided
# in this chunk.
1171 # choose a color for the cell according to result
1172 if { $status == "OK" } {
1174 } elseif { [regexp -nocase {^FAIL} $status] } {
1176 } elseif { [regexp -nocase {^BAD} $status] } {
1178 } elseif { [regexp -nocase {^IMP} $status] } {
1180 } elseif { [regexp -nocase {^SKIP} $status] } {
1182 } elseif { [regexp -nocase {^IGNOR} $status] } {
1185 puts "Warning: no color defined for status $status, using red as if FAILED"
1190 # Format text line in HTML to be colored according to the status
1191 proc _html_highlight {status line} {
# Wraps $line in a one-cell HTML table whose background color reflects
# $status (see _html_color).
1192 return "<table><tr><td bgcolor=\"[_html_color $status]\">$line</td></tr></table>"
1195 # Internal procedure to generate HTML page presenting log of the tests
1196 # execution in tabular form, with links to reports on individual cases
1197 proc _log_html_summary {logdir log totals regressions improvements total_time} {
# Generates $logdir/summary.html: a totals table with a legend, lists of
# regressions/improvements with links to per-case pages, a per-group/grid
# details grid, and any non-CASE log lines as plain text. An empty
# $total_time means the run is still in progress, so the page auto-refreshes.
1198 global _test_case_regexp
1200 # create missing directories as needed
1201 catch {file mkdir $logdir}
1203 # try to open a file and start HTML
1204 if [catch {set fd [open $logdir/summary.html w]} res] {
1205 error "Error creating log file: $res"
1208 # write HTML header, including command to refresh log if still in progress
1209 puts $fd "<html><head>"
1210 puts $fd "<title>Tests summary</title>"
1211 if { $total_time == "" } {
1212 puts $fd "<meta http-equiv=\"refresh\" content=\"10\">"
1214 puts $fd "<meta http-equiv=\"pragma\" content=\"NO-CACHE\">"
1215 puts $fd "</head><body>"
# legend explaining the standard statuses in the totals table
1218 set legend(OK) "Test passed OK"
1219 set legend(FAILED) "Test failed (regression)"
1220 set legend(BAD) "Known problem"
1221 set legend(IMPROVEMENT) "Possible improvement (expected problem not detected)"
1222 set legend(SKIPPED) "Test skipped due to lack of data file"
1223 puts $fd "<h1>Summary</h1><table>"
# $totals is a list of {count status} pairs
1224 foreach nbstat $totals {
1225 set status [lindex $nbstat 1]
1226 if { [info exists legend($status)] } {
1227 set comment $legend($status)
1229 set comment "User-defined status"
1231 puts $fd "<tr><td align=\"right\">[lindex $nbstat 0]</td><td bgcolor=\"[_html_color $status]\">$status</td><td>$comment</td></tr>"
1235 # time stamp and elapsed time info
1236 if { $total_time != "" } {
1237 puts $fd "<p>Generated on [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}] on [info hostname] <p> $total_time"
1239 puts $fd "<p>NOTE: This is intermediate summary; the tests are still running! This page will refresh automatically until tests are finished."
1242 # print regressions and improvements
# first element of each list is a title (e.g. "Failed:"); skip empty lists
1243 foreach featured [list $regressions $improvements] {
1244 if { [llength $featured] <= 1 } { continue }
1245 set status [string trim [lindex $featured 0] { :}]
1246 puts $fd "<h2>$status</h2>"
1249 foreach test [lrange $featured 1 end] {
# each entry is "group grid casename"; split off the case name
1250 if { ! [regexp {^(.*)\s+([\w.]+)$} $test res gg name] } {
1252 set name "Error building short list; check details"
# start a new table row whenever the group/grid prefix changes
1254 if { $gg != $groupgrid } {
1255 if { $groupgrid != "" } { puts $fd "</tr>" }
1257 puts $fd "<tr><td>$gg</td>"
1259 puts $fd "<td bgcolor=\"[_html_color $status]\"><a href=\"[regsub -all { } $gg /]/${name}.html\">$name</a></td>"
1261 if { $groupgrid != "" } { puts $fd "</tr>" }
1266 puts $fd "<h1>Details</h1>"
1268 # process log line-by-line
1271 foreach line [lsort -dictionary [split $log "\n"]] {
1272 # check that the line is case report in the form "CASE group grid name: result (explanation)"
1273 if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
# new group: close any open table and start a new section
1278 if { $grp != $group } {
1279 if { $letter != "" } { puts $fd "</tr></table>" }
1283 puts $fd "<h2>Group $group</h2>"
# new grid within the group
1287 if { $grd != $grid } {
1288 if { $letter != "" } { puts $fd "</tr></table>" }
1291 puts $fd "<h3>Grid $grid</h3>"
1294 # check if test case name is <letter><digit>;
1295 # if not, set alnum to period "." to recognize non-standard test name
1296 if { ! [regexp {([A-Za-z]+)([0-9]+)} $casename res alnum number] } {
1300 # start new row when letter changes or for non-standard names
1301 if { $alnum != $letter || $alnum == "." } {
1302 if { $letter != "" } {
1303 puts $fd "</tr><tr>"
1305 puts $fd "<table><tr>"
1310 puts $fd "<td bgcolor=\"[_html_color $result]\"><a href=\"$group/$grid/${casename}.html\">$casename</a></td>"
1312 puts $fd "</tr></table>"
1314 # add remaining lines of log as plain text
1315 puts $fd "<h2>Plain text messages</h2>\n<pre>"
1316 foreach line [split $log "\n"] {
1317 if { ! [regexp $_test_case_regexp $line] } {
1323 # close file and exit
1329 # Procedure to dump summary logs of tests
1330 proc _log_summarize {logdir log {total_time {}}} {
# Produces the final summary of a test run: classifies CASE lines in $log by
# status, builds totals plus lists of improvements (IMP*) and regressions
# (everything except OK/BAD*/SKIP*), appends them to the log when
# $total_time is given, and writes summary.html and tests.log to $logdir.
1332 # sort log records alphabetically to have the same behavior on Linux and Windows
1333 # (also needed if tests are run in parallel)
1334 set loglist [lsort -dictionary [split $log "\n"]]
1336 # classify test cases by status
1337 foreach line $loglist {
1338 if { [regexp {^CASE ([^:]*): ([[:alnum:]]+).*$} $line res caseid status] } {
1339 lappend stat($status) $caseid
# first element of each list is its title; entries are appended after it
1343 set improvements {Improvements:}
1344 set regressions {Failed:}
1345 if { [info exists stat] } {
1346 foreach status [lsort [array names stat]] {
1347 lappend totals [list [llength $stat($status)] $status]
1349 # separately count improvements (status starting with IMP) and regressions (all except IMP, OK, BAD, and SKIP)
1350 if { [regexp -nocase {^IMP} $status] } {
1351 eval lappend improvements $stat($status)
1352 } elseif { $status != "OK" && ! [regexp -nocase {^BAD} $status] && ! [regexp -nocase {^SKIP} $status] } {
1353 eval lappend regressions $stat($status)
1358 # if time is specified, add totals
1359 if { $total_time != "" } {
# length 1 means only the title element, i.e. the list is empty
1360 if { [llength $improvements] > 1 } {
1361 _log_and_puts log [join $improvements "\n "]
1363 if { [llength $regressions] > 1 } {
1364 _log_and_puts log [join $regressions "\n "]
1366 if { [llength $improvements] == 1 && [llength $regressions] == 1 } {
1367 _log_and_puts log "No regressions"
1369 _log_and_puts log "Total cases: [join $totals {, }]"
1370 _log_and_puts log $total_time
# dump the summary files when a log directory is configured
1374 if { $logdir != "" } {
1375 _log_html_summary $logdir $log $totals $regressions $improvements $total_time
1376 _log_save $logdir/tests.log $log "Tests summary"
1382 # Internal procedure to generate XML log in JUnit style, for further
1383 # consumption by Jenkins or similar systems.
1385 # The output is intended to conform to XML schema supported by Jenkins found at
1386 # https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd
1388 # The mapping of the fields is inspired by annotated schema of Apache Ant JUnit XML format found at
1389 # http://windyroad.org/dl/Open%20Source/JUnit.xsd
1390 proc _log_xml_summary {logdir filename log include_cout} {
# Writes a JUnit-style XML summary of the run to $filename for consumption
# by CI systems (see schema links above). One <testsuite> is emitted per
# group/grid; per-case CPU time is parsed from the case's .log file under
# $logdir. When $include_cout is true, the full case log is embedded in
# <system-out>, otherwise only the CASE summary line.
1391 global _test_case_regexp
1393 catch {file mkdir [file dirname $filename]}
1395 # try to open a file and start XML
1396 if [catch {set fd [open $filename w]} res] {
1397 error "Error creating XML summary file $filename: $res"
1399 puts $fd "<?xml version='1.0' encoding='utf-8'?>"
1400 puts $fd "<testsuites>"
1402 # prototype for command to generate test suite tag
1403 set time_and_host "timestamp=\"[clock format [clock seconds] -format {%Y-%m-%dT%H:%M:%S}]\" hostname=\"[info hostname]\""
# evaluated later via eval: substitutes the per-suite counters accumulated below
1404 set cmd_testsuite {puts $fd "<testsuite name=\"$group $grid\" tests=\"$nbtests\" failures=\"$nbfail\" errors=\"$nberr\" time=\"$time\" skipped=\"$nbskip\" $time_and_host>\n$testcases\n</testsuite>\n"}
1406 # sort log and process it line-by-line
1408 foreach line [lsort -dictionary [split $log "\n"]] {
1409 # check that the line is case report in the form "CASE group grid name: result (explanation)"
1410 if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
1413 set message [string trim $message " \t\r\n()"]
1415 # start new testsuite for each grid
1416 if { $grp != $group || $grd != $grid } {
1418 # write previous test suite
1419 if [info exists testcases] { eval $cmd_testsuite }
1434 # parse test log and get its CPU time
1437 if { [catch {set fdlog [open $logdir/$group/$grid/${casename}.log r]} ret] } {
1438 puts "Error: cannot open $logdir/$group/$grid/${casename}.log: $ret"
1440 while { [gets $fdlog logline] >= 0 } {
1441 if { $include_cout } {
1442 set testout "$testout$logline\n"
1444 if [regexp -nocase {TOTAL CPU TIME:\s*([\d.]+)\s*sec} $logline res cpu] {
1445 set add_cpu " time=\"$cpu\""
1446 set time [expr $time + $cpu]
1451 if { ! $include_cout } {
1452 set testout "$line\n"
1455 # record test case with its output and status
1456 # Mapping is: SKIPPED, BAD, and OK to OK, all other to failure
1457 set testcases "$testcases\n <testcase name=\"$casename\"$add_cpu status=\"$result\">\n"
1458 set testcases "$testcases\n <system-out>\n$testout </system-out>"
1459 if { $result != "OK" } {
1460 if { [regexp -nocase {^SKIP} $result] } {
# skipped cases are reported as <error> (missing data file)
1462 set testcases "$testcases\n <error name=\"$result\" message=\"$message\"/>"
1463 } elseif { [regexp -nocase {^BAD} $result] } {
# known problems map to <skipped> so they do not fail the CI build
1465 set testcases "$testcases\n <skipped>$message</skipped>"
1468 set testcases "$testcases\n <failure name=\"$result\" message=\"$message\"/>"
1471 set testcases "$testcases\n </testcase>"
1474 # write last test suite
1475 if [info exists testcases] { eval $cmd_testsuite }
1478 puts $fd "</testsuites>"
1483 # define custom platform name
1484 proc _tests_platform_def {} {
# Defines env(os_type) describing the current platform, unless the user has
# already set it. On unix, refines the generic platform name using
# /etc/issue (distribution + version) and appends "-64" on x86_64.
1485 global env tcl_platform
# respect a user-provided override
1487 if [info exists env(os_type)] { return }
1489 set env(os_type) $tcl_platform(platform)
1491 # use detailed mapping for various versions of Linux
1492 # (note that mapping is rather non-uniform, for historical reasons)
1493 if { $env(os_type) == "unix" && ! [catch {exec cat /etc/issue} issue] } {
1494 if { [regexp {Mandriva[ \tA-Za-z]+([0-9]+)} $issue res num] } {
1495 set env(os_type) Mandriva$num
1496 } elseif { [regexp {Red Hat[ \tA-Za-z]+([0-9]+)} $issue res num] } {
1497 set env(os_type) RedHat$num
1498 } elseif { [regexp {Debian[ \tA-Za-z/]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
1499 set env(os_type) Debian$num$subnum
1500 } elseif { [regexp {CentOS[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
1501 set env(os_type) CentOS$num$subnum
1502 } elseif { [regexp {Scientific[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
1503 set env(os_type) SL$num$subnum
1504 } elseif { [regexp {Fedora Core[ \tA-Za-z]+([0-9]+)} $issue res num] } {
1505 set env(os_type) FedoraCore$num
# mark 64-bit systems explicitly
1507 if { [exec uname -m] == "x86_64" } {
1508 set env(os_type) "$env(os_type)-64"
1514 # Auxiliary procedure to split path specification (usually defined by
1515 # environment variable) into list of directories or files
1516 proc _split_path {pathspec} {
# Splits a path-list specification (e.g. the content of an environment
# variable) into a Tcl list of directories/files, normalizing backslashes
# and using the platform-specific separator (see _path_separator).
1519 # first replace all \ (which might occur on Windows) by /
1520 regsub -all "\\\\" $pathspec "/" pathspec
1522 # split path by platform-specific separator
1523 return [split $pathspec [_path_separator]]
1526 # Auxiliary procedure to define platform-specific separator for directories in
1527 # path specification
1528 proc _path_separator {} {
# Returns the separator used between entries of a path-list specification;
# chosen per platform (Windows vs. others).
# NOTE(review): the actual returned values and the global declaration of
# tcl_platform are elided in this chunk - confirm against the full file.
1531 # choose separator depending on platform
1532 if { $tcl_platform(platform) == "windows" } {
1539 # Procedure to make a diff and common of two lists
1540 proc _list_diff {list1 list2 _in1 _in2 _common} {
# Computes the set difference and intersection of two lists; results are
# stored in the caller's variables named by $_in1 (only in list1), $_in2
# (only in list2) and $_common (in both).
# NOTE(review): the upvar bindings for $_in1/$_in2 and the lappend calls
# filling them are elided in this chunk.
1543 upvar $_common common
# collect elements present in both lists
1548 foreach item $list1 {
1549 if { [lsearch -exact $list2 $item] >= 0 } {
1550 lappend common $item
# elements of list2 absent from common are unique to list2
1555 foreach item $list2 {
1556 if { [lsearch -exact $common $item] < 0 } {
1563 # procedure to load a file to Tcl string
# Reads the whole file into a string (trailing newline stripped) and returns it.
# NOTE(review): the argument spec below appears garbled by extraction -
# "(unknown)" is almost certainly "{filename}" in the original, since the
# body reads $filename; confirm against the full file. The close/return
# lines are also elided in this chunk.
1564 proc _read_file (unknown) {
1565 set fd [open $filename r]
1566 set result [read -nonewline $fd]
1571 # procedure to construct name for the image diff file
1572 proc _diff_img_name {dir1 dir2 casepath imgfile} {
# Builds the path of the image-difference file for $imgfile: stored in
# $dir1/$casepath, named "diff-<tail of dir2>-<imgfile>".
1573 return [file join $dir1 $casepath "diff-[file tail $dir2]-$imgfile"]
1576 # Procedure to compare results of two runs of test cases
1577 proc _test_diff {dir1 dir2 basename status verbose _logvar {_statvar ""}} {
# Recursively compares two result trees ($dir1 vs $dir2) under relative path
# $basename, logging differences in case status, CPU time, memory delta and
# images to the caller's log variable ($_logvar). $status filters which
# cases are compared further ("all", "ok", ...); $verbose controls chatter.
# $_statvar, when given, names a caller array accumulating cpu/mem totals
# across the recursion; at the top level (empty $_statvar) the totals are
# reported at the end.
1580 # make sure to load diffimage command
1581 uplevel pload VISUALIZATION
1583 # prepare variable (array) for collecting statistics
1584 if { "$_statvar" != "" } {
1585 upvar $_statvar stat
1594 # first check subdirectories
1595 set path1 [file join $dir1 $basename]
1596 set path2 [file join $dir2 $basename]
1597 set list1 [glob -directory $path1 -types d -tails -nocomplain *]
1598 set list2 [glob -directory $path2 -types d -tails -nocomplain *]
1599 if { [llength $list1] >0 || [llength $list2] > 0 } {
1600 _list_diff $list1 $list2 in1 in2 common
1601 if { "$verbose" > 1 } {
1602 if { [llength $in1] > 0 } { _log_and_puts log "Only in $path1: $in1" }
1603 if { [llength $in2] > 0 } { _log_and_puts log "Only in $path2: $in2" }
# recurse into subdirectories present on both sides
1605 foreach subdir $common {
1606 if { "$verbose" > 2 } {
1607 _log_and_puts log "Checking [file join $basename $subdir]"
1609 _test_diff $dir1 $dir2 [file join $basename $subdir] $status $verbose log stat
1612 # check log files (only if directory has no subdirs)
1613 set list1 [glob -directory $path1 -types f -tails -nocomplain *.log]
1614 set list2 [glob -directory $path2 -types f -tails -nocomplain *.log]
1615 _list_diff $list1 $list2 in1 in2 common
1616 if { "$verbose" > 1 } {
1617 if { [llength $in1] > 0 } { _log_and_puts log "Only in $path1: $in1" }
1618 if { [llength $in2] > 0 } { _log_and_puts log "Only in $path2: $in2" }
1620 foreach logfile $common {
1622 set log1 [_read_file [file join $dir1 $basename $logfile]]
1623 set log2 [_read_file [file join $dir2 $basename $logfile]]
1624 set casename [file rootname $logfile]
1626 # check execution statuses
1627 set status1 UNDEFINED
1628 set status2 UNDEFINED
1629 if { ! [regexp {CASE [^:]*:\s*([\w]+)} $log1 res1 status1] ||
1630 ! [regexp {CASE [^:]*:\s*([\w]+)} $log2 res2 status2] ||
1631 "$status1" != "$status2" } {
1632 _log_and_puts log "STATUS [split $basename /] $casename: $status1 / $status2"
1634 # if test statuses are different, further comparison makes
1635 # no sense unless explicitly requested
1636 if { "$status" != "all" } {
# with status filter "ok", skip cases that did not pass in the first run
1640 if { "$status" == "ok" && "$status1" != "OK" } {
# compare CPU times reported in the two logs
1647 if { [regexp {TOTAL CPU TIME:\s*([\d.]+)} $log1 res1 cpu1] &&
1648 [regexp {TOTAL CPU TIME:\s*([\d.]+)} $log2 res1 cpu2] } {
1649 set stat(cpu1) [expr $stat(cpu1) + $cpu1]
1650 set stat(cpu2) [expr $stat(cpu2) + $cpu2]
1652 # compare CPU times with 10% precision (but not less 0.5 sec)
1653 if { [expr abs ($cpu1 - $cpu2) > 0.5 + 0.05 * abs ($cpu1 + $cpu2)] } {
1654 _log_and_puts log "CPU [split $basename /] $casename: $cpu1 / $cpu2"
1658 # check memory delta
1661 if { [regexp {MEMORY DELTA:\s*([\d.]+)} $log1 res1 mem1] &&
1662 [regexp {MEMORY DELTA:\s*([\d.]+)} $log2 res1 mem2] } {
1663 set stat(mem1) [expr $stat(mem1) + $mem1]
1664 set stat(mem2) [expr $stat(mem2) + $mem2]
1666 # compare memory usage with 10% precision (but not less 16 KiB)
1667 if { [expr abs ($mem1 - $mem2) > 16 + 0.05 * abs ($mem1 + $mem2)] } {
1668 _log_and_puts log "MEMORY [split $basename /] $casename: $mem1 / $mem2"
# compare case snapshot images present on both sides
1673 set imglist1 [glob -directory $path1 -types f -tails -nocomplain $casename*.{png,gif}]
1674 set imglist2 [glob -directory $path2 -types f -tails -nocomplain $casename*.{png,gif}]
1675 _list_diff $imglist1 $imglist2 imgin1 imgin2 imgcommon
1676 if { "$verbose" > 1 } {
1677 if { [llength $imgin1] > 0 } { _log_and_puts log "Only in $path1: $imgin1" }
1678 if { [llength $imgin2] > 0 } { _log_and_puts log "Only in $path2: $imgin2" }
1680 foreach imgfile $imgcommon {
1681 # if { $verbose > 1 } { _log_and_puts log "Checking [split basename /] $casename: $imgfile" }
1682 set diffile [_diff_img_name $dir1 $dir2 $basename $imgfile]
1683 if { [catch {diffimage [file join $dir1 $basename $imgfile] \
1684 [file join $dir2 $basename $imgfile] \
1685 0 0 0 $diffile} diff] } {
1686 _log_and_puts log "IMAGE [split $basename /] $casename: $imgfile cannot be compared"
1687 file delete -force $diffile ;# clean possible previous result of diffimage
1688 } elseif { $diff != 0 } {
1689 _log_and_puts log "IMAGE [split $basename /] $casename: $imgfile differs"
1691 file delete -force $diffile ;# clean useless artifact of diffimage
# at top level, report accumulated totals
1697 if { "$_statvar" == "" } {
1698 _log_and_puts log "Total MEMORY difference: $stat(mem1) / $stat(mem2)"
1699 _log_and_puts log "Total CPU difference: $stat(cpu1) / $stat(cpu2)"
1703 # Auxiliary procedure to save log of results comparison to file
1704 proc _log_html_diff {file log dir1 dir2} {
# Saves the comparison log produced by _test_diff to $file as HTML; for each
# "IMAGE <case>: <img>" line the two compared images and, if present, the
# pixel-difference image are embedded side by side.
1705 # create missing directories as needed
1706 catch {file mkdir [file dirname $file]}
1708 # try to open a file
1709 if [catch {set fd [open $file w]} res] {
1710 error "Error saving log file $file: $res"
1714 puts $fd "<html><head><title>Diff $dir1 vs. $dir2</title></head><body>"
1715 puts $fd "<h1>Comparison of test results: $dir1 vs. $dir2</h1>"
1717 # print log body, trying to add HTML links to script files on lines like
1718 # "Executing <filename>..."
1720 set logpath [file split [file normalize $file]]
1721 foreach line [split $log "\n"] {
# embed images for lines reporting image differences
1724 if { [regexp {IMAGE[ \t]+([^:]+):[ \t]+([A-Za-z0-9_.-]+)} $line res case img] } {
1725 if { [catch {eval file join "" [lrange $case 0 end-1]} gridpath] } {
1726 # note: special handler for the case if test grid directories are compared directly
1729 set img1 "<img src=\"[_make_url $file [file join $dir1 $gridpath $img]]\">"
1730 set img2 "<img src=\"[_make_url $file [file join $dir2 $gridpath $img]]\">"
# show the difference image only when diffimage produced one
1732 set difffile [_diff_img_name $dir1 $dir2 $gridpath $img]
1733 if { [file exists $difffile] } {
1734 set imgd "<img src=\"[_make_url $file $difffile]\">"
1739 puts $fd "<table><tr><th>[file tail $dir1]</th><th>[file tail $dir2]</th><th>Different pixels</th></tr>"
1740 puts $fd "<tr><td>$img1</td><td>$img2</td><td>$imgd</td></tr></table>"
1743 puts $fd "</pre></body></html>"
1749 # get number of CPUs on the system
1750 proc _get_nb_cpus {} {
# Returns the number of logical CPUs on this machine, or 0 when it cannot
# be determined. Windows: NUMBER_OF_PROCESSORS; Linux: count of "processor"
# entries in /proc/cpuinfo; macOS: sysctl hw.ncpu.
1751 global tcl_platform env
1753 if { "$tcl_platform(platform)" == "windows" } {
1754 # on Windows, take the value of the environment variable
# sanity-check that the variable holds a usable non-negative number
1755 if { [info exists env(NUMBER_OF_PROCESSORS)] &&
1756 ! [catch {expr $env(NUMBER_OF_PROCESSORS) > 0} res] && $res >= 0 } {
1757 return $env(NUMBER_OF_PROCESSORS)
1759 } elseif { "$tcl_platform(os)" == "Linux" } {
1760 # on Linux, take number of logical processors listed in /proc/cpuinfo
1761 if { [catch {open "/proc/cpuinfo" r} fd] } {
1762 return 0 ;# should never happen, but...
1765 while { [gets $fd line] >= 0 } {
1766 if { [regexp {^processor[ \t]*:} $line] } {
1772 } elseif { "$tcl_platform(os)" == "Darwin" } {
1773 # on MacOS X, call sysctl command
1774 if { ! [catch {exec sysctl hw.ncpu} ret] &&
1775 [regexp {^hw[.]ncpu[ \t]*:[ \t]*([0-9]+)} $ret res nb] } {
1780 # if cannot get good value, return 0 as default
1784 # check two files for difference
1785 proc _diff_files {file1 file2} {
# Compares two text files line by line; sets the "differ" flag on the first
# mismatch in line length or content, or when one file ends before the other.
# NOTE(review): the enclosing loop construct, the initialization of
# "differ", and the close/return lines are elided in this chunk.
1786 set fd1 [open $file1 "r"]
1787 set fd2 [open $file2 "r"]
# gets returns the number of chars read; -1 signals end of file
1791 set nb1 [gets $fd1 line1]
1792 set nb2 [gets $fd2 line2]
1793 if { $nb1 != $nb2 } { set differ t; break }
1794 if { $nb1 < 0 } { break }
1795 if { [string compare $line1 $line2] } {
1806 # Check if file is in DOS encoding.
1807 # This check is done by presence of \r\n combination at the end of the first
1808 # line (i.e. prior to any other \n symbol).
1809 # Note that presence of non-ascii symbols typically used for recognition
1810 # of binary files is not suitable since some IGES and STEP files contain
1811 # non-ascii symbols.
1812 # Special check is added for PNG files which contain \r\n in the beginning.
1813 proc _check_dos_encoding {file} {
# Detects DOS (CRLF) encoding by checking whether the first line, read in
# binary mode, ends with \r; PNG files are excluded since their signature
# contains \r\n (see comment above the proc).
1814 set fd [open $file rb]
1816 if { [gets $fd line] && [regexp {.*\r$} $line] &&
1817 ! [regexp {^.PNG} $line] } {
1824 # procedure to recognize format of a data file by its first symbols (for OCCT
1825 # BREP and geometry DRAW formats, IGES, and STEP) and extension (all others)
1826 proc _check_file_format {file} {
# Determines the data-file format from its first 1024 bytes (DRAW BREP,
# DRAW geometry, STEP, IGES signatures) or, failing that, from the file
# extension; warns when the extension contradicts the detected format.
# NOTE(review): the lines assigning the detected "format" value in each
# branch are elided in this chunk.
1827 set fd [open $file rb]
1828 set line [read $fd 1024]
1832 set ext [file extension $file]
# DRAW shape dump signature
1834 if { [regexp {^DBRep_DrawableShape} $line] } {
1836 if { "$ext" != ".brep" && "$ext" != ".rle" &&
1837 "$ext" != ".draw" && "$ext" != "" } {
# DRAW geometry (curve/surface) dump signature
1840 } elseif { [regexp {^DrawTrSurf_} $line] } {
1842 if { "$ext" != ".rle" &&
1843 "$ext" != ".draw" && "$ext" != "" } {
# STEP part 21 header
1846 } elseif { [regexp {^[ \t]*ISO-10303-21} $line] } {
1848 if { "$ext" != ".step" && "$ext" != ".stp" } {
# IGES: 'S' section marker in column 73 of the first record
1851 } elseif { [regexp {^.\{72\}S[0 ]\{6\}1} $line] } {
1853 if { "$ext" != ".iges" && "$ext" != ".igs" } {
1856 } elseif { "$ext" == ".igs" } {
1858 } elseif { "$ext" == ".stp" } {
# fall back to uppercased extension as the format name
1861 set format [string toupper [string range $ext 1 end]]
1865 puts "$file: Warning: extension ($ext) does not match format ($format)"
1871 # procedure to load file knowing its format
1872 proc load_data_file {file format shape} {
1874 BREP { uplevel restore $file $shape }
1875 IGES { pload XSDRAW; uplevel igesbrep $file $shape * }
1876 STEP { pload XSDRAW; uplevel stepread $file __a *; uplevel renamevar __a_1 $shape }
1877 STL { pload XSDRAW; uplevel readstl $shape $file }
1878 default { error "Cannot read $format file $file" }