# Copyright (c) 2012 OPEN CASCADE SAS
#
# The content of this file is subject to the Open CASCADE Technology Public
# License Version 6.5 (the "License"). You may not use the content of this file
# except in compliance with the License. Please obtain a copy of the License
# at http://www.opencascade.org and read it completely before using this file.
#
# The Initial Developer of the Original Code is Open CASCADE S.A.S., having its
# main offices at: 1, place des Freres Montgolfier, 78280 Guyancourt, France.
#
# The Original Code and all software distributed under the License is
# distributed on an "AS IS" basis, without warranty of any kind, and the
# Initial Developer hereby disclaims all such warranties, including without
# limitation, any warranties of merchantability, fitness for a particular
# purpose or non-infringement. Please see the License for the specific terms
# and conditions governing the rights and limitations under the License.

############################################################################
# This file defines scripts for execution of OCCT tests.
# It should be loaded automatically when DRAW is started, and provides
# three top-level commands: 'test', 'testgrid', and 'testdiff'.
# See OCCT Tests User Guide for description of the test system.
#
# Note: procedures with names starting with underscore are for internal use
# inside the test system.
############################################################################

# Default verbose level for command _run_test
set _tests_verbose 0

# regexp for parsing test case results in summary log
set _test_case_regexp {^CASE\s+([\w.-]+)\s+([\w.-]+)\s+([\w.-]+)\s*:\s*([\w]+)(.*)}
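# For illustration, a summary line matched by this regexp looks like
# (group, grid, and case names here are hypothetical):
#   CASE bugs fclasses bug309: OK (some optional explanation)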

# Basic command to run indicated test case in DRAW
help test {Run specified test case
    Use: test group grid casename [verbose_level]
    Verbose level is 0 by default; can be set to 1 or 2}
proc test {group grid casename {verbose {}}} {
    global _tests_verbose
    if { $verbose != "" } {
        set _tests_verbose $verbose
    }

    # get test case paths (will raise error if input is invalid)
    _get_test $group $grid $casename dir gridname casefile

    # run test
    uplevel _run_test $dir $group $gridname $casefile

    # check log
    _check_log $dir $group $gridname $casename [dlog get]

    return
}
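
# Example (illustrative; the group, grid, and case names are hypothetical
# and must exist under CSF_TestScriptsPath):
#   test bugs fclasses bug309 1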

# Basic command to run tests: all groups, one group, or one grid
help testgrid {Run all tests, or specified group, or one grid
    Use: testgrid logdir [group [grid]] [options...]
    Log directory should be empty (or non-existing)
    Allowed options are:
    -parallel N: run in parallel mode with up to N processes (default 0)
    -refresh N: save summary logs every N seconds (default 60, minimal 1, 0 to disable)
    -overwrite: force writing logs in existing non-empty directory
    -xml filename: write XML report for Jenkins (in JUnit-like format)
}
proc testgrid {logdir args} {
    global env tcl_platform _tests_verbose

    ######################################################
    # check arguments
    ######################################################

    # check that environment variable defining paths to test scripts is defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"
    }

    # treat options
    set parallel 0
    set refresh 60
    set overwrite 0
    set xmlfile ""
    for {set narg 0} {$narg < [llength $args]} {incr narg} {
        set arg [lindex $args $narg]

        # parallel execution
        if { $arg == "-parallel" } {
            incr narg
            if { $narg < [llength $args] } {
                set parallel [expr [lindex $args $narg]]
            } else {
                set parallel 2
            }
            continue
        }

        # refresh logs time
        if { $arg == "-refresh" } {
            incr narg
            if { $narg < [llength $args] } {
                set refresh [expr [lindex $args $narg]]
            } else {
                set refresh 10
            }
            continue
        }

        # allow overwrite logs
        if { $arg == "-overwrite" } {
            set overwrite 1
            continue
        }

        # XML report file name
        if { $arg == "-xml" } {
            incr narg
            if { $narg < [llength $args] } {
                set xmlfile [lindex $args $narg]
            }
            if { $xmlfile == "" } {
                set xmlfile TESTS-summary.xml
            }
            continue
        }

        # unsupported option
        if { [regexp {^-} $arg] } {
            error "Error: unsupported option \"$arg\""
        }

        # treat arguments not recognized as options as group and grid names
        if { ! [info exists groupname] } {
            set groupname $arg
        } elseif { ! [info exists gridname] } {
            set gridname $arg
        } else {
            error "Error: cannot interpret argument $narg ($arg): both group and grid names are already defined by previous args!"
        }
    }

    # check that target log directory is empty or does not exist
    set logdir [file normalize [string trim $logdir]]
    if { $logdir == "" } {
        # if specified logdir is empty string, generate unique name like "results_2010-12-31T2359"
        set logdir "results_[clock format [clock seconds] -format {%Y-%m-%dT%H%M}]"
        set logdir [file normalize $logdir]
    }
    if { [file isdirectory $logdir] && ! $overwrite && ! [catch {glob -directory $logdir *}] } {
        error "Error: Specified log directory \"$logdir\" is not empty; please clean it before running tests"
    }
    if { [catch {file mkdir $logdir}] || ! [file writable $logdir] } {
        error "Error: Cannot create directory \"$logdir\", or it is not writable"
    }

    ######################################################
    # prepare list of tests to be performed
    ######################################################

    # list of tests, each defined by a list of:
    # - test scripts directory
    # - group (subfolder) name
    # - grid (subfolder) name
    # - test case name
    # - path to test case file
    set tests_list {}

    # iterate by all script paths
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # protection against empty paths
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        if { $_tests_verbose > 0 } { _log_and_puts log "Examining tests directory $dir" }

        # check that directory exists
        if { ! [file isdirectory $dir] } {
            _log_and_puts log "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
            continue
        }

        # if test group is specified, check that directory with given name exists in this dir
        # if not, continue to the next test dir
        if { [info exists groupname] && $groupname != "" } {
            if { [file isdirectory $dir/$groupname] } {
                set groups $groupname
            } else {
                continue
            }
        } else {
            # else search all directories in the current dir
            if [catch {glob -directory $dir -tails -types d *} groups] { continue }
        }

        # iterate by groups
        if { $_tests_verbose > 0 } { _log_and_puts log "Groups to be executed: $groups" }
        foreach group [lsort -dictionary $groups] {
            if { $_tests_verbose > 0 } { _log_and_puts log "Examining group directory $group" }

            # file grids.list must exist: it defines sequence of grids in the group
            if { ! [file exists $dir/$group/grids.list] } {
                _log_and_puts log "Warning: directory $dir/$group does not contain file grids.list, skipped"
                continue
            }

            # read grids.list file and make a list of grids to be executed
            set gridlist {}
            set fd [open $dir/$group/grids.list]
            set nline 0
            while { [gets $fd line] >= 0 } {
                incr nline

                # skip comments and empty lines
                if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                if { [string trim $line] == "" } { continue }

                # get grid id and name
                if { ! [regexp "^\(\[0-9\]+\)\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridid grid] } {
                    _log_and_puts log "Warning: cannot recognize line $nline in file $dir/$group/grids.list as \"gridid gridname\"; ignored"
                    continue
                }

                # if specific grid is requested, check that it is present; otherwise make complete list
                if { ! [info exists gridname] || $gridname == "" || $gridname == $gridid || $gridname == $grid } {
                    lappend gridlist $grid
                }
            }
            close $fd

            # iterate by all grids
            foreach grid $gridlist {

                # check if this grid is aliased to another one
                set griddir $dir/$group/$grid
                if { [file exists $griddir/cases.list] } {
                    set fd [open $griddir/cases.list]
                    if { [gets $fd line] >= 0 } {
                        set griddir [file normalize $dir/$group/$grid/[string trim $line]]
                    }
                    close $fd
                }

                # check if grid directory actually exists
                if { ! [file isdirectory $griddir] } {
                    _log_and_puts log "Error: tests directory for grid $grid ($griddir) is missing; skipped"
                    continue
                }

                # create directory for logging test results
                if { $logdir != "" } { file mkdir $logdir/$group/$grid }

                # iterate by all tests in the grid directory
                if { [catch {glob -directory $griddir -types f *} testfiles] } { continue }
                foreach casefile [lsort -dictionary $testfiles] {
                    # filter out begin and end files
                    set casename [file tail $casefile]
                    if { $casename == "begin" || $casename == "end" } { continue }

                    lappend tests_list [list $dir $group $grid $casename $casefile]
                }
            }
        }
    }
    if { [llength $tests_list] < 1 } {
        error "Error: no tests are found; check your input arguments and variable CSF_TestScriptsPath!"
    }

    ######################################################
    # run tests
    ######################################################

    # log command arguments and environment
    set log "Command: testgrid $args\nHost: [info hostname]\nStarted on: [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}]\n"
    set log "$log\nEnvironment:\n"
    foreach envar [array names env] {
        set log "$log$envar=\"$env($envar)\"\n"
    }
    set log "$log\n"

    set refresh_timer [clock seconds]
    uplevel dchrono _timer reset
    uplevel dchrono _timer start

    # if parallel execution is requested, allocate thread pool
    if { $parallel > 0 } {
        if { ! [info exists tcl_platform(threaded)] || [catch {package require Thread}] } {
            _log_and_puts log "Warning: Tcl package Thread is not available, running in sequential mode"
            set parallel 0
        } else {
            set worker [tpool::create -minworkers $parallel -maxworkers $parallel]
            # suspend the pool until all jobs are posted, to prevent blocking of the process
            # of starting / processing jobs by running threads
            tpool::suspend $worker
            if { $_tests_verbose > 0 } { _log_and_puts log "Executing tests in (up to) $parallel threads" }
        }
    }

    # start test cases
    foreach test_def $tests_list {
        set dir [lindex $test_def 0]
        set group [lindex $test_def 1]
        set grid [lindex $test_def 2]
        set casename [lindex $test_def 3]
        set casefile [lindex $test_def 4]

        # command to set tests for generation of image in results directory
        set imgdir_cmd ""
        if { $logdir != "" } { set imgdir_cmd "set imagedir $logdir/$group/$grid" }

        # prepare command file for running test case in separate instance of DRAW
        set fd_cmd [open $logdir/$group/$grid/${casename}.tcl w]
        puts $fd_cmd "$imgdir_cmd"
        puts $fd_cmd "set test_image $casename"
        puts $fd_cmd "_run_test $dir $group $grid $casefile"
        # use dlog command to obtain complete output of the test where available (i.e. since OCCT 6.6.0)
        if { ! [catch {dlog get}] } {
            puts $fd_cmd "puts \[dlog get\]"
        } else {
            # else try to use old-style QA_ variables to get more output...
            set env(QA_DUMP) 1
            set env(QA_DUP) 1
            set env(QA_print_command) 1
        }
        # final 'exit' is needed when running on Linux under VirtualGl
        puts $fd_cmd "exit"
        close $fd_cmd

        # command to run DRAW with a command file;
        # note that empty string is passed as standard input to avoid possible
        # hang-ups due to waiting for stdin of the launching process
        set command "exec <<{} DRAWEXE -f $logdir/$group/$grid/${casename}.tcl"

        # alternative method to run without temporary file; disabled as it needs too many backslashes
#        else {
#            set command "exec <<\"\" DRAWEXE -c $imgdir_cmd\\\; set test_image $casename\\\; \
#                _run_test $dir $group $grid $casefile\\\; \
#                puts \\\[dlog get\\\]\\\; exit"
#        }

        # run test case, either in parallel or sequentially
        if { $parallel > 0 } {
            # parallel execution
            set job [tpool::post -nowait $worker "catch \"$command\" output; return \$output"]
            set job_def($job) [list $logdir $dir $group $grid $casename]
        } else {
            # sequential execution
            catch {eval $command} output
            _log_test_case $output $logdir $dir $group $grid $casename log

            # update summary log with requested period
            if { $logdir != "" && $refresh > 0 && [expr [clock seconds] - $refresh_timer > $refresh] } {
                # update and dump summary
                _log_summarize $logdir $log
                set refresh_timer [clock seconds]
            }
        }
    }

    # get results of started threads
    if { $parallel > 0 } {
        tpool::resume $worker
        while { [llength [array names job_def]] > 0 } {
            foreach job [tpool::wait $worker [array names job_def]] {
                eval _log_test_case \[tpool::get $worker $job\] $job_def($job) log
                unset job_def($job)
            }

            # update summary log with requested period
            if { $logdir != "" && $refresh > 0 && [clock seconds] > $refresh_timer + $refresh } {
                _log_summarize $logdir $log
                set refresh_timer [clock seconds]
            }
        }
        # release thread pool
        tpool::release $worker
    }

    uplevel dchrono _timer stop
    set time [lindex [split [uplevel dchrono _timer show] "\n"] 0]

    ######################################################
    # output summary logs and exit
    ######################################################

    _log_summarize $logdir $log $time
    if { $logdir != "" } {
        puts "Detailed logs are saved in $logdir"
    }
    if { $logdir != "" && $xmlfile != "" } {
        # XML output file is assumed relative to log dir unless it is absolute
        if { [file pathtype $xmlfile] == "relative" } {
            set xmlfile [file normalize $logdir/$xmlfile]
        }
        _log_xml_summary $logdir $xmlfile $log 0
        puts "XML summary is saved to $xmlfile"
    }

    return
}
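
# Example (illustrative; the group name and log directory are hypothetical):
# run the whole "bugs" group in 4 parallel DRAW instances, writing logs and
# a JUnit-style report into directory "results":
#   testgrid results bugs -parallel 4 -xml TESTS-summary.xml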

# Procedure to compare results of two runs of test cases
help testdiff {Compare results of two executions of tests (CPU times, ...)
    Use: testdiff dir1 dir2 [options...]
    Where dir1 and dir2 are directories containing logs of two test runs.
    Allowed options are:
    -save filename: save resulting log in specified file
    -subdir name: compare only specified subdirectory (can be nested)
    -status {same|ok|all}: filter cases for comparing by their status:
        same - only cases with same status are compared (default)
        ok   - only cases with OK status in both logs are compared
        all  - results are compared regardless of status
    -verbose level:
        1 - output only differences
        2 - output list of logs and directories present in one of dirs only
        3 - (default) output progress messages
}
proc testdiff {dir1 dir2 args} {
    if { "$dir1" == "$dir2" } {
        error "Input directories are the same"
    }

    ######################################################
    # check arguments
    ######################################################

    # treat options
    set logfile ""
    set basename ""
    set status "same"
    set verbose 3
    for {set narg 0} {$narg < [llength $args]} {incr narg} {
        set arg [lindex $args $narg]

        # log file name
        if { $arg == "-save" } {
            incr narg
            if { $narg < [llength $args] } {
                set logfile [lindex $args $narg]
            } else {
                error "Error: Option -save must be followed by log file name"
            }
            continue
        }

        # subdirectory to compare
        if { $arg == "-subdir" } {
            incr narg
            if { $narg < [llength $args] } {
                set basename [lindex $args $narg]
            } else {
                error "Error: Option -subdir must be followed by subdirectory path"
            }
            continue
        }

        # status filter
        if { $arg == "-status" } {
            incr narg
            if { $narg < [llength $args] } {
                set status [lindex $args $narg]
            } else { set status "" }
            if { "$status" != "same" && "$status" != "all" && "$status" != "ok" } {
                error "Error: Option -status must be followed by one of \"same\", \"all\", or \"ok\""
            }
            continue
        }

        # verbose level
        if { $arg == "-verbose" } {
            incr narg
            if { $narg < [llength $args] } {
                set verbose [expr [lindex $args $narg]]
            }
            continue
        }

#        if { [regexp {^-} $arg] } {
        error "Error: unsupported option \"$arg\""
#        }
    }

    # run diff procedure (recursive)
    _test_diff $dir1 $dir2 $basename $status $verbose log

    # save result to log file
    if { "$logfile" != "" } {
        _log_save $logfile $log
    }

    return
}
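
# Example (illustrative; directory names are hypothetical):
#   testdiff results/2013-03-01 results/2013-04-01 -status ok -save diff.log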

# Internal procedure to find test case indicated by group, grid, and test case names;
# returns:
# - dir: path to the base directory of the tests group
# - gridname: actual name of the grid
# - casefile: path to the test case script
# if no such test is found, raises error with appropriate message
proc _get_test {group grid casename _dir _gridname _casefile} {
    upvar $_dir dir
    upvar $_gridname gridname
    upvar $_casefile casefile

    global env

    # check that environment variable defining paths to test scripts is defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"
    }

    # iterate by all script paths
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # protection against empty paths
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        # check that directory exists
        if { ! [file isdirectory $dir] } {
            puts "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
            continue
        }

        # check if test group with given name exists in this dir
        # if not, continue to the next test dir
        if { ! [file isdirectory $dir/$group] } { continue }

        # check that grid with given name (possibly alias) exists; stop otherwise
        set gridname $grid
        if { ! [file isdirectory $dir/$group/$gridname] } {
            # check if grid is named by alias rather than by actual name
            if { [file exists $dir/$group/grids.list] } {
                set fd [open $dir/$group/grids.list]
                while { [gets $fd line] >= 0 } {
                    if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                    if { [regexp "^$grid\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridname] } {
                        break
                    }
                }
                close $fd
            }
        }
        if { ! [file isdirectory $dir/$group/$gridname] } { continue }

        # get actual file name of the script; stop if it cannot be found
        set casefile $dir/$group/$gridname/$casename
        if { ! [file exists $casefile] } {
            # check if this grid is aliased to another one
            if { [file exists $dir/$group/$gridname/cases.list] } {
                set fd [open $dir/$group/$gridname/cases.list]
                if { [gets $fd line] >= 0 } {
                    set casefile [file normalize $dir/$group/$gridname/[string trim $line]/$casename]
                }
                close $fd
            }
        }
        if { [file exists $casefile] } {
            # normal return
            return
        }
    }

    # coming here means specified test is not found; report error
    error [join [list "Error: test case $group / $grid / $casename is not found in paths listed in variable" \
                      "CSF_TestScriptsPath (current value is \"$env(CSF_TestScriptsPath)\")"] "\n"]
}

# Internal procedure to run test case indicated by base directory,
# group and grid names, and test case file path.
# The log can be obtained by command "dlog get".
proc _run_test {scriptsdir group gridname casefile} {
    global env

    # start timer
    uplevel dchrono _timer reset
    uplevel dchrono _timer start

    # enable commands logging; switch to old-style mode if dlog command is not present
    set dlog_exists 1
    if { [catch {dlog reset}] } {
        set dlog_exists 0
    } else {
        dlog reset
        dlog on
        rename puts puts-saved
        proc puts args {
            global _tests_verbose

            # log only output to stdout and stderr, not to file!
            if {[llength $args] > 1} {
                set optarg [lindex $args end-1]
                if { $optarg == "stdout" || $optarg == "stderr" || $optarg == "-newline" } {
                    dlog add [lindex $args end]
                }
            } else {
                dlog add [lindex $args end]
            }

            # reproduce original puts
            if { $_tests_verbose } {
                eval puts-saved $args
            }
        }
    }

    # evaluate test case
    if [catch {
        uplevel set casename [file tail $casefile]
        uplevel set groupname $group
        uplevel set gridname $gridname

        if { [file exists $scriptsdir/$group/begin] } {
            puts "Executing $scriptsdir/$group/begin..."; flush stdout
            uplevel source $scriptsdir/$group/begin
        }
        if { [file exists $scriptsdir/$group/$gridname/begin] } {
            puts "Executing $scriptsdir/$group/$gridname/begin..."; flush stdout
            uplevel source $scriptsdir/$group/$gridname/begin
        }

        puts "Executing $casefile..."; flush stdout
        uplevel source $casefile

        if { [file exists $scriptsdir/$group/$gridname/end] } {
            puts "Executing $scriptsdir/$group/$gridname/end..."; flush stdout
            uplevel source $scriptsdir/$group/$gridname/end
        }
        if { [file exists $scriptsdir/$group/end] } {
            puts "Executing $scriptsdir/$group/end..."; flush stdout
            uplevel source $scriptsdir/$group/end
        }
    } res] {
        puts "Tcl Exception: $res"
    }

    # stop logging
    if { $dlog_exists } {
        rename puts {}
        rename puts-saved puts
        dlog off
    }

    # stop cpulimit killer if armed by the test
    cpulimit

    # add timing info
    uplevel dchrono _timer stop
    set time [uplevel dchrono _timer show]
    if [regexp -nocase {CPU user time:[ \t]*([0-9.e-]+)} $time res cpu] {
        if { $dlog_exists } {
            dlog add "TOTAL CPU TIME: $cpu sec"
        } else {
            puts "TOTAL CPU TIME: $cpu sec"
        }
    }
}

# Internal procedure to check log of test execution and decide if it passed or failed
proc _check_log {dir group gridname casename log {_summary {}} {_html_log {}}} {
    global env
    if { $_summary != "" } { upvar $_summary summary }
    if { $_html_log != "" } { upvar $_html_log html_log }
    set summary ""
    set html_log ""

    if [catch {

        # load definition of 'bad words' indicating test failure
        # note that rules are loaded in the order of decreasing priority (grid - group - common),
        # thus grid rules will override group ones
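        # For illustration, each non-comment line of parse.rules has the form
        # "status/regexp/comment", e.g. (hypothetical rule):
        #   FAILED /\bFaulty\b/ bad shape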
        set badwords {}
        foreach rulesfile [list $dir/$group/$gridname/parse.rules $dir/$group/parse.rules $dir/parse.rules] {
            if [catch {set fd [open $rulesfile r]}] { continue }
            while { [gets $fd line] >= 0 } {
                # skip comments and empty lines
                if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                if { [string trim $line] == "" } { continue }
                # extract regexp
                if { ! [regexp {^([^/]*)/([^/]*)/(.*)$} $line res status rexp comment] } {
                    puts "Warning: cannot recognize parsing rule \"$line\" in file $rulesfile"
                    continue
                }
                set status [string trim $status]
                if { $comment != "" } { set status "$status ([string trim $comment])" }
                set rexp [regsub -all {\\b} $rexp {\\y}] ;# convert regexp from Perl to Tcl style
                lappend badwords [list $status $rexp]
            }
            close $fd
        }
        if { [llength $badwords] <= 0 } {
            puts "Warning: no definition of error indicators found (check files parse.rules)"
        }

        # analyse log line-by-line
        set todos {}
        set status ""
        foreach line [split $log "\n"] {
            # check if line defines specific treatment of some messages
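            # For illustration, such lines have the form "TODO platforms: regexp",
            # e.g. (hypothetical): "TODO ?Linux: \bFaulty\b"; the question mark
            # marks the case as unstable, so absence of the problem is not
            # counted as improvement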
            if [regexp -nocase {^[ \t]*TODO ([^:]*):(.*)$} $line res platforms pattern] {
                if { ! [regexp -nocase {\mAll\M} $platforms] &&
                     ! [regexp -nocase "\\m$env(os_type)\\M" $platforms] } {
                    set html_log "$html_log\n$line"
                    continue ;# TODO statement is for another platform
                }

                # record TODOs that mark unstable cases
                if { [regexp {[\?]} $platforms] } {
                    set todos_unstable([llength $todos]) 1
                }

                lappend todos [regsub -all {\\b} [string trim $pattern] {\\y}] ;# convert regexp from Perl to Tcl style
                set html_log "$html_log\n[_html_highlight BAD $line]"
                continue
            }

            # check for presence of messages indicating test result
            set ismarked 0
            foreach bw $badwords {
                if { [regexp [lindex $bw 1] $line] } {
                    # check if this is known bad case
                    set is_known 0
                    for {set i 0} {$i < [llength $todos]} {incr i} {
                        if { [regexp [lindex $todos $i] $line] } {
                            set is_known 1
                            incr todo_count($i)
                            set html_log "$html_log\n[_html_highlight BAD $line]"
                            break
                        }
                    }

                    # if it is not in todo, define status
                    if { ! $is_known } {
                        set stat [lindex $bw 0 0]
                        set html_log "$html_log\n[_html_highlight $stat $line]"
                        if { $status == "" && $stat != "OK" && ! [regexp -nocase {^IGNOR} $stat] } {
                            set status [lindex $bw 0]
                        }
                    }
                    set ismarked 1
                    break
                }
            }
            if { ! $ismarked } {
                set html_log "$html_log\n$line"
            }
        }

        # check for presence of TEST COMPLETED statement
        if { $status == "" && ! [regexp {TEST COMPLETED} $log] } {
            # check whether absence of TEST COMPLETED is known problem
            set i [lsearch $todos "TEST INCOMPLETE"]
            if { $i >= 0 } {
                incr todo_count($i)
            } else {
                set status "FAILED (no final message is found)"
            }
        }

        # check declared bad cases and diagnose possible improvement
        # (bad case declared but not detected).
        # Note that absence of the problem marked by TODO with question mark
        # (unstable) is not reported as improvement.
        if { $status == "" } {
            for {set i 0} {$i < [llength $todos]} {incr i} {
                if { ! [info exists todos_unstable($i)] &&
                     (! [info exists todo_count($i)] || $todo_count($i) <= 0) } {
                    set status "IMPROVEMENT (expected problem TODO no. [expr $i + 1] is not detected)"
                    break
                }
            }
        }

        # report test as known bad if at least one of expected problems is found
        if { $status == "" && [llength [array names todo_count]] > 0 } {
            set status "BAD (known problem)"
        }

        # report normal OK
        if { $status == "" } { set status "OK" }

    } res] {
        set status "FAILED ($res)"
    }

    # put final message
    _log_and_puts summary "CASE $group $gridname $casename: $status"
    set html_log "[_html_highlight [lindex $status 0] $summary]\n$html_log"
}

# Auxiliary procedure putting message to both cout and log variable (list)
proc _log_and_puts {logvar message} {
    if { $logvar != "" } {
        upvar $logvar log
        if [info exists log] {
            set log "$log$message\n"
        } else {
            set log "$message\n"
        }
    }
    puts $message
}

# Auxiliary procedure to log result on single test case
proc _log_test_case {output logdir dir group grid casename logvar} {
    upvar $logvar log

    # check result and make HTML log
    _check_log $dir $group $grid $casename $output summary html_log
    set log "$log$summary"

    # save log to file
    if { $logdir != "" } {
        _log_html $logdir/$group/$grid/$casename.html $html_log "Test $group $grid $casename"
        _log_save $logdir/$group/$grid/$casename.log "$output\n$summary" "Test $group $grid $casename"
    }
}

# Auxiliary procedure to save log to file
proc _log_save {file log {title {}}} {
    # create missing directories as needed
    catch {file mkdir [file dirname $file]}

    # try to open a file
    if [catch {set fd [open $file w]} res] {
        error "Error saving log file $file: $res"
    }

    # dump log and close
    puts $fd "$title\n"
    puts $fd $log
    close $fd
    return
}

# Auxiliary procedure to save HTML log to file
proc _log_html {file log {title {}}} {
    # create missing directories as needed
    catch {file mkdir [file dirname $file]}

    # try to open a file
    if [catch {set fd [open $file w]} res] {
        error "Error saving log file $file: $res"
    }

    # print header
    puts $fd "<html><head><title>$title</title></head><body><h1>$title</h1>"

    # add images if present
    set imgbasename [file rootname [file tail $file]]
    foreach img [lsort [glob -nocomplain -directory [file dirname $file] -tails ${imgbasename}*.gif ${imgbasename}*.png ${imgbasename}*.jpg]] {
        puts $fd "<p><img src=\"$img\"/><p>"
    }

    # print body, then end and close
    puts $fd "<pre>"
    puts $fd $log
    puts $fd "</pre></body></html>"

    close $fd
    return
}

# Auxiliary method to make text with HTML highlighting according to status
proc _html_color {status} {
    # choose a color for the cell according to result
    if { $status == "OK" } {
        return lightgreen
    } elseif { [regexp -nocase {^FAIL} $status] } {
        return red
    } elseif { [regexp -nocase {^BAD} $status] } {
        return yellow
    } elseif { [regexp -nocase {^IMP} $status] } {
        return orange
    } elseif { [regexp -nocase {^SKIP} $status] } {
        return gray
    } elseif { [regexp -nocase {^IGNOR} $status] } {
        return gray
    } else {
        puts "Warning: no color defined for status $status, using red as if FAILED"
        return red
    }
}

# Format text line in HTML to be colored according to the status
proc _html_highlight {status line} {
    return "<table><tr><td bgcolor=\"[_html_color $status]\">$line</td></tr></table>"
}
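
# For illustration: _html_highlight OK {all tests passed} returns
#   <table><tr><td bgcolor="lightgreen">all tests passed</td></tr></table>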

# Internal procedure to generate HTML page presenting log of the tests
# execution in tabular form, with links to reports on individual cases
proc _log_html_summary {logdir log totals regressions improvements total_time} {
    global _test_case_regexp

    # create missing directories as needed
    catch {file mkdir $logdir}

    # try to open a file and start HTML
    if [catch {set fd [open $logdir/summary.html w]} res] {
        error "Error creating log file: $res"
    }

    # write HTML header, including command to refresh log if still in progress
    puts $fd "<html><head>"
    puts $fd "<title>Tests summary</title>"
    if { $total_time == "" } {
        puts $fd "<meta http-equiv=\"refresh\" content=\"10\">"
    }
    puts $fd "<meta http-equiv=\"pragma\" content=\"NO-CACHE\">"
    puts $fd "</head><body>"

    # put summary
    set legend(OK) "Test passed OK"
    set legend(FAILED) "Test failed (regression)"
    set legend(BAD) "Known problem"
    set legend(IMPROVEMENT) "Possible improvement (expected problem not detected)"
    set legend(SKIPPED) "Test skipped due to lack of data file"
    puts $fd "<h1>Summary</h1><table>"
    foreach nbstat $totals {
        set status [lindex $nbstat 1]
        if { [info exists legend($status)] } {
            set comment $legend($status)
        } else {
            set comment "User-defined status"
        }
        puts $fd "<tr><td align=\"right\">[lindex $nbstat 0]</td><td bgcolor=\"[_html_color $status]\">$status</td><td>$comment</td></tr>"
    }
    puts $fd "</table>"

    # time stamp and elapsed time info
    if { $total_time != "" } {
        puts $fd "<p>Generated on [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}] on [info hostname] <p> $total_time"
    } else {
        puts $fd "<p>NOTE: This is intermediate summary; the tests are still running! This page will refresh automatically until tests are finished."
    }

    # print regressions and improvements
    foreach featured [list $regressions $improvements] {
        if { [llength $featured] <= 1 } { continue }
        set status [string trim [lindex $featured 0] { :}]
        puts $fd "<h2>$status</h2>"
        puts $fd "<table>"
        set groupgrid ""
        foreach test [lrange $featured 1 end] {
            if { ! [regexp {^(.*)\s+([\w.]+)$} $test res gg name] } {
                set gg UNKNOWN
                set name "Error building short list; check details"
            }
            if { $gg != $groupgrid } {
                if { $groupgrid != "" } { puts $fd "</tr>" }
                set groupgrid $gg
                puts $fd "<tr><td>$gg</td>"
            }
            puts $fd "<td bgcolor=\"[_html_color $status]\"><a href=\"[regsub -all { } $gg /]/${name}.html\">$name</a></td>"
        }
        if { $groupgrid != "" } { puts $fd "</tr>" }
        puts $fd "</table>"
    }

    # put detailed log
    puts $fd "<h1>Details</h1>"

    # process log line-by-line
    set group {}
    set letter {}
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
            continue
        }

        # start new group
        if { $grp != $group } {
            if { $letter != "" } { puts $fd "</tr></table>" }
            set letter {}
            set group $grp
            set grid {}
            puts $fd "<h2>Group $group</h2>"
        }

        # start new grid
        if { $grd != $grid } {
            if { $letter != "" } { puts $fd "</tr></table>" }
            set letter {}
            set grid $grd
            puts $fd "<h3>Grid $grid</h3>"
        }

        # check if test case name is <letter><digit>;
        # if not, set alnum to period "." to recognize non-standard test name
        if { ! [regexp {([A-Za-z]+)([0-9]+)} $casename res alnum number] } {
            set alnum .
        }

        # start new row when letter changes or for non-standard names
        if { $alnum != $letter || $alnum == "." } {
            if { $letter != "" } {
                puts $fd "</tr><tr>"
            } else {
                puts $fd "<table><tr>"
            }
            set letter $alnum
        }

        puts $fd "<td bgcolor=\"[_html_color $result]\"><a href=\"$group/$grid/${casename}.html\">$casename</a></td>"
    }
    puts $fd "</tr></table>"

    # add remaining lines of log as plain text
    puts $fd "<h2>Plain text messages</h2>\n<pre>"
    foreach line [split $log "\n"] {
        if { ! [regexp $_test_case_regexp $line] } {
            puts $fd "$line"
        }
    }
    puts $fd "</pre>"

    # close file and exit
    puts $fd "</body>"
    close $fd
    return
}

# Procedure to dump summary logs of tests
proc _log_summarize {logdir log {total_time {}}} {

    # sort log records alphabetically to have the same behavior on Linux and Windows
    # (also needed if tests are run in parallel)
    set loglist [lsort -dictionary [split $log "\n"]]

    # classify test cases by status
    foreach line $loglist {
        if { [regexp {^CASE ([^:]*): ([[:alnum:]]+).*$} $line res caseid status] } {
            lappend stat($status) $caseid
        }
    }
    set totals {}
    set improvements {Improvements:}
    set regressions {Failed:}
    if { [info exists stat] } {
        foreach status [lsort [array names stat]] {
            lappend totals [list [llength $stat($status)] $status]

            # separately count improvements (status starting with IMP) and regressions (all except IMP, OK, BAD, and SKIP)
            if { [regexp -nocase {^IMP} $status] } {
                eval lappend improvements $stat($status)
            } elseif { $status != "OK" && ! [regexp -nocase {^BAD} $status] && ! [regexp -nocase {^SKIP} $status] } {
                eval lappend regressions $stat($status)
            }
        }
    }

    # if time is specified, add totals
    if { $total_time != "" } {
        if { [llength $improvements] > 1 } {
            _log_and_puts log [join $improvements "\n "]
        }
        if { [llength $regressions] > 1 } {
            _log_and_puts log [join $regressions "\n "]
        }
        if { [llength $improvements] == 1 && [llength $regressions] == 1 } {
            _log_and_puts log "No regressions"
        }
        _log_and_puts log "Total cases: [join $totals {, }]"
        _log_and_puts log $total_time
    }

    # save log to files
    if { $logdir != "" } {
        _log_html_summary $logdir $log $totals $regressions $improvements $total_time
        _log_save $logdir/tests.log $log "Tests summary"
    }

    return
}

# Internal procedure to generate XML log in JUnit style, for further
# consumption by Jenkins or similar systems.
#
# The output is intended to conform to XML schema supported by Jenkins found at
# https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd
#
# The mapping of the fields is inspired by annotated schema of Apache Ant JUnit XML format found at
# http://windyroad.org/dl/Open%20Source/JUnit.xsd
proc _log_xml_summary {logdir filename log include_cout} {
    global _test_case_regexp

    catch {file mkdir [file dirname $filename]}

    # try to open a file and start XML
    if [catch {set fd [open $filename w]} res] {
        error "Error creating XML summary file $filename: $res"
    }
    puts $fd "<?xml version='1.0' encoding='utf-8'?>"
    puts $fd "<testsuites>"

    # prototype for command to generate test suite tag
    set time_and_host "timestamp=\"[clock format [clock seconds] -format {%Y-%m-%dT%H:%M:%S}]\" hostname=\"[info hostname]\""
    set cmd_testsuite {puts $fd "<testsuite name=\"$group $grid\" tests=\"$nbtests\" failures=\"$nbfail\" errors=\"$nberr\" time=\"$time\" skipped=\"$nbskip\" $time_and_host>\n$testcases\n</testsuite>\n"}
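
    # For illustration, a generated testsuite tag looks like (all values hypothetical):
    #   <testsuite name="bugs fclasses" tests="10" failures="1" errors="0"
    #              time="12.3" skipped="0" timestamp="2013-04-01T12:00:00" hostname="myhost">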

    # sort log and process it line-by-line
    set group {}
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
            continue
        }
        set message [string trim $message " \t\r\n()"]

        # start new testsuite for each grid
        if { $grp != $group || $grd != $grid } {

            # write previous test suite
            if [info exists testcases] { eval $cmd_testsuite }

            set testcases {}
            set nbtests 0
            set nberr 0
            set nbfail 0
            set nbskip 0
            set time 0.

            set group $grp
            set grid $grd
        }

        incr nbtests

        # parse test log and get its CPU time
        set testout {}
        set add_cpu {}
        if { [catch {set fdlog [open $logdir/$group/$grid/${casename}.log r]} ret] } {
            puts "Error: cannot open $logdir/$group/$grid/${casename}.log: $ret"
        } else {
            while { [gets $fdlog logline] >= 0 } {
                if { $include_cout } {
                    set testout "$testout$logline\n"
                }
                if [regexp -nocase {TOTAL CPU TIME:\s*([\d.]+)\s*sec} $logline res cpu] {
                    set add_cpu " time=\"$cpu\""
                    set time [expr $time + $cpu]
                }
            }
            close $fdlog
        }
        if { ! $include_cout } {
            set testout "$line\n"
        }

        # record test case with its output and status
        # Mapping: SKIPPED is reported as <error>, BAD as <skipped>, any other non-OK status as <failure>
        set testcases "$testcases\n <testcase name=\"$casename\"$add_cpu status=\"$result\">\n"
        set testcases "$testcases\n <system-out>\n$testout </system-out>"
        if { $result != "OK" } {
            if { [regexp -nocase {^SKIP} $result] } {
                incr nberr
                set testcases "$testcases\n <error name=\"$result\" message=\"$message\"/>"
            } elseif { [regexp -nocase {^BAD} $result] } {
                incr nbskip
                set testcases "$testcases\n <skipped>$message</skipped>"
            } else {
                incr nbfail
                set testcases "$testcases\n <failure name=\"$result\" message=\"$message\"/>"
            }
        }
        set testcases "$testcases\n </testcase>"
    }

    # write last test suite
    if [info exists testcases] { eval $cmd_testsuite }

    # the end
    puts $fd "</testsuites>"
    close $fd
    return
}

# define custom platform name
proc _tests_platform_def {} {
    global env tcl_platform

    if [info exists env(os_type)] { return }

    set env(os_type) $tcl_platform(platform)

    # use detailed mapping for various versions of Linux
    # (note that mapping is rather non-uniform, for historical reasons)
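    # For illustration (hypothetical /etc/issue content): "Debian GNU/Linux 6.0"
    # would yield os_type "Debian60", or "Debian60-64" on an x86_64 machine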
    if { $env(os_type) == "unix" && ! [catch {exec cat /etc/issue} issue] } {
        if { [regexp {Mandriva[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) Mandriva$num
        } elseif { [regexp {Red Hat[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) RedHat$num
        } elseif { [regexp {Debian[ \tA-Za-z/]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) Debian$num$subnum
        } elseif { [regexp {CentOS[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) CentOS$num$subnum
        } elseif { [regexp {Scientific[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) SL$num$subnum
        } elseif { [regexp {Fedora Core[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) FedoraCore$num
        }
        if { [exec uname -m] == "x86_64" } {
            set env(os_type) "$env(os_type)-64"
        }
    }
}
_tests_platform_def

# Auxiliary procedure to split path specification (usually defined by
# environment variable) into list of directories or files
proc _split_path {pathspec} {
    global tcl_platform

    # first replace all \ (which might occur on Windows) by /
    regsub -all "\\\\" $pathspec "/" pathspec

    # split path by platform-specific separator
    return [split $pathspec [_path_separator]]
}
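
# For illustration, on Windows (separator ";"):
#   _split_path {C:\tests;D:\more-tests} returns {C:/tests D:/more-tests}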

# Auxiliary procedure to define platform-specific separator for directories in
# path specification
proc _path_separator {} {
    global tcl_platform

    # return platform-specific separator
    if { $tcl_platform(platform) == "windows" } {
        return ";"
    } else {
        return ":"
    }
}

# Procedure to locate data file for test given its name.
# The search is performed assuming that the function is called
# from the test case script; the search order is:
# - subdirectory "data" of the grid (nested) and group directories of the
#   current test case, in paths listed in environment variable CSF_TestScriptsPath
# - subdirectories <group>/<grid> and <group> of directories listed in
#   environment variable CSF_TestDataPath
# - directory returned by the DRAW command "datadir"
# If file is not found, raises Tcl error.
proc locate_data_file {filename} {
    global env groupname gridname casename

    set scriptfile [info script]
    if { $scriptfile == "" } {
        error "Error: This procedure (locate_data_file) is for use only in test scripts!"
    }

    # check sub-directories "data" of grid and group directories of
    # the current test case in paths indicated by CSF_TestScriptsPath
    if { [info exists groupname] && [info exists gridname] &&
         [info exists env(CSF_TestScriptsPath)] } {
        foreach dir [_split_path $env(CSF_TestScriptsPath)] {
            if { [file exists $dir/$groupname/$gridname/data/$filename] } {
                return [file normalize $dir/$groupname/$gridname/data/$filename]
            }
            if { [file exists $dir/$groupname/data/$filename] } {
                return [file normalize $dir/$groupname/data/$filename]
            }
        }
    }

    # check sub-directories corresponding to group and grid of
    # the current test case in paths indicated by CSF_TestDataPath
    if { [info exists groupname] && [info exists env(CSF_TestDataPath)] } {
        foreach dir [_split_path $env(CSF_TestDataPath)] {
            if { [info exists gridname] && [file exists $dir/$groupname/$gridname/$filename] } {
                return [file normalize $dir/$groupname/$gridname/$filename]
            }
            if { [file exists $dir/$groupname/$filename] } {
                return [file normalize $dir/$groupname/$filename]
            }
        }
    }

    # check datadir
    if { [file exists [uplevel datadir]/$filename] } {
        return [uplevel datadir]/$filename
    }

    # raise error
    error [join [list "Error: file $filename could not be found either in script" \
                      "directories or in paths indicated by CSF_TestDataPath environment variable"] "\n"]
}
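
# Example (illustrative; "box.brep" is a hypothetical data file name and
# "restore" is the DRAW command reading a shape from a file):
#   restore [locate_data_file box.brep] b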

# Procedure to make a diff and common of two lists
proc _list_diff {list1 list2 _in1 _in2 _common} {
    upvar $_in1 in1
    upvar $_in2 in2
    upvar $_common common

    set in1 {}
    set in2 {}
    set common {}
    foreach item $list1 {
        if { [lsearch -exact $list2 $item] >= 0 } {
            lappend common $item
        } else {
            lappend in1 $item
        }
    }
    foreach item $list2 {
        if { [lsearch -exact $common $item] < 0 } {
            lappend in2 $item
        }
    }
    return
}
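
# For illustration: _list_diff {a b c} {b c d} in1 in2 common
# leaves in1 = {a}, in2 = {d}, common = {b c}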

# procedure to load a file to Tcl string
proc _read_file {filename} {
    set fd [open $filename r]
    set result [read -nonewline $fd]
    close $fd
    return $result
}

# Procedure to compare results of two runs of test cases
proc _test_diff {dir1 dir2 basename status verbose _logvar {_statvar ""}} {
    upvar $_logvar log

    # prepare variable (array) for collecting statistics
    if { "$_statvar" != "" } {
        upvar $_statvar stat
    } else {
        set stat(cpu1) 0
        set stat(cpu2) 0
        set log {}
    }

    # first check subdirectories
    set path1 [file join $dir1 $basename]
    set path2 [file join $dir2 $basename]
    set list1 [glob -directory $path1 -types d -tails -nocomplain *]
    set list2 [glob -directory $path2 -types d -tails -nocomplain *]
    if { [llength $list1] > 0 || [llength $list2] > 0 } {
        _list_diff $list1 $list2 in1 in2 common
        if { "$verbose" > 1 } {
            if { [llength $in1] > 0 } { _log_and_puts log "Only in $path1: $in1" }
            if { [llength $in2] > 0 } { _log_and_puts log "Only in $path2: $in2" }
        }
        foreach subdir $common {
            if { "$verbose" > 2 } {
                _log_and_puts log "Checking [file join $basename $subdir]"
            }
            _test_diff $dir1 $dir2 [file join $basename $subdir] $status $verbose log stat
        }
    } else {
        # check log files (only if directory has no subdirs)
        set list1 [glob -directory $path1 -types f -tails -nocomplain *.log]
        set list2 [glob -directory $path2 -types f -tails -nocomplain *.log]
        _list_diff $list1 $list2 in1 in2 common
        if { "$verbose" > 1 } {
            if { [llength $in1] > 0 } { _log_and_puts log "Only in $path1: $in1" }
            if { [llength $in2] > 0 } { _log_and_puts log "Only in $path2: $in2" }
        }
        foreach logfile $common {
            # load two logs
            set log1 [_read_file [file join $dir1 $basename $logfile]]
            set log2 [_read_file [file join $dir2 $basename $logfile]]

            # check execution statuses
            set status1 UNDEFINED
            set status2 UNDEFINED
            if { ! [regexp {CASE [^:]*:\s*([\w]+)} $log1 res1 status1] ||
                 ! [regexp {CASE [^:]*:\s*([\w]+)} $log2 res2 status2] ||
                 "$status1" != "$status2" } {
                _log_and_puts log "STATUS [split $basename /] [file rootname $logfile]: $status1 / $status2"

                # if test statuses are different, further comparison makes
                # no sense unless explicitly requested
                if { "$status" != "all" } {
                    continue
                }
            }
            if { "$status" == "ok" && "$status1" != "OK" } {
                continue
            }

            # check CPU times
            set cpu1 UNDEFINED
            set cpu2 UNDEFINED
            if { [regexp {TOTAL CPU TIME:\s*([\d.]+)} $log1 res1 cpu1] &&
                 [regexp {TOTAL CPU TIME:\s*([\d.]+)} $log2 res1 cpu2] } {
                set stat(cpu1) [expr $stat(cpu1) + $cpu1]
                set stat(cpu2) [expr $stat(cpu2) + $cpu2]

                # compare CPU times with 10% precision (but not less than 0.5 sec)
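                # For illustration: cpu1 = 10.0 and cpu2 = 12.0 give a difference
                # of 2.0, above the threshold 0.5 + 0.05 * 22.0 = 1.6, so the
                # pair is reported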
                if { [expr abs ($cpu1 - $cpu2) > 0.5 + 0.05 * abs ($cpu1 + $cpu2)] } {
                    _log_and_puts log "CPU [split $basename /] [file rootname $logfile]: $cpu1 / $cpu2"
                }
            }
        }
    }

    if { "$_statvar" == "" } {
        _log_and_puts log "Total CPU difference: $stat(cpu1) / $stat(cpu2)"
    }
}