Commit | Line | Data |
---|---|---|
40093367 | 1 | # Copyright (c) 2012 OPEN CASCADE SAS |
2 | # | |
3 | # The content of this file is subject to the Open CASCADE Technology Public | |
4 | # License Version 6.5 (the "License"). You may not use the content of this file | |
5 | # except in compliance with the License. Please obtain a copy of the License | |
6 | # at http://www.opencascade.org and read it completely before using this file. | |
7 | # | |
8 | # The Initial Developer of the Original Code is Open CASCADE S.A.S., having its | |
9 | # main offices at: 1, place des Freres Montgolfier, 78280 Guyancourt, France. | |
10 | # | |
11 | # The Original Code and all software distributed under the License is | |
12 | # distributed on an "AS IS" basis, without warranty of any kind, and the | |
13 | # Initial Developer hereby disclaims all such warranties, including without | |
14 | # limitation, any warranties of merchantability, fitness for a particular | |
15 | # purpose or non-infringement. Please see the License for the specific terms | |
16 | # and conditions governing the rights and limitations under the License. | |
17 | ||
18 | ############################################################################ | |
19 | # This file defines scripts for execution of OCCT tests. | |
20 | # It should be loaded automatically when DRAW is started, and provides | |
cc6a292d | 21 | # three top-level commands: 'test', 'testgrid', and 'testdiff'. |
40093367 | 22 | # See OCCT Tests User Guide for description of the test system. |
23 | # | |
24 | # Note: procedures with names starting with underscore are for internal use | |
25 | # inside the test system. | |
26 | ############################################################################ | |
27 | ||
# Default verbosity of command _run_test (0 = quiet)
set _tests_verbose 0

# Pattern used to parse test case result lines in the summary log:
# "CASE <group> <grid> <case>: <status> <details>"
set _test_case_regexp {^CASE\s+([\w.-]+)\s+([\w.-]+)\s+([\w.-]+)\s*:\s*([\w]+)(.*)}
33 | ||
# Basic command to run indicated test case in DRAW
help test {
  Run specified test case
  Use: test group grid casename [echo=0]
  - If echo is set to 0 (default), log is stored in memory and only summary
    is output (the log can be obtained with command "dlog get")
  - If echo is set to 1 or "-echo", all commands and results are echoed
    immediately, but log is not saved and summary is not produced
}
proc test {group grid casename {echo 0}} {
    # resolve paths of the test case; raises an error if input is invalid
    _get_test $group $grid $casename dir gridname casefile

    # accept "-echo" as a synonym of boolean true
    if { "$echo" == "-echo" } {
        set echo t
    }

    # execute the test script in the caller's scope
    uplevel _run_test $dir $group $gridname $casefile $echo

    # when output was logged rather than echoed, analyse the log for pass/fail
    if { ! $echo } {
        _check_log $dir $group $gridname $casename [dlog get]
    }

    return
}
60 | ||
# Command to run all tests, or all tests of one group, or of one grid
help testgrid {
  Run all tests, or specified group, or one grid
  Use: testgrid [group [grid]] [options...]
  Allowed options are:
  -parallel N: run N parallel processes (default is number of CPUs, 0 to disable)
  -refresh N: save summary logs every N seconds (default 60, minimal 1, 0 to disable)
  -outdir dirname: set log directory (should be empty or non-existing)
  -overwrite: force writing logs in existing non-empty directory
  -xml filename: write XML report for Jenkins (in JUnit-like format)
}
proc testgrid {args} {
    global env tcl_platform _tests_verbose

    ######################################################
    # check arguments
    ######################################################

    # check that environment variable defining paths to test scripts is defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"
    }

    # treat options
    set parallel [_get_nb_cpus]
    set refresh 60
    set logdir ""
    set overwrite 0
    set xmlfile ""
    for {set narg 0} {$narg < [llength $args]} {incr narg} {
        set arg [lindex $args $narg]

        # parallel execution
        if { $arg == "-parallel" } {
            incr narg
            if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
                set parallel [expr [lindex $args $narg]]
            } else {
                error "Option -parallel requires argument"
            }
            continue
        }

        # refresh logs time
        if { $arg == "-refresh" } {
            incr narg
            if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
                set refresh [expr [lindex $args $narg]]
            } else {
                error "Option -refresh requires argument"
            }
            continue
        }

        # output directory
        if { $arg == "-outdir" } {
            incr narg
            if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
                set logdir [lindex $args $narg]
            } else {
                error "Option -outdir requires argument"
            }
            continue
        }

        # allow overwrite logs
        if { $arg == "-overwrite" } {
            set overwrite 1
            continue
        }

        # XML report file name (optional argument; default name if omitted)
        if { $arg == "-xml" } {
            incr narg
            if { $narg < [llength $args] && ! [regexp {^-} [lindex $args $narg]] } {
                set xmlfile [lindex $args $narg]
            }
            if { $xmlfile == "" } {
                set xmlfile TESTS-summary.xml
            }
            continue
        }

        # unsupported option
        if { [regexp {^-} $arg] } {
            error "Error: unsupported option \"$arg\""
        }

        # treat arguments not recognized as options as group and grid names
        if { ! [info exists groupname] } {
            set groupname $arg
        } elseif { ! [info exists gridname] } {
            set gridname $arg
        } else {
            error "Error: cannot interpret argument $narg ($arg): both group and grid names are already defined by previous args!"
        }
    }

    # check that target log directory is empty or does not exist
    set logdir [file normalize [string trim $logdir]]
    if { $logdir == "" } {
        # if specified logdir is empty string, generate unique name like
        # results_<branch>_<timestamp>
        set prefix "results"
        if { ! [catch {exec git branch} gitout] &&
             [regexp {[*] ([\w]+)} $gitout res branch] } {
            set prefix "${prefix}_$branch"
        }
        set logdir "${prefix}_[clock format [clock seconds] -format {%Y-%m-%dT%H%M}]"
        set logdir [file normalize $logdir]
    }
    if { [file isdirectory $logdir] && ! $overwrite && ! [catch {glob -directory $logdir *}] } {
        error "Error: Specified log directory \"$logdir\" is not empty; please clean it before running tests"
    }
    if { [catch {file mkdir $logdir}] || ! [file writable $logdir] } {
        error "Error: Cannot create directory \"$logdir\", or it is not writable"
    }

    ######################################################
    # prepare list of tests to be performed
    ######################################################

    # list of tests, each defined by a list of:
    # - test scripts directory
    # - group (subfolder) name
    # - grid (subfolder) name
    # - test case name
    # - path to test case file
    set tests_list {}

    # iterate by all script paths
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # protection against empty paths
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        if { $_tests_verbose > 0 } { _log_and_puts log "Examining tests directory $dir" }

        # check that directory exists
        if { ! [file isdirectory $dir] } {
            _log_and_puts log "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
            continue
        }

        # if test group is specified, check that directory with given name exists in this dir
        # if not, continue to the next test dir
        if { [info exists groupname] && $groupname != "" } {
            if { [file isdirectory $dir/$groupname] } {
                set groups $groupname
            } else {
                continue
            }
        } else {
            # else search all directories in the current dir
            if [catch {glob -directory $dir -tail -types d *} groups] { continue }
        }

        # iterate by groups
        if { $_tests_verbose > 0 } { _log_and_puts log "Groups to be executed: $groups" }
        foreach group [lsort -dictionary $groups] {
            if { $_tests_verbose > 0 } { _log_and_puts log "Examining group directory $group" }

            # file grids.list must exist: it defines sequence of grids in the group
            if { ! [file exists $dir/$group/grids.list] } {
                _log_and_puts log "Warning: directory $dir/$group does not contain file grids.list, skipped"
                continue
            }

            # read grids.list file and make a list of grids to be executed
            set gridlist {}
            set fd [open $dir/$group/grids.list]
            set nline 0
            while { [gets $fd line] >= 0 } {
                incr nline

                # skip comments and empty lines
                if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                if { [string trim $line] == "" } { continue }

                # get grid id and name
                if { ! [regexp "^\(\[0-9\]+\)\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridid grid] } {
                    _log_and_puts log "Warning: cannot recognize line $nline in file $dir/$group/grids.list as \"gridid gridname\"; ignored"
                    continue
                }

                # if specific grid is requested, check that it is present; otherwise make complete list
                if { ! [info exists gridname] || $gridname == "" || $gridname == $gridid || $gridname == $grid } {
                    lappend gridlist $grid
                }
            }
            close $fd

            # iterate by all grids
            foreach grid $gridlist {

                # check if this grid is aliased to another one
                set griddir $dir/$group/$grid
                if { [file exists $griddir/cases.list] } {
                    set fd [open $griddir/cases.list]
                    if { [gets $fd line] >= 0 } {
                        set griddir [file normalize $dir/$group/$grid/[string trim $line]]
                    }
                    close $fd
                }

                # check if grid directory actually exists
                if { ! [file isdirectory $griddir] } {
                    _log_and_puts log "Error: tests directory for grid $grid ($griddir) is missing; skipped"
                    continue
                }

                # create directory for logging test results
                if { $logdir != "" } { file mkdir $logdir/$group/$grid }

                # iterate by all tests in the grid directory
                if { [catch {glob -directory $griddir -types f *} testfiles] } { continue }
                foreach casefile [lsort -dictionary $testfiles] {
                    # filter out begin and end files
                    set casename [file tail $casefile]
                    if { $casename == "begin" || $casename == "end" } { continue }

                    lappend tests_list [list $dir $group $grid $casename $casefile]
                }
            }
        }
    }
    if { [llength $tests_list] < 1 } {
        error "Error: no tests are found, check your input arguments and variable CSF_TestScriptsPath!"
    }

    ######################################################
    # run tests
    ######################################################

    # log command arguments and environment
    set log "Command: testgrid $args\nHost: [info hostname]\nStarted on: [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}]\n"
    set log "$log\nEnvironment:\n"
    foreach envar [array names env] {
        set log "$log$envar=\"$env($envar)\"\n"
    }
    set log "$log\n"

    set refresh_timer [clock seconds]
    uplevel dchrono _timer reset
    uplevel dchrono _timer start

    # if parallel execution is requested, allocate thread pool
    if { $parallel > 0 } {
        if { ! [info exists tcl_platform(threaded)] || [catch {package require Thread}] } {
            _log_and_puts log "Warning: Tcl package Thread is not available, running in sequential mode"
            set parallel 0
        } else {
            set worker [tpool::create -minworkers $parallel -maxworkers $parallel]
            # suspend the pool until all jobs are posted, to prevent blocking of the process
            # of starting / processing jobs by running threads
            catch {tpool::suspend $worker}
            if { $_tests_verbose > 0 } { _log_and_puts log "Executing tests in (up to) $parallel threads" }
        }
    }

    # start test cases
    foreach test_def $tests_list {
        set dir [lindex $test_def 0]
        set group [lindex $test_def 1]
        set grid [lindex $test_def 2]
        set casename [lindex $test_def 3]
        set casefile [lindex $test_def 4]

        # command to set tests for generation of image in results directory
        set imgdir_cmd ""
        if { $logdir != "" } { set imgdir_cmd "set imagedir $logdir/$group/$grid" }

        # prepare command file for running test case in separate instance of DRAW
        set fd_cmd [open $logdir/$group/$grid/${casename}.tcl w]
        puts $fd_cmd "$imgdir_cmd"
        puts $fd_cmd "set test_image $casename"
        puts $fd_cmd "_run_test $dir $group $grid $casefile t"

        # use dlog command to obtain complete output of the test when it is absent (i.e. since OCCT 6.6.0)
        # note: this is not needed if echo is set to 1 in call to _run_test above
        if { ! [catch {dlog get}] } {
            puts $fd_cmd "puts \[dlog get\]"
        } else {
            # else try to use old-style QA_ variables to get more output...
            set env(QA_DUMP) 1
            set env(QA_DUP) 1
            set env(QA_print_command) 1
        }

        # final 'exit' is needed when running on Linux under VirtualGl
        puts $fd_cmd "exit"
        close $fd_cmd

        # command to run DRAW with a command file;
        # note that empty string is passed as standard input to avoid possible
        # hang-ups due to waiting for stdin of the launching process
        set command "exec <<{} DRAWEXE -f $logdir/$group/$grid/${casename}.tcl"

        # alternative method to run without temporary file; disabled as it needs too many backslashes
        # else {
        # set command "exec <<\"\" DRAWEXE -c $imgdir_cmd\\\; set test_image $casename\\\; \
        #  _run_test $dir $group $grid $casefile\\\; \
        #  puts \\\[dlog get\\\]\\\; exit"
        # }

        # run test case, either in parallel or sequentially
        if { $parallel > 0 } {
            # parallel execution
            set job [tpool::post -nowait $worker "catch \"$command\" output; return \$output"]
            set job_def($job) [list $logdir $dir $group $grid $casename]
        } else {
            # sequential execution
            catch {eval $command} output
            _log_test_case $output $logdir $dir $group $grid $casename log

            # update summary log with requested period
            if { $logdir != "" && $refresh > 0 && [expr [clock seconds] - $refresh_timer > $refresh] } {
                # update and dump summary
                _log_summarize $logdir $log
                set refresh_timer [clock seconds]
            }
        }
    }

    # get results of started threads
    if { $parallel > 0 } {
        catch {tpool::resume $worker}
        while { [llength [array names job_def]] > 0 } {
            foreach job [tpool::wait $worker [array names job_def]] {
                eval _log_test_case \[tpool::get $worker $job\] $job_def($job) log
                unset job_def($job)
            }

            # update summary log with requested period
            if { $logdir != "" && $refresh > 0 && [clock seconds] > $refresh_timer + $refresh } {
                _log_summarize $logdir $log
                set refresh_timer [clock seconds]
            }
        }
        # release thread pool
        tpool::release $worker
    }

    uplevel dchrono _timer stop
    set time [lindex [split [uplevel dchrono _timer show] "\n"] 0]

    ######################################################
    # output summary logs and exit
    ######################################################

    _log_summarize $logdir $log $time
    if { $logdir != "" } {
        puts "Detailed logs are saved in $logdir"
    }
    if { $logdir != "" && $xmlfile != "" } {
        # XML output file is assumed relative to log dir unless it is absolute
        if { [file pathtype $xmlfile] == "relative" } {
            set xmlfile [file normalize $logdir/$xmlfile]
        }
        _log_xml_summary $logdir $xmlfile $log 0
        puts "XML summary is saved to $xmlfile"
    }

    return
}
427 | ||
# Procedure to compare results of two runs of test cases
help testdiff {
  Compare results of two executions of tests (CPU times, ...)
  Use: testdiff dir1 dir2 [options...]
  Where dir1 and dir2 are directories containing logs of two test runs.
  Allowed options are:
  -save filename: save resulting log in specified file
  -subdir name: compare only specified subdirectory (can be nested)
  -status {same|ok|all}: filter cases for comparing by their status:
    same - only cases with same status are compared (default)
    ok - only cases with OK status in both logs are compared
    all - results are compared regardless of status
  -verbose level:
    1 - output only differences
    2 - output list of logs and directories present in one of dirs only
    3 - (default) output progress messages
}
proc testdiff {dir1 dir2 args} {
    # comparing a directory against itself makes no sense
    if { "$dir1" == "$dir2" } {
        error "Input directories are the same"
    }

    ######################################################
    # parse options
    ######################################################

    set logfile ""    ;# file to save the resulting log into
    set basename ""   ;# subdirectory to restrict comparison to
    set status "same" ;# status filter for cases
    set verbose 3     ;# verbosity level
    for {set narg 0} {$narg < [llength $args]} {incr narg} {
        set arg [lindex $args $narg]
        switch -exact -- $arg {
            -save {
                # log file name
                incr narg
                if { $narg < [llength $args] } {
                    set logfile [lindex $args $narg]
                } else {
                    error "Error: Option -save must be followed by log file name"
                }
            }
            -subdir {
                # subdirectory to compare
                incr narg
                if { $narg < [llength $args] } {
                    set basename [lindex $args $narg]
                } else {
                    error "Error: Option -subdir must be followed by subdirectory path"
                }
            }
            -status {
                # status filter; must be one of the three known values
                incr narg
                if { $narg < [llength $args] } {
                    set status [lindex $args $narg]
                } else { set status "" }
                if { "$status" != "same" && "$status" != "all" && "$status" != "ok" } {
                    error "Error: Option -status must be followed by one of \"same\", \"all\", or \"ok\""
                }
            }
            -verbose {
                # verbose level (optional numeric argument)
                incr narg
                if { $narg < [llength $args] } {
                    set verbose [expr [lindex $args $narg]]
                }
            }
            default {
                error "Error: unsupported option \"$arg\""
            }
        }
    }

    # run diff procedure (recursive)
    _test_diff $dir1 $dir2 $basename $status $verbose log

    # save result to log file
    if { "$logfile" != "" } {
        _log_save $logfile $log
    }

    return
}
520 | ||
# Internal procedure to locate the test case identified by group, grid, and case names.
# On success sets in the caller's scope:
# - dir: path to the base directory of the tests group
# - gridname: actual name of the grid (resolved if 'grid' is an alias)
# - casefile: path to the test case script
# Raises an error with an explanatory message if no such test is found.
proc _get_test {group grid casename _dir _gridname _casefile} {
    upvar $_dir dir
    upvar $_gridname gridname
    upvar $_casefile casefile

    global env

    # the environment variable listing test script paths must be defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"
    }

    # scan every directory listed in CSF_TestScriptsPath
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # ignore empty path entries
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        # warn about and skip paths that do not exist
        if { ! [file isdirectory $dir] } {
            puts "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
            continue
        }

        # the group must be a subdirectory here; otherwise try the next path
        if { ! [file isdirectory $dir/$group] } { continue }

        # resolve the grid: the given name may be an alias declared in grids.list
        set gridname $grid
        if { ! [file isdirectory $dir/$group/$gridname] } {
            # check if grid is named by alias rather than by actual name
            if { [file exists $dir/$group/grids.list] } {
                set fd [open $dir/$group/grids.list]
                while { [gets $fd line] >= 0 } {
                    if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                    if { [regexp "^$grid\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridname] } {
                        break
                    }
                }
                close $fd
            }
        }
        if { ! [file isdirectory $dir/$group/$gridname] } { continue }

        # determine the actual script file; the grid may redirect via cases.list
        set casefile $dir/$group/$gridname/$casename
        if { ! [file exists $casefile] } {
            # check if this grid is aliased to another one
            if { [file exists $dir/$group/$gridname/cases.list] } {
                set fd [open $dir/$group/$gridname/cases.list]
                if { [gets $fd line] >= 0 } {
                    set casefile [file normalize $dir/$group/$gridname/[string trim $line]/$casename]
                }
                close $fd
            }
        }
        if { [file exists $casefile] } {
            # found: dir, gridname, and casefile are now set in the caller
            return
        }
    }

    # coming here means the specified test was not found; report error
    error [join [list "Error: test case $group / $grid / $casename is not found in paths listed in variable" \
                      "CSF_TestScriptsPath (current value is \"$env(CSF_TestScriptsPath)\")"] "\n"]
}
595 | ||
# Internal procedure to run test case indicated by base directory,
# group and grid names, and test case file path.
# If echo is false, output is captured and can be obtained afterwards
# by command "dlog get"; if echo is true, output is echoed immediately.
proc _run_test {scriptsdir group gridname casefile echo} {
    global env

    # start timer
    uplevel dchrono _timer reset
    uplevel dchrono _timer start

    # enable commands logging; switch to old-style mode if dlog command is not present
    set dlog_exists 1
    if { [catch {dlog reset}] } {
        set dlog_exists 0
    } elseif { $echo } {
        decho on
    } else {
        dlog reset
        dlog on
        # temporarily replace 'puts' so that all console output is captured in the log
        rename puts puts-saved
        proc puts args {
            global _tests_verbose

            # log only output to stdout and stderr, not to file!
            if {[llength $args] > 1} {
                set optarg [lindex $args end-1]
                if { $optarg == "stdout" || $optarg == "stderr" || $optarg == "-newline" } {
                    dlog add [lindex $args end]
                }
            } else {
                dlog add [lindex $args end]
            }

            # reproduce original puts
            if { $_tests_verbose } {
                eval puts-saved $args
            }
        }
    }

    # evaluate test case: group/grid 'begin' scripts, the case itself, then 'end' scripts
    if [catch {
        uplevel set casename [file tail $casefile]
        uplevel set groupname $group
        uplevel set gridname $gridname

        if { [file exists $scriptsdir/$group/begin] } {
            puts "Executing $scriptsdir/$group/begin..."; flush stdout
            uplevel source $scriptsdir/$group/begin
        }
        if { [file exists $scriptsdir/$group/$gridname/begin] } {
            puts "Executing $scriptsdir/$group/$gridname/begin..."; flush stdout
            uplevel source $scriptsdir/$group/$gridname/begin
        }

        puts "Executing $casefile..."; flush stdout
        uplevel source $casefile

        if { [file exists $scriptsdir/$group/$gridname/end] } {
            puts "Executing $scriptsdir/$group/$gridname/end..."; flush stdout
            uplevel source $scriptsdir/$group/$gridname/end
        }
        if { [file exists $scriptsdir/$group/end] } {
            puts "Executing $scriptsdir/$group/end..."; flush stdout
            uplevel source $scriptsdir/$group/end
        }
    } res] {
        puts "Tcl Exception: $res"
    }

    # stop logging and restore the original 'puts'
    if { $dlog_exists } {
        if { $echo } {
            decho off
        } else {
            rename puts {}
            rename puts-saved puts
            dlog off
        }
    }

    # stop cpulimit killer if armed by the test
    cpulimit

    # add timing info
    uplevel dchrono _timer stop
    set time [uplevel dchrono _timer show]
    if [regexp -nocase {CPU user time:[ \t]*([0-9.e-]+)} $time res cpu] {
        if { $dlog_exists && ! $echo } {
            dlog add "TOTAL CPU TIME: $cpu sec"
        } else {
            puts "TOTAL CPU TIME: $cpu sec"
        }
    }
}
691 | ||
# Internal procedure to check log of test execution and decide if it passed or failed.
# Optionally returns the one-line summary and an HTML-highlighted log via upvar.
proc _check_log {dir group gridname casename log {_summary {}} {_html_log {}}} {
    global env
    if { $_summary != "" } { upvar $_summary summary }
    if { $_html_log != "" } { upvar $_html_log html_log }
    set summary ""
    set html_log ""

    if { [catch {

        # load definition of 'bad words' indicating test failure
        # note that rules are loaded in the order of decreasing priority (grid - group - common),
        # thus grid rules will override group ones
        set badwords {}
        foreach rulesfile [list $dir/$group/$gridname/parse.rules $dir/$group/parse.rules $dir/parse.rules] {
            if { [catch {set fd [open $rulesfile r]}] } { continue }
            while { [gets $fd line] >= 0 } {
                # skip comments and empty lines
                if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                if { [string trim $line] == "" } { continue }
                # extract regexp
                if { ! [regexp {^([^/]*)/([^/]*)/(.*)$} $line res status rexp comment] } {
                    puts "Warning: cannot recognize parsing rule \"$line\" in file $rulesfile"
                    continue
                }
                set status [string trim $status]
                if { $comment != "" } { set status "$status ([string trim $comment])" }
                set rexp [regsub -all {\\b} $rexp {\\y}] ;# convert regexp from Perl to Tcl style
                lappend badwords [list $status $rexp]
            }
            close $fd
        }
        if { [llength $badwords] <= 0 } {
            puts "Warning: no definition of error indicators found (check files parse.rules)"
        }

        # analyse log line-by-line
        set todos {}
        set status ""
        foreach line [split $log "\n"] {
            # check if line defines specific treatment of some messages
            if { [regexp -nocase {^[ \t]*TODO ([^:]*):(.*)$} $line res platforms pattern] } {
                if { ! [regexp -nocase {\mAll\M} $platforms] &&
                     ! [regexp -nocase "\\m$env(os_type)\\M" $platforms] } {
                    append html_log "\n$line"
                    continue ;# TODO statement is for another platform
                }

                # record TODOs that mark unstable cases
                if { [regexp {[\?]} $platforms] } {
                    set todos_unstable([llength $todos]) 1
                }

                lappend todos [regsub -all {\\b} [string trim $pattern] {\\y}] ;# convert regexp from Perl to Tcl style
                append html_log "\n[_html_highlight BAD $line]"
                continue
            }

            # check for presence of messages indicating test result
            set matched 0
            foreach rule $badwords {
                if { [regexp [lindex $rule 1] $line] } {
                    # check if this is known bad case
                    set is_known 0
                    for {set i 0} {$i < [llength $todos]} {incr i} {
                        if { [regexp [lindex $todos $i] $line] } {
                            set is_known 1
                            incr todo_count($i)
                            append html_log "\n[_html_highlight BAD $line]"
                            break
                        }
                    }

                    # if it is not in todo, define status
                    if { ! $is_known } {
                        set stat [lindex $rule 0 0]
                        append html_log "\n[_html_highlight $stat $line]"
                        if { $status == "" && $stat != "OK" && ! [regexp -nocase {^IGNOR} $stat] } {
                            set status [lindex $rule 0]
                        }
                    }
                    set matched 1
                    break
                }
            }
            if { ! $matched } {
                append html_log "\n$line"
            }
        }

        # check for presence of TEST COMPLETED statement
        if { $status == "" && ! [regexp {TEST COMPLETED} $log] } {
            # check whether absence of TEST COMPLETED is known problem
            set i [lsearch $todos "TEST INCOMPLETE"]
            if { $i >= 0 } {
                incr todo_count($i)
            } else {
                set status "FAILED (no final message is found)"
            }
        }

        # check declared bad cases and diagnose possible improvement
        # (bad case declared but not detected).
        # Note that absence of the problem marked by TODO with question mark
        # (unstable) is not reported as improvement.
        if { $status == "" } {
            for {set i 0} {$i < [llength $todos]} {incr i} {
                if { ! [info exists todos_unstable($i)] &&
                     (! [info exists todo_count($i)] || $todo_count($i) <= 0) } {
                    set status "IMPROVEMENT (expected problem TODO no. [expr $i + 1] is not detected)"
                    break
                }
            }
        }

        # report test as known bad if at least one of expected problems is found
        if { $status == "" && [llength [array names todo_count]] > 0 } {
            set status "BAD (known problem)"
        }

        # report normal OK
        if { $status == "" } { set status "OK" }

    } res] } {
        set status "FAILED ($res)"
    }

    # put final message
    _log_and_puts summary "CASE $group $gridname $casename: $status"
    set html_log "[_html_highlight [lindex $status 0] $summary]\n$html_log"
}
823 | ||
# Print a message to stdout and, unless logvar is an empty string, append
# it (with a trailing newline) to the variable of that name in the
# caller's scope, creating the variable if it does not exist yet.
proc _log_and_puts {logvar message} {
    if { $logvar != "" } {
        upvar $logvar log
        if { ! [info exists log] } {
            set log ""
        }
        append log $message "\n"
    }
    puts $message
}
836 | ||
# Record the result of a single test case: analyze its output via
# _check_log, append the one-line summary to the caller's variable named
# by logvar, and (when logdir is non-empty) save per-case HTML and plain
# logs under $logdir/$group/$grid/.
proc _log_test_case {output logdir dir group grid casename logvar} {
    upvar $logvar log

    # analyze output, producing a summary line and an HTML-decorated log
    _check_log $dir $group $grid $casename $output summary html_log
    append log $summary

    # dump per-case logs when a log directory is specified
    if { $logdir != "" } {
        set title "Test $group $grid $casename"
        _log_html $logdir/$group/$grid/$casename.html $html_log $title
        _log_save $logdir/$group/$grid/$casename.log "$output\n$summary" $title
    }
}
851 | ||
# Write a log string (preceded by an optional title line and a blank
# line) into the given file, creating parent directories as needed.
# Raises a Tcl error when the file cannot be opened for writing.
proc _log_save {file log {title {}}} {
    # make sure the target directory exists; ignore mkdir failures here,
    # the subsequent open will report the real problem
    catch {file mkdir [file dirname $file]}

    # open the target file, reporting failure to the caller
    if { [catch {open $file w} fd] } {
        error "Error saving log file $file: $fd"
    }

    # dump title and body, then close
    puts $fd "$title\n"
    puts $fd $log
    close $fd
    return
}
868 | ||
# Auxiliary procedure to save a log to an HTML file: embeds images with
# the same base name found next to the file, and turns lines of the form
# "Executing <script>" into hyperlinks to the script (relative when the
# script shares a path prefix with the log file, file:// otherwise).
# - file  : path of the HTML file to create (parent dirs made as needed)
# - log   : text of the log to dump
# - title : optional page title
# Raises a Tcl error when the file cannot be opened for writing.
proc _log_html {file log {title {}}} {
    # create missing directories as needed
    catch {file mkdir [file dirname $file]}

    # try to open a file
    if [catch {set fd [open $file w]} res] {
        error "Error saving log file $file: $res"
    }

    # print header (fixed: the <head> element was previously never closed)
    puts $fd "<html><head><title>$title</title></head><body><h1>$title</h1>"

    # add images if present (same base name, .gif/.png/.jpg, same dir)
    set imgbasename [file rootname [file tail $file]]
    foreach img [lsort [glob -nocomplain -directory [file dirname $file] -tails ${imgbasename}*.gif ${imgbasename}*.png ${imgbasename}*.jpg]] {
        puts $fd "<p><img src=\"$img\"/><p>"
    }

    # print log body, trying to add HTML links to script files on lines like
    # "Executing <filename>..."
    puts $fd "<pre>"
    set logpath [file split [file normalize $file]]
    foreach line [split $log "\n"] {
        if { [regexp {Executing[ \t]+([a-zA-Z0-9._/:-]+[^.])} $line res script] &&
             [file exists $script] } {

            # default to an absolute URL; replaced by a relative one below
            # when the log and the script share a common path prefix
            set url "file://[file normalize $script]"
            set scriptpath [file split [file normalize $script]]
            for {set i 0} {$i < [llength $logpath]} {incr i} {
                # fixed: the right-hand side previously had a stray "]"
                # appended, so the components never compared equal and the
                # relative URL was never generated
                if { "[lindex $logpath $i]" != "[lindex $scriptpath $i]" } {
                    if { $i == 0 } { break }
                    # fixed: use "eval file join" so the path elements are
                    # joined individually (plain "file join <list>" treats
                    # the whole list as one element), and drop the extra "/"
                    # that produced "..//" in the generated URL
                    set url "[string repeat "../" [expr [llength $logpath] - $i - 1]][eval file join [lrange $scriptpath $i end]]"
                    break
                }
            }

            set line [regsub $script $line "<a href=\"$url\">$script</a>"]
        }
        puts $fd $line
    }
    puts $fd "</pre></body></html>"

    close $fd
    return
}
916 | ||
# Map a test status string onto the background color used for its HTML
# cell; unknown statuses fall back to red with a warning on stdout.
proc _html_color {status} {
    if { $status == "OK" } { return lightgreen }

    # ordered prefix -> color table, matched case-insensitively
    foreach {pattern color} {
        {^FAIL}  red
        {^BAD}   yellow
        {^IMP}   orange
        {^SKIP}  gray
        {^IGNOR} gray
    } {
        if { [regexp -nocase $pattern $status] } { return $color }
    }

    puts "Warning: no color defined for status $status, using red as if FAILED"
    return red
}
937 | ||
# Wrap a log line in a one-cell HTML table whose background color
# reflects the given status (see _html_color).
proc _html_highlight {status line} {
    set color [_html_color $status]
    return "<table><tr><td bgcolor=\"$color\">$line</td></tr></table>"
}
942 | ||
# Internal procedure to generate HTML page presenting log of the tests
# execution in tabular form, with links to reports on individual cases.
# - logdir       : directory where summary.html is created
# - log          : full text log; only lines matching _test_case_regexp
#                  ("CASE group grid name: result ...") become table cells
# - totals       : list of {count status} pairs for the summary table
# - regressions  : list {header case case ...} of failed cases
# - improvements : list {header case case ...} of improved cases
# - total_time   : elapsed-time text; empty means tests are still running,
#                  in which case the page auto-refreshes every 10 s
proc _log_html_summary {logdir log totals regressions improvements total_time} {
    global _test_case_regexp

    # create missing directories as needed
    catch {file mkdir $logdir}

    # try to open a file and start HTML
    if [catch {set fd [open $logdir/summary.html w]} res] {
        error "Error creating log file: $res"
    }

    # write HTML header, including command to refresh log if still in progress
    puts $fd "<html><head>"
    puts $fd "<title>Tests summary</title>"
    if { $total_time == "" } {
        puts $fd "<meta http-equiv=\"refresh\" content=\"10\">"
    }
    puts $fd "<meta http-equiv=\"pragma\" content=\"NO-CACHE\">"
    puts $fd "</head><body>"

    # put summary table: count + colored status + human-readable legend
    set legend(OK) "Test passed OK"
    set legend(FAILED) "Test failed (regression)"
    set legend(BAD) "Known problem"
    set legend(IMPROVEMENT) "Possible improvement (expected problem not detected)"
    set legend(SKIPPED) "Test skipped due to lack of data file"
    puts $fd "<h1>Summary</h1><table>"
    foreach nbstat $totals {
        set status [lindex $nbstat 1]
        if { [info exists legend($status)] } {
            set comment $legend($status)
        } else {
            set comment "User-defined status"
        }
        puts $fd "<tr><td align=\"right\">[lindex $nbstat 0]</td><td bgcolor=\"[_html_color $status]\">$status</td><td>$comment</td></tr>"
    }
    puts $fd "</table>"

    # time stamp and elapsed time info
    if { $total_time != "" } {
        puts $fd "<p>Generated on [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}] on [info hostname] <p> $total_time"
    } else {
        puts $fd "<p>NOTE: This is intermediate summary; the tests are still running! This page will refresh automatically until tests are finished."
    }

    # print regressions and improvements as short tables, one row per
    # "group grid", one linked cell per case
    foreach featured [list $regressions $improvements] {
        # first list element is the header ("Failed:" / "Improvements:");
        # a one-element list means there is nothing to report
        if { [llength $featured] <= 1 } { continue }
        set status [string trim [lindex $featured 0] { :}]
        puts $fd "<h2>$status</h2>"
        puts $fd "<table>"
        set groupgrid ""
        foreach test [lrange $featured 1 end] {
            # split "group grid name" into group/grid part and case name
            if { ! [regexp {^(.*)\s+([\w.]+)$} $test res gg name] } {
                set gg UNKNOWN
                set name "Error building short list; check details"
            }
            # start a new table row whenever the group/grid changes
            if { $gg != $groupgrid } {
                if { $groupgrid != "" } { puts $fd "</tr>" }
                set groupgrid $gg
                puts $fd "<tr><td>$gg</td>"
            }
            puts $fd "<td bgcolor=\"[_html_color $status]\"><a href=\"[regsub -all { } $gg /]/${name}.html\">$name</a></td>"
        }
        if { $groupgrid != "" } { puts $fd "</tr>" }
        puts $fd "</table>"
    }

    # put detailed log: one table per grid, one cell per case
    puts $fd "<h1>Details</h1>"

    # process log line-by-line
    set group {}
    set letter {}
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
            continue
        }

        # start new group
        if { $grp != $group } {
            if { $letter != "" } { puts $fd "</tr></table>" }
            set letter {}
            set group $grp
            set grid {}
            puts $fd "<h2>Group $group</h2>"
        }

        # start new grid
        if { $grd != $grid } {
            if { $letter != "" } { puts $fd "</tr></table>" }
            set letter {}
            set grid $grd
            puts $fd "<h3>Grid $grid</h3>"
        }

        # check if test case name is <letter><digit>;
        # if not, set alnum to period "." to recognize non-standard test name
        if { ! [regexp {([A-Za-z]+)([0-9]+)} $casename res alnum number] } {
            set alnum .
        }

        # start new row when letter changes or for non-standard names
        if { $alnum != $letter || $alnum == "." } {
            if { $letter != "" } {
                puts $fd "</tr><tr>"
            } else {
                puts $fd "<table><tr>"
            }
            set letter $alnum
        }

        puts $fd "<td bgcolor=\"[_html_color $result]\"><a href=\"$group/$grid/${casename}.html\">$casename</a></td>"
    }
    puts $fd "</tr></table>"

    # add remaining lines of log (everything not matching the CASE regexp)
    # as plain text
    puts $fd "<h2>Plain text messages</h2>\n<pre>"
    foreach line [split $log "\n"] {
        if { ! [regexp $_test_case_regexp $line] } {
            puts $fd "$line"
        }
    }
    puts $fd "</pre>"

    # close file and exit
    puts $fd "</body>"
    close $fd
    return
}
1076 | ||
# Procedure to dump summary logs of tests.
# - logdir     : target directory for summary.html and tests.log
#                (empty string disables saving to files)
# - log        : accumulated text log with one "CASE ...: status" line
#                per executed test case
# - total_time : elapsed-time text; non-empty means the run is complete
#                and totals should be appended to the log
# NOTE(review): _log_and_puts below appends to the local copy of "log",
# so the totals are visible in the saved/printed output but not in the
# caller's variable.
proc _log_summarize {logdir log {total_time {}}} {

    # sort log records alphabetically to have the same behavior on Linux and Windows
    # (also needed if tests are run in parallel)
    set loglist [lsort -dictionary [split $log "\n"]]

    # classify test cases by status: stat(<status>) collects case ids
    foreach line $loglist {
        if { [regexp {^CASE ([^:]*): ([[:alnum:]]+).*$} $line res caseid status] } {
            lappend stat($status) $caseid
        }
    }
    set totals {}
    set improvements {Improvements:}
    set regressions {Failed:}
    if { [info exists stat] } {
        foreach status [lsort [array names stat]] {
            lappend totals [list [llength $stat($status)] $status]

            # separately count improvements (status starting with IMP) and regressions (all except IMP, OK, BAD, and SKIP)
            if { [regexp -nocase {^IMP} $status] } {
                eval lappend improvements $stat($status)
            } elseif { $status != "OK" && ! [regexp -nocase {^BAD} $status] && ! [regexp -nocase {^SKIP} $status] } {
                eval lappend regressions $stat($status)
            }
        }
    }

    # if time is specified, add totals; a length of 1 means only the
    # header ("Improvements:" / "Failed:") is present, i.e. an empty list
    if { $total_time != "" } {
        if { [llength $improvements] > 1 } {
            _log_and_puts log [join $improvements "\n  "]
        }
        if { [llength $regressions] > 1 } {
            _log_and_puts log [join $regressions "\n  "]
        }
        if { [llength $improvements] == 1 && [llength $regressions] == 1 } {
            _log_and_puts log "No regressions"
        }
        _log_and_puts log "Total cases: [join $totals {, }]"
        _log_and_puts log $total_time
    }

    # save log to files (HTML summary plus plain-text tests.log)
    if { $logdir != "" } {
        _log_html_summary $logdir $log $totals $regressions $improvements $total_time
        _log_save $logdir/tests.log $log "Tests summary"
    }

    return
}
1129 | ||
# Internal procedure to generate XML log in JUnit style, for further
# consumption by Jenkins or similar systems.
#
# The output is intended to conform to XML schema supported by Jenkins found at
# https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd
#
# The mapping of the fields is inspired by annotated schema of Apache Ant JUnit XML format found at
# http://windyroad.org/dl/Open%20Source/JUnit.xsd
#
# - logdir       : directory holding per-case logs ($logdir/group/grid/case.log)
# - filename     : path of the XML file to create
# - log          : accumulated text log with one "CASE ..." line per case
# - include_cout : when true, embed the full per-case log in <system-out>;
#                  otherwise only the one-line CASE summary
proc _log_xml_summary {logdir filename log include_cout} {
    global _test_case_regexp

    catch {file mkdir [file dirname $filename]}

    # try to open a file and start XML
    if [catch {set fd [open $filename w]} res] {
        error "Error creating XML summary file $filename: $res"
    }
    puts $fd "<?xml version='1.0' encoding='utf-8'?>"
    puts $fd "<testsuites>"

    # prototype for command to generate test suite tag; evaluated via
    # "eval" whenever a grid is finished, using the counters accumulated
    # in the loop below
    set time_and_host "timestamp=\"[clock format [clock seconds] -format {%Y-%m-%dT%H:%M:%S}]\" hostname=\"[info hostname]\""
    set cmd_testsuite {puts $fd "<testsuite name=\"$group $grid\" tests=\"$nbtests\" failures=\"$nbfail\" errors=\"$nberr\" time=\"$time\" skipped=\"$nbskip\" $time_and_host>\n$testcases\n</testsuite>\n"}

    # sort log and process it line-by-line
    set group {}
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
            continue
        }
        set message [string trim $message " \t\r\n()"]

        # start new testsuite for each grid
        # (on the very first match $grid is unset, but expr's short-circuit
        # "||" never evaluates it since $grp != $group is already true)
        if { $grp != $group || $grd != $grid } {

            # write previous test suite
            if [info exists testcases] { eval $cmd_testsuite }

            set testcases {}
            set nbtests 0
            set nberr 0
            set nbfail 0
            set nbskip 0
            set time 0.

            set group $grp
            set grid $grd
        }

        incr nbtests

        # parse test log and get its CPU time
        set testout {}
        set add_cpu {}
        if { [catch {set fdlog [open $logdir/$group/$grid/${casename}.log r]} ret] } {
            puts "Error: cannot open $logdir/$group/$grid/${casename}.log: $ret"
        } else {
            while { [gets $fdlog logline] >= 0 } {
                if { $include_cout } {
                    set testout "$testout$logline\n"
                }
                if [regexp -nocase {TOTAL CPU TIME:\s*([\d.]+)\s*sec} $logline res cpu] {
                    set add_cpu " time=\"$cpu\""
                    set time [expr $time + $cpu]
                }
            }
            close $fdlog
        }
        if { ! $include_cout } {
            set testout "$line\n"
        }

        # record test case with its output and status.
        # Mapping of OCCT statuses to JUnit elements: SKIP* -> <error>,
        # BAD* -> <skipped>, OK -> plain test case, all other -> <failure>
        set testcases "$testcases\n <testcase name=\"$casename\"$add_cpu status=\"$result\">\n"
        set testcases "$testcases\n <system-out>\n$testout </system-out>"
        if { $result != "OK" } {
            if { [regexp -nocase {^SKIP} $result] } {
                incr nberr
                set testcases "$testcases\n <error name=\"$result\" message=\"$message\"/>"
            } elseif { [regexp -nocase {^BAD} $result] } {
                incr nbskip
                set testcases "$testcases\n <skipped>$message</skipped>"
            } else {
                incr nbfail
                set testcases "$testcases\n <failure name=\"$result\" message=\"$message\"/>"
            }
        }
        set testcases "$testcases\n </testcase>"
    }

    # write last test suite
    if [info exists testcases] { eval $cmd_testsuite }

    # the end
    puts $fd "</testsuites>"
    close $fd
    return
}
1230 | ||
# Define custom platform name in env(os_type), used to select
# platform-specific TODO statements in test scripts.
# Keeps a pre-existing env(os_type) untouched; otherwise starts from
# $tcl_platform(platform) and refines it on Linux by parsing /etc/issue.
proc _tests_platform_def {} {
    global env tcl_platform

    # respect an explicitly pre-set value
    if [info exists env(os_type)] { return }

    set env(os_type) $tcl_platform(platform)

    # use detailed mapping for various versions of Linux
    # (note that mapping is rather non-uniform, for historical reasons)
    if { $env(os_type) == "unix" && ! [catch {exec cat /etc/issue} issue] } {
        if { [regexp {Mandriva[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) Mandriva$num
        } elseif { [regexp {Red Hat[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) RedHat$num
        } elseif { [regexp {Debian[ \tA-Za-z/]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) Debian$num$subnum
        } elseif { [regexp {CentOS[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) CentOS$num$subnum
        } elseif { [regexp {Scientific[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} $issue res num subnum] } {
            set env(os_type) SL$num$subnum
        } elseif { [regexp {Fedora Core[ \tA-Za-z]+([0-9]+)} $issue res num] } {
            set env(os_type) FedoraCore$num
        }
        # distinguish 64-bit systems by a "-64" suffix
        if { [exec uname -m] == "x86_64" } {
            set env(os_type) "$env(os_type)-64"
        }
    }
}
_tests_platform_def
1261 | ||
# Split a path specification (usually the content of an environment
# variable) into a Tcl list of directories or files; accepts both "/"
# and "\" as directory separators within each entry.
proc _split_path {pathspec} {
    global tcl_platform

    # normalize Windows backslashes to forward slashes
    set normalized [string map [list \\ /] $pathspec]

    # cut at the platform-specific path-list separator
    return [split $normalized [_path_separator]]
}
1273 | ||
# Return the character separating entries in path-list specifications on
# the current platform: ";" on Windows, ":" elsewhere.
proc _path_separator {} {
    global tcl_platform

    if { $tcl_platform(platform) == "windows" } { return ";" }
    return ":"
}
1286 | ||
# Procedure to locate data file for test given its name.
# The search is performed assuming that the function is called
# from the test case script; the search order is:
# - subdirectory "data" of the directory containing the current script
# - subdirectories (recursively) of paths listed in CSF_TestDataPath
# - directory set by the DRAW "datadir" command
# If file is not found, raises Tcl error.
# (Fixed: the parameter list had been corrupted, leaving $filename
# undefined inside the body.)
proc locate_data_file {filename} {
    global env groupname gridname casename

    # check if the file is located in the subdirectory data of the script dir
    set scriptfile [info script]
    if { $scriptfile != "" } {
        set path [file join [file dirname $scriptfile] data $filename]
        if { [file exists $path] } {
            return [file normalize $path]
        }
    }

    # check sub-directories in paths indicated by CSF_TestDataPath;
    # each path is searched breadth-first through all its subdirectories
    if { [info exists env(CSF_TestDataPath)] } {
        foreach dir [_split_path $env(CSF_TestDataPath)] {
            while {[llength $dir] != 0} {
                set name [lindex $dir 0]
                set dir [lrange $dir 1 end]
                eval lappend dir [glob -nocomplain -directory $name -type d *]
                if { [file exists $name/$filename] } {
                    return [file normalize $name/$filename]
                }
            }
        }
    }

    # check current datadir
    if { [file exists [uplevel datadir]/$filename] } {
        return [file normalize [uplevel datadir]/$filename]
    }

    # raise error
    error [join [list "Error: file $filename could not be found" \
                      "(should be in paths indicated by CSF_TestDataPath environment variable, " \
                      "or in subfolder data in the script directory)"] "\n"]
}
cc6a292d | 1328 | |
# Compute the difference and intersection of two lists: items present
# only in list1 go to the caller's variable named _in1, items only in
# list2 to _in2, and shared items to _common.
proc _list_diff {list1 list2 _in1 _in2 _common} {
    upvar $_in1 only1
    upvar $_in2 only2
    upvar $_common both

    set only1 {}
    set only2 {}
    set both {}

    # classify each item of list1 as shared or unique to list1
    foreach item $list1 {
        if { [lsearch -exact $list2 $item] < 0 } {
            lappend only1 $item
        } else {
            lappend both $item
        }
    }

    # whatever remains in list2 and is not shared is unique to list2
    foreach item $list2 {
        if { [lsearch -exact $both $item] < 0 } {
            lappend only2 $item
        }
    }
    return
}
1352 | ||
# Procedure to load the content of a file into a Tcl string; the trailing
# newline (if any) is stripped. Raises the standard "open" error when the
# file cannot be read.
# (Fixed: the parameter list had been corrupted, leaving $filename
# undefined inside the body.)
proc _read_file {filename} {
    set fd [open $filename r]
    set result [read -nonewline $fd]
    close $fd
    return $result
}
1360 | ||
# Procedure to compare results of two runs of test cases, recursing into
# matching subdirectories and diffing the *.log files found in the leaves.
# - dir1, dir2 : root directories of the two runs
# - basename   : relative path currently being compared ("" at top level)
# - status     : "all" to compare CPU even when statuses differ,
#                "ok" to compare only cases whose status is OK
# - verbose    : >1 reports files present on one side only,
#                >2 additionally traces each directory visited
# - _logvar    : name of caller's variable accumulating the report
# - _statvar   : name of caller's stat array; empty at the top-level call,
#                in which case totals are initialized here and printed at
#                the end (recursive calls pass "stat" to share the array)
proc _test_diff {dir1 dir2 basename status verbose _logvar {_statvar ""}} {
    upvar $_logvar log

    # prepare variable (array) for collecting statistics
    if { "$_statvar" != "" } {
        upvar $_statvar stat
    } else {
        set stat(cpu1) 0
        set stat(cpu2) 0
        set log {}
    }

    # first check subdirectories
    set path1 [file join $dir1 $basename]
    set path2 [file join $dir2 $basename]
    set list1 [glob -directory $path1 -types d -tails -nocomplain *]
    set list2 [glob -directory $path2 -types d -tails -nocomplain *]
    if { [llength $list1] >0 || [llength $list2] > 0 } {
        _list_diff $list1 $list2 in1 in2 common
        if { "$verbose" > 1 } {
            if { [llength $in1] > 0 } { _log_and_puts log "Only in $path1: $in1" }
            if { [llength $in2] > 0 } { _log_and_puts log "Only in $path2: $in2" }
        }
        # recurse into subdirectories present in both runs
        foreach subdir $common {
            if { "$verbose" > 2 } {
                _log_and_puts log "Checking [file join $basename $subdir]"
            }
            _test_diff $dir1 $dir2 [file join $basename $subdir] $status $verbose log stat
        }
    } else {
        # check log files (only if directory has no subdirs)
        set list1 [glob -directory $path1 -types f -tails -nocomplain *.log]
        set list2 [glob -directory $path2 -types f -tails -nocomplain *.log]
        _list_diff $list1 $list2 in1 in2 common
        if { "$verbose" > 1 } {
            if { [llength $in1] > 0 } { _log_and_puts log "Only in $path1: $in1" }
            if { [llength $in2] > 0 } { _log_and_puts log "Only in $path2: $in2" }
        }
        foreach logfile $common {
            # load two logs
            set log1 [_read_file [file join $dir1 $basename $logfile]]
            set log2 [_read_file [file join $dir2 $basename $logfile]]

            # check execution statuses extracted from the "CASE ...:" line
            set status1 UNDEFINED
            set status2 UNDEFINED
            if { ! [regexp {CASE [^:]*:\s*([\w]+)} $log1 res1 status1] ||
                 ! [regexp {CASE [^:]*:\s*([\w]+)} $log2 res2 status2] ||
                 "$status1" != "$status2" } {
                _log_and_puts log "STATUS [split $basename /] [file rootname $logfile]: $status1 / $status2"

                # if test statuses are different, further comparison makes
                # no sense unless explicitly requested
                if { "$status" != "all" } {
                    continue
                }
            }
            if { "$status" == "ok" && "$status1" != "OK" } {
                continue
            }

            # check CPU times
            # NOTE(review): the second regexp reuses res1 as match variable;
            # harmless since the full-match value is never used
            set cpu1 UNDEFINED
            set cpu2 UNDEFINED
            if { [regexp {TOTAL CPU TIME:\s*([\d.]+)} $log1 res1 cpu1] &&
                 [regexp {TOTAL CPU TIME:\s*([\d.]+)} $log2 res1 cpu2] } {
                set stat(cpu1) [expr $stat(cpu1) + $cpu1]
                set stat(cpu2) [expr $stat(cpu2) + $cpu2]

                # compare CPU times with 10% precision (but not less 0.5 sec):
                # threshold is 0.5 + 5% of (cpu1+cpu2), i.e. ~10% of the average
                if { [expr abs ($cpu1 - $cpu2) > 0.5 + 0.05 * abs ($cpu1 + $cpu2)] } {
                    _log_and_puts log "CPU [split $basename /] [file rootname $logfile]: $cpu1 / $cpu2"
                }
            }
        }
    }

    # at the top-level call, report accumulated CPU totals
    if { "$_statvar" == "" } {
        _log_and_puts log "Total CPU difference: $stat(cpu1) / $stat(cpu2)"
    }
}
b725d7c5 | 1443 | |
# Return the number of logical CPUs on the current system, or 0 when the
# count cannot be determined.
proc _get_nb_cpus {} {
    global tcl_platform env

    if { "$tcl_platform(platform)" == "windows" } {
        # on Windows, take the value of the environment variable
        if { [info exists env(NUMBER_OF_PROCESSORS)] &&
             ! [catch {expr $env(NUMBER_OF_PROCESSORS) > 0} check] && $check >= 0 } {
            return $env(NUMBER_OF_PROCESSORS)
        }
    } elseif { "$tcl_platform(os)" == "Linux" } {
        # on Linux, count "processor : N" records in /proc/cpuinfo
        if { ! [catch {open "/proc/cpuinfo" r} fd] } {
            set count 0
            foreach cpuline [split [read $fd] "\n"] {
                if { [regexp {^processor[ \t]*:} $cpuline] } {
                    incr count
                }
            }
            close $fd
            return $count
        }
        return 0 ;# should never happen, but...
    } elseif { "$tcl_platform(os)" == "Darwin" } {
        # on MacOS X, call sysctl command
        if { ! [catch {exec sysctl hw.ncpu} ret] &&
             [regexp {^hw[.]ncpu[ \t]*:[ \t]*([0-9]+)} $ret res nb] } {
            return $nb
        }
    }

    # if cannot get good value, return 0 as default
    return 0
}