1 | # Copyright (c) 2012 OPEN CASCADE SAS |
2 | # |
3 | # The content of this file is subject to the Open CASCADE Technology Public |
4 | # License Version 6.5 (the "License"). You may not use the content of this file |
5 | # except in compliance with the License. Please obtain a copy of the License |
6 | # at http://www.opencascade.org and read it completely before using this file. |
7 | # |
8 | # The Initial Developer of the Original Code is Open CASCADE S.A.S., having its |
9 | # main offices at: 1, place des Freres Montgolfier, 78280 Guyancourt, France. |
10 | # |
11 | # The Original Code and all software distributed under the License is |
12 | # distributed on an "AS IS" basis, without warranty of any kind, and the |
13 | # Initial Developer hereby disclaims all such warranties, including without |
14 | # limitation, any warranties of merchantability, fitness for a particular |
15 | # purpose or non-infringement. Please see the License for the specific terms |
16 | # and conditions governing the rights and limitations under the License. |
17 | |
18 | ############################################################################ |
19 | # This file defines scripts for execution of OCCT tests. |
20 | # It should be loaded automatically when DRAW is started, and provides |
21 | # two top-level commands: 'test' and 'testgrid'. |
22 | # See OCCT Tests User Guide for description of the test system. |
23 | # |
24 | # Note: procedures with names starting with underscore are for internal use |
25 | # inside the test system. |
26 | ############################################################################ |
27 | |
# Default verbose level used by command _run_test (0 = quiet; may be
# overridden via the optional argument of the 'test' command)
set _tests_verbose 0

# Regexp for parsing a single test case result line in the summary log;
# matches "CASE <group> <grid> <casename>: <status> <optional explanation>"
set _test_case_regexp {^CASE\s+([\w.-]+)\s+([\w.-]+)\s+([\w.-]+)\s*:\s*([\w]+)(.*)}
33 | |
# Basic command to run indicated test case in DRAW
help test {Run specified test case
 Use: test group grid casename [verbose_level]
 Verbose level is 0 by default; can be set to 1 or 2}
proc test {group grid casename {verbose {}}} {
    global _tests_verbose

    # remember requested verbosity for the duration of the run
    if { $verbose != "" } {
        set _tests_verbose $verbose
    }

    # resolve test case location (raises an error if input is invalid)
    _get_test $group $grid $casename dir gridname casefile

    # execute the test case in the caller's scope
    uplevel _run_test $dir $group $gridname $casefile

    # analyse the accumulated log and report the verdict
    _check_log $dir $group $gridname $casename [dlog get]

    return
}
55 | |
# Command to run all tests, or one specified group, or one grid
help testgrid {Run all tests, or specified group, or one grid
 Use: testgrid logdir [group [grid]] [options...]
 Log directory should be empty (or non-existing)
 Allowed options are:
 -parallel N: run in parallel mode with up to N processes (default 0)
 -refresh N: save summary logs every N seconds (default 60, minimal 1, 0 to disable)
 -overwrite: force writing logs in existing non-empty directory
 -xml filename: write XML report for Jenkins (in JUnit-like format)
}
proc testgrid {logdir args} {
    global env tcl_platform _tests_verbose

    ######################################################
    # check arguments
    ######################################################

    # check that environment variable defining paths to test scripts is defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"
    }

    # treat options
    set parallel 0
    set refresh 60
    set overwrite 0
    set xmlfile ""
    for {set narg 0} {$narg < [llength $args]} {incr narg} {
        set arg [lindex $args $narg]

        # parallel execution
        if { $arg == "-parallel" } {
            incr narg
            if { $narg < [llength $args] } {
                set parallel [expr [lindex $args $narg]]
            } else {
                # fixed: the variable name was misspelled ('paralell'),
                # so this default of 2 threads was never actually applied
                set parallel 2
            }
            continue
        }

        # refresh logs time
        if { $arg == "-refresh" } {
            incr narg
            if { $narg < [llength $args] } {
                set refresh [expr [lindex $args $narg]]
            } else {
                set refresh 10
            }
            continue
        }

        # allow overwrite logs
        if { $arg == "-overwrite" } {
            set overwrite 1
            continue
        }

        # name of XML summary report file (JUnit-like format)
        if { $arg == "-xml" } {
            incr narg
            if { $narg < [llength $args] } {
                set xmlfile [lindex $args $narg]
            }
            if { $xmlfile == "" } {
                set xmlfile TESTS-summary.xml
            }
            continue
        }

        # unsupported option
        if { [regexp {^-} $arg] } {
            error "Error: unsupported option \"$arg\""
        }

        # treat arguments not recognized as options as group and grid names
        if { ! [info exists groupname] } {
            set groupname $arg
        } elseif { ! [info exists gridname] } {
            set gridname $arg
        } else {
            error "Error: cannot interpret argument $narg ($arg): both group and grid names are already defined by previous args!"
        }
    }

    # check that target log directory is empty or does not exist
    set logdir [file normalize [string trim $logdir]]
    if { $logdir == "" } {
        # if specified logdir is empty string, generate unique name like "results_2010-12-31T2359"
        set logdir "results_[clock format [clock seconds] -format {%Y-%m-%dT%H%M}]"
        set logdir [file normalize $logdir]
    }
    if { [file isdirectory $logdir] && ! $overwrite && ! [catch {glob -directory $logdir *}] } {
        error "Error: Specified log directory \"$logdir\" is not empty; please clean it before running tests"
    }
    if { [catch {file mkdir $logdir}] || ! [file writable $logdir] } {
        error "Error: Cannot create directory \"$logdir\", or it is not writable"
    }

    ######################################################
    # prepare list of tests to be performed
    ######################################################

    # list of tests, each defined by a list of:
    # test scripts directory
    # group (subfolder) name
    # grid (subfolder) name
    # test case name
    # path to test case file
    set tests_list {}

    # iterate by all script paths
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # protection against empty paths
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        if { $_tests_verbose > 0 } { _log_and_puts log "Examining tests directory $dir" }

        # check that directory exists
        if { ! [file isdirectory $dir] } {
            _log_and_puts log "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
            continue
        }

        # if test group is specified, check that directory with given name exists in this dir
        # if not, continue to the next test dir
        if { [info exists groupname] && $groupname != "" } {
            if { [file isdirectory $dir/$groupname] } {
                set groups $groupname
            } else {
                continue
            }
        } else {
            # else search all directories in the current dir
            if { [catch {glob -directory $dir -tail -types d *} groups] } { continue }
        }

        # iterate by groups
        if { $_tests_verbose > 0 } { _log_and_puts log "Groups to be executed: $groups" }
        foreach group [lsort -dictionary $groups] {
            if { $_tests_verbose > 0 } { _log_and_puts log "Examining group directory $group" }

            # file grids.list must exist: it defines sequence of grids in the group
            if { ! [file exists $dir/$group/grids.list] } {
                _log_and_puts log "Warning: directory $dir/$group does not contain file grids.list, skipped"
                continue
            }

            # read grids.list file and make a list of grids to be executed
            set gridlist {}
            set fd [open $dir/$group/grids.list]
            set nline 0
            while { [gets $fd line] >= 0 } {
                incr nline

                # skip comments and empty lines
                if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                if { [string trim $line] == "" } { continue }

                # get grid id and name
                if { ! [regexp "^\(\[0-9\]+\)\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridid grid] } {
                    _log_and_puts log "Warning: cannot recognize line $nline in file $dir/$group/grids.list as \"gridid gridname\"; ignored"
                    continue
                }

                # if specific grid is requested, check that it is present; otherwise make complete list
                if { ! [info exists gridname] || $gridname == "" || $gridname == $gridid || $gridname == $grid } {
                    lappend gridlist $grid
                }
            }
            close $fd

            # iterate by all grids
            foreach grid $gridlist {

                # check if this grid is aliased to another one
                set griddir $dir/$group/$grid
                if { [file exists $griddir/cases.list] } {
                    set fd [open $griddir/cases.list]
                    if { [gets $fd line] >= 0 } {
                        set griddir [file normalize $dir/$group/$grid/[string trim $line]]
                    }
                    close $fd
                }

                # check if grid directory actually exists
                if { ! [file isdirectory $griddir] } {
                    _log_and_puts log "Error: tests directory for grid $grid ($griddir) is missing; skipped"
                    continue
                }

                # create directory for logging test results
                if { $logdir != "" } { file mkdir $logdir/$group/$grid }

                # iterate by all tests in the grid directory
                # (spelled -types for consistency with the glob call above; '-type' relied on abbreviation)
                if { [catch {glob -directory $griddir -types f *} testfiles] } { continue }
                foreach casefile [lsort -dictionary $testfiles] {
                    # filter out begin and end files
                    set casename [file tail $casefile]
                    if { $casename == "begin" || $casename == "end" } { continue }

                    lappend tests_list [list $dir $group $grid $casename $casefile]
                }
            }
        }
    }
    if { [llength $tests_list] < 1 } {
        error "Error: no tests are found, check your input arguments and variable CSF_TestScriptsPath!"
    }

    ######################################################
    # run tests
    ######################################################

    # log command arguments and environment
    set log "Command: testgrid $args\nHost: [info hostname]\nStarted on: [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}]\n"
    set log "$log\nEnvironment:\n"
    foreach envar [array names env] {
        set log "$log$envar=\"$env($envar)\"\n"
    }
    set log "$log\n"

    set refresh_timer [clock seconds]
    uplevel dchrono _timer reset
    uplevel dchrono _timer start

    # if parallel execution is requested, allocate thread pool
    if { $parallel > 0 } {
        if { ! [info exists tcl_platform(threaded)] || [catch {package require Thread}] } {
            _log_and_puts log "Warning: Tcl package Thread is not available, running in sequential mode"
            set parallel 0
        } else {
            set worker [tpool::create -minworkers $parallel -maxworkers $parallel]
            # suspend the pool until all jobs are posted, to prevent blocking of the process
            # of starting / processing jobs by running threads
            tpool::suspend $worker
            if { $_tests_verbose > 0 } { _log_and_puts log "Executing tests in (up to) $parallel threads" }
        }
    }

    # start test cases
    foreach test_def $tests_list {
        set dir [lindex $test_def 0]
        set group [lindex $test_def 1]
        set grid [lindex $test_def 2]
        set casename [lindex $test_def 3]
        set casefile [lindex $test_def 4]

        # command to set tests for generation of image in results directory
        set imgdir_cmd ""
        if { $logdir != "" } { set imgdir_cmd "set imagedir $logdir/$group/$grid" }

        # prepare command file for running test case in separate instance of DRAW
        set fd_cmd [open $logdir/$group/$grid/${casename}.tcl w]
        puts $fd_cmd "$imgdir_cmd"
        puts $fd_cmd "set test_image $casename"
        puts $fd_cmd "_run_test $dir $group $grid $casefile"
        # use dlog command to obtain complete output of the test when it is absent (i.e. since OCCT 6.6.0)
        if { ! [catch {dlog get}] } {
            puts $fd_cmd "puts \[dlog get\]"
        } else {
            # else try to use old-style QA_ variables to get more output...
            set env(QA_DUMP) 1
            set env(QA_DUP) 1
            set env(QA_print_command) 1
        }
        # final 'exit' is needed when running on Linux under VirtualGl
        puts $fd_cmd "exit"
        close $fd_cmd

        # command to run DRAW with a command file;
        # note that empty string is passed as standard input to avoid possible
        # hang-ups due to waiting for stdin of the launching process
        set command "exec <<{} DRAWEXE -f $logdir/$group/$grid/${casename}.tcl"

        # alternative method to run without temporary file; disabled as it needs too many backslashes
        # else {
        #   set command "exec <<\"\" DRAWEXE -c $imgdir_cmd\\\; set test_image $casename\\\; \
        #     _run_test $dir $group $grid $casefile\\\; \
        #     puts \\\[dlog get\\\]\\\; exit"
        # }

        # run test case, either in parallel or sequentially
        if { $parallel > 0 } {
            # parallel execution
            set job [tpool::post -nowait $worker "catch \"$command\" output; return \$output"]
            set job_def($job) [list $logdir $dir $group $grid $casename]
        } else {
            # sequential execution
            catch {eval $command} output
            _log_test_case $output $logdir $dir $group $grid $casename log

            # update summary log with requested period
            if { $logdir != "" && $refresh > 0 && [expr [clock seconds] - $refresh_timer > $refresh] } {
                # update and dump summary
                _log_summarize $logdir $log
                set refresh_timer [clock seconds]
            }
        }
    }

    # get results of started threads
    if { $parallel > 0 } {
        tpool::resume $worker
        while { [llength [array names job_def]] > 0 } {
            foreach job [tpool::wait $worker [array names job_def]] {
                eval _log_test_case \[tpool::get $worker $job\] $job_def($job) log
                unset job_def($job)
            }

            # update summary log with requested period
            if { $logdir != "" && $refresh > 0 && [clock seconds] > $refresh_timer + $refresh } {
                _log_summarize $logdir $log
                set refresh_timer [clock seconds]
            }
        }
        # release thread pool
        tpool::release $worker
    }

    uplevel dchrono _timer stop
    set time [lindex [split [uplevel dchrono _timer show] "\n"] 0]

    ######################################################
    # output summary logs and exit
    ######################################################

    _log_summarize $logdir $log $time
    if { $logdir != "" } {
        puts "Detailed logs are saved in $logdir"
    }
    if { $logdir != "" && $xmlfile != "" } {
        # XML output file is assumed relative to log dir unless it is absolute
        if { [file pathtype $xmlfile] == "relative" } {
            set xmlfile [file normalize $logdir/$xmlfile]
        }
        _log_xml_summary $logdir $xmlfile $log 0
        puts "XML summary is saved to $xmlfile"
    }

    return
}
400 | |
# Internal procedure to find test case indicated by group, grid, and test case names;
# returns (via upvar-bound variables in the caller's scope):
# - dir: path to the base directory of the tests group
# - gridname: actual name of the grid (resolved if 'grid' was an alias/id from grids.list)
# - casefile: path to the test case script
# if no such test is found, raises error with appropriate message
proc _get_test {group grid casename _dir _gridname _casefile} {
    upvar $_dir dir
    upvar $_gridname gridname
    upvar $_casefile casefile

    global env

    # check that environment variable defining paths to test scripts is defined
    if { ! [info exists env(CSF_TestScriptsPath)] ||
         [llength $env(CSF_TestScriptsPath)] <= 0 } {
        error "Error: Environment variable CSF_TestScriptsPath is not defined"
    }

    # iterate by all script paths; note that the loop variable 'dir' is the
    # caller's variable (upvar above), so on success it is deliberately left
    # pointing at the path where the test was found
    foreach dir [_split_path $env(CSF_TestScriptsPath)] {
        # protection against empty paths
        set dir [string trim $dir]
        if { $dir == "" } { continue }

        # check that directory exists
        if { ! [file isdirectory $dir] } {
            puts "Warning: directory $dir listed in CSF_TestScriptsPath does not exist, skipped"
            continue
        }

        # check if test group with given name exists in this dir
        # if not, continue to the next test dir
        if { ! [file isdirectory $dir/$group] } { continue }

        # check that grid with given name (possibly alias) exists; stop otherwise
        set gridname $grid
        if { ! [file isdirectory $dir/$group/$gridname] } {
            # check if grid is named by alias (numeric id) rather than by actual name;
            # grids.list lines have the form "<id> <gridname>"
            if { [file exists $dir/$group/grids.list] } {
                set fd [open $dir/$group/grids.list]
                while { [gets $fd line] >= 0 } {
                    if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                    if { [regexp "^$grid\[ \t\]*\(\[A-Za-z0-9_.-\]+\)\$" $line res gridname] } {
                        break
                    }
                }
                close $fd
            }
        }
        if { ! [file isdirectory $dir/$group/$gridname] } { continue }

        # get actual file name of the script; stop if it cannot be found
        set casefile $dir/$group/$gridname/$casename
        if { ! [file exists $casefile] } {
            # check if this grid is aliased to another one:
            # cases.list holds a (relative) path to the directory with actual scripts
            if { [file exists $dir/$group/$gridname/cases.list] } {
                set fd [open $dir/$group/$gridname/cases.list]
                if { [gets $fd line] >= 0 } {
                    set casefile [file normalize $dir/$group/$gridname/[string trim $line]/$casename]
                }
                close $fd
            }
        }
        if { [file exists $casefile] } {
            # normal return
            return
        }
    }

    # coming here means specified test is not found; report error
    error [join [list "Error: test case $group / $grid / $casename is not found in paths listed in variable" \
                     "CSF_TestScriptsPath (current value is \"$env(CSF_TestScriptsPath)\")"] "\n"]
}
475 | |
# Internal procedure to run test case indicated by base directory,
# group and grid names, and test case file path.
# Runs the group/grid 'begin' scripts, the case script, then the 'end'
# scripts, capturing all stdout/stderr output via a temporary override
# of 'puts'. The log can be obtained by command "dlog get".
proc _run_test {scriptsdir group gridname casefile} {
    global env

    # start timer
    uplevel dchrono _timer reset
    uplevel dchrono _timer start

    # enable commands logging; switch to old-style mode if dlog command is not present
    set dlog_exists 1
    if { [catch {dlog reset}] } {
        set dlog_exists 0
    } else {
        dlog reset
        dlog on
        rename puts puts-saved
        proc puts args {
            global _tests_verbose

            # log only output to stdout and stderr, not to file!
            if {[llength $args] > 1} {
                # fixed: variable was set as 'optvarg' but read as 'optarg',
                # and the channel names were unquoted barewords (invalid in expr)
                set optarg [lindex $args end-1]
                if { $optarg == "stdout" || $optarg == "stderr" || $optarg == "-newline" } {
                    dlog add [lindex $args end]
                }
            } else {
                dlog add [lindex $args end]
            }

            # reproduce original puts
            if { $_tests_verbose } {
                eval puts-saved $args
            }
        }
    }

    # evaluate test case; begin/end scripts of the group and grid (if any)
    # are sourced around the case script itself
    if [catch {
        uplevel set casename [file tail $casefile]
        uplevel set groupname $group
        uplevel set gridname $gridname

        if { [file exists $scriptsdir/$group/begin] } {
            puts "Executing $scriptsdir/$group/begin..."; flush stdout
            uplevel source $scriptsdir/$group/begin
        }
        if { [file exists $scriptsdir/$group/$gridname/begin] } {
            puts "Executing $scriptsdir/$group/$gridname/begin..."; flush stdout
            uplevel source $scriptsdir/$group/$gridname/begin
        }

        puts "Executing $casefile..."; flush stdout
        uplevel source $casefile

        if { [file exists $scriptsdir/$group/$gridname/end] } {
            puts "Executing $scriptsdir/$group/$gridname/end..."; flush stdout
            uplevel source $scriptsdir/$group/$gridname/end
        }
        if { [file exists $scriptsdir/$group/end] } {
            puts "Executing $scriptsdir/$group/end..."; flush stdout
            uplevel source $scriptsdir/$group/end
        }
    } res] {
        puts "Tcl Exception: $res"
    }

    # stop logging and restore the original puts
    if { $dlog_exists } {
        rename puts {}
        rename puts-saved puts
        dlog off
    }

    # stop cpulimit killer if armed by the test
    cpulimit

    # add timing info
    uplevel dchrono _timer stop
    set time [uplevel dchrono _timer show]
    if [regexp -nocase {CPU user time:[ \t]*([0-9.e-]+)} $time res cpu] {
        if { $dlog_exists } {
            dlog add "TOTAL CPU TIME: $cpu sec"
        } else {
            puts "TOTAL CPU TIME: $cpu sec"
        }
    }
}
565 | |
# Internal procedure to check log of test execution and decide if it passed or failed.
# Arguments:
#   dir, group, gridname, casename - identify the test case (used to locate parse.rules)
#   log       - full text output of the test
#   _summary  - optional name of caller's variable to receive one-line verdict
#   _html_log - optional name of caller's variable to receive HTML-highlighted log
# The verdict line has the form "CASE <group> <grid> <case>: <status>".
proc _check_log {dir group gridname casename log {_summary {}} {_html_log {}}} {
    global env
    if { $_summary != "" } { upvar $_summary summary }
    if { $_html_log != "" } { upvar $_html_log html_log }
    set summary ""
    set html_log ""

    # any Tcl error during analysis is caught and reported as FAILED below
    if [catch {

        # load definition of 'bad words' indicating test failure
        # note that rules are loaded in the order of decreasing priority (grid - group - common),
        # thus grid rules will override group ones
        set badwords {}
        foreach rulesfile [list $dir/$group/$gridname/parse.rules $dir/$group/parse.rules $dir/parse.rules] {
            if [catch {set fd [open $rulesfile r]}] { continue }
            while { [gets $fd line] >= 0 } {
                # skip comments and empty lines
                if { [regexp "\[ \t\]*\#.*" $line] } { continue }
                if { [string trim $line] == "" } { continue }
                # extract regexp; rule lines have the form "status/regexp/comment"
                if { ! [regexp {^([^/]*)/([^/]*)/(.*)$} $line res status rexp comment] } {
                    puts "Warning: cannot recognize parsing rule \"$line\" in file $rulesfile"
                    continue
                }
                set status [string trim $status]
                if { $comment != "" } { set status "$status ([string trim $comment])" }
                set rexp [regsub -all {\\b} $rexp {\\y}] ;# convert regexp from Perl to Tcl style
                lappend badwords [list $status $rexp]
            }
            close $fd
        }
        if { [llength $badwords] <= 0 } {
            puts "Warning: no definition of error indicators found (check files parse.rules)"
        }

        # analyse log line-by-line
        set todos {}
        set status ""
        foreach line [split $log "\n"] {
            # check if line defines specific treatment of some messages
            # (TODO statements declare known problems for given platforms)
            if [regexp -nocase {^[ \t]*TODO ([^:]*):(.*)$} $line res platforms pattern] {
                if { ! [regexp -nocase {\mAll\M} $platforms] &&
                     ! [regexp -nocase "\\m$env(os_type)\\M" $platforms] } {
                    set html_log "$html_log\n$line"
                    continue ;# TODO statement is for another platform
                }

                # record TODOs that mark unstable cases (question mark in platforms)
                if { [regexp {[\?]} $platforms] } {
                    set todos_unstable([llength $todos]) 1
                }

                lappend todos [regsub -all {\\b} [string trim $pattern] {\\y}] ;# convert regexp from Perl to Tcl style
                set html_log "$html_log\n[_html_highlight BAD $line]"
                continue
            }

            # check for presence of messages indicating test result
            set ismarked 0
            foreach bw $badwords {
                if { [regexp [lindex $bw 1] $line] } {
                    # check if this is known bad case (declared by a TODO)
                    set is_known 0
                    for {set i 0} {$i < [llength $todos]} {incr i} {
                        if { [regexp [lindex $todos $i] $line] } {
                            set is_known 1
                            incr todo_count($i)
                            set html_log "$html_log\n[_html_highlight BAD $line]"
                            break
                        }
                    }

                    # if it is not in todo, define status
                    # (first non-OK, non-IGNORE match wins)
                    if { ! $is_known } {
                        set stat [lindex $bw 0 0]
                        set html_log "$html_log\n[_html_highlight $stat $line]"
                        if { $status == "" && $stat != "OK" && ! [regexp -nocase {^IGNOR} $stat] } {
                            set status [lindex $bw 0]
                        }
                    }
                    set ismarked 1
                    break
                }
            }
            if { ! $ismarked } {
                set html_log "$html_log\n$line"
            }
        }

        # check for presence of TEST COMPLETED statement
        if { $status == "" && ! [regexp {TEST COMPLETED} $log] } {
            # check whether absence of TEST COMPLETED is known problem
            set i [lsearch $todos "TEST INCOMPLETE"]
            if { $i >= 0 } {
                incr todo_count($i)
            } else {
                set status "FAILED (no final message is found)"
            }
        }

        # check declared bad cases and diagnose possible improvement
        # (bad case declared but not detected).
        # Note that absence of the problem marked by TODO with question mark
        # (unstable) is not reported as improvement.
        if { $status == "" } {
            for {set i 0} {$i < [llength $todos]} {incr i} {
                if { ! [info exists todos_unstable($i)] &&
                     (! [info exists todo_count($i)] || $todo_count($i) <= 0) } {
                    set status "IMPROVEMENT (expected problem TODO no. [expr $i + 1] is not detected)"
                    break;
                }
            }
        }

        # report test as known bad if at least one of expected problems is found
        if { $status == "" && [llength [array names todo_count]] > 0 } {
            set status "BAD (known problem)"
        }

        # report normal OK
        if { $status == "" } {set status "OK" }

    } res] {
        set status "FAILED ($res)"
    }

    # put final message
    _log_and_puts summary "CASE $group $gridname $casename: $status"
    set html_log "[_html_highlight [lindex $status 0] $summary]\n$html_log"
}
697 | |
# Auxiliary procedure putting message to both cout and log variable (list)
proc _log_and_puts {logvar message} {
    # append to caller's log variable unless its name is empty
    if { $logvar != "" } {
        upvar $logvar log
        if { [info exists log] } {
            append log $message "\n"
        } else {
            set log "$message\n"
        }
    }
    puts $message
}
710 | |
# Auxiliary procedure to log result on single test case
proc _log_test_case {output logdir dir group grid casename logvar} {
    upvar $logvar log

    # evaluate the result and build an HTML-highlighted log
    _check_log $dir $group $grid $casename $output summary html_log
    append log $summary

    # dump both HTML and plain-text variants of the log, side by side
    if { $logdir != "" } {
        set title "Test $group $grid $casename"
        _log_html $logdir/$group/$grid/$casename.html $html_log $title
        _log_save $logdir/$group/$grid/$casename.log "$output\n$summary" $title
    }
}
725 | |
# Auxiliary procedure to save plain-text log to file
proc _log_save {file log {title {}}} {
    # make sure the target directory exists
    catch {file mkdir [file dirname $file]}

    # open the file for writing, reporting a readable error on failure
    if { [catch {open $file w} fd] } {
        error "Error saving log file $file: $fd"
    }

    # write the title, a separating blank line, and the log body
    puts $fd "$title\n"
    puts $fd $log
    close $fd
    return
}
742 | |
# Auxiliary procedure to save log to file, wrapped into a simple HTML page
proc _log_html {file log {title {}}} {
    # make sure the target directory exists
    catch {file mkdir [file dirname $file]}

    # open the file for writing, reporting a readable error on failure
    if { [catch {open $file w} fd] } {
        error "Error saving log file $file: $fd"
    }

    # print header
    puts $fd "<html><head><title>$title</title><head><body><h1>$title</h1>"

    # embed images (gif/png/jpg) whose names start with the log's base name
    set imgbasename [file rootname [file tail $file]]
    foreach img [lsort [glob -nocomplain -directory [file dirname $file] -tails ${imgbasename}*.gif ${imgbasename}*.png ${imgbasename}*.jpg]] {
        puts $fd "<p><img src=\"$img\"/><p>"
    }

    # print preformatted body, then end and close
    puts $fd "<pre>"
    puts $fd $log
    puts $fd "</pre></body></html>"

    close $fd
    return
}
770 | |
# Auxiliary method to make text with HTML highlighting according to status
proc _html_color {status} {
    # exact match for the OK status
    if { $status == "OK" } {
        return lightgreen
    }
    # map status prefixes (case-insensitive, checked in priority order) to colors
    foreach {prefix color} {FAIL red BAD yellow IMP orange SKIP gray IGNOR gray} {
        if { [regexp -nocase "^$prefix" $status] } {
            return $color
        }
    }
    # unknown status is highlighted as if it were a failure
    puts "Warning: no color defined for status $status, using red as if FAILED"
    return red
}
791 | |
# Format text line in HTML to be colored according to the status
# (background color is chosen by _html_color; a one-cell table is used
# so that each line gets its own colored box)
proc _html_highlight {status line} {
    return "<table><tr><td bgcolor=\"[_html_color $status]\">$line</td></tr></table>"
}
796 | |
# Internal procedure to generate HTML page presenting log of the tests
# execution in tabular form, with links to reports on individual cases.
# Arguments:
#   logdir       - directory where summary.html is created (made if absent)
#   log          - full tests log text, one "CASE ..." record per line
#   totals       - list of {count status} pairs, one per distinct status
#   regressions  - list {header case1 case2 ...} of failed cases
#   improvements - list {header case1 case2 ...} of improved cases
#   total_time   - elapsed-time text; empty string means tests are still
#                  running (then the page auto-refreshes)
# Raises a Tcl error if the summary file cannot be created.
proc _log_html_summary {logdir log totals regressions improvements total_time} {
    global _test_case_regexp

    # create missing directories as needed
    catch {file mkdir $logdir}

    # try to open a file and start HTML
    if [catch {set fd [open $logdir/summary.html w]} res] {
        error "Error creating log file: $res"
    }

    # write HTML header, including command to refresh log if still in progress
    puts $fd "<html><head>"
    puts $fd "<title>Tests summary</title>"
    if { $total_time == "" } {
        # empty total_time means the run is in progress: refresh every 10 s
        puts $fd "<meta http-equiv=\"refresh\" content=\"10\">"
    }
    puts $fd "<meta http-equiv=\"pragma\" content=\"NO-CACHE\">"
    puts $fd "</head><body>"

    # put summary table: one row per status, with count, colored status cell,
    # and a human-readable legend (unknown statuses get a generic comment)
    set legend(OK) "Test passed OK"
    set legend(FAILED) "Test failed (regression)"
    set legend(BAD) "Known problem"
    set legend(IMPROVEMENT) "Possible improvement (expected problem not detected)"
    set legend(SKIPPED) "Test skipped due to lack of data file"
    puts $fd "<h1>Summary</h1><table>"
    foreach nbstat $totals {
        set status [lindex $nbstat 1]
        if { [info exists legend($status)] } {
            set comment $legend($status)
        } else {
            set comment "User-defined status"
        }
        puts $fd "<tr><td align=\"right\">[lindex $nbstat 0]</td><td bgcolor=\"[_html_color $status]\">$status</td><td>$comment</td></tr>"
    }
    puts $fd "</table>"

    # time stamp and elapsed time info
    if { $total_time != "" } {
        puts $fd "<p>Generated on [clock format [clock seconds] -format {%Y-%m-%d %H:%M:%S}] on [info hostname] <p> $total_time"
    } else {
        puts $fd "<p>NOTE: This is intermediate summary; the tests are still running! This page will refresh automatically until tests are finished."
    }

    # print regressions and improvements; each list starts with a header
    # item (e.g. "Failed:"), so length 1 means the list is effectively empty
    foreach featured [list $regressions $improvements] {
        if { [llength $featured] <= 1 } { continue }
        set status [string trim [lindex $featured 0] { :}]
        puts $fd "<h2>$status</h2>"
        puts $fd "<table>"
        set groupgrid ""
        foreach test [lrange $featured 1 end] {
            # each item is "group grid name"; last word is the case name
            if { ! [regexp {^(.*)\s+([\w.]+)$} $test res gg name] } {
                set gg UNKNOWN
                set name "Error building short list; check details"
            }
            # start a new table row each time the group/grid changes
            if { $gg != $groupgrid } {
                if { $groupgrid != "" } { puts $fd "</tr>" }
                set groupgrid $gg
                puts $fd "<tr><td>$gg</td>"
            }
            puts $fd "<td bgcolor=\"[_html_color $status]\"><a href=\"[regsub -all { } $gg /]/${name}.html\">$name</a></td>"
        }
        if { $groupgrid != "" } { puts $fd "</tr>" }
        puts $fd "</table>"
    }

    # put detailed log
    puts $fd "<h1>Details</h1>"

    # process log line-by-line (sorted so groups/grids come out contiguously)
    set group {}
    set letter {}
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
            continue
        }

        # start new group: close any open table and emit a group heading
        if { $grp != $group } {
            if { $letter != "" } { puts $fd "</tr></table>" }
            set letter {}
            set group $grp
            set grid {}
            puts $fd "<h2>Group $group</h2>"
        }

        # start new grid: close any open table and emit a grid heading
        if { $grd != $grid } {
            if { $letter != "" } { puts $fd "</tr></table>" }
            set letter {}
            set grid $grd
            puts $fd "<h3>Grid $grid</h3>"
        }

        # check if test case name is <letter><digit>;
        # if not, set alnum to period "." to recognize non-standard test name
        if { ! [regexp {([A-Za-z]+)([0-9]+)} $casename res alnum number] } {
            set alnum .
        }

        # start new row when letter changes or for non-standard names
        if { $alnum != $letter || $alnum == "." } {
            if { $letter != "" } {
                puts $fd "</tr><tr>"
            } else {
                puts $fd "<table><tr>"
            }
            set letter $alnum
        }

        # one colored cell per case, linking to its individual HTML report
        puts $fd "<td bgcolor=\"[_html_color $result]\"><a href=\"$group/$grid/${casename}.html\">$casename</a></td>"
    }
    puts $fd "</tr></table>"

    # add remaining lines of log (those not matching the CASE pattern)
    # as plain text
    puts $fd "<h2>Plain text messages</h2>\n<pre>"
    foreach line [split $log "\n"] {
        if { ! [regexp $_test_case_regexp $line] } {
            puts $fd "$line"
        }
    }
    puts $fd "</pre>"

    # close file and exit
    puts $fd "</body>"
    close $fd
    return
}
930 | |
# Build and dump summary of the tests log: per-status counts, lists of
# regressions and improvements, and (when total_time is given) totals.
# When logdir is non-empty, also writes summary.html and tests.log there.
proc _log_summarize {logdir log {total_time {}}} {

    # sort log records alphabetically to have the same behavior on Linux
    # and Windows (also needed if tests are run in parallel)
    set records [lsort -dictionary [split $log "\n"]]

    # group test case ids by their reported status
    foreach record $records {
        if { [regexp {^CASE ([^:]*): ([[:alnum:]]+).*$} $record res caseid status] } {
            lappend count($status) $caseid
        }
    }

    set totals {}
    set improvements {Improvements:}
    set regressions {Failed:}
    if { [info exists count] } {
        foreach status [lsort [array names count]] {
            lappend totals [list [llength $count($status)] $status]

            # statuses starting with IMP are improvements; everything other
            # than IMP, OK, BAD and SKIP counts as a regression
            if { [regexp -nocase {^IMP} $status] } {
                foreach caseid $count($status) { lappend improvements $caseid }
            } elseif { $status != "OK" && ! [regexp -nocase {^BAD} $status] && ! [regexp -nocase {^SKIP} $status] } {
                foreach caseid $count($status) { lappend regressions $caseid }
            }
        }
    }

    # when total time is known (i.e. the run is complete), report totals;
    # a one-element list holds only its header, meaning "nothing to report"
    if { $total_time != "" } {
        if { [llength $improvements] > 1 } {
            _log_and_puts log [join $improvements "\n "]
        }
        if { [llength $regressions] > 1 } {
            _log_and_puts log [join $regressions "\n "]
        }
        if { [llength $improvements] == 1 && [llength $regressions] == 1 } {
            _log_and_puts log "No regressions"
        }
        _log_and_puts log "Total cases: [join $totals {, }]"
        _log_and_puts log $total_time
    }

    # dump summary to HTML and plain-text files if a directory is given
    if { $logdir != "" } {
        _log_html_summary $logdir $log $totals $regressions $improvements $total_time
        _log_save $logdir/tests.log $log "Tests summary"
    }

    return
}
983 | |
# Internal procedure to generate XML log in JUnit style, for further
# consumption by Jenkins or similar systems.
#
# The output is intended to conform to XML schema supported by Jenkins found at
# https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd
#
# The mapping of the fields is inspired by annotated schema of Apache Ant JUnit XML format found at
# http://windyroad.org/dl/Open%20Source/JUnit.xsd
#
# Arguments:
#   logdir       - root directory containing per-case log files
#                  ($logdir/<group>/<grid>/<case>.log)
#   filename     - path of the XML file to create (directories made if absent)
#   log          - tests log text, one "CASE ..." record per line
#   include_cout - if true, the full contents of each case log file is
#                  embedded in <system-out>; otherwise only the summary line
# Raises a Tcl error if the XML file cannot be created.
proc _log_xml_summary {logdir filename log include_cout} {
    global _test_case_regexp

    catch {file mkdir [file dirname $filename]}

    # try to open a file and start XML
    if [catch {set fd [open $filename w]} res] {
        error "Error creating XML summary file $filename: $res"
    }
    puts $fd "<?xml version='1.0' encoding='utf-8'?>"
    puts $fd "<testsuites>"

    # prototype for command to generate test suite tag
    set time_and_host "timestamp=\"[clock format [clock seconds] -format {%Y-%m-%dT%H:%M:%S}]\" hostname=\"[info hostname]\""
    set cmd_testsuite {puts $fd "<testsuite name=\"$group $grid\" tests=\"$nbtests\" failures=\"$nbfail\" errors=\"$nberr\" time=\"$time\" skipped=\"$nbskip\" $time_and_host>\n$testcases\n</testsuite>\n"}

    # sort log and process it line-by-line;
    # initialize grid explicitly: the "new testsuite" check below reads it,
    # and relying on short-circuit evaluation of || to skip the otherwise
    # unset variable on the first iteration is fragile
    set group {}
    set grid {}
    foreach line [lsort -dictionary [split $log "\n"]] {
        # check that the line is case report in the form "CASE group grid name: result (explanation)"
        if { ! [regexp $_test_case_regexp $line res grp grd casename result message] } {
            continue
        }
        set message [string trim $message " \t\r\n()"]

        # start new testsuite for each grid
        if { $grp != $group || $grd != $grid } {

            # write previous test suite
            if [info exists testcases] { eval $cmd_testsuite }

            set testcases {}
            set nbtests 0
            set nberr 0
            set nbfail 0
            set nbskip 0
            set time 0.

            set group $grp
            set grid $grd
        }

        incr nbtests

        # parse test log and get its CPU time
        set testout {}
        set add_cpu {}
        if { [catch {set fdlog [open $logdir/$group/$grid/${casename}.log r]} ret] } {
            puts "Error: cannot open $logdir/$group/$grid/${casename}.log: $ret"
        } else {
            while { [gets $fdlog logline] >= 0 } {
                if { $include_cout } {
                    set testout "$testout$logline\n"
                }
                if [regexp -nocase {TOTAL CPU TIME:\s*([\d.]+)\s*sec} $logline res cpu] {
                    set add_cpu " time=\"$cpu\""
                    # braced expr avoids double substitution of the operands
                    set time [expr {$time + $cpu}]
                }
            }
            close $fdlog
        }
        if { ! $include_cout } {
            set testout "$line\n"
        }

        # record test case with its output and status
        # Mapping is: SKIPPED, BAD, and OK to OK, all other to failure
        set testcases "$testcases\n <testcase name=\"$casename\"$add_cpu status=\"$result\">\n"
        set testcases "$testcases\n <system-out>\n$testout </system-out>"
        if { $result != "OK" } {
            if { [regexp -nocase {^SKIP} $result] } {
                incr nberr
                set testcases "$testcases\n <error name=\"$result\" message=\"$message\"/>"
            } elseif { [regexp -nocase {^BAD} $result] } {
                incr nbskip
                set testcases "$testcases\n <skipped>$message</skipped>"
            } else {
                incr nbfail
                set testcases "$testcases\n <failure name=\"$result\" message=\"$message\"/>"
            }
        }
        set testcases "$testcases\n </testcase>"
    }

    # write last test suite
    if [info exists testcases] { eval $cmd_testsuite }

    # the end
    puts $fd "</testsuites>"
    close $fd
    return
}
1084 | |
# Define custom platform name in env(os_type), unless it is already set.
# On Linux, refines the generic "unix" value using the contents of
# /etc/issue (note that the mapping is rather non-uniform, for historical
# reasons) and appends "-64" on x86_64 machines.
proc _tests_platform_def {} {
    global env tcl_platform

    # respect a value set explicitly by the user
    if [info exists env(os_type)] { return }

    set env(os_type) $tcl_platform(platform)

    # detailed mapping for various versions of Linux: each table entry is a
    # regexp over /etc/issue plus the name prefix to which the captured
    # version digits are appended (regexp fills unmatched capture variables
    # with empty strings, so one-group patterns leave subnum empty)
    if { $env(os_type) == "unix" && ! [catch {exec cat /etc/issue} issue] } {
        foreach {pattern prefix} [list \
            {Mandriva[ \tA-Za-z]+([0-9]+)}              Mandriva   \
            {Red Hat[ \tA-Za-z]+([0-9]+)}               RedHat     \
            {Debian[ \tA-Za-z/]+([0-9]+)[.]([0-9]+)}    Debian     \
            {CentOS[ \tA-Za-z]+([0-9]+)[.]([0-9]+)}     CentOS     \
            {Scientific[ \tA-Za-z]+([0-9]+)[.]([0-9]+)} SL         \
            {Fedora Core[ \tA-Za-z]+([0-9]+)}           FedoraCore ] {
            if { [regexp $pattern $issue res num subnum] } {
                set env(os_type) $prefix$num$subnum
                break
            }
        }
        if { [exec uname -m] == "x86_64" } {
            set env(os_type) "$env(os_type)-64"
        }
    }
}
_tests_platform_def
1115 | |
# Auxiliary procedure to split path specification (usually defined by
# environment variable) into list of directories or files
proc _split_path {pathspec} {
    global tcl_platform

    # normalize Windows-style backslashes to forward slashes, then split
    # on the platform-specific path separator
    set normalized [string map {\\ /} $pathspec]
    return [split $normalized [_path_separator]]
}
1127 | |
# Auxiliary procedure returning the platform-specific separator used
# between directories in a path specification: ";" on Windows, ":" elsewhere
proc _path_separator {} {
    global tcl_platform

    return [expr {$tcl_platform(platform) eq "windows" ? ";" : ":"}]
}
1140 | |
# Procedure to locate data file for test given its name.
# The search is performed assuming that the function is called
# from the test case script; the search order is:
# - subdirectories <group>/<grid>/data and <group>/data of directories
#   listed in environment variable CSF_TestScriptsPath
# - subdirectories <group>/<grid> and <group> of directories listed in
#   environment variable CSF_TestDataPath
# - directory returned by the datadir command
# If file is not found, raises Tcl error.
proc locate_data_file {filename} {
    global env groupname gridname casename

    # this procedure relies on variables set by the test system,
    # hence it can only be called from within a test script
    set scriptfile [info script]
    if { $scriptfile == "" } {
        error "Error: This procedure (locate_data_file) is for use only in test scripts!"
    }

    # check sub-directories data of the grid and group directories of
    # the current test case in paths indicated by CSF_TestScriptsPath
    if { [info exists groupname] && [info exists gridname] &&
         [info exists env(CSF_TestScriptsPath)] } {
        foreach dir [_split_path $env(CSF_TestScriptsPath)] {
            if { [file exists $dir/$groupname/$gridname/data/$filename] } {
                return [file normalize $dir/$groupname/$gridname/data/$filename]
            }
            if { [file exists $dir/$groupname/data/$filename] } {
                return [file normalize $dir/$groupname/data/$filename]
            }
        }
    }

    # check sub-directories corresponding to group and grid of
    # the current test case in paths indicated by CSF_TestDataPath
    if { [info exists groupname] && [info exists env(CSF_TestDataPath)] } {
        foreach dir [_split_path $env(CSF_TestDataPath)] {
            if { [info exists gridname] && [file exists $dir/$groupname/$gridname/$filename] } {
                return [file normalize $dir/$groupname/$gridname/$filename]
            }
            if { [file exists $dir/$groupname/$filename] } {
                return [file normalize $dir/$groupname/$filename]
            }
        }
    }

    # check the directory returned by the datadir command of the caller
    if { [file exists [uplevel datadir]/$filename] } {
        return [uplevel datadir]/$filename
    }

    # raise error
    error [join [list "Error: file $filename could not be found neither in script" \
                      "directories nor in paths indicated by CSF_TestDataPath environment variable"] "\n"]
}