#! /bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2011-2018 Free Software Foundation, Inc.

# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.

# This file is maintained in Automake, please report
# bugs to <bug-automake@gnu.org> or send patches to
# <automake-patches@gnu.org>.

scriptversion=2013-12-23.17; # UTC

# Make unconditional expansion of undefined variables an error. This
# helps a lot in preventing typo-related bugs.
set -u

me=tap-driver.sh

fatal ()
{
  echo "$me: fatal: $*" >&2
  exit 1
}

usage_error ()
{
  echo "$me: $*" >&2
  print_usage >&2
  exit 2
}

print_usage ()
{
  cat <<END
Usage:
  tap-driver.sh --test-name=NAME --log-file=PATH --trs-file=PATH
                [--expect-failure={yes|no}] [--color-tests={yes|no}]
                [--enable-hard-errors={yes|no}] [--ignore-exit]
                [--diagnostic-string=STRING] [--merge|--no-merge]
                [--comments|--no-comments] [--] TEST-COMMAND
The '--test-name', '--log-file' and '--trs-file' options are mandatory.
END
}
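
# Illustrative note (not part of the option parsing below, file names
# hypothetical): the test rules generated by Automake typically invoke
# this driver roughly as
#
#   ./tap-driver.sh --test-name=foo.test --log-file=foo.log \
#                   --trs-file=foo.trs --color-tests=yes \
#                   --enable-hard-errors=yes --expect-failure=no \
#                   -- ./foo.test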
48 | ||
49 | # TODO: better error handling in option parsing (in particular, ensure | |
50 | # TODO: $log_file, $trs_file and $test_name are defined). | |
51 | test_name= # Used for reporting. | |
52 | log_file= # Where to save the result and output of the test script. | |
53 | trs_file= # Where to save the metadata of the test run. | |
54 | expect_failure=0 | |
55 | color_tests=0 | |
56 | merge=0 | |
57 | ignore_exit=0 | |
58 | comments=0 | |
59 | diag_string='#' | |
60 | while test $# -gt 0; do | |
61 | case $1 in | |
62 | --help) print_usage; exit $?;; | |
63 | --version) echo "$me $scriptversion"; exit $?;; | |
64 | --test-name) test_name=$2; shift;; | |
65 | --log-file) log_file=$2; shift;; | |
66 | --trs-file) trs_file=$2; shift;; | |
67 | --color-tests) color_tests=$2; shift;; | |
68 | --expect-failure) expect_failure=$2; shift;; | |
69 | --enable-hard-errors) shift;; # No-op. | |
70 | --merge) merge=1;; | |
71 | --no-merge) merge=0;; | |
72 | --ignore-exit) ignore_exit=1;; | |
73 | --comments) comments=1;; | |
74 | --no-comments) comments=0;; | |
75 | --diagnostic-string) diag_string=$2; shift;; | |
76 | --) shift; break;; | |
77 | -*) usage_error "invalid option: '$1'";; | |
78 | esac | |
79 | shift | |
80 | done | |
81 | ||
82 | test $# -gt 0 || usage_error "missing test command" | |
83 | ||
84 | case $expect_failure in | |
85 | yes) expect_failure=1;; | |
86 | *) expect_failure=0;; | |
87 | esac | |
88 | ||
89 | if test $color_tests = yes; then | |
90 | init_colors=' | |
91 | color_map["red"]="\e[0;31m" # Red. | |
92 | color_map["grn"]="\e[0;32m" # Green. | |
93 | color_map["lgn"]="\e[1;32m" # Light green. | |
94 | color_map["blu"]="\e[1;34m" # Blue. | |
95 | color_map["mgn"]="\e[0;35m" # Magenta. | |
96 | color_map["std"]="\e[m" # No color. | |
97 | color_for_result["ERROR"] = "mgn" | |
98 | color_for_result["PASS"] = "grn" | |
99 | color_for_result["XPASS"] = "red" | |
100 | color_for_result["FAIL"] = "red" | |
101 | color_for_result["XFAIL"] = "lgn" | |
102 | color_for_result["SKIP"] = "blu"' | |
103 | else | |
104 | init_colors='' | |
105 | fi | |
106 | ||
107 | # :; is there to work around a bug in bash 3.2 (and earlier) which | |
108 | # does not always set '$?' properly on redirection failure. | |
109 | # See the Autoconf manual for more details. | |
110 | :;{ | |
111 | ( | |
112 | # Ignore common signals (in this subshell only!), to avoid potential | |
113 | # problems with Korn shells. Some Korn shells are known to propagate | |
114 | # to themselves signals that have killed a child process they were | |
115 | # waiting for; this is done at least for SIGINT (and usually only for | |
116 | # it, in truth). Without the `trap' below, such a behaviour could | |
117 | # cause a premature exit in the current subshell, e.g., in case the | |
118 | # test command it runs gets terminated by a SIGINT. Thus, the awk | |
119 | # script we are piping into would never seen the exit status it | |
120 | # expects on its last input line (which is displayed below by the | |
121 | # last `echo $?' statement), and would thus die reporting an internal | |
122 | # error. | |
123 | # For more information, see the Autoconf manual and the threads: | |
78d75878 | 124 | # <https://lists.gnu.org/archive/html/bug-autoconf/2011-09/msg00004.html> |
4b2d70a7 MJ |
125 | # <http://mail.opensolaris.org/pipermail/ksh93-integration-discuss/2009-February/004121.html> |
126 | trap : 1 3 2 13 15 | |
127 | if test $merge -gt 0; then | |
128 | exec 2>&1 | |
129 | else | |
130 | exec 2>&3 | |
131 | fi | |
132 | "$@" | |
133 | echo $? | |
  ) | LC_ALL=C ${AM_TAP_AWK-awk} \
        -v me="$me" \
        -v test_script_name="$test_name" \
        -v log_file="$log_file" \
        -v trs_file="$trs_file" \
        -v expect_failure="$expect_failure" \
        -v merge="$merge" \
        -v ignore_exit="$ignore_exit" \
        -v comments="$comments" \
        -v diag_string="$diag_string" \
'
# TODO: the usages of "cat >&3" below could be optimized when using
# GNU awk, and/or on systems that support /dev/fd/.
147 | ||
148 | # Implementation note: in what follows, `result_obj` will be an | |
149 | # associative array that (partly) simulates a TAP result object | |
150 | # from the `TAP::Parser` perl module. | |
151 | ||
152 | ## ----------- ## | |
153 | ## FUNCTIONS ## | |
154 | ## ----------- ## | |
155 | ||
156 | function fatal(msg) | |
157 | { | |
158 | print me ": " msg | "cat >&2" | |
159 | exit 1 | |
160 | } | |
161 | ||
162 | function abort(where) | |
163 | { | |
164 | fatal("internal error " where) | |
165 | } | |
166 | ||
167 | # Convert a boolean to a "yes"/"no" string. | |
168 | function yn(bool) | |
169 | { | |
170 | return bool ? "yes" : "no"; | |
171 | } | |
172 | ||
173 | function add_test_result(result) | |
174 | { | |
175 | if (!test_results_index) | |
176 | test_results_index = 0 | |
177 | test_results_list[test_results_index] = result | |
178 | test_results_index += 1 | |
179 | test_results_seen[result] = 1; | |
180 | } | |
181 | ||
182 | # Whether the test script should be re-run by "make recheck". | |
183 | function must_recheck() | |
184 | { | |
185 | for (k in test_results_seen) | |
186 | if (k != "XFAIL" && k != "PASS" && k != "SKIP") | |
187 | return 1 | |
188 | return 0 | |
189 | } | |
190 | ||
191 | # Whether the content of the log file associated to this test should | |
192 | # be copied into the "global" test-suite.log. | |
193 | function copy_in_global_log() | |
194 | { | |
195 | for (k in test_results_seen) | |
196 | if (k != "PASS") | |
197 | return 1 | |
198 | return 0 | |
199 | } | |
200 | ||
201 | function get_global_test_result() | |
202 | { | |
203 | if ("ERROR" in test_results_seen) | |
204 | return "ERROR" | |
205 | if ("FAIL" in test_results_seen || "XPASS" in test_results_seen) | |
206 | return "FAIL" | |
207 | all_skipped = 1 | |
208 | for (k in test_results_seen) | |
209 | if (k != "SKIP") | |
210 | all_skipped = 0 | |
211 | if (all_skipped) | |
212 | return "SKIP" | |
213 | return "PASS"; | |
214 | } | |
215 | ||
216 | function stringify_result_obj(result_obj) | |
217 | { | |
218 | if (result_obj["is_unplanned"] || result_obj["number"] != testno) | |
219 | return "ERROR" | |
220 | ||
221 | if (plan_seen == LATE_PLAN) | |
222 | return "ERROR" | |
223 | ||
224 | if (result_obj["directive"] == "TODO") | |
225 | return result_obj["is_ok"] ? "XPASS" : "XFAIL" | |
226 | ||
227 | if (result_obj["directive"] == "SKIP") | |
228 | return result_obj["is_ok"] ? "SKIP" : COOKED_FAIL; | |
229 | ||
230 | if (length(result_obj["directive"])) | |
231 | abort("in function stringify_result_obj()") | |
232 | ||
233 | return result_obj["is_ok"] ? COOKED_PASS : COOKED_FAIL | |
234 | } | |
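
# Illustrative note only: with the default --expect-failure=no, a plain
# passing result maps to PASS and a plain failing one to FAIL; a result
# carrying a TODO directive maps to XPASS when it passes and XFAIL when
# it fails. With --expect-failure=yes, COOKED_PASS and COOKED_FAIL are
# XPASS and XFAIL instead, so plain passes and failures swap labels
# accordingly.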
235 | ||
236 | function decorate_result(result) | |
237 | { | |
238 | color_name = color_for_result[result] | |
239 | if (color_name) | |
240 | return color_map[color_name] "" result "" color_map["std"] | |
241 | # If we are not using colorized output, or if we do not know how | |
242 | # to colorize the given result, we should return it unchanged. | |
243 | return result | |
244 | } | |
245 | ||
246 | function report(result, details) | |
247 | { | |
248 | if (result ~ /^(X?(PASS|FAIL)|SKIP|ERROR)/) | |
249 | { | |
250 | msg = ": " test_script_name | |
251 | add_test_result(result) | |
252 | } | |
253 | else if (result == "#") | |
254 | { | |
255 | msg = " " test_script_name ":" | |
256 | } | |
257 | else | |
258 | { | |
259 | abort("in function report()") | |
260 | } | |
261 | if (length(details)) | |
262 | msg = msg " " details | |
263 | # Output on console might be colorized. | |
264 | print decorate_result(result) msg | |
5b578e8a MJ |
265 | # Flush stdout after each test result, this is useful when stdout |
266 | # is buffered, for example in a CI system. | |
267 | fflush() | |
4b2d70a7 MJ |
268 | # Log the result in the log file too, to help debugging (this is |
269 | # especially true when said result is a TAP error or "Bail out!"). | |
270 | print result msg | "cat >&3"; | |
271 | } | |
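
# Illustrative note only (test name hypothetical): for a script named
# "foo.test", report("PASS", "1 - does something") prints
#   PASS: foo.test 1 - does something
# on the console (possibly colorized) and appends the same line,
# uncolorized, to the log file through file descriptor 3.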
272 | ||
273 | function testsuite_error(error_message) | |
274 | { | |
275 | report("ERROR", "- " error_message) | |
276 | } | |
277 | ||
278 | function handle_tap_result() | |
279 | { | |
280 | details = result_obj["number"]; | |
281 | if (length(result_obj["description"])) | |
282 | details = details " " result_obj["description"] | |
283 | ||
284 | if (plan_seen == LATE_PLAN) | |
285 | { | |
286 | details = details " # AFTER LATE PLAN"; | |
287 | } | |
288 | else if (result_obj["is_unplanned"]) | |
289 | { | |
290 | details = details " # UNPLANNED"; | |
291 | } | |
292 | else if (result_obj["number"] != testno) | |
293 | { | |
294 | details = sprintf("%s # OUT-OF-ORDER (expecting %d)", | |
295 | details, testno); | |
296 | } | |
297 | else if (result_obj["directive"]) | |
298 | { | |
299 | details = details " # " result_obj["directive"]; | |
300 | if (length(result_obj["explanation"])) | |
301 | details = details " " result_obj["explanation"] | |
302 | } | |
303 | ||
304 | report(stringify_result_obj(result_obj), details) | |
305 | } | |
306 | ||
307 | # `skip_reason` should be empty whenever planned > 0. | |
308 | function handle_tap_plan(planned, skip_reason) | |
309 | { | |
310 | planned += 0 # Avoid getting confused if, say, `planned` is "00" | |
311 | if (length(skip_reason) && planned > 0) | |
312 | abort("in function handle_tap_plan()") | |
313 | if (plan_seen) | |
314 | { | |
315 | # Error, only one plan per stream is acceptable. | |
316 | testsuite_error("multiple test plans") | |
317 | return; | |
318 | } | |
319 | planned_tests = planned | |
320 | # The TAP plan can come before or after *all* the TAP results; we speak | |
321 | # respectively of an "early" or a "late" plan. If we see the plan line | |
322 | # after at least one TAP result has been seen, assume we have a late | |
323 | # plan; in this case, any further test result seen after the plan will | |
324 | # be flagged as an error. | |
325 | plan_seen = (testno >= 1 ? LATE_PLAN : EARLY_PLAN) | |
326 | # If testno > 0, we have an error ("too many tests run") that will be | |
327 | # automatically dealt with later, so do not worry about it here. If | |
328 | # $plan_seen is true, we have an error due to a repeated plan, and that | |
329 | # has already been dealt with above. Otherwise, we have a valid "plan | |
330 | # with SKIP" specification, and should report it as a particular kind | |
331 | # of SKIP result. | |
332 | if (planned == 0 && testno == 0) | |
333 | { | |
334 | if (length(skip_reason)) | |
335 | skip_reason = "- " skip_reason; | |
336 | report("SKIP", skip_reason); | |
337 | } | |
338 | } | |
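
# Illustrative note only (stream contents hypothetical): an "early" plan
# precedes all results, a "late" plan follows them.
#
#   early plan:          late plan:
#     1..2                 ok 1 - first
#     ok 1 - first         ok 2 - second
#     ok 2 - second        1..2
#
# Any result seen after a late plan is flagged as an ERROR.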
339 | ||
340 | function extract_tap_comment(line) | |
341 | { | |
342 | if (index(line, diag_string) == 1) | |
343 | { | |
344 | # Strip leading `diag_string` from `line`. | |
345 | line = substr(line, length(diag_string) + 1) | |
346 | # And strip any leading and trailing whitespace left. | |
347 | sub("^[ \t]*", "", line) | |
348 | sub("[ \t]*$", "", line) | |
349 | # Return what is left (if any). | |
350 | return line; | |
351 | } | |
352 | return ""; | |
353 | } | |
354 | ||
355 | # When this function is called, we know that line is a TAP result line, | |
356 | # so that it matches the (perl) RE "^(not )?ok\b". | |
357 | function setup_result_obj(line) | |
358 | { | |
359 | # Get the result, and remove it from the line. | |
360 | result_obj["is_ok"] = (substr(line, 1, 2) == "ok" ? 1 : 0) | |
361 | sub("^(not )?ok[ \t]*", "", line) | |
362 | ||
363 | # If the result has an explicit number, get it and strip it; otherwise, | |
364 | # automatically assing the next progresive number to it. | |
365 | if (line ~ /^[0-9]+$/ || line ~ /^[0-9]+[^a-zA-Z0-9_]/) | |
366 | { | |
367 | match(line, "^[0-9]+") | |
368 | # The final `+ 0` is to normalize numbers with leading zeros. | |
369 | result_obj["number"] = substr(line, 1, RLENGTH) + 0 | |
370 | line = substr(line, RLENGTH + 1) | |
371 | } | |
372 | else | |
373 | { | |
374 | result_obj["number"] = testno | |
375 | } | |
376 | ||
377 | if (plan_seen == LATE_PLAN) | |
378 | # No further test results are acceptable after a "late" TAP plan | |
379 | # has been seen. | |
380 | result_obj["is_unplanned"] = 1 | |
381 | else if (plan_seen && testno > planned_tests) | |
382 | result_obj["is_unplanned"] = 1 | |
383 | else | |
384 | result_obj["is_unplanned"] = 0 | |
385 | ||
386 | # Strip trailing and leading whitespace. | |
387 | sub("^[ \t]*", "", line) | |
388 | sub("[ \t]*$", "", line) | |
389 | ||
390 | # This will have to be corrected if we have a "TODO"/"SKIP" directive. | |
391 | result_obj["description"] = line | |
392 | result_obj["directive"] = "" | |
393 | result_obj["explanation"] = "" | |
394 | ||
395 | if (index(line, "#") == 0) | |
396 | return # No possible directive, nothing more to do. | |
397 | ||
398 | # Directives are case-insensitive. | |
399 | rx = "[ \t]*#[ \t]*([tT][oO][dD][oO]|[sS][kK][iI][pP])[ \t]*" | |
400 | ||
401 | # See whether we have the directive, and if yes, where. | |
402 | pos = match(line, rx "$") | |
403 | if (!pos) | |
404 | pos = match(line, rx "[^a-zA-Z0-9_]") | |
405 | ||
406 | # If there was no TAP directive, we have nothing more to do. | |
407 | if (!pos) | |
408 | return | |
409 | ||
410 | # Let`s now see if the TAP directive has been escaped. For example: | |
411 | # escaped: ok \# SKIP | |
412 | # not escaped: ok \\# SKIP | |
413 | # escaped: ok \\\\\# SKIP | |
414 | # not escaped: ok \ # SKIP | |
415 | if (substr(line, pos, 1) == "#") | |
416 | { | |
417 | bslash_count = 0 | |
418 | for (i = pos; i > 1 && substr(line, i - 1, 1) == "\\"; i--) | |
419 | bslash_count += 1 | |
420 | if (bslash_count % 2) | |
421 | return # Directive was escaped. | |
422 | } | |
423 | ||
424 | # Strip the directive and its explanation (if any) from the test | |
425 | # description. | |
426 | result_obj["description"] = substr(line, 1, pos - 1) | |
427 | # Now remove the test description from the line, that has been dealt | |
428 | # with already. | |
429 | line = substr(line, pos) | |
430 | # Strip the directive, and save its value (normalized to upper case). | |
431 | sub("^[ \t]*#[ \t]*", "", line) | |
432 | result_obj["directive"] = toupper(substr(line, 1, 4)) | |
433 | line = substr(line, 5) | |
434 | # Now get the explanation for the directive (if any), with leading | |
435 | # and trailing whitespace removed. | |
436 | sub("^[ \t]*", "", line) | |
437 | sub("[ \t]*$", "", line) | |
438 | result_obj["explanation"] = line | |
439 | } | |
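
# Illustrative note only (input line hypothetical): for the TAP line
#   ok 3 - frobnicate the baz # TODO needs porting
# this function leaves result_obj with is_ok = 1, number = 3,
# description = "- frobnicate the baz", directive = "TODO" and
# explanation = "needs porting".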
440 | ||
441 | function get_test_exit_message(status) | |
442 | { | |
443 | if (status == 0) | |
444 | return "" | |
445 | if (status !~ /^[1-9][0-9]*$/) | |
446 | abort("getting exit status") | |
447 | if (status < 127) | |
448 | exit_details = "" | |
449 | else if (status == 127) | |
450 | exit_details = " (command not found?)" | |
451 | else if (status >= 128 && status <= 255) | |
452 | exit_details = sprintf(" (terminated by signal %d?)", status - 128) | |
453 | else if (status > 256 && status <= 384) | |
454 | # We used to report an "abnormal termination" here, but some Korn | |
455 | # shells, when a child process die due to signal number n, can leave | |
456 | # in $? an exit status of 256+n instead of the more standard 128+n. | |
457 | # Apparently, both behaviours are allowed by POSIX (2008), so be | |
458 | # prepared to handle them both. See also Austing Group report ID | |
459 | # 0000051 <http://www.austingroupbugs.net/view.php?id=51> | |
460 | exit_details = sprintf(" (terminated by signal %d?)", status - 256) | |
461 | else | |
462 | # Never seen in practice. | |
463 | exit_details = " (abnormal termination)" | |
464 | return sprintf("exited with status %d%s", status, exit_details) | |
465 | } | |
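
# Illustrative note only: a status of 1 yields "exited with status 1",
# 127 yields "exited with status 127 (command not found?)", and 139
# yields "exited with status 139 (terminated by signal 11?)".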
466 | ||
467 | function write_test_results() | |
468 | { | |
469 | print ":global-test-result: " get_global_test_result() > trs_file | |
470 | print ":recheck: " yn(must_recheck()) > trs_file | |
471 | print ":copy-in-global-log: " yn(copy_in_global_log()) > trs_file | |
472 | for (i = 0; i < test_results_index; i += 1) | |
473 | print ":test-result: " test_results_list[i] > trs_file | |
474 | close(trs_file); | |
475 | } | |
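
# Illustrative note only (results hypothetical): for a run with two
# passing tests and one skipped test, the generated .trs file would
# contain something like
#   :global-test-result: PASS
#   :recheck: no
#   :copy-in-global-log: yes
#   :test-result: PASS
#   :test-result: SKIP
#   :test-result: PASS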
476 | ||
477 | BEGIN { | |
478 | ||
479 | ## ------- ## | |
480 | ## SETUP ## | |
481 | ## ------- ## | |
482 | ||
483 | '"$init_colors"' | |
484 | ||
485 | # Properly initialized once the TAP plan is seen. | |
486 | planned_tests = 0 | |
487 | ||
488 | COOKED_PASS = expect_failure ? "XPASS": "PASS"; | |
489 | COOKED_FAIL = expect_failure ? "XFAIL": "FAIL"; | |
490 | ||
491 | # Enumeration-like constants to remember which kind of plan (if any) | |
492 | # has been seen. It is important that NO_PLAN evaluates "false" as | |
493 | # a boolean. | |
494 | NO_PLAN = 0 | |
495 | EARLY_PLAN = 1 | |
496 | LATE_PLAN = 2 | |
497 | ||
498 | testno = 0 # Number of test results seen so far. | |
499 | bailed_out = 0 # Whether a "Bail out!" directive has been seen. | |
500 | ||
501 | # Whether the TAP plan has been seen or not, and if yes, which kind | |
502 | # it is ("early" is seen before any test result, "late" otherwise). | |
503 | plan_seen = NO_PLAN | |
504 | ||
505 | ## --------- ## | |
506 | ## PARSING ## | |
507 | ## --------- ## | |
508 | ||
509 | is_first_read = 1 | |
510 | ||
511 | while (1) | |
512 | { | |
513 | # Involutions required so that we are able to read the exit status | |
514 | # from the last input line. | |
515 | st = getline | |
516 | if (st < 0) # I/O error. | |
517 | fatal("I/O error while reading from input stream") | |
518 | else if (st == 0) # End-of-input | |
519 | { | |
520 | if (is_first_read) | |
521 | abort("in input loop: only one input line") | |
522 | break | |
523 | } | |
524 | if (is_first_read) | |
525 | { | |
526 | is_first_read = 0 | |
527 | nextline = $0 | |
528 | continue | |
529 | } | |
530 | else | |
531 | { | |
532 | curline = nextline | |
533 | nextline = $0 | |
534 | $0 = curline | |
535 | } | |
536 | # Copy any input line verbatim into the log file. | |
537 | print | "cat >&3" | |
538 | # Parsing of TAP input should stop after a "Bail out!" directive. | |
539 | if (bailed_out) | |
540 | continue | |
541 | ||
542 | # TAP test result. | |
543 | if ($0 ~ /^(not )?ok$/ || $0 ~ /^(not )?ok[^a-zA-Z0-9_]/) | |
544 | { | |
545 | testno += 1 | |
546 | setup_result_obj($0) | |
547 | handle_tap_result() | |
548 | } | |
549 | # TAP plan (normal or "SKIP" without explanation). | |
550 | else if ($0 ~ /^1\.\.[0-9]+[ \t]*$/) | |
551 | { | |
552 | # The next two lines will put the number of planned tests in $0. | |
553 | sub("^1\\.\\.", "") | |
554 | sub("[^0-9]*$", "") | |
555 | handle_tap_plan($0, "") | |
556 | continue | |
557 | } | |
558 | # TAP "SKIP" plan, with an explanation. | |
559 | else if ($0 ~ /^1\.\.0+[ \t]*#/) | |
560 | { | |
561 | # The next lines will put the skip explanation in $0, stripping | |
562 | # any leading and trailing whitespace. This is a little more | |
563 | # tricky in truth, since we want to also strip a potential leading | |
564 | # "SKIP" string from the message. | |
565 | sub("^[^#]*#[ \t]*(SKIP[: \t][ \t]*)?", "") | |
566 | sub("[ \t]*$", ""); | |
567 | handle_tap_plan(0, $0) | |
568 | } | |
569 | # "Bail out!" magic. | |
570 | # Older versions of prove and TAP::Harness (e.g., 3.17) did not | |
571 | # recognize a "Bail out!" directive when preceded by leading | |
572 | # whitespace, but more modern versions (e.g., 3.23) do. So we | |
573 | # emulate the latter, "more modern" behaviour. | |
574 | else if ($0 ~ /^[ \t]*Bail out!/) | |
575 | { | |
576 | bailed_out = 1 | |
577 | # Get the bailout message (if any), with leading and trailing | |
578 | # whitespace stripped. The message remains stored in `$0`. | |
579 | sub("^[ \t]*Bail out![ \t]*", ""); | |
580 | sub("[ \t]*$", ""); | |
581 | # Format the error message for the | |
582 | bailout_message = "Bail out!" | |
583 | if (length($0)) | |
584 | bailout_message = bailout_message " " $0 | |
585 | testsuite_error(bailout_message) | |
586 | } | |
587 | # Maybe we have too look for dianogtic comments too. | |
588 | else if (comments != 0) | |
589 | { | |
590 | comment = extract_tap_comment($0); | |
591 | if (length(comment)) | |
592 | report("#", comment); | |
593 | } | |
594 | } | |
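
# Illustrative note only (stream hypothetical): assuming the default
# --expect-failure=no, the TAP input
#   1..2
#   ok 1 - first
#   not ok 2 - second
# followed by the exit status line appended by the shell wrapper above
# makes this loop report "PASS: NAME 1 - first" and
# "FAIL: NAME 2 - second" (with NAME being the --test-name argument),
# before the checks below run.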
595 | ||
596 | ## -------- ## | |
597 | ## FINISH ## | |
598 | ## -------- ## | |
599 | ||
600 | # A "Bail out!" directive should cause us to ignore any following TAP | |
601 | # error, as well as a non-zero exit status from the TAP producer. | |
602 | if (!bailed_out) | |
603 | { | |
604 | if (!plan_seen) | |
605 | { | |
606 | testsuite_error("missing test plan") | |
607 | } | |
608 | else if (planned_tests != testno) | |
609 | { | |
610 | bad_amount = testno > planned_tests ? "many" : "few" | |
611 | testsuite_error(sprintf("too %s tests run (expected %d, got %d)", | |
612 | bad_amount, planned_tests, testno)) | |
613 | } | |
614 | if (!ignore_exit) | |
615 | { | |
616 | # Fetch exit status from the last line. | |
617 | exit_message = get_test_exit_message(nextline) | |
618 | if (exit_message) | |
619 | testsuite_error(exit_message) | |
620 | } | |
621 | } | |
622 | ||
623 | write_test_results() | |
624 | ||
625 | exit 0 | |
626 | ||
627 | } # End of "BEGIN" block. | |
628 | ' | |
629 | ||
630 | # TODO: document that we consume the file descriptor 3 :-( | |
631 | } 3>"$log_file" | |
632 | ||
633 | test $? -eq 0 || fatal "I/O or internal error" | |
634 | ||
635 | # Local Variables: | |
636 | # mode: shell-script | |
637 | # sh-indentation: 2 | |
78d75878 | 638 | # eval: (add-hook 'before-save-hook 'time-stamp) |
4b2d70a7 MJ |
639 | # time-stamp-start: "scriptversion=" |
640 | # time-stamp-format: "%:y-%02m-%02d.%02H" | |
78d75878 | 641 | # time-stamp-time-zone: "UTC0" |
4b2d70a7 MJ |
642 | # time-stamp-end: "; # UTC" |
643 | # End: |