#!/usr/bin/env bash
#
# Run tests against multiple shells with the sh_spec framework.
#
# Usage:
#   test/spec-runner.sh <function name>

set -o nounset
set -o pipefail
set -o errexit
shopt -s strict:all 2>/dev/null || true  # dogfood for OSH

REPO_ROOT=$(cd "$(dirname $0)/.."; pwd)

source build/dev-shell.sh
source test/common.sh
source test/spec-common.sh
source test/tsv-lib.sh  # $TAB

#
# Test Runner
#

write-suite-manifests() {
  # This takes ~160 ms; it would be nice not to do it 3 times!
  # I guess we can print (suite, name, tag) with duplicates, and then use 'uniq'
  #
  #test/sh_spec.py --print-table spec/*.test.sh

  local dir=_tmp/spec

  { test/sh_spec.py --print-table spec/*.test.sh | while read suite name; do
      case $suite in
        osh) echo $name >& $osh ;;
        ysh) echo $name >& $ysh ;;
        disabled) ;;  # ignore
        *) die "Invalid suite $suite" ;;
      esac
    done
  } {osh}>$dir/SUITE-osh.txt \
    {ysh}>$dir/SUITE-ysh.txt \
    {needs_terminal}>$dir/SUITE-needs-terminal.txt

  # These are kind of pseudo-suites, not the main 3
  test/sh_spec.py --print-tagged interactive \
    spec/*.test.sh > $dir/SUITE-interactive.txt

  test/sh_spec.py --print-tagged dev-minimal \
    spec/*.test.sh > $dir/SUITE-osh-minimal.txt

  # For spec-compat, remove files that other shells aren't expected to run.
  # Keep SUITE-osh the same for historical comparison.

  # I want errexit-osh to be adopted by other shells, so I'm keeping it
  local remove='strict-options'
  #local remove='errexit-osh|strict-options'

  egrep -v "$remove" $dir/SUITE-osh.txt > $dir/SUITE-compat.txt
}
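
# Illustrative sketch, not used by the test flow: write-suite-manifests relies
# on bash's {varname}>file redirection, which opens the file, allocates a new
# file descriptor (>= 10), and stores its number in $varname; 'echo ... >& $fd'
# then writes through it.  The function name and file names below are made up.
_demo-named-fds() {
  mkdir -p _tmp

  { echo 'first manifest' >& $a
    echo 'second manifest' >& $b
  } {a}>_tmp/demo-a.txt {b}>_tmp/demo-b.txt

  # bash leaves {varname} descriptors open after the compound command, so
  # close them explicitly
  exec {a}>&- {b}>&-

  head _tmp/demo-a.txt _tmp/demo-b.txt
}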

print-manifest() {
  local manifest=$1
  if test -n "${SPEC_EGREP:-}"; then
    egrep "$SPEC_EGREP" $manifest
  else
    head -n $NUM_SPEC_TASKS $manifest
  fi
}
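
# Example: SPEC_EGREP narrows the manifest to matching spec file names, e.g.
#
#   SPEC_EGREP='smoke' test/spec-runner.sh all-parallel osh compare-py osh-py
#
# (the pattern and arguments above are only illustrative); when it is unset,
# only the first NUM_SPEC_TASKS entries are run.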

_print-task-file() {
  cat <<'EOF'
#!/usr/bin/env bash
#
# This file is GENERATED -- DO NOT EDIT.
#
# Update it with:
#   test/spec-runner.sh gen-task-file
#
# Usage:
#   test/spec.sh <function name>

: ${LIB_OSH=stdlib/osh}
source $LIB_OSH/bash-strict.sh
source $LIB_OSH/task-five.sh

source build/dev-shell.sh
EOF

  while read spec_name; do
    echo "
$spec_name() {
  test/spec-py.sh run-file $spec_name \"\$@\"
}"
  done

  echo
  echo 'task-five "$@"'
}

gen-task-file() {
  test/sh_spec.py --print-table spec/*.test.sh | while read suite name; do
    echo $name
  done | _print-task-file > test/spec.sh
}
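
# For each spec file name on stdin, _print-task-file appends a stanza like
# this to the generated test/spec.sh (shown for a hypothetical file 'smoke'):
#
#   smoke() {
#     test/spec-py.sh run-file smoke "$@"
#   }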

diff-manifest() {
  ### temporary test

  write-suite-manifests
  #return

  # crazy sorting, affects glob
  # doesn't work
  #LANG=C
  #LC_COLLATE=C
  #LC_ALL=C
  #export LANG LC_COLLATE LC_ALL

  for suite in osh ysh interactive osh-minimal; do
    echo
    echo [$suite]
    echo

    diff -u -r <(sort spec2/SUITE-$suite.txt) <(sort _tmp/spec/SUITE-$suite.txt)  #|| true
  done
}

dispatch-one() {
  # Determines what binaries to compare against: compare-py | compare-cpp | release-alpine
  local compare_mode=${1:-compare-py}
  # Which subdir of _tmp/spec: osh-py ysh-py osh-cpp ysh-cpp smoosh
  local spec_subdir=${2:-osh-py}
  local spec_name=$3
  shift 3  # rest are more flags

  log "__ $spec_name"

  local -a prefix
  case $compare_mode in

    #compare-py) prefix=(test/spec.sh) ;;
    compare-py) prefix=(test/spec-py.sh run-file) ;;

    compare-cpp) prefix=(test/spec-cpp.sh run-file) ;;
    spec-compat) prefix=(test/spec-compat.sh run-file) ;;

    # For interactive comparison
    osh-only) prefix=(test/spec-util.sh run-file-with-osh) ;;
    bash-only) prefix=(test/spec-util.sh run-file-with-bash) ;;

    release-alpine) prefix=(test/spec-alpine.sh run-file) ;;

    *) die "Invalid compare mode $compare_mode" ;;
  esac

  local base_dir=_tmp/spec/$spec_subdir

  # TODO: Could --stats-{file,template} be a separate awk step on .tsv files?
  run-task-with-status \
    $base_dir/${spec_name}.task.txt \
    "${prefix[@]}" $spec_name \
      --format html \
      --stats-file $base_dir/${spec_name}.stats.txt \
      --stats-template \
      '%(num_cases)d %(oils_num_passed)d %(oils_num_failed)d %(oils_failures_allowed)d %(oils_ALT_delta)d' \
      "$@" \
    > $base_dir/${spec_name}.html
}
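
# Sketch of what one dispatch-one invocation leaves behind in $base_dir for
# _html-summary to aggregate (the file name and numbers are made up):
#
#   smoke.task.txt    exit status and wall-clock seconds, e.g.  '0 1.73'
#   smoke.stats.txt   the 5 --stats-template fields, e.g.       '16 15 1 1 0'
#   smoke.html        the per-file results page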


_html-summary() {
  ### Print an HTML summary to stdout and return whether all tests succeeded

  local sh_label=$1  # osh or ysh
  local base_dir=$2  # e.g. _tmp/spec/ysh-cpp
  local totals=$3    # path to print HTML to
  local manifest=$4

  html-head --title "Spec Test Summary" \
    ../../../web/base.css ../../../web/spec-tests.css

  cat <<EOF
  <body class="width50">

<p id="home-link">
  <!-- The release index is two dirs up -->
  <a href="../..">Up</a> |
  <a href="/">oils.pub</a>
</p>

<h1>Spec Test Results Summary</h1>

<table>
  <thead>
  <tr>
    <td>name</td>
    <td># cases</td> <td>$sh_label # passed</td> <td>$sh_label # failed</td>
    <td>$sh_label failures allowed</td>
    <td>$sh_label ALT delta</td>
    <td>Elapsed Seconds</td>
  </tr>
  </thead>
  <!-- TOTALS -->
EOF

  # Awk notes:
  # - "getline" is kind of like bash "read", but it doesn't let you specify
  #   variable names.  You have to destructure it yourself.
  # - Lack of string interpolation is very annoying
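  # - Example:  n = getline < path
  #   reads the next line of 'path' into $0 and splits it into $1, $2, ...
  #   It returns 1 on success, 0 at EOF, and -1 on error, which is why the
  #   error() helper below checks for n != 1.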

  print-manifest $manifest | sort | awk -v totals=$totals -v base_dir=$base_dir '
  # Awk problem: getline errors are ignored by default!
  function error(path) {
    print "Error reading line from file: " path > "/dev/stderr"
    exit(1)
  }

  {
    spec_name = $0

    # Read from the task files
    path = ( base_dir "/" spec_name ".task.txt" )
    n = getline < path
    if (n != 1) {
      error(path)
    }
    status = $1
    wall_secs = $2

    path = ( base_dir "/" spec_name ".stats.txt" )
    n = getline < path
    if (n != 1) {
      error(path)
    }
    num_cases = $1
    oils_num_passed = $2
    oils_num_failed = $3
    oils_failures_allowed = $4
    oils_ALT_delta = $5

    sum_status += status
    sum_wall_secs += wall_secs
    sum_num_cases += num_cases
    sum_oils_num_passed += oils_num_passed
    sum_oils_num_failed += oils_num_failed
    sum_oils_failures_allowed += oils_failures_allowed
    sum_oils_ALT_delta += oils_ALT_delta
    num_rows += 1

    # For the console
    if (status == 0) {
      num_passed += 1
    } else {
      num_failed += 1
      print spec_name " failed with status " status > "/dev/stderr"
    }

    if (status != 0) {
      css_class = "failed"
    } else if (oils_num_failed != 0) {
      css_class = "osh-allow-fail"
    } else if (oils_num_passed != 0) {
      css_class = "osh-pass"
    } else {
      css_class = ""
    }
    print "<tr class=" css_class ">"
    print "<td><a href=" spec_name ".html>" spec_name "</a></td>"
    print "<td>" num_cases "</td>"
    print "<td>" oils_num_passed "</td>"
    print "<td>" oils_num_failed "</td>"
    print "<td>" oils_failures_allowed "</td>"
    print "<td>" oils_ALT_delta "</td>"
    printf("<td>%.2f</td>\n", wall_secs);
    print "</tr>"
  }

  END {
    print "<tr class=totals>" > totals
    print "<td>TOTAL (" num_rows " rows) </td>" > totals
    print "<td>" sum_num_cases "</td>" > totals
    print "<td>" sum_oils_num_passed "</td>" > totals
    print "<td>" sum_oils_num_failed "</td>" > totals
    print "<td>" sum_oils_failures_allowed "</td>" > totals
    print "<td>" sum_oils_ALT_delta "</td>" > totals
    printf("<td>%.2f</td>\n", sum_wall_secs) > totals
    print "</tr>" > totals

    print "<tfoot>"
    print "<!-- TOTALS -->"
    print "</tfoot>"

    # For the console
    print "" > "/dev/stderr"
    if (num_failed == 0) {
      print "*** All " num_passed " tests PASSED" > "/dev/stderr"
    } else {
      print "*** " num_failed " tests FAILED" > "/dev/stderr"
      exit(1)  # failure
    }
  }
  '
  all_passed=$?

  cat <<EOF
  </table>

  <h3>Version Information</h3>
  <pre>
EOF

  # TODO: can pass shells here, e.g. for test/spec-cpp.sh
  test/spec-version.sh ${suite}-version-text
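  # (Note: $suite is not a parameter of this function; it is visible here via
  # bash dynamic scoping, from the 'local suite' in the caller html-summary.)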

  cat <<EOF
  </pre>
  </body>
</html>
EOF

  return $all_passed
}

html-summary() {
  local suite=$1
  local base_dir=$2

  local manifest="_tmp/spec/SUITE-$suite.txt"

  local totals=$base_dir/totals-$suite.html
  local tmp=$base_dir/tmp-$suite.html

  local out=$base_dir/index.html
  # TODO: Do we also need $base_dir/{osh,oil}-details-for-toil.json
  # (osh failures, and all failures)?
  # When deploying, if they exist, then copy them outside?
  # I guess toil_web.py can use the zipfile module to get _tmp/spec/...
  # It can read JSON like:
  #   { "task_tsv": "_tmp/toil/INDEX.tsv",
  #     "details_json": [ ... ],
  #   }

  set +o errexit
  _html-summary $suite $base_dir $totals $manifest > $tmp
  all_passed=$?
  set -o errexit

  # Total rows are displayed at both the top and bottom.
  awk -v totals="$(cat $totals)" '
    /<!-- TOTALS -->/ {
      print totals
      next
    }
    { print }
  ' < $tmp > $out

  echo
  echo "Results: file://$PWD/$out"

  return $all_passed
}
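
# How the totals row ends up at both the top and the bottom: _html-summary
# prints the '<!-- TOTALS -->' marker twice (once after <thead>, once inside
# <tfoot>) and writes the computed totals row only to the $totals side file;
# the awk pass in html-summary then substitutes that row at both markers.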

assert-FOO() {
  # there's a stray 'foo' at the end
  #
  # I bet this is a file descriptor leak from a redirect!
  # Maybe a shell is doing something incorrect?
  # But the manifest shouldn't be open for write?  I guess there could be some
  # swapping
  #
  # Happens with NUM_SPEC_TASKS=100, but not NUM_SPEC_TASKS=50
  # Gah

  if grep foo _tmp/spec/SUITE-osh.txt; then
    echo "BAD FOO"
    exit
  fi
}

_all-parallel() {
  local suite=${1:-osh}
  local compare_mode=${2:-compare-py}
  local spec_subdir=${3:-survey}

  # The rest are more flags
  shift 3

  local manifest="_tmp/spec/SUITE-$suite.txt"
  local output_base_dir="_tmp/spec/$spec_subdir"
  mkdir -p $output_base_dir

  write-suite-manifests

  assert-FOO

  # The exit codes are recorded in files for html-summary to aggregate.
  set +o errexit
  print-manifest $manifest \
    | xargs -I {} -P $MAX_PROCS -- \
      $0 dispatch-one $compare_mode $spec_subdir {} "$@"
  set -o errexit

  assert-FOO

  all-tests-to-html $manifest $output_base_dir

  # note: the HTML links to ../../web/, which is in the repo.
  html-summary $suite $output_base_dir  # returns whether all passed
}

all-parallel() {
  ### Run spec tests in parallel.

  # Note: this function doesn't fail because 'run-file' saves the status to a
  # file.
  time _all-parallel "$@"
}
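
# Example invocation (the argument values here mirror the defaults used by
# _all-parallel and dispatch-one; real callers pass their own combinations):
#
#   test/spec-runner.sh all-parallel osh compare-py osh-py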

src-tree-py() {
  PYTHONPATH='.:vendor/' doctools/src_tree.py "$@"
}

all-tests-to-html() {
  local manifest=$1
  local output_base_dir=$2
  # ignore attrs output
  print-manifest $manifest \
    | xargs --verbose -- $0 src-tree-py spec-files $output_base_dir >/dev/null

    #| xargs -n 1 -P $MAX_PROCS -- $0 test-to-html $output_base_dir
  log "done: all-tests-to-html"
}

shell-sanity-check() {
  echo "PWD = $PWD"
  echo "PATH = $PATH"

  for sh in "$@"; do
    # note: shells are in $PATH, but not $OSH_LIST
    if ! $sh -c 'echo -n "hello from $0: "; command -v $0 || true'; then
      echo "ERROR: $sh failed sanity check"
      return 1
    fi
  done
}
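
# Example: pass the shells being compared against, e.g.
#
#   test/spec-runner.sh shell-sanity-check bash dash mksh zsh
#
# (this particular list of shells is illustrative; callers pass their own)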

filename=$(basename $0)
if test "$filename" = 'spec-runner.sh'; then
  "$@"
fi