#!/usr/bin/env bash
#
# Measure how fast the OSH parser is.
#
# Usage:
#   benchmarks/osh-parser.sh <function name>
#
# Examples:
#   benchmarks/osh-parser.sh soil-run
#   QUICKLY=1 benchmarks/osh-parser.sh soil-run

set -o nounset
set -o pipefail
set -o errexit

REPO_ROOT=$(cd "$(dirname $0)/.."; pwd)  # tsv-lib.sh uses this
readonly REPO_ROOT

source benchmarks/common.sh  # die
source benchmarks/cachegrind.sh  # with-cachegrind
source build/dev-shell.sh  # python2
source test/tsv-lib.sh  # tsv2html
source test/common.sh  # die

# TODO: The raw files should be published. In both
# ~/git/oilshell/benchmarks-data and also in the /release/ hierarchy?
readonly BASE_DIR=_tmp/osh-parser
readonly SORTED=$BASE_DIR/tmp/sorted.txt

write-sorted-manifest() {
  local files=${1:-benchmarks/osh-parser-files.txt}
  local counts=$BASE_DIR/tmp/line-counts.txt
  local csv_out=$2
  local sep=${3:-','}  # CSV or TSV

  # Remove comments and sort by line count
  grep -v '^#' $files | xargs wc -l | sort -n > $counts

  # Raw list of paths
  cat $counts | awk '$2 != "total" { print $2 }' > $SORTED

  # Make a CSV file from wc output
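  # The output has a header row plus one row per script, e.g. (line count is
  # illustrative):
  #   num_lines,path
  #   13000,benchmarks/testdata/configure-coreutils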
  cat $counts | awk -v sep="$sep" '
  BEGIN { print "num_lines" sep "path" }
  $2 != "total" { print $1 sep $2 }' \
    > $csv_out
}

# Called by xargs with a task row.
parser-task() {
  local out_dir=$1  # output
  local job_id=$2
  local host=$3
  local host_hash=$4
  local sh_path=$5
  local shell_hash=$6
  local script_path=$7

  echo "--- TIME $sh_path $script_path ---"

  local times_out="$out_dir/$host.$job_id.times.csv"

  local shell_name
  case $sh_path in
    _bin/*/mycpp-souffle/*)
      shell_name=osh-souffle
      ;;
    *)
      shell_name=$(basename $sh_path)
      ;;
  esac

  # Can't use array because of set -u bug!!! Only fixed in bash 4.4.
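  # (Sketch of that bug, not specific to this script: with set -u, bash < 4.4
  # rejects expanding an empty array, e.g.
  #   declare -a args=(); echo "${args[@]}"   # 'args[@]: unbound variable'
  # so a plain string is used instead.)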
  extra_args=''
  case "$shell_name" in
    osh*|oils-for-unix.*)
      extra_args='--ast-format none'
      ;;
  esac

  # exit code, time in seconds, host_hash, shell_hash, path. \0
  # would have been nice here!
  # TODO: TSV
  benchmarks/time_.py \
    --append \
    --output $times_out \
    --rusage \
    --field "$host" --field "$host_hash" \
    --field "$shell_name" --field "$shell_hash" \
    --field "$script_path" -- \
    "$sh_path" -n $extra_args "$script_path" || echo FAILED
}

# Called by xargs with a task row.
# NOTE: This is very similar to the function above, except that we add
# cachegrind. We could probably consolidate these.
cachegrind-task() {
  local out_dir=$1  # output
  local job_id=$2
  local host_name=$3
  local unused2=$4
  local sh_path=$5
  local shell_hash=$6
  local script_path=$7

  echo "--- CACHEGRIND $sh_path $script_path ---"

  local host_job_id="$host_name.$job_id"

  # NOTE: This has to match the path that the header was written to
  local times_out="$out_dir/$host_job_id.cachegrind.tsv"

  local cachegrind_out_dir="$host_job_id.cachegrind"
  mkdir -p $out_dir/$cachegrind_out_dir

  local shell_name
  case $sh_path in
    _bin/*/mycpp-souffle/*)
      shell_name=osh-souffle
      ;;
    *)
      shell_name=$(basename $sh_path)
      ;;
  esac

  local script_name
  script_name=$(basename $script_path)

  # RELATIVE PATH
  local cachegrind_out_path="${cachegrind_out_dir}/${shell_name}-${shell_hash}__${script_name}.txt"
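  # Shape of that path (placeholders, not real values):
  #   <host_name>.<job_id>.cachegrind/<shell_name>-<shell_hash>__<script_name>.txt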

  # Can't use array because of set -u bug!!! Only fixed in bash 4.4.
  extra_args=''
  case "$shell_name" in
    osh*|oils-for-unix.*)
      extra_args="--ast-format none"
      ;;
  esac

  benchmarks/time_.py \
    --tsv \
    --append \
    --output $times_out \
    --rusage \
    --field "$shell_name" --field "$shell_hash" \
    --field "$script_path" \
    --field $cachegrind_out_path \
    -- \
    $0 with-cachegrind $out_dir/$cachegrind_out_path \
    "$sh_path" -n $extra_args "$script_path" || echo FAILED
}

# For each shell, print 10 script paths.
print-tasks() {
  local provenance=$1
  shift
  # rest are shells

  # Add 1 field (the script path) to each provenance row of 5 fields.
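  # Each emitted row therefore has 6 fields, in the order that parser-task and
  # cachegrind-task expect after $out_dir:
  #   <job_id> <host> <host_hash> <sh_path> <shell_hash> <script_path>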
  cat $provenance | filter-provenance "$@" |
  while read fields; do
    if test -n "${QUICKLY:-}"; then
      # Quick test
      head -n 2 $SORTED | xargs -n 1 -- echo "$fields"
    else
      cat $SORTED | xargs -n 1 -- echo "$fields"
    fi
  done
}

cachegrind-parse-configure-coreutils() {
  ### Similar to benchmarks/gc, benchmarks/uftrace

  local bin=_bin/cxx-opt/oils-for-unix
  ninja $bin
  local out=_tmp/parse.configure-coreutils.txt

  local -a cmd=(
    $bin --ast-format none -n
    benchmarks/testdata/configure-coreutils )

  time "${cmd[@]}"

  time cachegrind $out "${cmd[@]}"

  echo
  cat $out
}

cachegrind-demo() {
  #local sh=bash
  local sh=zsh

  local out_dir=_tmp/cachegrind

  mkdir -p $out_dir

  # notes:
  # - not passing --trace-children (follow execvpe)
  # - passing --xml=yes gives error: cachegrind doesn't support XML
  # - there is a log out and a details out

  valgrind --tool=cachegrind \
    --log-file=$out_dir/log.txt \
    --cachegrind-out-file=$out_dir/details.txt \
    -- $sh -c 'echo hi'

  echo
  head -n 20 $out_dir/*.txt
}

readonly NUM_TASK_COLS=6  # input columns: 5 from provenance, 1 for file

# Figure out all tasks to run, and run them. When called from auto.sh, $2
# should be the ../benchmarks-data repo.
measure() {
  local provenance=$1
  local host_job_id=$2
  local out_dir=${3:-$BASE_DIR/raw}
  shift 3
  local -a osh_cpp=( "${@:-$OSH_CPP_TWO}" )

  local times_out="$out_dir/$host_job_id.times.csv"
  local lines_out="$out_dir/$host_job_id.lines.csv"

  mkdir -p $BASE_DIR/{tmp,raw,stage1} $out_dir

  # Files that we should measure. Exploded into tasks.
  write-sorted-manifest '' $lines_out

  # Write Header of the CSV file that is appended to.
  # TODO: TSV
  benchmarks/time_.py --print-header \
    --rusage \
    --field host_name --field host_hash \
    --field shell_name --field shell_hash \
    --field path \
    > $times_out

  local tasks=$BASE_DIR/tasks.txt
  print-tasks $provenance "${SHELLS[@]}" "${osh_cpp[@]}" > $tasks

  # Run them all
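  # xargs chops the task file into groups of $NUM_TASK_COLS arguments, so each
  # group becomes one invocation:
  #   $0 parser-task $out_dir <task row fields>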
  cat $tasks | xargs -n $NUM_TASK_COLS -- $0 parser-task $out_dir
}

measure-cachegrind() {
  local provenance=$1
  local host_job_id=$2
  local out_dir=${3:-$BASE_DIR/raw}
  shift 3
  local -a osh_cpp=( "${@:-$OSH_CPP_TWO}" )

  local cachegrind_tsv="$out_dir/$host_job_id.cachegrind.tsv"
  local lines_out="$out_dir/$host_job_id.lines.tsv"

  mkdir -p $BASE_DIR/{tmp,raw,stage1} $out_dir

  write-sorted-manifest '' $lines_out $'\t'  # TSV

  # TODO: This header is fragile. Every task should print its own file with a
  # header, and then we can run them in parallel, and join them with
  # devtools/csv_concat.py

  benchmarks/time_.py --tsv --print-header \
    --rusage \
    --field shell_name --field shell_hash \
    --field path \
    --field cachegrind_out_path \
    > $cachegrind_tsv

  local ctasks=$BASE_DIR/cachegrind-tasks.txt

  # zsh weirdly forks during zsh -n, which complicates our cachegrind
  # measurement. So just ignore it. (This can be seen with
  # strace -e fork -f -- zsh -n $file)
  print-tasks $provenance bash dash mksh "${osh_cpp[@]}" > $ctasks

  cat $ctasks | xargs -n $NUM_TASK_COLS -- $0 cachegrind-task $out_dir
}

#
# Data Preparation and Analysis
#

stage1-cachegrind() {
  local raw_dir=$1
  local single_machine=$2
  local out_dir=$3
  local raw_data_csv=$4

  local maybe_host
  if test -n "$single_machine"; then
    # CI: _tmp/osh-parser/raw.no-host.$job_id
    maybe_host='no-host'
  else
    # release: ../benchmark-data/osh-parser/raw.lenny.$job_id
    #maybe_host=$(hostname)
    maybe_host=$MACHINE1  # lenny
  fi

  # Only runs on one machine
  local -a sorted=( $raw_dir/$maybe_host.*.cachegrind.tsv )
  local tsv_in=${sorted[-1]}  # latest one

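  # Pull the 'I refs' total out of each cachegrind log named in the
  # cachegrind_out_path column; the matching log line looks like this
  # (count is illustrative):
  #   I   refs:      1,234,567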
  devtools/tsv_column_from_files.py \
    --new-column irefs \
    --path-column cachegrind_out_path \
    --extract-group-1 'I[ ]*refs:[ ]*([\d,]+)' \
    --remove-commas \
    $tsv_in > $out_dir/cachegrind.tsv

  echo $tsv_in >> $raw_data_csv
}

stage1() {
  local raw_dir=${1:-$BASE_DIR/raw}
  local single_machine=${2:-}

  local out=$BASE_DIR/stage1
  mkdir -p $out

  # Construct a one-column CSV file
  local raw_data_csv=$out/raw-data.csv
  echo 'path' > $raw_data_csv

  stage1-cachegrind $raw_dir "$single_machine" $out $raw_data_csv

  local lines_csv=$out/lines.csv

  local -a raw=()
  if test -n "$single_machine"; then
    local -a a=($raw_dir/$single_machine.*.times.csv)
    raw+=( ${a[-1]} )
    echo ${a[-1]} >> $raw_data_csv

    # They are the same, output one of them.
    cat $raw_dir/$single_machine.*.lines.csv > $lines_csv
  else
    # Globs are in lexicographical order, which works for our dates.
    local -a a=($raw_dir/$MACHINE1.*.times.csv)
    local -a b=($raw_dir/$MACHINE2.*.times.csv)

    raw+=( ${a[-1]} ${b[-1]} )
    {
      echo ${a[-1]}
      echo ${b[-1]}
    } >> $raw_data_csv

    # Verify that the files are equal, and pass one of them.
    local -a c=($raw_dir/$MACHINE1.*.lines.csv)
    local -a d=($raw_dir/$MACHINE2.*.lines.csv)

    local left=${c[-1]}
    local right=${d[-1]}

    if ! diff $left $right; then
      die "Benchmarks were run on different files ($left != $right)"
    fi

    # They are the same, output one of them.
    cat $left > $lines_csv
  fi

  local times_csv=$out/times.csv
  csv-concat "${raw[@]}" > $times_csv

  head $out/*
  wc -l $out/*
}

# TODO:
# - maybe rowspan for hosts: flanders/lenny
# - does that interfere with sorting?
#
# NOTE: not bothering to make it sortable now. Just using the CSS.

print-report() {
  local in_dir=$1

  benchmark-html-head 'OSH Parser Performance'

  cat <<EOF
    <body class="width60">
    <p id="home-link">
      <a href="/">oils.pub</a>
    </p>
EOF

  cmark <<'EOF'
## OSH Parser Performance

We time `$sh -n $file` for various files under various shells, and repeat the
run under cachegrind for stable metrics.

Source code: [oils/benchmarks/osh-parser.sh](https://github.com/oils-for-unix/oils/tree/master/benchmarks/osh-parser.sh)

[Raw files](-wwz-index)

### Summary

#### Instructions Per Line (via cachegrind)

Lower numbers are generally better, but each shell recognizes a different
language, and OSH uses a more thorough parsing algorithm. In **thousands** of
"I refs".

EOF
  tsv2html $in_dir/cachegrind_summary.tsv

  cmark <<'EOF'

(zsh isn't measured because `zsh -n` unexpectedly forks.)

#### Average Parsing Rate, Measured on Two Machines (lines/ms)

Shell startup time is included in the elapsed time measurements, but long files
are chosen to minimize its effect.
EOF
  csv2html $in_dir/summary.csv

  cmark <<< '### Per-File Measurements'
  echo

  # Flat tables for CI
  if test -f $in_dir/times_flat.tsv; then
    cmark <<< '#### Time and Memory'
    echo

    tsv2html $in_dir/times_flat.tsv
  fi
  if test -f $in_dir/cachegrind_flat.tsv; then
    cmark <<< '#### Instruction Counts'
    echo

    tsv2html $in_dir/cachegrind_flat.tsv
  fi

  # Breakdowns for release
  if test -f $in_dir/instructions.tsv; then
    cmark <<< '#### Instructions Per Line (in thousands)'
    echo
    tsv2html $in_dir/instructions.tsv
  fi

  if test -f $in_dir/elapsed.csv; then
    cmark <<< '#### Elapsed Time (milliseconds)'
    echo
    csv2html $in_dir/elapsed.csv
  fi

  if test -f $in_dir/rate.csv; then
    cmark <<< '#### Parsing Rate (lines/ms)'
    echo
    csv2html $in_dir/rate.csv
  fi

  if test -f $in_dir/max_rss.csv; then
    cmark <<'EOF'
### Memory Usage (Max Resident Set Size in MB)

Again, OSH uses a **different algorithm** (and language) than POSIX shells. It
builds an AST in memory rather than just validating the code line-by-line.

EOF
    csv2html $in_dir/max_rss.csv
  fi

  cmark <<EOF
### Shell and Host Details
EOF
  csv2html $in_dir/shells.csv
  csv2html $in_dir/hosts.csv

  cmark <<EOF
### Raw Data
EOF
  csv2html $in_dir/raw-data.csv

  cmark << 'EOF'

  </body>
</html>
EOF
}

soil-run() {
  ### Run it on just this machine, and make a report

  rm -r -f $BASE_DIR
  mkdir -p $BASE_DIR

  # The three things built
  local -a osh_bin=(
    $OSH_CPP_SOIL
    $OSH_SOUFFLE_CPP_SOIL
  )

  local single_machine='no-host'

  local job_id
  job_id=$(benchmarks/id.sh print-job-id)

  benchmarks/id.sh shell-provenance-2 \
    $single_machine $job_id _tmp \
    bash dash bin/osh "${osh_bin[@]}"

  # TODO: measure* should use print-tasks | run-tasks
  local provenance=_tmp/provenance.txt
  local host_job_id="$single_machine.$job_id"

  measure $provenance $host_job_id '' "${osh_bin[@]}"

  measure-cachegrind $provenance $host_job_id '' "${osh_bin[@]}"

  # TODO: R can use this TSV file
  cp -v _tmp/provenance.tsv $BASE_DIR/stage1/provenance.tsv

  # Trivial concatenation for 1 machine
  stage1 '' $single_machine

  benchmarks/report.sh stage2 $BASE_DIR

  benchmarks/report.sh stage3 $BASE_DIR
}

"$@"