1#!/usr/bin/env bash
2# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
3# REQUIRE: benchmark_leveldb.sh exists in the current directory
4# After execution of this script, log files are generated in $output_dir.
# report.txt provides high-level statistics
6#
7# This should be used with the LevelDB fork listed here to use additional test options.
8# For more details on the changes see the blog post listed below.
9#   https://github.com/mdcallag/leveldb-1
10#   http://smalldatum.blogspot.com/2015/04/comparing-leveldb-and-rocksdb-take-2.html
11#
12# This should be run from the parent of the tools directory. The command line is:
13#   [$env_vars] tools/run_flash_bench.sh [list-of-threads]
14#
15# This runs a sequence of tests in the following sequence:
16#   step 1) load - bulkload, compact, fillseq, overwrite
17#   step 2) read-only for each number of threads
18#   step 3) read-write for each number of threads
19#
20# The list of threads is optional and when not set is equivalent to "24".
# Were list-of-threads specified as "1 2 4" then the tests in steps 2 and 3
# above would be repeated for 1, 2 and 4 threads. The tests in step 1 are
23# only run for 1 thread.
24
25# Test output is written to $OUTPUT_DIR, currently /tmp/output. The performance
26# summary is in $OUTPUT_DIR/report.txt. There is one file in $OUTPUT_DIR per
27# test and the tests are listed below.
28#
29# The environment variables are also optional. The variables are:
30#   NKEYS         - number of key/value pairs to load
31#   NWRITESPERSEC - the writes/second rate limit for the *whilewriting* tests.
32#                   If this is too large then the non-writer threads can get
33#                   starved.
34#   VAL_SIZE      - the length of the value in the key/value pairs loaded.
35#                   You can estimate the size of the test database from this,
36#                   NKEYS and the compression rate (--compression_ratio) set
37#                   in tools/benchmark_leveldb.sh
38#   BLOCK_LENGTH  - value for db_bench --block_size
39#   CACHE_BYTES   - the size of the RocksDB block cache in bytes
40#   DATA_DIR      - directory in which to create database files
41#   DO_SETUP      - when set to 0 then a backup of the database is copied from
#                   $DATA_DIR.bak to $DATA_DIR and the load tests from step 1
#                   are skipped. This allows tests from steps 2, 3 to be
#                   repeated faster.
44#   SAVE_SETUP    - saves a copy of the database at the end of step 1 to
45#                   $DATA_DIR.bak.
46
# Size multipliers.
K=$((2 ** 10))
M=$((2 ** 20))
G=$((2 ** 30))

# Benchmark knobs, each overridable via the environment (see file header).
num_keys=${NKEYS:-$G}               # number of key/value pairs to load
wps=${NWRITESPERSEC:-$((10 * K))}   # write rate limit for *whilewriting* tests
vs=${VAL_SIZE:-400}                 # value length in bytes
cs=${CACHE_BYTES:-$G}               # block cache size in bytes
bs=${BLOCK_LENGTH:-4096}            # db_bench --block_size
57
# Thread counts to benchmark: taken from the command line, defaulting to 24
# when no arguments are given.
if [[ $# -eq 0 ]]; then
  nthreads=( 24 )
else
  nthreads=( "$@" )
fi

for num_thr in "${nthreads[@]}" ; do
  # printf with a quoted expansion: robust even for odd argument values.
  printf 'Will run for %s threads\n' "$num_thr"
done
68
# Update these parameters before execution !!!
db_dir=${DATA_DIR:-"/tmp/rocksdb/"}

do_setup=${DO_SETUP:-1}
save_setup=${SAVE_SETUP:-0}

output_dir="${TMPDIR:-/tmp}/output"

# Environment passed to every benchmark_leveldb.sh invocation. Kept as one
# space-separated string because it is deliberately expanded unquoted after
# `env` below.
ARGS="\
OUTPUT_DIR=$output_dir \
NUM_KEYS=$num_keys \
DB_DIR=$db_dir \
VALUE_SIZE=$vs \
BLOCK_SIZE=$bs \
CACHE_SIZE=$cs"

# Quote the path: TMPDIR may contain spaces.
mkdir -p "$output_dir"
# Column header for the summary that each benchmark run appends to.
printf 'ops/sec\tmb/sec\tusec/op\tavg\tp50\tTest\n' \
  > "$output_dir/report.txt"
88
89# Notes on test sequence:
90#   step 1) Setup database via sequential fill followed by overwrite to fragment it.
91#           Done without setting DURATION to make sure that overwrite does $num_keys writes
92#   step 2) read-only tests for all levels of concurrency requested
93#   step 3) non read-only tests for all levels of concurrency requested
94
###### Setup the database

if [[ $do_setup != 0 ]]; then
  echo Doing setup

  # Test 2a: sequential fill with large values to get peak ingest
  #          adjust NUM_KEYS given the use of larger values
  # NOTE: $ARGS is intentionally unquoted so it splits into VAR=val words.
  env $ARGS BLOCK_SIZE=$((1 * M)) VALUE_SIZE=$((32 * K)) NUM_KEYS=$(( num_keys / 64 )) \
       ./tools/benchmark_leveldb.sh fillseq

  # Test 2b: sequential fill with the configured value size
  env $ARGS ./tools/benchmark_leveldb.sh fillseq

  # Test 3: single-threaded overwrite
  env $ARGS NUM_THREADS=1 DB_BENCH_NO_SYNC=1 ./tools/benchmark_leveldb.sh overwrite

else
  echo Restoring from backup

  rm -rf "$db_dir"

  if [ ! -d "${db_dir}.bak" ]; then
    echo "Database backup does not exist at ${db_dir}.bak"
    # exit takes 0-255; the original 'exit -1' is out of range.
    exit 1
  fi

  echo "Restore database from ${db_dir}.bak"
  cp -p -r "${db_dir}.bak" "$db_dir"
fi

if [[ $save_setup != 0 ]]; then
  echo "Save database to ${db_dir}.bak"
  cp -p -r "$db_dir" "${db_dir}.bak"
fi
129
###### Read-only tests

# Test 4: random read, once per requested concurrency level.
for thread_count in "${nthreads[@]}"; do
  env $ARGS NUM_THREADS="$thread_count" ./tools/benchmark_leveldb.sh readrandom
done
137
###### Non read-only tests

# Run the write-heavy tests once per requested concurrency level.
for thread_count in "${nthreads[@]}"; do
  # Test 7: overwrite with sync=0
  env $ARGS NUM_THREADS="$thread_count" DB_BENCH_NO_SYNC=1 \
    ./tools/benchmark_leveldb.sh overwrite

  # Test 8: overwrite with sync=1
  # Not run for now because LevelDB db_bench doesn't have an option to limit the
  # test run to X seconds and doing sync-per-commit for --num can take too long.
  # env $ARGS NUM_THREADS=$thread_count ./tools/benchmark_leveldb.sh overwrite

  # Test 11: random read while writing, rate-limited to $wps writes/second
  env $ARGS NUM_THREADS="$thread_count" WRITES_PER_SECOND="$wps" \
    ./tools/benchmark_leveldb.sh readwhilewriting
done
155
# Build report2.txt: one titled section per test type, each consisting of the
# title, the column header from report.txt, and that test's result rows.
report="$output_dir/report.txt"
report2="$output_dir/report2.txt"

# report_section TITLE PATTERN [PATTERN2]
#   Appends TITLE, the header line of $report, and the rows of $report
#   matching PATTERN (further filtered by PATTERN2 when given) to $report2.
report_section() {
  local title=$1 pat1=$2 pat2=${3-}
  {
    echo "$title"
    head -1 "$report"
    if [[ -n "$pat2" ]]; then
      grep -- "$pat1" "$report" | grep -- "$pat2"
    else
      grep -- "$pat1" "$report"
    fi
  } >> "$report2"
}

: > "$report2"   # truncate: the original used '>' on the first section only
report_section bulkload bulkload
report_section fillseq fillseq
# '\.s0' / '\.s1' match a literal dot. The original's unquoted \.s0 reached
# grep as '.s0' (any character followed by s0), which was looser than intended.
report_section "overwrite sync=0" overwrite '\.s0'
report_section "overwrite sync=1" overwrite '\.s1'
report_section readrandom readrandom
# The original readwhile line had a duplicated '>> report2.txt' redirection.
report_section readwhile readwhilewriting

cat "$report2"