1#!/usr/bin/env bash
2# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
3
4set -e
5
# Number of keys used by the benchmarks below.
NUM=10000000

# Optional positional arguments: $1 = data directory, $2 = stat-file prefix.
# With three or more arguments both are ignored (original behavior kept).
case $# in
  1)
    DATA_DIR=$1
    ;;
  2)
    DATA_DIR=$1
    STAT_FILE=$2
    ;;
esac

# On the production build servers, set data and stat
# files/directories not in /tmp or else the tempdir cleaning
# scripts will make you very unhappy.
DATA_DIR=${DATA_DIR:-$(mktemp -t -d rocksdb_XXXX)}
STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)}
20
# Remove the benchmark database and every per-benchmark stat file.
# Invoked via the EXIT trap so temporary data never outlives the run.
function cleanup {
  rm -rf -- "${DATA_DIR:?}"
  # The run produces many more stat files than the four the old list named
  # (readrandomtailing, readwhilewriting_in_ram, ...); remove them all.
  # STAT_FILE comes from mktemp, so the glob cannot match unrelated files.
  rm -f -- "$STAT_FILE".*
}
28
trap cleanup EXIT

# Figure out which git branch is being benchmarked.  The CI exports
# GIT_BRANCH (e.g. "origin/master"); on a devbox fall back to HEAD.
# Quoting prevents [ from breaking on empty or space-containing values.
if [ -z "$GIT_BRANCH" ]; then
  git_br=$(git rev-parse --abbrev-ref HEAD)
else
  git_br=$(basename "$GIT_BRANCH")
fi

# The ODS entity is "rocksdb_build" for master and "rocksdb_build.<branch>"
# for any other branch (see send_to_ods below).
if [ "$git_br" = "master" ]; then
  git_br=""
else
  git_br=".$git_br"
fi
42
# Build the optimized binary before benchmarking.
make release

# measure fillseq + fill up the DB for overwrite benchmark
./db_bench \
    --benchmarks=fillseq \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > "${STAT_FILE}.fillseq"

# measure overwrite performance
./db_bench \
    --benchmarks=overwrite \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$((NUM / 10)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > "${STAT_FILE}.overwrite"

# fill up the db for readrandom benchmark (1GB total size)
./db_bench \
    --benchmarks=fillseq \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null
97
# measure readrandom with 6GB block cache
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandom"

# measure readrandom with 6GB block cache and tailing iterator
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --use_tailing_iterator=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandomtailing"

# measure readrandom with 100MB block cache
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=104857600 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandomsmallblockcache"

# measure readrandom with 8k data in memtable
./db_bench \
    --benchmarks=overwrite,readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --writes=512 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandom_mem_sst"
172
173
# fill up the db for readrandom benchmark with filluniquerandom (1GB total size)
./db_bench \
    --benchmarks=filluniquerandom \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --writes=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null

# dummy test just to compact the data
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 1000)) \
    --reads=$((NUM / 1000)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > /dev/null

# measure readrandom after load with filluniquerandom with 6GB block cache
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --disable_auto_compactions=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandom_filluniquerandom"

# measure readwhilewriting after load with filluniquerandom with 6GB block cache
./db_bench \
    --benchmarks=readwhilewriting \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --benchmark_write_rate_limit=$(( 110 * 1024 )) \
    --write_buffer_size=100000000 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readwhilewriting"
248
# measure memtable performance -- none of the data gets flushed to disk
# NOTE(review): the trailing comma in --benchmarks is preserved from the
# original; confirm db_bench ignores the resulting empty benchmark name.
./db_bench \
    --benchmarks=fillrandom,readrandom, \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --num=$((NUM / 10)) \
    --reads=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --value_size=10 \
    --threads=16 > "${STAT_FILE}.memtablefillreadrandom"
267
268common_in_mem_args="--db=/dev/shm/rocksdb \
269    --num_levels=6 \
270    --key_size=20 \
271    --prefix_size=12 \
272    --keys_per_prefix=10 \
273    --value_size=100 \
274    --compression_type=none \
275    --compression_ratio=1 \
276    --hard_rate_limit=2 \
277    --write_buffer_size=134217728 \
278    --max_write_buffer_number=4 \
279    --level0_file_num_compaction_trigger=8 \
280    --level0_slowdown_writes_trigger=16 \
281    --level0_stop_writes_trigger=24 \
282    --target_file_size_base=134217728 \
283    --max_bytes_for_level_base=1073741824 \
284    --disable_wal=0 \
285    --wal_dir=/dev/shm/rocksdb \
286    --sync=0 \
287    --verify_checksum=1 \
288    --delete_obsolete_files_period_micros=314572800 \
289    --max_grandparent_overlap_factor=10 \
290    --use_plain_table=1 \
291    --open_files=-1 \
292    --mmap_read=1 \
293    --mmap_write=0 \
294    --memtablerep=prefix_hash \
295    --bloom_bits=10 \
296    --bloom_locality=1 \
297    --perf_level=0"
298
# prepare a in-memory DB with 50M keys, total DB size is ~6G
# shellcheck disable=SC2086 -- $common_in_mem_args must word-split into flags
./db_bench \
    $common_in_mem_args \
    --statistics=0 \
    --max_background_compactions=16 \
    --max_background_flushes=16 \
    --benchmarks=filluniquerandom \
    --use_existing_db=0 \
    --num=52428800 \
    --threads=1 > /dev/null

# Readwhilewriting
# shellcheck disable=SC2086 -- $common_in_mem_args must word-split into flags
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=readwhilewriting \
    --use_existing_db=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > "${STAT_FILE}.readwhilewriting_in_ram"

# Seekrandomwhilewriting
# shellcheck disable=SC2086 -- $common_in_mem_args must word-split into flags
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=seekrandomwhilewriting \
    --use_existing_db=1 \
    --use_tailing_iterator=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > "${STAT_FILE}.seekwhilewriting_in_ram"
334
# measure fillseq with bunch of column families
./db_bench \
    --benchmarks=fillseq \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --num=$NUM \
    --writes=$NUM \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > "${STAT_FILE}.fillseq_lots_column_families"

# measure overwrite performance with bunch of column families
./db_bench \
    --benchmarks=overwrite \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --num=$NUM \
    --writes=$((NUM / 10)) \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > "${STAT_FILE}.overwrite_lots_column_families"
365
366# send data to ods
# Send one (key, value) datapoint to ODS under the rocksdb_build entity.
#   $1 - metric key
#   $2 - metric value
# Outside of Jenkins the datapoint is echoed instead of uploaded.
# Reads the global $git_br for the entity suffix.
function send_to_ods {
  key="$1"
  value="$2"

  if [ -z "$JENKINS_HOME" ]; then
    # running on devbox, just print out the values
    echo "$key" "$value"
    return
  fi

  if [ -z "$value" ]; then
    echo >&2 "ERROR: Key $key doesn't have a value."
    return
  fi
  curl --silent "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \
    --connect-timeout 60
}
384
# Parse one benchmark's stats file and push its QPS plus latency
# percentiles (p50/p75/p99, micros) to ODS via send_to_ods.
#   $1 - benchmark name as printed by db_bench (used as grep pattern)
#   $2 - ODS key component
#   $3 - stats file produced by the benchmark run
function send_benchmark_to_ods {
  bench="$1"
  bench_key="$2"
  file="$3"

  QPS=$(grep "$bench" "$file" | awk '{print $5}')
  # Hoist the shared "Percentiles" line instead of re-grepping it three
  # times; '|| true' keeps a missing line from killing us under 'set -e'
  # (the original pipelines ended in awk and never failed).
  percentiles=$(grep "$bench" -A 6 "$file" | grep "Percentiles" || true)
  P50_MICROS=$(echo "$percentiles" | awk '{print $3}')
  P75_MICROS=$(echo "$percentiles" | awk '{print $5}')
  P99_MICROS=$(echo "$percentiles" | awk '{print $7}')

  send_to_ods rocksdb.build."$bench_key".qps "$QPS"
  send_to_ods rocksdb.build."$bench_key".p50_micros "$P50_MICROS"
  send_to_ods rocksdb.build."$bench_key".p75_micros "$P75_MICROS"
  send_to_ods rocksdb.build."$bench_key".p99_micros "$P99_MICROS"
}
400
# Push QPS and latency percentiles for each benchmark run above to ODS.
# Stat-file paths are quoted and use a uniform ${STAT_FILE}.<suffix> form.
send_benchmark_to_ods overwrite overwrite "${STAT_FILE}.overwrite"
send_benchmark_to_ods fillseq fillseq "${STAT_FILE}.fillseq"
send_benchmark_to_ods readrandom readrandom "${STAT_FILE}.readrandom"
send_benchmark_to_ods readrandom readrandom_tailing "${STAT_FILE}.readrandomtailing"
send_benchmark_to_ods readrandom readrandom_smallblockcache "${STAT_FILE}.readrandomsmallblockcache"
send_benchmark_to_ods readrandom readrandom_memtable_sst "${STAT_FILE}.readrandom_mem_sst"
send_benchmark_to_ods readrandom readrandom_fillunique_random "${STAT_FILE}.readrandom_filluniquerandom"
send_benchmark_to_ods fillrandom memtablefillrandom "${STAT_FILE}.memtablefillreadrandom"
send_benchmark_to_ods readrandom memtablereadrandom "${STAT_FILE}.memtablefillreadrandom"
send_benchmark_to_ods readwhilewriting readwhilewriting "${STAT_FILE}.readwhilewriting"
send_benchmark_to_ods readwhilewriting readwhilewriting_in_ram "${STAT_FILE}.readwhilewriting_in_ram"
send_benchmark_to_ods seekrandomwhilewriting seekwhilewriting_in_ram "${STAT_FILE}.seekwhilewriting_in_ram"
send_benchmark_to_ods fillseq fillseq_lots_column_families "${STAT_FILE}.fillseq_lots_column_families"
send_benchmark_to_ods overwrite overwrite_lots_column_families "${STAT_FILE}.overwrite_lots_column_families"
415