# On the production build servers, put the data and stat
# files/directories somewhere other than /tmp, or else the tempdir-cleaning
# scripts will make you very unhappy.
# Default scratch locations. mktemp gives unique names so concurrent runs
# cannot collide; -u for STAT_FILE only reserves a name (the benchmarks
# create ${STAT_FILE}.<bench> files themselves).
DATA_DIR=${DATA_DIR:-$(mktemp -t -d rocksdb_XXXX)}
STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)}
# Remove stat files left by a previous run so stale results are never
# reported. Quoted to survive paths with spaces (SC2086).
rm -f "${STAT_FILE}.fillseq"
rm -f "${STAT_FILE}.readrandom"
rm -f "${STAT_FILE}.overwrite"
rm -f "${STAT_FILE}.memtablefillreadrandom"
# Resolve the git branch suffix used in the ODS entity name below.
# Outside CI (GIT_BRANCH unset) fall back to the currently checked-out branch.
if [ -z "$GIT_BRANCH" ]; then
  git_br=$(git rev-parse --abbrev-ref HEAD)
else
  git_br=$(basename "$GIT_BRANCH")
fi

# master reports under the bare entity name; any other branch gets a
# ".<branch>" suffix (consumed as "rocksdb_build$git_br" in send_to_ods).
# NOTE(review): the branch-suffix bodies were reconstructed from the
# surrounding usage; the original lines are missing from this chunk.
if [ "$git_br" = "master" ]; then
  git_br=""
else
  git_br=".$git_br"
fi
# measure fillseq + fill up the DB for overwrite benchmark
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=fillseq \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --sync=0 > ${STAT_FILE}.fillseq
# measure overwrite performance
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=overwrite \
    --writes=$((NUM / 10)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --threads=8 > ${STAT_FILE}.overwrite
# fill up the db for readrandom benchmark (1GB total size)
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=fillseq \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --threads=1 > /dev/null
# measure readrandom with 6GB block cache
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=readrandom \
    --use_existing_db=1 \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --threads=16 > ${STAT_FILE}.readrandom
# measure readrandom with 6GB block cache and tailing iterator
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=readrandom \
    --use_existing_db=1 \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --use_tailing_iterator=1 \
    --threads=16 > ${STAT_FILE}.readrandomtailing
# measure readrandom with 100MB block cache
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=readrandom \
    --use_existing_db=1 \
    --reads=$((NUM / 5)) \
    --cache_size=104857600 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --threads=16 > ${STAT_FILE}.readrandomsmallblockcache
# measure readrandom with 8k data in memtable
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=overwrite,readrandom \
    --use_existing_db=1 \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --threads=16 > ${STAT_FILE}.readrandom_mem_sst
# fill up the db for readrandom benchmark with filluniquerandom (1GB total size)
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=filluniquerandom \
    --use_existing_db=0 \
    --writes=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --threads=1 > /dev/null
# dummy test just to compact the data
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=readrandom \
    --use_existing_db=1 \
    --num=$((NUM / 1000)) \
    --reads=$((NUM / 1000)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --threads=16 > /dev/null
# measure readrandom after load with filluniquerandom with 6GB block cache
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=readrandom \
    --use_existing_db=1 \
    --reads=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --disable_auto_compactions=1 \
    --threads=16 > ${STAT_FILE}.readrandom_filluniquerandom
# measure readwhilewriting after load with filluniquerandom with 6GB block cache
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=readwhilewriting \
    --use_existing_db=1 \
    --reads=$((NUM / 4)) \
    --benchmark_write_rate_limit=$(( 110 * 1024 )) \
    --write_buffer_size=100000000 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --threads=16 > ${STAT_FILE}.readwhilewriting
# measure memtable performance -- none of the data gets flushed to disk
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=fillrandom,readrandom, \
    --use_existing_db=0 \
    --num=$((NUM / 10)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --threads=16 > ${STAT_FILE}.memtablefillreadrandom
# Shared db_bench flags for the /dev/shm (in-memory) benchmarks below.
# NOTE(review): some flag lines and the original closing quote are missing
# from this chunk; the string is closed here so the assignment parses.
common_in_mem_args="--db=/dev/shm/rocksdb \
    --keys_per_prefix=10 \
    --compression_type=none \
    --compression_ratio=1 \
    --hard_rate_limit=2 \
    --write_buffer_size=134217728 \
    --max_write_buffer_number=4 \
    --level0_file_num_compaction_trigger=8 \
    --level0_slowdown_writes_trigger=16 \
    --level0_stop_writes_trigger=24 \
    --target_file_size_base=134217728 \
    --max_bytes_for_level_base=1073741824 \
    --wal_dir=/dev/shm/rocksdb \
    --verify_checksum=1 \
    --delete_obsolete_files_period_micros=314572800 \
    --max_grandparent_overlap_factor=10 \
    --use_plain_table=1 \
    --memtablerep=prefix_hash"
# prepare an in-memory DB with 50M keys, total DB size is ~6G
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
$common_in_mem_args \
    --max_background_compactions=16 \
    --max_background_flushes=16 \
    --benchmarks=filluniquerandom \
    --use_existing_db=0 \
    --threads=1 > /dev/null
# readwhilewriting over the in-memory DB
# NOTE(review): the leading './db_bench \' invocation line is missing from this
# chunk. Fixed: the original '--benchmarks=readwhilewriting\' lacked a space
# before the backslash, fusing the flag with the next continuation line.
$common_in_mem_args \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=readwhilewriting \
    --use_existing_db=1 \
    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.readwhilewriting_in_ram
# Seekrandomwhilewriting
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
$common_in_mem_args \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=seekrandomwhilewriting \
    --use_existing_db=1 \
    --use_tailing_iterator=1 \
    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.seekwhilewriting_in_ram
# measure fillseq with bunch of column families
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=fillseq \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --use_existing_db=0 \
    --sync=0 > ${STAT_FILE}.fillseq_lots_column_families
# measure overwrite performance with bunch of column families
# NOTE(review): the leading './db_bench \' invocation line is missing from this chunk.
    --benchmarks=overwrite \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --use_existing_db=1 \
    --writes=$((NUM / 10)) \
    --threads=8 > ${STAT_FILE}.overwrite_lots_column_families
# Upload a single (key, value) datapoint to the ODS time-series store.
# On a devbox (JENKINS_HOME unset) only echoes the pair; an empty value is
# reported as an error and skipped.
# NOTE(review): several interior lines of this function are missing from this
# chunk; the parameter assignments, returns and 'fi's were reconstructed from
# the visible guards and the call sites below -- confirm against the full file.
function send_to_ods {
  key="$1"
  value="$2"

  if [ -z "$JENKINS_HOME" ]; then
    # running on devbox, just print out the values
    echo "$key $value"
    return
  fi

  if [ -z "$value" ]; then
    echo >&2 "ERROR: Key $key doesn't have a value."
    return
  fi
  curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \
    --connect-timeout 60
}
# Parse one benchmark's db_bench output file and push QPS plus latency
# percentiles to ODS under rocksdb.build.<bench_key>.*.
# NOTE(review): the positional-parameter assignments are missing from this
# chunk and were reconstructed from the three-argument call sites below.
function send_benchmark_to_ods {
  bench="$1"
  bench_key="$2"
  file="$3"

  # Summary line "<bench> : ... <N> ops/sec;" -- the 5th field is QPS.
  QPS=$(grep "$bench" "$file" | awk '{print $5}')
  # "Percentiles: P50: x P75: y P99: z" appears within 6 lines of the summary.
  P50_MICROS=$(grep -A 6 "$bench" "$file" | grep "Percentiles" | awk '{print $3}')
  P75_MICROS=$(grep -A 6 "$bench" "$file" | grep "Percentiles" | awk '{print $5}')
  P99_MICROS=$(grep -A 6 "$bench" "$file" | grep "Percentiles" | awk '{print $7}')

  send_to_ods "rocksdb.build.$bench_key.qps" "$QPS"
  send_to_ods "rocksdb.build.$bench_key.p50_micros" "$P50_MICROS"
  send_to_ods "rocksdb.build.$bench_key.p75_micros" "$P75_MICROS"
  send_to_ods "rocksdb.build.$bench_key.p99_micros" "$P99_MICROS"
}
# Push every collected benchmark result to ODS:
#   send_benchmark_to_ods <bench name in output> <ods key> <stat file>
send_benchmark_to_ods overwrite overwrite "${STAT_FILE}.overwrite"
send_benchmark_to_ods fillseq fillseq "${STAT_FILE}.fillseq"
send_benchmark_to_ods readrandom readrandom "${STAT_FILE}.readrandom"
send_benchmark_to_ods readrandom readrandom_tailing "${STAT_FILE}.readrandomtailing"
send_benchmark_to_ods readrandom readrandom_smallblockcache "${STAT_FILE}.readrandomsmallblockcache"
send_benchmark_to_ods readrandom readrandom_memtable_sst "${STAT_FILE}.readrandom_mem_sst"
send_benchmark_to_ods readrandom readrandom_fillunique_random "${STAT_FILE}.readrandom_filluniquerandom"
send_benchmark_to_ods fillrandom memtablefillrandom "${STAT_FILE}.memtablefillreadrandom"
send_benchmark_to_ods readrandom memtablereadrandom "${STAT_FILE}.memtablefillreadrandom"
send_benchmark_to_ods readwhilewriting readwhilewriting "${STAT_FILE}.readwhilewriting"
send_benchmark_to_ods readwhilewriting readwhilewriting_in_ram "${STAT_FILE}.readwhilewriting_in_ram"
send_benchmark_to_ods seekrandomwhilewriting seekwhilewriting_in_ram "${STAT_FILE}.seekwhilewriting_in_ram"
send_benchmark_to_ods fillseq fillseq_lots_column_families "${STAT_FILE}.fillseq_lots_column_families"
send_benchmark_to_ods overwrite overwrite_lots_column_families "${STAT_FILE}.overwrite_lots_column_families"