#!/usr/bin/env bash
# REQUIRE: benchmark_leveldb.sh exists in the current directory
# After execution of this script, log files are generated in $output_dir.
# report.txt provides high-level statistics.
#
# This should be used with the LevelDB fork listed here to use additional test options.
# For more details on the changes see the blog post listed below.
# https://github.com/mdcallag/leveldb-1
# http://smalldatum.blogspot.com/2015/04/comparing-leveldb-and-rocksdb-take-2.html
#
# This should be run from the parent of the tools directory. The command line is:
#   [$env_vars] tools/run_flash_bench.sh [list-of-threads]
#
# This runs a sequence of tests in the following sequence:
#   step 1) load - bulkload, compact, fillseq, overwrite
#   step 2) read-only for each number of threads
#   step 3) read-write for each number of threads
#
# The list of threads is optional and when not set is equivalent to "24".
# Were list-of-threads specified as "1 2 4" then the tests in steps 2 and 3
# above would be repeated for 1, 2 and 4 threads. The tests in step 1 are
# only run for 1 thread.

# Test output is written to $OUTPUT_DIR, currently /tmp/output. The performance
# summary is in $OUTPUT_DIR/report.txt. There is one file in $OUTPUT_DIR per
# test and the tests are listed below.
#
# The environment variables are also optional. The variables are:
#   NKEYS         - number of key/value pairs to load
#   NWRITESPERSEC - the writes/second rate limit for the *whilewriting* tests.
#                   If this is too large then the non-writer threads can get
#                   starved.
#   VAL_SIZE      - the length of the value in the key/value pairs loaded.
#                   You can estimate the size of the test database from this,
#                   NKEYS and the compression rate (--compression_ratio) set
#                   in tools/benchmark_leveldb.sh
#   BLOCK_LENGTH  - value for db_bench --block_size
#   CACHE_BYTES   - the size of the RocksDB block cache in bytes
#   DATA_DIR      - directory in which to create database files
#   DO_SETUP      - when set to 0 then a backup of the database is copied from
#                   $DATA_DIR.bak to $DATA_DIR and the load tests from step 1
#                   are skipped. This allows tests from steps 2, 3 to be
#                   repeated faster.
#   SAVE_SETUP    - saves a copy of the database at the end of step 1 to
#                   $DATA_DIR.bak.
# Size multipliers (binary units).
K=$((2 ** 10))
M=$((K * 1024))
G=$((M * 1024))

# Benchmark tunables; each may be overridden through the environment.
num_keys=${NKEYS:-$G}              # number of key/value pairs to load
wps=${NWRITESPERSEC:-$((10 * K))}  # write rate limit for *whilewriting* tests
vs=${VAL_SIZE:-400}                # value length in bytes
cs=${CACHE_BYTES:-$G}              # block cache size in bytes
bs=${BLOCK_LENGTH:-4096}           # passed to db_bench as --block_size
# Thread counts to benchmark with. Defaults to a single run with 24 threads
# when no command line arguments are given; otherwise each argument becomes
# one entry, and steps 2 and 3 repeat once per entry.
if [[ $# -eq 0 ]]; then
  nthreads=( 24 )
else
  nthreads=( "$@" )
fi

for num_thr in "${nthreads[@]}"; do
  # Quoted so an odd value (empty string, glob chars) is echoed verbatim.
  echo "Will run for $num_thr threads"
done
# Runtime configuration -- update these parameters before execution !!!
db_dir=${DATA_DIR:-"/tmp/rocksdb/"}   # directory holding the database files

do_setup=${DO_SETUP:-1}               # 0 => restore $db_dir from $db_dir.bak instead of loading
save_setup=${SAVE_SETUP:-0}           # non-zero => snapshot $db_dir to $db_dir.bak after setup

output_dir="${TMPDIR:-/tmp}/output"   # per-test logs and report.txt are written here
# Environment common to every benchmark_leveldb.sh invocation. Kept as one
# string that is expanded *unquoted* on the `env $ARGS ...` command lines
# below so each VAR=value word-splits into its own argument (none of the
# values contain whitespace).
ARGS="\
OUTPUT_DIR=$output_dir \
NUM_KEYS=$num_keys \
DB_DIR=$db_dir \
VALUE_SIZE=$vs \
BLOCK_SIZE=$bs \
CACHE_SIZE=$cs"

mkdir -p "$output_dir"
# Seed the summary report with its column header (printf instead of the
# non-portable `echo -e`; output is byte-identical).
printf 'ops/sec\tmb/sec\tusec/op\tavg\tp50\tTest\n' \
  > "$output_dir/report.txt"
# Notes on test sequence:
#   step 1) Setup database via sequential fill followed by overwrite to fragment it.
#           Done without setting DURATION to make sure that overwrite does $num_keys writes
#   step 2) read-only tests for all levels of concurrency requested
#   step 3) non read-only tests for all levels of concurrency requested

###### Setup the database

if [[ $do_setup != 0 ]]; then
  echo Doing setup

  # Test 2a: sequential fill with large values to get peak ingest
  #          adjust NUM_KEYS given the use of larger values
  # $ARGS is expanded unquoted on purpose: each VAR=value must reach env as a
  # separate word.
  env $ARGS BLOCK_SIZE=$((1 * M)) VALUE_SIZE=$((32 * K)) NUM_KEYS=$(( num_keys / 64 )) \
    ./tools/benchmark_leveldb.sh fillseq

  # Test 2b: sequential fill with the configured value size
  env $ARGS ./tools/benchmark_leveldb.sh fillseq

  # Test 3: single-threaded overwrite
  env $ARGS NUM_THREADS=1 DB_BENCH_NO_SYNC=1 ./tools/benchmark_leveldb.sh overwrite

else
  echo Restoring from backup

  rm -rf "$db_dir"

  if [ ! -d "${db_dir}.bak" ]; then
    echo "Database backup does not exist at ${db_dir}.bak"
    # was `exit -1`: -1 is not a valid exit status; use 1 to signal failure
    exit 1
  fi

  echo "Restore database from ${db_dir}.bak"
  cp -p -r "${db_dir}.bak" "$db_dir"
fi

if [[ $save_setup != 0 ]]; then
  echo "Save database to ${db_dir}.bak"
  cp -p -r "$db_dir" "${db_dir}.bak"
fi
###### Read-only tests

# Test 4: random read, once per requested concurrency level.
for thr in "${nthreads[@]}"; do
  env $ARGS NUM_THREADS=$thr ./tools/benchmark_leveldb.sh readrandom
done

###### Non read-only tests

for thr in "${nthreads[@]}"; do
  # Test 7: overwrite with sync=0
  env $ARGS NUM_THREADS=$thr DB_BENCH_NO_SYNC=1 \
    ./tools/benchmark_leveldb.sh overwrite

  # Test 8: overwrite with sync=1
  # Not run for now because LevelDB db_bench doesn't have an option to limit the
  # test run to X seconds and doing sync-per-commit for --num can take too long.
  # env $ARGS NUM_THREADS=$thr ./tools/benchmark_leveldb.sh overwrite

  # Test 11: random read while writers are rate-limited to $wps writes/second
  env $ARGS NUM_THREADS=$thr WRITES_PER_SECOND=$wps \
    ./tools/benchmark_leveldb.sh readwhilewriting
done
# Assemble report2.txt from report.txt: for each test type emit a title line,
# the column header, then the matching result rows. Finally print it.
report="$output_dir/report.txt"
report2="$output_dir/report2.txt"

echo bulkload > "$report2"
head -1 "$report" >> "$report2"
grep bulkload "$report" >> "$report2"
echo "overwrite sync=0" >> "$report2" && : # placeholder removed below