]> git.proxmox.com Git - ceph.git/blob - ceph/src/rocksdb/build_tools/regression_build_test.sh
add subtree-ish sources for 12.0.3
[ceph.git] / ceph / src / rocksdb / build_tools / regression_build_test.sh
#!/bin/bash

set -e

# Number of keys used by the on-disk benchmarks.
NUM=10000000

# Optional positional arguments: data directory and stat-file prefix.
case $# in
  1)
    DATA_DIR=$1
    ;;
  2)
    DATA_DIR=$1
    STAT_FILE=$2
    ;;
esac

# On the production build servers, set data and stat
# files/directories not in /tmp or else the tempdir cleaning
# scripts will make you very unhappy.
DATA_DIR=${DATA_DIR:-$(mktemp -t -d rocksdb_XXXX)}
STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)}
19
# Remove the benchmark DB and every generated stat file on exit.
# The original list named only four suffixes and missed the rest
# (.readrandomtailing, .readwhilewriting, .fillseq_lots_column_families,
# ...); a glob on the stat-file prefix covers them all.
function cleanup {
  # ${VAR:?} aborts instead of expanding empty and wiping the wrong path.
  rm -rf -- "${DATA_DIR:?}"
  rm -f -- "${STAT_FILE:?}".*
}

trap cleanup EXIT
29
# Derive the ODS entity suffix from the current git branch.
# Quoting fixes: unquoted $GIT_BRANCH/$git_br made the tests malformed
# when the variable was empty or contained spaces.
if [ -z "$GIT_BRANCH" ]; then
  git_br=$(git rev-parse --abbrev-ref HEAD)
else
  git_br=$(basename "$GIT_BRANCH")
fi

# master reports under the bare entity name; any other branch gets a
# ".<branch>" suffix appended to the ODS entity.
if [ "$git_br" = "master" ]; then
  git_br=""
else
  git_br=".$git_br"
fi

make release
43
# Measure fillseq and fill up the DB for the overwrite benchmark.
# All path expansions are quoted so a TMPDIR with spaces cannot split args.
./db_bench \
    --benchmarks=fillseq \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num="$NUM" \
    --writes="$NUM" \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > "${STAT_FILE}.fillseq"

# Measure overwrite performance: 8 writer threads, 10% of the key count.
./db_bench \
    --benchmarks=overwrite \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num="$NUM" \
    --writes=$((NUM / 10)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > "${STAT_FILE}.overwrite"
78
# Fill up the DB for the readrandom benchmarks (1GB total size).
./db_bench \
    --benchmarks=fillseq \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num="$NUM" \
    --writes="$NUM" \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null

# Measure readrandom with a 6GB block cache.
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num="$NUM" \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandom"

# Measure readrandom with a 6GB block cache and a tailing iterator.
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num="$NUM" \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --use_tailing_iterator=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandomtailing"

# Measure readrandom with a small (100MB) block cache.
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num="$NUM" \
    --reads=$((NUM / 5)) \
    --cache_size=104857600 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandomsmallblockcache"
151
# Measure readrandom with 8k of data in the memtable: a small overwrite
# pass (512 writes) first so reads hit both memtable and SST files.
./db_bench \
    --benchmarks=overwrite,readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num="$NUM" \
    --reads=$((NUM / 5)) \
    --writes=512 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandom_mem_sst"
171
172
# Fill up the DB for readrandom with filluniquerandom (1GB total size).
./db_bench \
    --benchmarks=filluniquerandom \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --writes=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null

# Dummy pass just to let pending compactions of the freshly loaded data
# finish before measuring; results are discarded.
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 1000)) \
    --reads=$((NUM / 1000)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > /dev/null

# Measure readrandom after the filluniquerandom load with a 6GB block
# cache; auto compactions disabled so the LSM shape stays fixed.
./db_bench \
    --benchmarks=readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --disable_auto_compactions=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readrandom_filluniquerandom"
227
# Measure readwhilewriting after the filluniquerandom load with a 6GB
# block cache and a rate-limited background writer (~110 KB/s).
./db_bench \
    --benchmarks=readwhilewriting \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --benchmark_write_rate_limit=$(( 110 * 1024 )) \
    --write_buffer_size=100000000 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > "${STAT_FILE}.readwhilewriting"

# Measure pure memtable performance -- the 1GB write buffer ensures none
# of the data gets flushed to disk.
# (Fixed: the benchmark list had a trailing comma, which passed an empty
# benchmark name to db_bench.)
./db_bench \
    --benchmarks=fillrandom,readrandom \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --num=$((NUM / 10)) \
    --reads="$NUM" \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --value_size=10 \
    --threads=16 > "${STAT_FILE}.memtablefillreadrandom"
266
267 common_in_mem_args="--db=/dev/shm/rocksdb \
268 --num_levels=6 \
269 --key_size=20 \
270 --prefix_size=12 \
271 --keys_per_prefix=10 \
272 --value_size=100 \
273 --compression_type=none \
274 --compression_ratio=1 \
275 --hard_rate_limit=2 \
276 --write_buffer_size=134217728 \
277 --max_write_buffer_number=4 \
278 --level0_file_num_compaction_trigger=8 \
279 --level0_slowdown_writes_trigger=16 \
280 --level0_stop_writes_trigger=24 \
281 --target_file_size_base=134217728 \
282 --max_bytes_for_level_base=1073741824 \
283 --disable_wal=0 \
284 --wal_dir=/dev/shm/rocksdb \
285 --sync=0 \
286 --verify_checksum=1 \
287 --delete_obsolete_files_period_micros=314572800 \
288 --max_grandparent_overlap_factor=10 \
289 --use_plain_table=1 \
290 --open_files=-1 \
291 --mmap_read=1 \
292 --mmap_write=0 \
293 --memtablerep=prefix_hash \
294 --bloom_bits=10 \
295 --bloom_locality=1 \
296 --perf_level=0"
297
# Prepare an in-memory DB with 50M keys; total DB size is ~6G.
# $common_in_mem_args is expanded unquoted on purpose -- it word-splits
# into individual flags.
./db_bench \
    $common_in_mem_args \
    --statistics=0 \
    --max_background_compactions=16 \
    --max_background_flushes=16 \
    --benchmarks=filluniquerandom \
    --use_existing_db=0 \
    --num=52428800 \
    --threads=1 > /dev/null

# readwhilewriting against the in-memory DB.
# (Fixed: the original had no space before the line continuation after
# --benchmarks=readwhilewriting, which is fragile to edit.)
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=readwhilewriting \
    --use_existing_db=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > "${STAT_FILE}.readwhilewriting_in_ram"

# seekrandomwhilewriting against the in-memory DB with a tailing iterator.
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=seekrandomwhilewriting \
    --use_existing_db=1 \
    --use_tailing_iterator=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > "${STAT_FILE}.seekwhilewriting_in_ram"
333
# Measure fillseq across 500 column families (small 1MB write buffers).
./db_bench \
    --benchmarks=fillseq \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db="$DATA_DIR" \
    --use_existing_db=0 \
    --num="$NUM" \
    --writes="$NUM" \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > "${STAT_FILE}.fillseq_lots_column_families"

# Measure overwrite performance across 500 column families.
./db_bench \
    --benchmarks=overwrite \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db="$DATA_DIR" \
    --use_existing_db=1 \
    --num="$NUM" \
    --writes=$((NUM / 10)) \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > "${STAT_FILE}.overwrite_lots_column_families"
364
# Report a single (key, value) datapoint to ODS, or just echo it when
# running outside Jenkins (JENKINS_HOME unset).
#   $1 - metric key
#   $2 - metric value
function send_to_ods {
  local key="$1"
  local value="$2"

  # Quoted test: the original unquoted $JENKINS_HOME was a malformed
  # expression whenever the path contained spaces.
  if [ -z "$JENKINS_HOME" ]; then
    # Running on a devbox: just print out the values.
    echo "$key" "$value"
    return
  fi

  if [ -z "$value" ]; then
    echo >&2 "ERROR: Key $key doesn't have a value."
    return
  fi
  # NOTE(review): key/value are interpolated into the URL unescaped, so
  # callers must pass only URL-safe strings.
  curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \
    --connect-timeout 60
}
383
# Extract QPS and latency percentiles for one benchmark from a db_bench
# stats file and ship them to ODS via send_to_ods.
#   $1 - benchmark name as printed in the db_bench summary line
#   $2 - ODS key component to report under
#   $3 - stats file captured from the db_bench run
function send_benchmark_to_ods {
  local bench="$1"
  local bench_key="$2"
  local file="$3"
  local qps percentiles

  # Summary line: "<bench> : <micros> micros/op <qps> ops/sec; ..."
  qps=$(grep -- "$bench" "$file" | awk '{print $5}')
  # Hoisted: the original re-ran this grep pipeline for each percentile.
  # Line: "Percentiles: P50: <v> P75: <v> P99: <v> ..."
  percentiles=$(grep -A 6 -- "$bench" "$file" | grep "Percentiles")

  send_to_ods "rocksdb.build.$bench_key.qps" "$qps"
  send_to_ods "rocksdb.build.$bench_key.p50_micros" "$(echo "$percentiles" | awk '{print $3}')"
  send_to_ods "rocksdb.build.$bench_key.p75_micros" "$(echo "$percentiles" | awk '{print $5}')"
  send_to_ods "rocksdb.build.$bench_key.p99_micros" "$(echo "$percentiles" | awk '{print $7}')"
}
399
# Ship every benchmark's numbers to ODS. Paths quoted and brace style
# made consistent (the original mixed $STAT_FILE and ${STAT_FILE}).
send_benchmark_to_ods overwrite overwrite "${STAT_FILE}.overwrite"
send_benchmark_to_ods fillseq fillseq "${STAT_FILE}.fillseq"
send_benchmark_to_ods readrandom readrandom "${STAT_FILE}.readrandom"
send_benchmark_to_ods readrandom readrandom_tailing "${STAT_FILE}.readrandomtailing"
send_benchmark_to_ods readrandom readrandom_smallblockcache "${STAT_FILE}.readrandomsmallblockcache"
send_benchmark_to_ods readrandom readrandom_memtable_sst "${STAT_FILE}.readrandom_mem_sst"
send_benchmark_to_ods readrandom readrandom_fillunique_random "${STAT_FILE}.readrandom_filluniquerandom"
send_benchmark_to_ods fillrandom memtablefillrandom "${STAT_FILE}.memtablefillreadrandom"
send_benchmark_to_ods readrandom memtablereadrandom "${STAT_FILE}.memtablefillreadrandom"
send_benchmark_to_ods readwhilewriting readwhilewriting "${STAT_FILE}.readwhilewriting"
send_benchmark_to_ods readwhilewriting readwhilewriting_in_ram "${STAT_FILE}.readwhilewriting_in_ram"
send_benchmark_to_ods seekrandomwhilewriting seekwhilewriting_in_ram "${STAT_FILE}.seekwhilewriting_in_ram"
send_benchmark_to_ods fillseq fillseq_lots_column_families "${STAT_FILE}.fillseq_lots_column_families"
send_benchmark_to_ods overwrite overwrite_lots_column_families "${STAT_FILE}.overwrite_lots_column_families"