#!/usr/bin/env bash

source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

[ `uname` = FreeBSD ] && exit 0

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    CEPH_ARGS+="--bluestore_block_size=2147483648 "
    CEPH_ARGS+="--bluestore_block_db_create=true "
    CEPH_ARGS+="--bluestore_block_db_size=1073741824 "
    CEPH_ARGS+="--bluestore_block_wal_size=536870912 "
    CEPH_ARGS+="--bluestore_bluefs_min=536870912 "
    CEPH_ARGS+="--bluestore_bluefs_min_free=536870912 "
    CEPH_ARGS+="--bluestore_block_wal_create=true "
    CEPH_ARGS+="--bluestore_fsck_on_mount=true "
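    # the settings above give every OSD a 2 GiB main ("slow") device plus an
    # auto-created 1 GiB DB and 512 MiB WAL, i.e. the slow + DB + WAL layout
    # that TEST_bluestore keeps rearranging below
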
    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}

function TEST_bluestore() {
    local dir=$1

    local flimit=$(ulimit -n)
    if [ $flimit -lt 1536 ]; then
        echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens."
    fi

    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)
    run_osd $dir 1 || return 1
    osd_pid1=$(cat $dir/osd.1.pid)
    run_osd $dir 2 || return 1
    osd_pid2=$(cat $dir/osd.2.pid)
    run_osd $dir 3 || return 1
    osd_pid3=$(cat $dir/osd.3.pid)

    sleep 5

    create_pool foo 16

    # write some objects
    timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1

    echo "after bench"

    # kill
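    # (ceph-bluestore-tool below operates on offline stores, so each
    #  maintenance round first stops all four OSDs)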
    while kill $osd_pid0; do sleep 1 ; done
    ceph osd down 0
    while kill $osd_pid1; do sleep 1 ; done
    ceph osd down 1
    while kill $osd_pid2; do sleep 1 ; done
    ceph osd down 2
    while kill $osd_pid3; do sleep 1 ; done
    ceph osd down 3

    # expand slow devices
    ceph-bluestore-tool --path $dir/0 fsck || return 1
    ceph-bluestore-tool --path $dir/1 fsck || return 1
    ceph-bluestore-tool --path $dir/2 fsck || return 1
    ceph-bluestore-tool --path $dir/3 fsck || return 1

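    # grow the backing files from 2 GiB to roughly 4 GiB and let
    # bluefs-bdev-expand pick up the added space; the slightly different
    # target sizes (+16MB, +129KB, -1MB around 4 GiB) presumably exercise
    # size rounding/alignment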
    truncate $dir/0/block -s 4294967296 # 4GB
    ceph-bluestore-tool --path $dir/0 bluefs-bdev-expand || return 1
    truncate $dir/1/block -s 4311744512 # 4GB + 16MB
    ceph-bluestore-tool --path $dir/1 bluefs-bdev-expand || return 1
    truncate $dir/2/block -s 4295099392 # 4GB + 129KB
    ceph-bluestore-tool --path $dir/2 bluefs-bdev-expand || return 1
    truncate $dir/3/block -s 4293918720 # 4GB - 1MB
    ceph-bluestore-tool --path $dir/3 bluefs-bdev-expand || return 1

    # everything must still fsck clean after the expansion
    ceph-bluestore-tool --path $dir/0 fsck || return 1
    ceph-bluestore-tool --path $dir/1 fsck || return 1
    ceph-bluestore-tool --path $dir/2 fsck || return 1
    ceph-bluestore-tool --path $dir/3 fsck || return 1

    ceph-bluestore-tool --path $dir/0 bluefs-bdev-sizes

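    # bluefs-bdev-migrate moves BlueFS data from the listed source device(s)
    # to the target; source devices other than the main one are detached from
    # the OSD on success

    # slow, DB, WAL -> slow, DB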
    ceph-bluestore-tool --path $dir/0 \
        --devs-source $dir/0/block.wal \
        --dev-target $dir/0/block.db \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/0 fsck || return 1

    # slow, DB, WAL -> slow, WAL
    ceph-bluestore-tool --path $dir/1 \
        --devs-source $dir/1/block.db \
        --dev-target $dir/1/block \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/1 fsck || return 1

    # slow, DB, WAL -> slow
    ceph-bluestore-tool --path $dir/2 \
        --devs-source $dir/2/block.wal \
        --devs-source $dir/2/block.db \
        --dev-target $dir/2/block \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/2 fsck || return 1

    # slow, DB, WAL -> slow, WAL (negative case)
    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block.db \
        --dev-target $dir/3/block.wal \
        --command bluefs-bdev-migrate

    # Migration to WAL is unsupported
    if [ $? -eq 0 ]; then
        return 1
    fi
    ceph-bluestore-tool --path $dir/3 fsck || return 1

    # slow, DB, WAL -> slow, DB (WAL to slow then slow to DB)
    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block.wal \
        --dev-target $dir/3/block \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/3 fsck || return 1

    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block \
        --dev-target $dir/3/block.db \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/3 fsck || return 1

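    # restart the OSDs on the rearranged volumes; they must come up and the
    # cluster must go clean again before the next round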
    activate_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)
    activate_osd $dir 1 || return 1
    osd_pid1=$(cat $dir/osd.1.pid)
    activate_osd $dir 2 || return 1
    osd_pid2=$(cat $dir/osd.2.pid)
    activate_osd $dir 3 || return 1
    osd_pid3=$(cat $dir/osd.3.pid)

    wait_for_clean || return 1

    # write some objects
    timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1

    # kill
    while kill $osd_pid0; do sleep 1 ; done
    ceph osd down 0
    while kill $osd_pid1; do sleep 1 ; done
    ceph osd down 1
    while kill $osd_pid2; do sleep 1 ; done
    ceph osd down 2
    while kill $osd_pid3; do sleep 1 ; done
    ceph osd down 3

    # slow, DB -> slow, DB, WAL
    ceph-bluestore-tool --path $dir/0 fsck || return 1

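    # a zero-filled 512 MiB file stands in for the new WAL device;
    # bluefs-bdev-new-wal attaches it to the OSD as block.wal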
    dd if=/dev/zero of=$dir/0/wal count=512 bs=1M
    ceph-bluestore-tool --path $dir/0 \
        --dev-target $dir/0/wal \
        --command bluefs-bdev-new-wal || return 1

    ceph-bluestore-tool --path $dir/0 fsck || return 1

    # slow, WAL -> slow, DB, WAL
    ceph-bluestore-tool --path $dir/1 fsck || return 1

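    # attach a new 1 GiB DB device, then move the BlueFS data currently held
    # on the slow device over to it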
    dd if=/dev/zero of=$dir/1/db count=1024 bs=1M
    ceph-bluestore-tool --path $dir/1 \
        --dev-target $dir/1/db \
        --command bluefs-bdev-new-db || return 1

    ceph-bluestore-tool --path $dir/1 \
        --devs-source $dir/1/block \
        --dev-target $dir/1/block.db \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/1 fsck || return 1

    # slow -> slow, DB, WAL
    ceph-bluestore-tool --path $dir/2 fsck || return 1

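    # no explicit --dev-target here: with bluestore_block_db_create/_wal_create
    # set to true and the sizes from CEPH_ARGS, the tool can create the new
    # block.db and block.wal files itself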
    ceph-bluestore-tool --path $dir/2 \
        --command bluefs-bdev-new-db || return 1

    ceph-bluestore-tool --path $dir/2 \
        --command bluefs-bdev-new-wal || return 1

    ceph-bluestore-tool --path $dir/2 \
        --devs-source $dir/2/block \
        --dev-target $dir/2/block.db \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/2 fsck || return 1

    # slow, DB -> slow, WAL
    ceph-bluestore-tool --path $dir/3 fsck || return 1

    ceph-bluestore-tool --path $dir/3 \
        --command bluefs-bdev-new-wal || return 1

    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block.db \
        --dev-target $dir/3/block \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/3 fsck || return 1

    activate_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)
    activate_osd $dir 1 || return 1
    osd_pid1=$(cat $dir/osd.1.pid)
    activate_osd $dir 2 || return 1
    osd_pid2=$(cat $dir/osd.2.pid)
    activate_osd $dir 3 || return 1
    osd_pid3=$(cat $dir/osd.3.pid)

    # write some objects
    timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1

    # kill
    while kill $osd_pid0; do sleep 1 ; done
    ceph osd down 0
    while kill $osd_pid1; do sleep 1 ; done
    ceph osd down 1
    while kill $osd_pid2; do sleep 1 ; done
    ceph osd down 2
    while kill $osd_pid3; do sleep 1 ; done
    ceph osd down 3

    # slow, DB1, WAL -> slow, DB2, WAL
    ceph-bluestore-tool --path $dir/0 fsck || return 1

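    # swap the DB onto a fresh 1 GiB file; after the migrate the OSD's
    # block.db link should point at db2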
    dd if=/dev/zero of=$dir/0/db2 count=1024 bs=1M
    ceph-bluestore-tool --path $dir/0 \
        --devs-source $dir/0/block.db \
        --dev-target $dir/0/db2 \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/0 fsck || return 1

    # slow, DB, WAL1 -> slow, DB, WAL2

    dd if=/dev/zero of=$dir/0/wal2 count=512 bs=1M
    ceph-bluestore-tool --path $dir/0 \
        --devs-source $dir/0/block.wal \
        --dev-target $dir/0/wal2 \
        --command bluefs-bdev-migrate || return 1
    rm -rf $dir/0/wal

    ceph-bluestore-tool --path $dir/0 fsck || return 1

    # slow, DB + WAL -> slow, DB2 -> slow
    ceph-bluestore-tool --path $dir/1 fsck || return 1

    dd if=/dev/zero of=$dir/1/db2 count=1024 bs=1M
    ceph-bluestore-tool --path $dir/1 \
        --devs-source $dir/1/block.db \
        --devs-source $dir/1/block.wal \
        --dev-target $dir/1/db2 \
        --command bluefs-bdev-migrate || return 1

    rm -rf $dir/1/db

    ceph-bluestore-tool --path $dir/1 fsck || return 1

    ceph-bluestore-tool --path $dir/1 \
        --devs-source $dir/1/block.db \
        --dev-target $dir/1/block \
        --command bluefs-bdev-migrate || return 1

    rm -rf $dir/1/db2

    ceph-bluestore-tool --path $dir/1 fsck || return 1

    # slow -> slow, DB (negative case)
    ceph-objectstore-tool --type bluestore --data-path $dir/2 \
        --op fsck --no-mon-config || return 1

    dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M
    ceph-bluestore-tool --path $dir/2 \
        --devs-source $dir/2/block \
        --dev-target $dir/2/db2 \
        --command bluefs-bdev-migrate

    # Migration from slow-only to new device is unsupported
    if [ $? -eq 0 ]; then
        return 1
    fi
    ceph-bluestore-tool --path $dir/2 fsck || return 1

    # slow + DB + WAL -> slow, DB2
    dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M

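    # unlike the slow-only negative case above, a source list that includes
    # the existing DB (and WAL) may target a brand-new file: db2 takes over
    # as the DB device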
    ceph-bluestore-tool --path $dir/2 \
        --devs-source $dir/2/block \
        --devs-source $dir/2/block.db \
        --devs-source $dir/2/block.wal \
        --dev-target $dir/2/db2 \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/2 fsck || return 1

    # slow + WAL -> slow, WAL2
    dd if=/dev/zero of=$dir/3/wal2 count=1024 bs=1M

    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block \
        --devs-source $dir/3/block.wal \
        --dev-target $dir/3/wal2 \
        --command bluefs-bdev-migrate || return 1

    ceph-bluestore-tool --path $dir/3 fsck || return 1

    activate_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)
    activate_osd $dir 1 || return 1
    osd_pid1=$(cat $dir/osd.1.pid)
    activate_osd $dir 2 || return 1
    osd_pid2=$(cat $dir/osd.2.pid)
    activate_osd $dir 3 || return 1
    osd_pid3=$(cat $dir/osd.3.pid)

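    # final sanity check: the restarted OSDs must still accept writes and the
    # cluster must go clean after all the reshuffling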
    # write some objects
    timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1

    wait_for_clean || return 1
}

main osd-bluefs-volume-ops "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bluefs-volume-ops.sh"
# End: