#!/usr/bin/env bash

# This is a test for https://tracker.ceph.com/issues/40481.
#
# An osdmap with 60000 slots encodes to ~16M, of which the ignored portion
# is ~13M. However, the in-memory osdmap is larger than the remaining ~3M:
# the in-memory osd_addr array for 60000 OSDs alone is ~8M because of
# sockaddr_storage.
#
# Set mon_max_osd = 60000 in ceph.conf.

set -ex

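# succeed only if the given command fails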
function expect_false() {
    if "$@"; then return 1; else return 0; fi
}

function run_test() {
    local dev

    # initially tiny, grow via incrementals
    dev=$(sudo rbd map img)
    for max in 8 60 600 6000 60000; do
        ceph osd setmaxosd $max
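        # mapping a (presumably nonexistent) image forces the kernel client to
        # wait for and decode the latest osdmap; the map attempt itself is
        # expected to fail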
        expect_false sudo rbd map wait_for/latest_osdmap
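        # verify that I/O to the already-mapped device still works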
        xfs_io -c 'pwrite -w 0 12M' $dev
    done
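    # re-inject the current crushmap and recheck the map and I/O paths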
    ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
    expect_false sudo rbd map wait_for/latest_osdmap
    xfs_io -c 'pwrite -w 0 12M' $dev
    sudo rbd unmap $dev

    # initially huge, shrink via incrementals
    dev=$(sudo rbd map img)
    for max in 60000 6000 600 60 8; do
        ceph osd setmaxosd $max
        expect_false sudo rbd map wait_for/latest_osdmap
        xfs_io -c 'pwrite -w 0 12M' $dev
    done
    ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
    expect_false sudo rbd map wait_for/latest_osdmap
    xfs_io -c 'pwrite -w 0 12M' $dev
    sudo rbd unmap $dev
}

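# the image is 12M, so each 12M pwrite covers it in full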
rbd create --size 12M img
run_test
# repeat with primary affinity (adds an extra array)
ceph osd primary-affinity osd.0 0.5
run_test

echo OK