# ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
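#
# Exercise recovery/backfill preemption on a single-node cluster (3 mons,
# 1 mgr, 4 OSDs): throttle recovery, knock OSDs out/down while writing so
# that some PGs need log-based recovery and others need backfill, then
# grep the OSD logs for "defer backfill"/"defer recovery" messages as
# evidence that preemption occurred.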
roles:
- - mon.a
  - mon.b
  - mon.c
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
  - osd.3
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 20 # GB
tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
    conf:
      osd:
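        # Throttle recovery so it is still in flight when competing work
        # arrives, and keep PG logs short so writes made while an OSD is
        # down push some PGs past their log and into backfill.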
        osd recovery sleep: .1
        osd min pg log entries: 10
        osd max pg log entries: 1000
        osd_target_pg_log_entries_per_osd: 0
        osd pg log trim min: 10
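    # The test deliberately drives the cluster through degraded states, so
    # ignore the expected health warnings.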
    log-whitelist:
      - \(POOL_APP_NOT_ENABLED\)
      - \(OSDMAP_FLAGS\)
      - \(OSD_
      - \(OBJECT_
      - \(PG_
      - \(SLOW_OPS\)
      - overall HEALTH
      - slow request
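# Create and initialize the test pool.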
- exec:
    osd.0:
      - ceph osd pool create foo 128
      - ceph osd pool application enable foo foo
      - sleep 5
- ceph.healthy:
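# Write some objects, then mark osd.0 out to start data movement; set noup
# so that osd.1, restarted below, stays down while further writes age the
# PG logs past their shortened limits.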
- exec:
    osd.0:
      - rados -p foo bench 30 write -b 4096 --no-cleanup
      - ceph osd out 0
      - sleep 5
      - ceph osd set noup
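# Restart osd.1; with noup set it cannot be marked up yet, so don't wait
# for it or for cluster health.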
- ceph.restart:
    daemons: [osd.1]
    wait-for-up: false
    wait-for-healthy: false
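# Write a little more while osd.1 is still down, then let it come up: it
# now needs log-based recovery on some PGs and backfill on others. After a
# short wait, remove the recovery throttle so the run can converge.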
- exec:
    osd.0:
      - rados -p foo bench 3 write -b 4096 --no-cleanup
      - ceph osd unset noup
      - sleep 10
      - for f in 0 1 2 3 ; do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done
- ceph.healthy:
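# The run passes only if preemption actually happened: the OSD logs must
# contain at least one "defer backfill" or "defer recovery" message.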
- exec:
    osd.0:
      - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log