# ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml (as imported with ceph 15.2.10)
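# This test exercises recovery preemption: osd.0 is marked out and osd.1 is
# held down (noup) while writes continue, then osd.1 rejoins and has to
# recover/backfill the objects it missed. The final step greps the OSD logs
# for "defer backfill"/"defer recovery", i.e. evidence that at least one
# recovery or backfill operation was deferred (preempted).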
roles:
- - mon.a
  - mon.b
  - mon.c
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
  - osd.3
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 20 # GB
tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
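    # These settings (presumably) keep the test in the interesting regime:
    # tiny pg logs so the down OSD quickly needs backfill rather than plain
    # log-based recovery, and a 100 ms recovery sleep so recovery stays in
    # flight long enough to be preempted.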
    conf:
      osd:
        osd recovery sleep: .1
        osd min pg log entries: 10
        osd max pg log entries: 1000
        osd_target_pg_log_entries_per_osd: 0
        osd pg log trim min: 10
    log-ignorelist:
      - \(POOL_APP_NOT_ENABLED\)
      - \(OSDMAP_FLAGS\)
      - \(OSD_
      - \(OBJECT_
      - \(PG_
      - \(SLOW_OPS\)
      - overall HEALTH
      - slow request
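# Create a small test pool (128 PGs) and enable an application tag on it.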
- exec:
    osd.0:
    - ceph osd pool create foo 128
    - ceph osd pool application enable foo foo
    - sleep 5
- ceph.healthy:
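# Fill the pool with 4 KiB objects, mark osd.0 out, and set the noup flag so
# the OSD restarted below cannot rejoin the map yet.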
- exec:
    osd.0:
    - rados -p foo bench 30 write -b 4096 --no-cleanup
    - ceph osd out 0
    - sleep 5
    - ceph osd set noup
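# Restart osd.1; with noup set it stays down and will miss the writes made in
# the next step, so it needs recovery once it rejoins.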
- ceph.restart:
    daemons: [osd.1]
    wait-for-up: false
    wait-for-healthy: false
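# Write more objects while osd.1 is still down, then clear noup so it comes
# back and starts recovering. After a short wait, disable the recovery sleep
# and raise osd_recovery_max_active on every OSD so recovery/backfill can
# finish and the cluster can return to healthy.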
- exec:
    osd.0:
    - rados -p foo bench 3 write -b 4096 --no-cleanup
    - ceph osd unset noup
    - sleep 10
    - for f in 0 1 2 3 ; do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done
- ceph.healthy:
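# The run only succeeds if at least one OSD actually logged a deferred
# (preempted) backfill or recovery.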
- exec:
    osd.0:
    - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log