# ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
# verify #13098 fix
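#
# What this suite does (summarized from the commands below): it layers a small
# replicated cache pool (ec-ca) over an erasure-coded base pool (ec), caps both
# with a tiny quota so that a short rados bench drives them into the "pool
# full" state, and then lifts the quotas again. The #13098 reference suggests
# the point is to check that a full cache tier over an EC base pool is handled
# cleanly.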
openstack:
- volumes: # attached to each instance
    count: 3
    size: 10 # GB
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
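# The commands below intentionally drive the pools to their quota limits, so
# the resulting full / near-full and cache-tier health warnings are expected
# and must not fail the run.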
overrides:
  ceph:
    log-whitelist:
    - is full
    - overall HEALTH_
    - \(POOL_FULL\)
    - \(POOL_NEAR_FULL\)
    - \(CACHE_POOL_NO_HIT_SET\)
    - \(CACHE_POOL_NEAR_FULL\)
tasks:
- install:
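# Device-health monitoring is switched off so the mgr does not create its own
# pool in the middle of the test; the shortened object name/namespace limits
# are presumably there to stay within the filename limits of more restrictive
# OSD backends.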
- ceph:
    pre-mgr-commands:
    - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
    conf:
      global:
        osd max object name len: 460
        osd max object namespace len: 64
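# Set up the tiering topology and run the workload: ec-ca acts as a cache tier
# in readproxy mode in front of the erasure-coded pool ec.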
- exec:
    client.0:
    - ceph osd pool create ec-ca 1 1
    - ceph osd pool create ec 1 1 erasure default
    - ceph osd pool application enable ec rados
    - ceph osd tier add ec ec-ca
    - ceph osd tier cache-mode ec-ca readproxy
    - ceph osd tier set-overlay ec ec-ca
    - ceph osd pool set ec-ca hit_set_type bloom
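    # Cap both pools at 20480000 bytes (~20 MB) so a short bench run is enough
    # to hit the full condition.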
    - ceph osd pool set-quota ec-ca max_bytes 20480000
    - ceph osd pool set-quota ec max_bytes 20480000
    - ceph osd pool set ec-ca target_max_bytes 20480000
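    # Write for up to 30 seconds; hitting the quota/full limit is expected, so
    # a failing or timed-out bench must not abort the task (hence "|| true").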
    - timeout 30 rados -p ec-ca bench 30 write || true
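    # Lift the quotas again so the cluster can return to a healthy state.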
    - ceph osd pool set-quota ec-ca max_bytes 0
    - ceph osd pool set-quota ec max_bytes 0