# ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
# (upstream ceph.git QA suite file; commit 5576404dbb193519972bd42253b2f5025c9e995d)
# Two-node layout: mon/mgr plus three OSDs on the first host,
# three more OSDs and the workload client on the second.
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
- - osd.3
  - osd.4
  - osd.5
  - client.0
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 30 # GB
tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
    # Expected-noise log patterns; CACHE_POOL_ health warnings are normal
    # while the cache tier is being thrashed below.
    log-whitelist:
      - but it is still running
      - slow request
      - overall HEALTH_
      - \(CACHE_POOL_
# Set up a writeback cache tier ("cache") in front of the base pool.
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 500
# Continuously thrash the cache tier while the rados workload runs:
# cycle cache-mode proxy -> flush/evict everything -> drop the overlay ->
# re-add it in writeback mode -> switch to readproxy, then repeat.
# NOTE(review): the entries form one shell while-loop; teuthology's
# background_exec joins them into a single command line — confirm against
# the task implementation if editing.
- background_exec:
    mon.a:
      - while true
      - do sleep 30
      - echo proxy
      - sudo ceph osd tier cache-mode cache proxy
      - sleep 10
      - sudo ceph osd pool set cache cache_target_full_ratio .001
      - echo cache-try-flush-evict-all
      - rados -p cache cache-try-flush-evict-all
      - sleep 5
      - echo cache-flush-evict-all
      - rados -p cache cache-flush-evict-all
      - sleep 5
      - echo remove overlay
      - sudo ceph osd tier remove-overlay base
      - sleep 20
      - echo add writeback overlay
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd pool set cache cache_target_full_ratio .8
      - sudo ceph osd tier set-overlay base cache
      - sleep 30
      - sudo ceph osd tier cache-mode cache readproxy
      - done
# Mixed read/write/delete/copy_from workload against the base pool,
# running for up to 600 seconds while the tier is thrashed above.
- rados:
    clients: [client.0]
    pools: [base]
    max_seconds: 600
    ops: 400000
    objects: 10000
    size: 1024
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50