# ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
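#
# Thrashes a writeback cache tier (pool "cache" layered over pool "base")
# while a rados workload runs against the base pool.
#
# Two hosts: mon.a, mgr.x and osd.0-2 on the first, osd.3-5 and client.0
# on the second.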
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
- - osd.3
  - osd.4
  - osd.5
  - client.0
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 30 # GB
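# Install Ceph and bring up the cluster.  The pre-mgr command runs before
# the mgr daemons start and sets mgr_pool to false (presumably so the mgr
# does not create its own pool; --force is needed for this dev-level
# option).  The log-ignorelist entries whitelist the health warnings
# expected while the cache tier is being thrashed.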
tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr_pool false --force
    log-ignorelist:
      - but it is still running
      - slow request
      - overall HEALTH_
      - \(CACHE_POOL_
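# client.0 builds the cache tier: pool "cache" is placed over pool "base"
# in writeback mode, with a bloom hit set (8 x 60 s periods) and a low
# target_max_objects so the tiering agent starts flushing and evicting
# almost as soon as the workload begins.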
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 500
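# In the background, mon.a runs the shell while-loop below: roughly every
# 30 s it drives the cache pool to "full" by dropping
# cache_target_full_ratio to .001, flushes and evicts everything in the
# cache, and then detaches the overlay from the base pool while client I/O
# continues.  Re-attaching the writeback overlay is disabled pending the
# tracker issue noted inline.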
- background_exec:
    mon.a:
      - while true
      - do sleep 30
      - sudo ceph osd pool set cache cache_target_full_ratio .001
      - echo cache-try-flush-evict-all
      - rados -p cache cache-try-flush-evict-all
      - sleep 5
      - echo cache-flush-evict-all
      - rados -p cache cache-flush-evict-all
      - sleep 5
      - echo remove overlay
      - sudo ceph osd tier remove-overlay base
      - sleep 20
      # Disabled due to https://tracker.ceph.com/issues/46323
      #- echo add writeback overlay
      #- sudo ceph osd tier cache-mode cache writeback
      #- sudo ceph osd pool set cache cache_target_full_ratio .8
      #- sudo ceph osd tier set-overlay base cache
      #- sleep 30
      #- sudo ceph osd tier cache-mode cache readproxy
      - done
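# Foreground workload: up to 400000 mixed read/write/delete/copy_from ops
# against 10000 objects of size 1024 in the base pool, capped at 600 s.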
- rados:
    clients: [client.0]
    pools: [base]
    max_seconds: 600
    ops: 400000
    objects: 10000
    size: 1024
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50