# NOTE(review): this chunk is a grep/diff-style extraction of a Ceph teuthology
# test YAML — each line carries a fused original-file line number (e.g.
# "3 - volumes:") and the enclosing keys (roles:, tasks:, log-ignorelist:,
# conf:, exec:) are outside this view. As shown, it is NOT parseable YAML.
# Comments below describe only what each visible line literally says; confirm
# structure against the full file before editing.
3 - volumes: # attached to each instance
# Single-node role list: one monitor, one manager, three OSDs, one client.
7 - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
# Regex-escaped cluster health-warning patterns — presumably entries of a
# log-ignorelist so the run tolerates these warnings; TODO confirm the
# enclosing key in the full file.
15 - \(CACHE_POOL_NO_HIT_SET\)
16 - \(CACHE_POOL_NEAR_FULL\)
17 - \(POOL_APP_NOT_ENABLED\)
22 - sudo ceph config set mgr mgr_pool false --force
# OSD object-name length limits — presumably under conf/global; TODO confirm.
25 osd max object name len: 460
26 osd max object namespace len: 64
# Shell command sequence: create a replicated pool "ec-ca" and an
# erasure-coded pool "ec"; attach "ec-ca" as a cache tier over "ec" in
# readproxy mode with a bloom hit-set; set 20480000-byte quotas/target on both
# pools; run up to 30s of rados bench writes against the cache pool (failure
# tolerated via "|| true", since the quota is expected to throttle writes);
# then clear both quotas.
29 - ceph osd pool create ec-ca 1 1
30 - ceph osd pool create ec 1 1 erasure default
31 - ceph osd pool application enable ec rados
32 - ceph osd tier add ec ec-ca
33 - ceph osd tier cache-mode ec-ca readproxy
34 - ceph osd tier set-overlay ec ec-ca
35 - ceph osd pool set ec-ca hit_set_type bloom
36 - ceph osd pool set-quota ec-ca max_bytes 20480000
37 - ceph osd pool set-quota ec max_bytes 20480000
38 - ceph osd pool set ec-ca target_max_bytes 20480000
39 - timeout 30 rados -p ec-ca bench 30 write || true
40 - ceph osd pool set-quota ec-ca max_bytes 0
41 - ceph osd pool set-quota ec max_bytes 0