# NOTE(review): partial extract of a Ceph teuthology test YAML; original file
# line numbers are fused into each line and many intervening lines are missing,
# so the enclosing keys (roles:, tasks:, exec:, ...) are not visible here.
2 - volumes: # attached to each instance
# Single host carrying monitor, mgr, MDS, three OSDs, and two clients.
6 - [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
11 - sudo ceph config set mgr mgr_pool false --force
# Health warnings tolerated by this test (cache pool intentionally starts
# without a hit set; pools are created without an application tag).
14 - \(CACHE_POOL_NO_HIT_SET\)
15 - \(POOL_APP_NOT_ENABLED\)
18 osd max object name len: 460
19 osd max object namespace len: 64
# Build a writeback cache tier: create data_cache, attach it as a tier of
# cephfs_data, set it as the overlay, and configure its bloom hit set.
25 - ceph osd pool create data_cache 4
26 - ceph osd tier add cephfs_data data_cache
27 - ceph osd tier cache-mode data_cache writeback
28 - ceph osd tier set-overlay cephfs_data data_cache
29 - ceph osd pool set data_cache hit_set_type bloom
30 - ceph osd pool set data_cache hit_set_count 8
31 - ceph osd pool set data_cache hit_set_period 3600
# recency 0: promote objects to the cache on first access.
32 - ceph osd pool set data_cache min_read_recency_for_promote 0
# Exercise the tier through the first mount: write 5 MiB of random data,
# truncate the file to zero, rewrite it, and keep a reference copy in /tmp.
36 - sudo chmod 777 $TESTDIR/mnt.0/
37 - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
38 - ls -al $TESTDIR/mnt.0/foo
39 - truncate --size 0 $TESTDIR/mnt.0/foo
40 - ls -al $TESTDIR/mnt.0/foo
41 - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
42 - ls -al $TESTDIR/mnt.0/foo
43 - cp $TESTDIR/mnt.0/foo /tmp/foo
# List cache-pool contents, then flush and evict everything so subsequent
# reads must come from the base pool.
45 - rados -p data_cache ls -
47 - rados -p data_cache ls -
48 - rados -p data_cache cache-flush-evict-all
49 - rados -p data_cache ls -
# Verify: the file read back through the second mount must match the saved
# copy byte-for-byte (cmp exits non-zero on mismatch, failing the test).
53 - hexdump -C /tmp/foo | head
54 - hexdump -C $TESTDIR/mnt.1/foo | head
55 - cmp $TESTDIR/mnt.1/foo /tmp/foo