openstack:
- volumes: # attached to each instance
roles:
- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
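# All daemons and both clients share one role list, i.e. one node, so the
# /tmp/foo reference copy written via client.0 is readable in the client.1
# checks at the end.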
tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
    log-ignorelist:
      - \(CACHE_POOL_NO_HIT_SET\)
    conf:
      global:
        osd max object name len: 460
        osd max object namespace len: 64
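# \(CACHE_POOL_NO_HIT_SET\) is ignorelisted presumably because the health
# warning fires in the window between enabling writeback mode and setting
# hit_set_type below. Next, layer a writeback cache pool over the CephFS
# data pool; min_read_recency_for_promote = 0 means an object is promoted
# into the cache on its very first read.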
- exec:
    client.0:
      - ceph osd pool create data_cache 4
      - ceph osd tier add cephfs_data data_cache
      - ceph osd tier cache-mode data_cache writeback
      - ceph osd tier set-overlay cephfs_data data_cache
      - ceph osd pool set data_cache hit_set_type bloom
      - ceph osd pool set data_cache hit_set_count 8
      - ceph osd pool set data_cache hit_set_period 3600
      - ceph osd pool set data_cache min_read_recency_for_promote 0
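# Mount CephFS on both clients, then exercise a file through client.0's
# mount: write 5 MB of random data, truncate it to zero, rewrite it, and
# stash a reference copy outside CephFS for the final comparison.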
- ceph-fuse:
- exec:
    client.0:
      - sudo chmod 777 $TESTDIR/mnt.0/
      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
      - ls -al $TESTDIR/mnt.0/foo
      - truncate --size 0 $TESTDIR/mnt.0/foo
      - ls -al $TESTDIR/mnt.0/foo
      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
      - ls -al $TESTDIR/mnt.0/foo
      - cp $TESTDIR/mnt.0/foo /tmp/foo
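      # List the cache pool contents before and after cache-flush-evict-all,
      # which flushes dirty objects to the base pool (cephfs_data) and then
      # evicts all clean objects from data_cache.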
      - rados -p data_cache ls -
      - rados -p data_cache ls -
      - rados -p data_cache cache-flush-evict-all
      - rados -p data_cache ls -
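# Read the file back via client.1's mount. After the evict, this read must
# promote the object from the base pool, so any data lost in the
# truncate/flush cycle would show up as a mismatch here.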
- exec:
    client.1:
      - hexdump -C /tmp/foo | head
      - hexdump -C $TESTDIR/mnt.1/foo | head
      - cmp $TESTDIR/mnt.1/foo /tmp/foo
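# cmp exits nonzero on any byte difference, which fails the exec task and
# hence the whole job.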