# ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
# (ceph.git, commit dce6f51bae0237ce15afd6113b163af67369e4d0)
# Teuthology test: verify that truncate + rewrite of a CephFS file behaves
# correctly when the data pool sits behind a writeback cache tier.
# A file is written, truncated, and rewritten via one client; after the cache
# tier is flushed/evicted, a second client must read back identical contents.
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 10 # GB

# Single-node cluster: one monitor, one manager, one MDS, three OSDs,
# and two CephFS clients (writer and independent reader).
roles:
- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]

tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
    # Cache pool is created without a hit set at first, so suppress the
    # resulting health warning.
    log-whitelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
    conf:
      global:
        osd max object name len: 460
        osd max object namespace len: 64
        debug client: 20
        debug mds: 20
        debug ms: 1

# Put a writeback cache tier (data_cache) in front of the CephFS data pool.
# min_read_recency_for_promote 0 promotes objects on first read.
- exec:
    client.0:
      - ceph osd pool create data_cache 4
      - ceph osd tier add cephfs_data data_cache
      - ceph osd tier cache-mode data_cache writeback
      - ceph osd tier set-overlay cephfs_data data_cache
      - ceph osd pool set data_cache hit_set_type bloom
      - ceph osd pool set data_cache hit_set_count 8
      - ceph osd pool set data_cache hit_set_period 3600
      - ceph osd pool set data_cache min_read_recency_for_promote 0

- ceph-fuse:

# Write 5 MB of random data, truncate to zero, rewrite 5 MB, and keep a
# reference copy in /tmp/foo.  Then flush/evict the cache tier so the
# truncation must be correctly reflected in the base pool.
- exec:
    client.0:
      - sudo chmod 777 $TESTDIR/mnt.0/
      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
      - ls -al $TESTDIR/mnt.0/foo
      - truncate --size 0 $TESTDIR/mnt.0/foo
      - ls -al $TESTDIR/mnt.0/foo
      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
      - ls -al $TESTDIR/mnt.0/foo
      - cp $TESTDIR/mnt.0/foo /tmp/foo
      - sync
      - rados -p data_cache ls -
      - sleep 10
      - rados -p data_cache ls -
      - rados -p data_cache cache-flush-evict-all
      - rados -p data_cache ls -
      - sleep 1

# Independent reader: the file seen through client.1's mount must match the
# reference copy byte-for-byte.
- exec:
    client.1:
      - hexdump -C /tmp/foo | head
      - hexdump -C $TESTDIR/mnt.1/foo | head
      - cmp $TESTDIR/mnt.1/foo /tmp/foo