- scrub
- osd_map_max_advance
- wrongly marked
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(OSD_
+ - \(PG_
+ - \(CACHE_
fs: xfs
conf:
+ global:
+ mon warn on pool no app: false
mon:
mon debug unsafe allow tier with nonempty snaps: true
osd:
osd map max advance: 1000
+ osd map cache size: 1100
roles:
- - mon.a
- mds.a
- ceph:
fs: xfs
skip_mgr_daemons: true
+ add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
- workload
- workload_x
- upgrade-sequence_x
- print: "**** done parallel -x branch"
+- exec:
+ osd.0:
+ - ceph osd set-require-min-compat-client luminous
# Run librados tests on the -x upgraded cluster
- install.upgrade:
client.1:
branch: jewel
clients:
client.1:
- - rados/test-upgrade-v11.0.0.sh
+ - rados/test-upgrade-v11.0.0-noec.sh
- cls
env:
CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
branch: jewel
clients:
client.0:
- - rados/test-upgrade-v11.0.0.sh
+ - rados/test-upgrade-v11.0.0-noec.sh
- cls
- - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x upgraded client"
+ - print: "**** done rados/test-upgrade-v11.0.0-noec.sh & cls workload_x upgraded client"
- rgw: [client.1]
- exec:
osd.0:
- ceph osd require-osd-release luminous
- - ceph osd set-require-min-compat-client luminous
- ceph.healthy:
- print: "**** done ceph.restart all -x branch mds/osd/mon"