Run ceph on two nodes, using one of them as a client,
with a separate client-only node.
Use xfs beneath the osds.
- install ceph/octopus v15.2.10 and the v15.2.x point versions
+ install ceph/octopus v15.2.13 and the v15.2.x point versions
run workload and upgrade-sequence in parallel
(every point release should be tested)
run workload and upgrade-sequence in parallel
count: 3
size: 30 # GB
tasks:
-- print: "**** done octopus v15.2.10 about to install"
+- print: "**** done octopus v15.2.13 about to install"
- install:
- tag: v15.2.10
+ tag: v15.2.13
# line below can be removed its from jewel test
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
-- print: "**** done v15.2.10 install"
+- print: "**** done v15.2.13 install"
- ceph:
fs: xfs
add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
- workload
-- print: "**** done workload v15.2.10"
+- print: "**** done workload v15.2.13"
-####### upgrade to v15.2.11
+####### upgrade to v15.2.14
- install.upgrade:
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
mon.a:
- tag: v15.2.11
+ tag: v15.2.14
mon.b:
- tag: v15.2.11
+ tag: v15.2.14
- parallel:
- workload_octopus
- upgrade-sequence_octopus
-- print: "**** done parallel octopus v15.2.11"
+- print: "**** done parallel octopus v15.2.14"
#### upgrade to latest octopus
- install.upgrade: