3 Run ceph on two nodes, using one of them as a client,
4 with a separate client-only node.
5 Use xfs beneath the osds.
6 install ceph/jewel v10.2.0 point version
7 run workload and upgrade-sequence in parallel
8 install ceph/jewel latest version
9 run workload and upgrade-sequence in parallel
10 install ceph/-x version (jewel or luminous)
11 run workload and upgrade-sequence in parallel
22 mon debug unsafe allow tier with nonempty snaps: true
23 mon warn on pool no app: false
25 osd map max advance: 1000
41 - volumes: # attached to each instance
45 - print: "**** v10.2.0 about to install"
48 exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
49 - print: "**** done v10.2.0 install"
52 skip_mgr_daemons: true
53 add_osds_to_crush: true
54 - print: "**** done ceph xfs"
57 - print: "**** done workload v10.2.0"
59 exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
64 # Note that client.a IS NOT upgraded at this point
69 - upgrade-sequence_jewel
70 - print: "**** done parallel jewel branch"
72 exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
75 - print: "**** done branch: jewel install.upgrade on client.1"
79 - print: "**** done branch: -x install.upgrade on mon.a and mon.b"
83 - print: "**** done parallel -x branch"
86 - ceph osd set-require-min-compat-client luminous
87 # Run librados tests on the -x upgraded cluster
94 - rados/test-upgrade-v11.0.0.sh
96 - print: "**** done final test on -x cluster"
97 #######################
103 - suites/blogbench.sh
113 CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
114 - print: "**** done rados/test.sh & cls workload_jewel"
117 - print: "**** done rgw workload_jewel"
120 force-branch: ceph-jewel
122 scan_for_encryption_keys: false
123 - print: "**** done s3tests workload_jewel"
124 upgrade-sequence_jewel:
126 - print: "**** done branch: jewel install.upgrade"
127 - ceph.restart: [mds.a]
130 - ceph.restart: [osd.0]
133 - ceph.restart: [osd.1]
136 - ceph.restart: [osd.2]
139 - ceph.restart: [osd.3]
142 - ceph.restart: [osd.4]
145 - ceph.restart: [osd.5]
148 - ceph.restart: [mon.a]
151 - ceph.restart: [mon.b]
154 - ceph.restart: [mon.c]
157 - print: "**** done ceph.restart all jewel branch mds/osd/mon"
164 - rados/test-upgrade-v11.0.0.sh
167 CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
168 - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x NOT upgraded client"
173 - rados/test-upgrade-v11.0.0.sh
175 - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x upgraded client"
177 - print: "**** done rgw workload_x"
180 force-branch: ceph-jewel
182 scan_for_encryption_keys: false
183 - print: "**** done s3tests workload_x"
186 - ceph.restart: [mds.a]
189 - ceph.restart: [mon.a]
192 - ceph.restart: [mon.b]
195 - ceph.restart: [mon.c]
198 - ceph.restart: [osd.0]
201 - ceph.restart: [osd.1]
204 - ceph.restart: [osd.2]
207 - ceph.restart: [osd.3]
210 - ceph.restart: [osd.4]
215 wait-for-healthy: false
216 wait-for-up-osds: true
219 - mkdir -p /var/lib/ceph/mgr/ceph-x
220 - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
221 - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
224 wait-for-healthy: false
227 - ceph osd require-osd-release luminous
229 - print: "**** done ceph.restart all -x branch mds/osd/mon"