   Run ceph on two nodes, using one of them as a client,
   with a separate client-only node.
   Use xfs beneath the osds.
   install ceph/luminous v12.2.2 point version
   run workload and upgrade-sequence in parallel
   install ceph/luminous latest version
   run workload and upgrade-sequence in parallel
   install ceph/-x version (luminous or master/mimic)
   run workload and upgrade-sequence in parallel
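# (assumed example, not part of this file) a suite like this is typically
# scheduled through teuthology; the suite path and flags below are an
# illustration, e.g.:
#   teuthology-suite --suite upgrade/luminous-x --ceph luminous --machine-type smithi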
        mon debug unsafe allow tier with nonempty snaps: true
        mon warn on pool no app: false
        osd map max advance: 1000
        osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
                              replica_log rgw sdk statelog timeindex user version"
        osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
                                 replica_log rgw sdk statelog timeindex user version"
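# the two class lists above control which RADOS object classes the osds are
# allowed to load and which they load by default; the cls workunits run later
# in this suite depend on these classes being loadable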
        rgw_crypt_require_ssl: false
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
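# the two testkeys above are throwaway base64-encoded keys for the rgw
# server-side-encryption (KMS) tests; rgw_crypt_require_ssl is disabled so
# the s3tests can use plain HTTP. A key can be inspected with, e.g.:
#   echo YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= | base64 -d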
- volumes: # attached to each instance
- print: "**** v12.2.2 about to install"
    # the line below can be removed; it is left over from the jewel test
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v12.2.2 install"
    add_osds_to_crush: true
- print: "**** done ceph xfs"
- print: "**** done workload"
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    # Note that client.1 IS NOT upgraded at this point
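# running the workload and the upgrade sequence in parallel below exercises
# the cluster while its daemons are on mixed versions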
   - upgrade-sequence_luminous
- print: "**** done parallel luminous branch"
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
- print: "**** done branch: luminous install.upgrade on client.1"
- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
- print: "**** done parallel -x branch"
      - ceph osd set-require-min-compat-client luminous
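# a hedged sketch of verifying the previous step (an assumption, not part of
# the original suite):
#- exec:
#    mon.a:
#      - ceph osd dump | grep require_min_compat_client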
# Run librados tests on the -x upgraded cluster
- print: "**** done final test on -x cluster"
#######################
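# everything below this separator defines the named task lists that the
# sequential/parallel tasks above reference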
           - suites/blogbench.sh
         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
   - print: "**** done rados/test.sh & cls workload_luminous"
     - print: "**** done rgw workload_luminous"
           force-branch: ceph-luminous
           scan_for_encryption_keys: false
     - print: "**** done s3tests workload_luminous"
upgrade-sequence_luminous:
   - print: "**** done branch: luminous install.upgrade"
   - ceph.restart: [mds.a]
   - ceph.restart: [osd.0]
   - ceph.restart: [osd.1]
   - ceph.restart: [osd.2]
   - ceph.restart: [osd.3]
   - ceph.restart: [osd.4]
   - ceph.restart: [osd.5]
   - ceph.restart: [mon.a]
   - ceph.restart: [mon.b]
   - ceph.restart: [mon.c]
   - print: "**** done ceph.restart all luminous branch mds/osd/mon"
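# the restarts above go one daemon at a time so the cluster stays available
# throughout; a hedged sketch of checking that every daemon runs the new
# version afterwards (an assumption, not part of the original suite):
#- exec:
#    mon.a:
#      - ceph versions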
   - print: "**** done rados/test.sh & cls workload_x NOT upgraded client"
   - print: "**** done rados/test.sh & cls workload_x upgraded client"
     - print: "**** done rgw workload_x"
           force-branch: ceph-luminous
           scan_for_encryption_keys: false
     - print: "**** done s3tests workload_x"
   - ceph.restart: [mds.a]
   - ceph.restart: [mon.a]
   - ceph.restart: [mon.b]
   - ceph.restart: [mon.c]
   - ceph.restart: [osd.0]
   - ceph.restart: [osd.1]
   - ceph.restart: [osd.2]
   - ceph.restart: [osd.3]
   - ceph.restart: [osd.4]
       wait-for-healthy: false
       wait-for-up-osds: true
       wait-for-healthy: false
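# wait-for-healthy is off for the final restarts, presumably because health
# cannot settle until all daemons run the new version and require-osd-release
# is set below; wait-for-up-osds still confirms the restarted osds come back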
         - ceph osd require-osd-release luminous
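         # (assumed verification, not in the original suite)
         # - ceph osd dump | grep require_osd_release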
   - print: "**** done ceph.restart all -x branch mds/osd/mon"