Run ceph on two nodes, using one of them as a client,
with a separate client-only node.
Use xfs beneath the osds.
install ceph/luminous v12.2.2 point version
run workload and upgrade-sequence in parallel
install ceph/luminous v12.2.5 point version
run workload and upgrade-sequence in parallel
install ceph/luminous latest version
run workload and upgrade-sequence in parallel
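# How the plan above maps onto the tasks below: the initial v12.2.2 install
# runs the blogbench workload sequentially, while each subsequent upgrade
# runs workload_luminous and upgrade-sequence_luminous concurrently via a
# parallel task, so client I/O is exercised while the daemons are restarted.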
- POOL_APP_NOT_ENABLED
- CACHE_POOL_NO_HIT_SET
- CACHE_POOL_NEAR_FULL
- application not enabled
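# These log-whitelist entries keep expected, transient health warnings from
# failing the run: pools created by the workloads can briefly report
# POOL_APP_NOT_ENABLED, and the cache-tier test cases are expected to raise
# the CACHE_POOL_* warnings.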
mon debug unsafe allow tier with nonempty snaps: true
mon warn on pool no app: false
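# "mon warn on pool no app: false" silences the POOL_APP_NOT_ENABLED health
# warning for the test pools; the "unsafe allow tier" debug option lets the
# cache-tiering tests add a tier to a pool that already has snapshots, which
# the monitors would otherwise refuse.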
osd map max advance: 1000
osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
                      replica_log rgw sdk statelog timeindex user version"
osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
                         replica_log rgw sdk statelog timeindex user version"
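# osd_class_load_list / osd_class_default_list name the RADOS object classes
# the OSDs are allowed to load; they are spelled out here rather than left at
# the default, presumably so the cls workunit below can exercise every listed
# class (including the sdk test class) across the upgrade.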
rgw_crypt_require_ssl: false
rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
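# The two testkey-* values are canned base64 keys for the s3tests
# server-side-encryption cases; rgw_crypt_require_ssl is disabled so those
# encrypted requests are accepted over the plain-HTTP rgw endpoint used here.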
- volumes: # attached to each instance
- print: "**** v12.2.2 about to install"
# the line below can be removed; it is left over from the jewel test
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v12.2.2 install"
add_osds_to_crush: true
- print: "**** done ceph xfs"
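# add_osds_to_crush: true is an option of the teuthology ceph task (not a
# ceph.conf setting); it appears to have the task add the newly created OSDs
# to the CRUSH map itself during cluster setup.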
- print: "**** done workload v12.2.2"
#### upgrade to v12.2.5
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
# Note that client.a IS NOT upgraded at this point
- upgrade-sequence_luminous
- print: "**** done parallel luminous v12.2.5"
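# The parallel task runs its two entries concurrently: workload_luminous
# (defined after the separator below) keeps client traffic going while
# upgrade-sequence_luminous restarts the daemons onto the freshly installed
# v12.2.5 packages.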
#### upgrade to latest luminous
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
# Note that client.a IS NOT upgraded at this point
- upgrade-sequence_luminous
- print: "**** done parallel luminous branch"
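# Same pattern as the v12.2.5 round, except the upgrade pulls the tip of the
# luminous branch rather than a tagged point release, matching the
# "install ceph/luminous latest version" step in the description above.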
#######################
- suites/blogbench.sh
CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
- print: "**** done rados/test.sh & cls workload_luminous"
- print: "**** done rgw workload_luminous"
force-branch: ceph-luminous
scan_for_encryption_keys: false
- print: "**** done s3tests workload_luminous"
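# workload_luminous keeps the client-side tests pinned to luminous at every
# stage: the rados/test.sh and cls workunits (CLS_RBD_GTEST_FILTER excludes
# the single rbd cls gtest case named above), followed by an rgw instance and
# the s3tests suite from the ceph-luminous branch.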
upgrade-sequence_luminous:
- print: "**** done branch: luminous install.upgrade"
- ceph.restart: [mds.a]
- ceph.restart: [osd.0]
- ceph.restart: [osd.1]
- ceph.restart: [osd.2]
- ceph.restart: [osd.3]
- ceph.restart: [osd.4]
- ceph.restart: [osd.5]
- ceph.restart: [mon.a]
- ceph.restart: [mon.b]
- ceph.restart: [mon.c]
- print: "**** done ceph.restart all luminous branch mds/osd/mon"
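# The restart order above is a rolling one: the mds first, then each osd in
# turn, then the mons one by one, so the cluster stays available and the
# parallel workload keeps running while every daemon moves onto the new
# luminous packages.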