3 Run ceph on two nodes, using one of them as a client,
4 with a separate client-only node.
5 Use xfs beneath the osds.
6 install ceph/luminous v12.2.2 point version
7 run workload and upgrade-sequence in parallel
8 install ceph/luminous v12.2.5 point version
9 run workload and upgrade-sequence in parallel
10 install ceph/luminous v12.2.7 point version
11 run workload and upgrade-sequence in parallel
12 install ceph/luminous latest version
13 run workload and upgrade-sequence in parallel
22 - POOL_APP_NOT_ENABLED
23 - CACHE_POOL_NO_HIT_SET
29 - CACHE_POOL_NEAR_FULL
32 - application not enabled
37 mon debug unsafe allow tier with nonempty snaps: true
38 mon warn on pool no app: false
40 osd map max advance: 1000
41 osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
42 replica_log rgw sdk statelog timeindex user version"
43 osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
44 replica_log rgw sdk statelog timeindex user version"
46 rgw_crypt_require_ssl: false
47 rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
63 - volumes: # attached to each instance
67 - print: "**** v12.2.2 about to install"
70 # The line below can be removed; it is left over from the jewel test
71 #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
72 - print: "**** done v12.2.2 install"
75 add_osds_to_crush: true
76 - print: "**** done ceph xfs"
79 - print: "**** done workload v12.2.2"
81 #### upgrade to v12.2.5
83 #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
88 # Note that client.a IS NOT upgraded at this point
91 - upgrade-sequence_luminous
92 - print: "**** done parallel luminous v12.2.5"
94 #### upgrade to v12.2.7
96 #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
101 # Note that client.a IS NOT upgraded at this point
104 - upgrade-sequence_luminous
105 - print: "**** done parallel luminous v12.2.7"
107 #### upgrade to latest luminous
109 #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
112 # Note that client.a IS NOT upgraded at this point
115 - upgrade-sequence_luminous
116 - print: "**** done parallel luminous branch"
118 #######################
124 - suites/blogbench.sh
134 CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
135 - print: "**** done rados/test.sh & cls workload_luminous"
138 - print: "**** done rgw workload_luminous"
141 force-branch: ceph-luminous
143 scan_for_encryption_keys: false
144 - print: "**** done s3tests workload_luminous"
145 upgrade-sequence_luminous:
147 - print: "**** done branch: luminous install.upgrade"
148 - ceph.restart: [mds.a]
151 - ceph.restart: [osd.0]
154 - ceph.restart: [osd.1]
157 - ceph.restart: [osd.2]
160 - ceph.restart: [osd.3]
163 - ceph.restart: [osd.4]
166 - ceph.restart: [osd.5]
169 - ceph.restart: [mon.a]
172 - ceph.restart: [mon.b]
175 - ceph.restart: [mon.c]
178 - print: "**** done ceph.restart all luminous branch mds/osd/mon"