Run ceph on two nodes, using one of them as a client,
plus a separate client-only node.
Use xfs beneath the osds.
Install the ceph/luminous v12.2.2 point release,
then run the workload and upgrade-sequence in parallel.
Install the ceph/luminous v12.2.5 point release,
then run the workload and upgrade-sequence in parallel.
Install the ceph/luminous v12.2.7 point release,
then run the workload and upgrade-sequence in parallel.
Install the ceph/luminous v12.2.8 point release,
then run the workload and upgrade-sequence in parallel.
Install the latest ceph/luminous version,
then run the workload and upgrade-sequence in parallel.
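# Each point-release step below follows the same pattern: an install.upgrade
# task pinned to the release tag, then the workload and the upgrade sequence
# run side by side. A rough sketch of one step (the exact role-to-tag layout
# follows the install.upgrade stanzas in this file):
#
#   - install.upgrade:
#       mon.a:
#         tag: v12.2.5
#       mon.b:
#         tag: v12.2.5
#   - parallel:
#      - workload_luminous
#      - upgrade-sequence_luminous
#   - print: "**** done parallel luminous v12.2.5"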
- POOL_APP_NOT_ENABLED
- CACHE_POOL_NO_HIT_SET
- CACHE_POOL_NEAR_FULL
- application not enabled
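# Expected health warnings: the pool-application and cache-tiering checks
# fire while the workloads run, so they are whitelisted here rather than
# failing the test.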
mon debug unsafe allow tier with nonempty snaps: true
mon warn on pool no app: false
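# 'mon warn on pool no app' is disabled to quiet the same pool-application
# warning that is whitelisted above; the 'unsafe allow tier' debug flag lets
# the cache-tiering workload put a tier over a pool that already has
# snapshots, which the mon would normally refuse.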
osd map max advance: 1000
osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
                      replica_log rgw sdk statelog timeindex user version"
osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
                         replica_log rgw sdk statelog timeindex user version"
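# osd_class_load_list / osd_class_default_list pin the set of RADOS object
# classes the OSDs will load, so the cls-based workloads below (rbd, rgw,
# lock, refcount, ...) keep working across the upgrade.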
rgw_crypt_require_ssl: false
rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
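# SSE-KMS material for the s3tests workload below: the two testkey values are
# base64-encoded dummy keys, and rgw_crypt_require_ssl is turned off so the
# encryption tests can talk to rgw over plain HTTP inside the test cluster.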
- volumes: # attached to each instance
- print: "**** v12.2.2 about to install"
# The line below can be removed; it is left over from the jewel test.
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v12.2.2 install"
add_osds_to_crush: true
- print: "**** done ceph xfs"
- print: "**** done workload v12.2.2"
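# add_osds_to_crush makes the ceph task add each new osd to the CRUSH map as
# the cluster is brought up (description inferred from the option name; the
# flag belongs to the teuthology ceph task).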
#### upgrade to v12.2.5
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
# Note that client.a IS NOT upgraded at this point
- upgrade-sequence_luminous
- print: "**** done parallel luminous v12.2.5"
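# Leaving one client on the older release while the daemons restart means
# workload_luminous keeps exercising an old client against the upgraded
# cluster; that compatibility check is presumably why the client is skipped.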
#### upgrade to v12.2.7
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
# Note that client.a IS NOT upgraded at this point
- upgrade-sequence_luminous
- print: "**** done parallel luminous v12.2.7"
#### upgrade to v12.2.8
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
# Note that client.a IS NOT upgraded at this point
- upgrade-sequence_luminous
- print: "**** done parallel luminous v12.2.8"
#### upgrade to latest luminous
#exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
# Note that client.a IS NOT upgraded at this point
- upgrade-sequence_luminous
- print: "**** done parallel luminous branch"
#######################
- suites/blogbench.sh
CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
- print: "**** done rados/test.sh & cls workload_luminous"
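# '*:-TestClsRbd.snapshots_namespaces' runs every TestClsRbd case except
# snapshots_namespaces; presumably that case cannot pass while client and
# OSD versions are mixed mid-upgrade.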
- print: "**** done rgw workload_luminous"
force-branch: ceph-luminous
scan_for_encryption_keys: false
- print: "**** done s3tests workload_luminous"
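# force-branch pins the s3-tests suite itself to its ceph-luminous branch so
# the test set matches the installed release; scan_for_encryption_keys, which
# checks that no plaintext keys leak into stored objects, is switched off
# here (that description is an assumption from the option name).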
upgrade-sequence_luminous:
- print: "**** done branch: luminous install.upgrade"
- ceph.restart: [mds.a]
- ceph.restart: [osd.0]
- ceph.restart: [osd.1]
- ceph.restart: [osd.2]
- ceph.restart: [osd.3]
- ceph.restart: [osd.4]
- ceph.restart: [osd.5]
- ceph.restart: [mon.a]
- ceph.restart: [mon.b]
- ceph.restart: [mon.c]
- print: "**** done ceph.restart all luminous branch mds/osd/mon"
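# The restarts go one daemon at a time, mds first, then each osd, then each
# mon, so the cluster stays up and serving the parallel workload while the
# new binaries take effect.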