3 Run ceph on two nodes, using one of them as a client,
4 with a separate client-only node.
5 Use xfs beneath the osds.
6 install ceph/octopus v15.2.1 and the v15.2.x point versions
7 run workload and upgrade-sequence in parallel
8 (every point release should be tested)
9 run workload and upgrade-sequence in parallel
10 install ceph/octopus latest version
11 run workload and upgrade-sequence in parallel
20 - POOL_APP_NOT_ENABLED
21 - CACHE_POOL_NO_HIT_SET
27 - CACHE_POOL_NEAR_FULL
30 - application not enabled
31 - cache pools at or near target size
32 - filesystem is degraded
34 ### ref: https://tracker.ceph.com/issues/40251
35 # removed (see tracker ref above): - failed to encode map
41 mon_warn_on_pool_no_app: false
43 mon debug unsafe allow tier with nonempty snaps: true
45 osd map max advance: 1000
46 osd_class_default_list: "*"
47 osd_class_load_list: "*"
49 rgw_crypt_require_ssl: false
50 rgw crypt s3 kms backend: testing
51 rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
67 - volumes: # attached to each instance
71 - print: "**** done octopus v15.2.1 about to install"
74 # line below can be removed; it's from a jewel test
75 #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
76 - print: "**** done v15.2.1 install"
79 add_osds_to_crush: true
80 - print: "**** done ceph xfs"
83 - print: "**** done workload v15.2.1"
86 ####### upgrade to v15.2.3
88 #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
95 - upgrade-sequence_octopus
96 - print: "**** done parallel octopus v15.2.3"
98 #### upgrade to latest octopus
104 - upgrade-sequence_octopus
105 - print: "**** done parallel octopus branch"
107 #######################
113 - suites/blogbench.sh
124 CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
125 - print: "**** done rados/test.sh & cls workload_octopus"
128 - print: "**** done rgw workload_octopus"
131 force-branch: ceph-octopus
133 scan_for_encryption_keys: false
134 - print: "**** done s3tests workload_octopus"
138 - print: "**** done rbd_fsx workload_octopus"
140 upgrade-sequence_octopus:
142 - print: "**** done branch: octopus install.upgrade"
143 - ceph.restart: [mds.a]
146 - ceph.restart: [osd.0]
149 - ceph.restart: [osd.1]
152 - ceph.restart: [osd.2]
155 - ceph.restart: [osd.3]
158 - ceph.restart: [osd.4]
161 - ceph.restart: [osd.5]
164 - ceph.restart: [mgr.x]
167 - ceph.restart: [mon.a]
170 - ceph.restart: [mon.b]
173 - ceph.restart: [mon.c]
176 - print: "**** done ceph.restart all octopus branch mds/osd/mon"