]> git.proxmox.com Git - ceph.git/blobdiff - ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml
import ceph pacific 16.2.5
[ceph.git] / ceph / qa / suites / upgrade / pacific-p2p / pacific-p2p-parallel / point-to-point-upgrade.yaml
diff --git a/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml b/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml
new file mode 100644 (file)
index 0000000..a433f60
--- /dev/null
@@ -0,0 +1,177 @@
+meta:
+- desc: |
+   Run ceph on two nodes, using one of them as a client,
+   with a separate client-only node.
+   Use xfs beneath the osds.
+   install ceph/pacific v16.2.2 and the v16.2.x point versions
+   run workload and upgrade-sequence in parallel
+   (every point release should be tested)
+   run workload and upgrade-sequence in parallel
+   install ceph/pacific latest version
+   run workload and upgrade-sequence in parallel
+   Overall upgrade path is - pacific-latest.point-1 => pacific-latest.point => pacific-latest
+overrides:
+  ceph:
+    log-ignorelist:
+    - reached quota
+    - scrub
+    - osd_map_max_advance
+    - wrongly marked
+    - FS_DEGRADED
+    - POOL_APP_NOT_ENABLED
+    - CACHE_POOL_NO_HIT_SET
+    - POOL_FULL
+    - SMALLER_PG
+    - pool\(s\) full
+    - OSD_DOWN
+    - missing hit_sets
+    - CACHE_POOL_NEAR_FULL
+    - PG_AVAILABILITY
+    - PG_DEGRADED
+    - application not enabled
+    - cache pools at or near target size
+    - filesystem is degraded
+    - OBJECT_MISPLACED
+    ### ref: https://tracker.ceph.com/issues/40251
+    #removed see ^ - failed to encode map
+
+    fs: xfs
+
+    conf:
+      global:
+        mon_warn_on_pool_no_app: false
+      mon:
+        mon debug unsafe allow tier with nonempty snaps: true
+      osd:
+        osd map max advance: 1000
+        osd_class_default_list: "*"
+        osd_class_load_list: "*"
+      client:
+        rgw_crypt_require_ssl: false
+        rgw crypt s3 kms backend: testing
+        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - mgr.x
+- - mon.b
+  - mon.c
+  - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 30 # GB
+tasks:
+- print: "**** done pacific v16.2.2 about to install"
+- install:
+    tag: v16.2.2
+    # line below can be removed; it's from a jewel test
+    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
+- print: "**** done v16.2.2 install"
+- ceph:
+   fs: xfs
+   add_osds_to_crush: true
+- print: "**** done ceph xfs"
+- sequential:
+   - workload
+- print: "**** done workload v16.2.2"
+
+
+#######  upgrade to v16.2.3
+- install.upgrade:
+    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+    mon.a:
+      tag: v16.2.3
+    mon.b:
+      tag: v16.2.3
+- parallel:
+   - workload_pacific
+   - upgrade-sequence_pacific
+- print: "**** done parallel pacific v16.2.3"
+
+####  upgrade to latest pacific
+- install.upgrade:
+    mon.a:
+    mon.b:
+- parallel:
+   - workload_pacific
+   - upgrade-sequence_pacific
+- print: "**** done parallel pacific branch"
+
+#######################
+workload:
+   sequential:
+   - workunit:
+       clients:
+         client.0:
+           - suites/blogbench.sh
+workload_pacific:
+   full_sequential:
+   - workunit:
+       branch: pacific
+       #tag: v16.2.1
+       clients:
+         client.1:
+         - rados/test.sh
+         - cls
+       env:
+         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
+   - print: "**** done rados/test.sh &  cls workload_pacific"
+   - sequential:
+     - rgw: [client.0]
+     - print: "**** done rgw workload_pacific"
+     - s3tests:
+         client.0:
+           force-branch: ceph-pacific
+           rgw_server: client.0
+           scan_for_encryption_keys: false
+     - print: "**** done s3tests workload_pacific"
+     - rbd_fsx:
+         clients: [client.0]
+         size: 134217728
+     - print: "**** done rbd_fsx workload_pacific"
+
+upgrade-sequence_pacific:
+   sequential:
+   - print: "**** done branch: pacific install.upgrade"
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [osd.0]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.1]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.2]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.3]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.4]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.5]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mgr.x]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.b]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.c]
+   - sleep:
+       duration: 60
+   - print: "**** done ceph.restart all pacific branch mds/osd/mon"