meta:
- desc: |
   Run ceph on two nodes, using one of them as a client,
   with a separate client-only node.
   Use xfs beneath the osds.
   Install ceph/octopus v15.2.1, then upgrade through the v15.2.x
   point releases (every point release should be tested) and finally
   to the latest octopus version, running the workload and the
   upgrade-sequence in parallel at each upgrade step.
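# For reference (not part of the teuthology schema): a suite like this is
# typically scheduled with teuthology-suite, e.g. roughly
#   teuthology-suite --suite upgrade/octopus-p2p --ceph octopus --machine-type smithi
# exact flags and defaults depend on the local teuthology setup.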
overrides:
  ceph:
    log-whitelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    - FS_DEGRADED
    - POOL_APP_NOT_ENABLED
    - CACHE_POOL_NO_HIT_SET
    - POOL_FULL
    - SMALLER_PG
    - pool\(s\) full
    - OSD_DOWN
    - missing hit_sets
    - CACHE_POOL_NEAR_FULL
    - PG_AVAILABILITY
    - PG_DEGRADED
    - application not enabled
    - cache pools at or near target size
    - filesystem is degraded
    - OBJECT_MISPLACED
    ### ref: https://tracker.ceph.com/issues/40251
    #removed see ^ - failed to encode map

    fs: xfs

    conf:
      global:
        mon_warn_on_pool_no_app: false
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
      osd:
        osd map max advance: 1000
        osd_class_default_list: "*"
        osd_class_load_list: "*"
      client:
        rgw_crypt_require_ssl: false
        rgw crypt s3 kms backend: testing
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
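        # the testkey-* values above are inert base64 test blobs consumed by the
        # s3tests encryption cases (kms backend "testing"); they are not real KMS keys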
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
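# each top-level list above corresponds to one test node: two cluster nodes
# (the second also hosting client.0) plus a separate client-only node (client.1)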
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
tasks:
- print: "**** done octopus v15.2.1 about to install"
- install:
    tag: v15.2.1
    # the line below can be removed; it is left over from the jewel test
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v15.2.1 install"
- ceph:
    fs: xfs
    add_osds_to_crush: true
- print: "**** done ceph xfs"
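# 'workload', 'workload_octopus' and 'upgrade-sequence_octopus' below refer to
# the named task blocks defined at the end of this file; sequential runs them
# one after another, parallel runs them concurrently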
- sequential:
  - workload
- print: "**** done workload v15.2.1"


####### upgrade to v15.2.3
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      tag: v15.2.3
    mon.b:
      tag: v15.2.3
- parallel:
  - workload_octopus
  - upgrade-sequence_octopus
- print: "**** done parallel octopus v15.2.3"

#### upgrade to latest octopus
- install.upgrade:
    mon.a:
    mon.b:
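    # no tag or branch is given here, so install.upgrade moves both nodes to the
    # octopus version under test (the branch/sha1 the run was scheduled against)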
- parallel:
  - workload_octopus
  - upgrade-sequence_octopus
- print: "**** done parallel octopus branch"

#######################
workload:
  sequential:
  - workunit:
      clients:
        client.0:
        - suites/blogbench.sh
workload_octopus:
  full_sequential:
  - workunit:
      branch: octopus
      #tag: v15.2.1
      clients:
        client.1:
        - rados/test.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
  - print: "**** done rados/test.sh & cls workload_octopus"
  - sequential:
    - rgw: [client.0]
    - print: "**** done rgw workload_octopus"
    - s3tests:
        client.0:
          force-branch: ceph-octopus
          rgw_server: client.0
          scan_for_encryption_keys: false
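        # force-branch pins the s3-tests suite itself to its ceph-octopus branch,
        # independent of the ceph version being upgraded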
    - print: "**** done s3tests workload_octopus"
    - rbd_fsx:
        clients: [client.0]
        size: 134217728
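        # 134217728 bytes = 128 MiB image exercised by fsx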
    - print: "**** done rbd_fsx workload_octopus"

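# restart order used during each upgrade step: mds first, then the osds one at a
# time, then mgr and the mons, with sleeps in between so the cluster can settle
# before the next daemon is bounced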
upgrade-sequence_octopus:
  sequential:
  - print: "**** done branch: octopus install.upgrade"
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart: [osd.5]
  - sleep:
      duration: 60
  - ceph.restart: [mgr.x]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - print: "**** done ceph.restart all octopus branch mds/osd/mon"