# ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
meta:
- desc: |
    Run ceph on two nodes, using one of them as a client,
    with a separate client-only node.
    Use xfs beneath the osds.
    install ceph/jewel v10.2.0 point version
    run workload and upgrade-sequence in parallel
    install ceph/jewel latest version
    run workload and upgrade-sequence in parallel
    install ceph/-x version (jewel or kraken)
    run workload and upgrade-sequence in parallel
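# Overrides applied to every ceph task in this run: log messages that are
# expected while mixed versions are running are whitelisted, and a few
# mon/osd options are tuned for the upgrade phases.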
overrides:
  ceph:
    log-whitelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    fs: xfs
    conf:
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
        mon warn on pool no app: false
      osd:
        osd map max advance: 1000
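# Two nodes carry the cluster daemons (mons, mds, osds, mgr.x and client.0);
# a third, client-only node hosts client.1.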
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
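# The tasks below drive the three phases described in meta; the workload_* and
# upgrade-sequence_* names they reference are defined after the task list.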
tasks:
- print: "**** v10.2.0 about to install"
- install:
    tag: v10.2.0
    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v10.2.0 install"
- ceph:
    fs: xfs
    skip_mgr_daemons: true
    add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
  - workload
- print: "**** done workload v10.2.0"
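# Phase 2: move both cluster nodes to the latest jewel packages; client.1 is
# deliberately left on v10.2.0 for now (see the commented-out entry below).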
- install.upgrade:
    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      branch: jewel
    mon.b:
      branch: jewel
    # Note that client.1 IS NOT upgraded at this point
    #client.1:
      #branch: jewel
- parallel:
  - workload_jewel
  - upgrade-sequence_jewel
- print: "**** done parallel jewel branch"
- install.upgrade:
    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    client.1:
      branch: jewel
- print: "**** done branch: jewel install.upgrade on client.1"
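# Phase 3: upgrade mon.a and mon.b to the -x version under test (no branch is
# pinned here, so the packages being tested are installed).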
- install.upgrade:
    mon.a:
    mon.b:
- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
- parallel:
  - workload_x
  - upgrade-sequence_x
- print: "**** done parallel -x branch"
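# With every daemon now on -x, raise the minimum client compatibility level.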
- exec:
    osd.0:
    - ceph osd set-require-min-compat-client luminous
# Run librados tests on the -x upgraded cluster
- install.upgrade:
    client.1:
- workunit:
    branch: jewel
    clients:
      client.1:
      - rados/test-upgrade-v11.0.0.sh
      - cls
- print: "**** done final test on -x cluster"
#######################
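# Named blocks referenced by the sequential/parallel tasks above.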
workload:
  sequential:
  - workunit:
      clients:
        client.0:
        - suites/blogbench.sh
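# Jewel-branch rados/cls tests run from the not-yet-upgraded client.1, plus rgw
# and s3tests against client.0, all concurrent with the jewel upgrade sequence.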
workload_jewel:
  full_sequential:
  - workunit:
      branch: jewel
      clients:
        client.1:
        - rados/test.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
  - print: "**** done rados/test.sh & cls workload_jewel"
  - sequential:
    - rgw: [client.0]
    - print: "**** done rgw workload_jewel"
    - s3tests:
        client.0:
          force-branch: ceph-jewel
          rgw_server: client.0
          scan_for_encryption_keys: false
    - print: "**** done s3tests workload_jewel"
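# Restart daemons onto the freshly installed jewel packages: mds first, then
# each osd, then each mon, with a pause after every restart.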
upgrade-sequence_jewel:
  sequential:
  - print: "**** done branch: jewel install.upgrade"
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart: [osd.5]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - print: "**** done ceph.restart all jewel branch mds/osd/mon"
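# The jewel upgrade tests are run from both clients: client.1 has not been
# upgraded yet, while client.0 (on an upgraded node) has; rgw and s3tests then
# run against client.1.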
workload_x:
  sequential:
  - workunit:
      branch: jewel
      clients:
        client.1:
        - rados/test-upgrade-v11.0.0.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
  - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x NOT upgraded client"
  - workunit:
      branch: jewel
      clients:
        client.0:
        - rados/test-upgrade-v11.0.0.sh
        - cls
  - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x upgraded client"
  - rgw: [client.1]
  - print: "**** done rgw workload_x"
  - s3tests:
      client.1:
        force-branch: ceph-jewel
        rgw_server: client.1
        scan_for_encryption_keys: false
  - print: "**** done s3tests workload_x"
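# Restart everything onto -x (mds, then mons, then osds), bootstrap the mgr
# that was skipped at deploy time, and finish by requiring luminous osds.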
upgrade-sequence_x:
  sequential:
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart:
      daemons: [osd.5]
      wait-for-healthy: false
      wait-for-up-osds: true
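  # osd.5 is restarted without waiting for HEALTH_OK (no mgr is running yet).
  # Before starting mgr.x, create its data dir and keyring by hand, since the
  # cluster was deployed with skip_mgr_daemons: true.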
  - exec:
      mgr.x:
      - mkdir -p /var/lib/ceph/mgr/ceph-x
      - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
      - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
  - ceph.restart:
      daemons: [mgr.x]
      wait-for-healthy: false
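  # Finalize the upgrade: require-osd-release luminous keeps pre-luminous osds
  # from joining and is needed before luminous-only features can be enabled.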
  - exec:
      osd.0:
      - ceph osd require-osd-release luminous
  - ceph.healthy:
  - print: "**** done ceph.restart all -x branch mds/osd/mon"