meta:
- desc: |
    Run ceph on two nodes, using one of them as a client,
    with a separate client-only node.
    Use xfs beneath the osds.
    Install the ceph/jewel v10.2.0 point release,
    then run workload and upgrade-sequence in parallel.
    Upgrade to the latest ceph/jewel version,
    then run workload and upgrade-sequence in parallel.
    Upgrade to the ceph/-x version under test,
    then run workload and upgrade-sequence in parallel.
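# The overrides below whitelist log messages that are expected while daemons
# restart during the upgrade (and while the rados API tests run), so they are
# not counted as failures.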
overrides:
  ceph:
    log-whitelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    fs: xfs
    conf:
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
      osd:
        osd map max advance: 1000
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
tasks:
- print: "**** v10.2.0 about to install"
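# ceph-mgr and the other excluded packages are presumably not shipped with the
# jewel v10.2.0 release (or carry different names there), so they are skipped
# to keep the old-version install from failing.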
- install:
    tag: v10.2.0
    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v10.2.0 install"
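# The cluster is deployed on jewel, which predates ceph-mgr: no mgr daemon is
# started here (skip_mgr_daemons), and mgr.x is only bootstrapped later, in
# upgrade-sequence_x.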
- ceph:
    fs: xfs
    skip_mgr_daemons: true
    add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
  - workload
- print: "**** done workload v10.2.0"
- install.upgrade:
    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      branch: jewel
    mon.b:
      branch: jewel
    # Note that client.1 IS NOT upgraded at this point
    #client.1:
      #branch: jewel
- parallel:
  - workload_jewel
  - upgrade-sequence_jewel
- print: "**** done parallel jewel branch"
- install.upgrade:
    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    client.1:
      branch: jewel
- print: "**** done branch: jewel install.upgrade on client.1"
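# No branch or tag is given here, so install.upgrade installs the -x version
# under test on the mon.a and mon.b nodes (see the print below).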
- install.upgrade:
    mon.a:
    mon.b:
- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
- parallel:
  - workload_x
  - upgrade-sequence_x
- print: "**** done parallel -x branch"
# Run librados tests on the -x upgraded cluster
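# client.1 is first upgraded to the -x version (no branch given), then runs the
# jewel-branch upgrade test suite against the fully upgraded cluster.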
- install.upgrade:
    client.1:
- workunit:
    branch: jewel
    clients:
      client.1:
      - rados/test-upgrade-v11.0.0.sh
      - cls
- print: "**** done final test on -x cluster"
#######################
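# The sections below are not run directly; they are referenced by name from the
# sequential/parallel entries in the tasks list above.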
workload:
  sequential:
  - workunit:
      clients:
        client.0:
        - suites/blogbench.sh
workload_jewel:
  full_sequential:
  - workunit:
      branch: jewel
      clients:
        client.1:
        - rados/test.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
  - print: "**** done rados/test.sh & cls workload_jewel"
  - sequential:
    - rgw: [client.0]
    - print: "**** done rgw workload_jewel"
    - s3tests:
        client.0:
          force-branch: ceph-jewel
          rgw_server: client.0
          scan_for_encryption_keys: false
    - print: "**** done s3tests workload_jewel"
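# Rolling restart onto the latest jewel packages: mds.a first, then each OSD in
# turn with a pause in between, and finally the three monitors.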
upgrade-sequence_jewel:
  sequential:
  - print: "**** done branch: jewel install.upgrade"
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart: [osd.5]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - print: "**** done ceph.restart all jewel branch mds/osd/mon"
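# Runs while the cluster is upgraded to -x: client.1 (still on jewel packages,
# hence the "NOT upgraded client" print) and client.0 (on an upgraded node) both
# run the jewel-branch upgrade tests, followed by rgw/s3tests against client.1.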
workload_x:
  sequential:
  - workunit:
      branch: jewel
      clients:
        client.1:
        - rados/test-upgrade-v11.0.0.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
  - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x NOT upgraded client"
  - workunit:
      branch: jewel
      clients:
        client.0:
        - rados/test-upgrade-v11.0.0.sh
        - cls
  - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x upgraded client"
  - rgw: [client.1]
  - print: "**** done rgw workload_x"
  - s3tests:
      client.1:
        force-branch: ceph-jewel
        rgw_server: client.1
        scan_for_encryption_keys: false
  - print: "**** done s3tests workload_x"
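# Rolling restart onto the -x version: mds first, then the monitors, then the
# OSDs. The final OSD restart only waits for the OSDs to come back up,
# presumably because the cluster cannot report healthy until the mgr is
# bootstrapped and require-osd-release is set below.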
upgrade-sequence_x:
  sequential:
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart:
      daemons: [osd.5]
      wait-for-healthy: false
      wait-for-up-osds: true
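  # Bootstrap mgr.x by hand: the cluster was deployed on jewel with
  # skip_mgr_daemons, so the mgr data directory and keyring do not exist yet.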
  - exec:
      mgr.x:
      - mkdir -p /var/lib/ceph/mgr/ceph-x
      - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
      - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
  - ceph.restart:
      daemons: [mgr.x]
      wait-for-healthy: false
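  # Finish the upgrade: record that every OSD now runs luminous, raise the
  # minimum client compatibility level, then wait for the cluster to go healthy.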
  - exec:
      osd.0:
      - ceph osd require-osd-release luminous
      - ceph osd set-require-min-compat-client luminous
  - ceph.healthy:
  - print: "**** done ceph.restart all -x branch mds/osd/mon"