# Teuthology upgrade suite: luminous point-to-point upgrade test.
# (Recovered from a git-viewer export; original blame commit: b32b8144.)
# Human-readable description of what this suite exercises.
meta:
- desc: |
    Run ceph on two nodes, using one of them as a client,
    with a separate client-only node.
    Use xfs beneath the osds.
    install ceph/luminous v12.2.2 point version
    run workload and upgrade-sequence in parallel
    install ceph/luminous latest version
    run workload and upgrade-sequence in parallel
    install ceph/-x version (luminous or master/mimic)
    run workload and upgrade-sequence in parallel
# Cluster-wide config overrides applied to every ceph task in this suite.
overrides:
  ceph:
    # Log lines matching these patterns do not fail the run.
    log-whitelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    fs: xfs
    conf:
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
        mon warn on pool no app: false
      osd:
        osd map max advance: 1000
        osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
          replica_log rgw sdk statelog timeindex user version"
        osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
          replica_log rgw sdk statelog timeindex user version"
      client:
        rgw_crypt_require_ssl: false
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
# Daemon placement: two full nodes plus a separate client-only node.
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
# OpenStack provisioning hints for teuthology-openstack runs.
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
# Main task sequence: install v12.2.2, upgrade to luminous tip, then to -x,
# running the named workload/upgrade-sequence pairs (defined below) in parallel.
tasks:
- print: "**** v12.2.2 about to install"
- install:
    tag: v12.2.2
    # line below can be removed its from jewel test
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v12.2.2 install"
- ceph:
    fs: xfs
    add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
  - workload
- print: "**** done workload"
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      branch: luminous
    mon.b:
      branch: luminous
    # Note that client.a IS NOT upgraded at this point
- parallel:
  - workload_luminous
  - upgrade-sequence_luminous
- print: "**** done parallel luminous branch"
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    client.1:
      branch: luminous
- print: "**** done branch: luminous install.upgrade on client.1"
- install.upgrade:
    mon.a:
    mon.b:
- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
- parallel:
  - workload_x
  - upgrade-sequence_x
- print: "**** done parallel -x branch"
- exec:
    osd.0:
    - ceph osd set-require-min-compat-client luminous
# Run librados tests on the -x upgraded cluster
- install.upgrade:
    client.1:
- workunit:
    branch: luminous
    clients:
      client.1:
      - rados/test.sh
      - cls
- print: "**** done final test on -x cluster"
#######################
# Named task sequences referenced by the sequential/parallel tasks above.
workload:
  sequential:
  - workunit:
      clients:
        client.0:
        - suites/blogbench.sh
# Workload run in parallel with the luminous-branch upgrade sequence.
workload_luminous:
  full_sequential:
  - workunit:
      branch: luminous
      clients:
        client.1:
        - rados/test.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
  - print: "**** done rados/test.sh & cls workload_luminous"
  - sequential:
    - rgw: [client.0]
    - print: "**** done rgw workload_luminous"
    - s3tests:
        client.0:
          force-branch: ceph-luminous
          rgw_server: client.0
          scan_for_encryption_keys: false
    - print: "**** done s3tests workload_luminous"
# Rolling restart onto the luminous branch: mds first, then osds, then mons.
upgrade-sequence_luminous:
  sequential:
  - print: "**** done branch: luminous install.upgrade"
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart: [osd.5]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - print: "**** done ceph.restart all luminous branch mds/osd/mon"
# Workload run in parallel with the -x upgrade sequence; exercises both the
# not-yet-upgraded client (client.1) and the upgraded client (client.0).
workload_x:
  sequential:
  - workunit:
      branch: luminous
      clients:
        client.1:
        - rados/test.sh
        - cls
  - print: "**** done rados/test.sh & cls workload_x NOT upgraded client"
  - workunit:
      branch: luminous
      clients:
        client.0:
        - rados/test.sh
        - cls
  - print: "**** done rados/test.sh & cls workload_x upgraded client"
  - rgw: [client.1]
  - print: "**** done rgw workload_x"
  - s3tests:
      client.1:
        force-branch: ceph-luminous
        rgw_server: client.1
        scan_for_encryption_keys: false
  - print: "**** done s3tests workload_x"
# Rolling restart onto the -x branch: mds, mons, then osds; the last osd and
# the mgr skip the healthy wait, then require-osd-release is set and health
# is verified explicitly.
upgrade-sequence_x:
  sequential:
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart:
      daemons: [osd.5]
      wait-for-healthy: false
      wait-for-up-osds: true
  - ceph.restart:
      daemons: [mgr.x]
      wait-for-healthy: false
  - exec:
      osd.0:
      - ceph osd require-osd-release luminous
  - ceph.healthy:
  - print: "**** done ceph.restart all -x branch mds/osd/mon"