# Teuthology suite fragment: run a rados workload in parallel with a
# rolling jewel upgrade (OSDs first, then monitors), then require
# jewel-feature OSDs and wait for the cluster to report healthy.
tasks:
- parallel:
  - workload
  - upgrade-sequence
- print: "**** done parallel"

# Cache-tier style rados stress load against base-pool, driven by client.0.
workload:
  sequential:
  - rados:
      clients: [client.0]
      pools: [base-pool]
      ops: 4000
      objects: 500
      op_weights:
        read: 100
        write: 100
        delete: 50
        copy_from: 50
        flush: 50
        try_flush: 50
        evict: 50
  - print: "**** done rados"

# Upgrade osd.0 and osd.2 to the jewel branch, restart all OSDs and
# monitors without waiting for HEALTH_OK, then set the jewel OSD
# requirement and wait for health.
upgrade-sequence:
  sequential:
  - install.upgrade:
      exclude_packages:
      - ceph-mgr
      - libcephfs2
      - libcephfs-devel
      - libcephfs-dev
      osd.0:
        branch: jewel
      osd.2:
        branch: jewel
  - print: "*** done install.upgrade osd.0 and osd.2"
  - ceph.restart:
      daemons: [osd.0, osd.1, osd.2, osd.3]
      # Cluster may be transiently unhealthy mid-upgrade; only require
      # that the OSDs come back up.
      wait-for-healthy: false
      wait-for-osds-up: true
  - ceph.restart:
      daemons: [mon.a, mon.b, mon.c]
      wait-for-healthy: false
      wait-for-osds-up: true
  - print: "**** done ceph.restart do not wait for healthy"
  - exec:
      mon.a:
      - sleep 300 # http://tracker.ceph.com/issues/17808
      - ceph osd set require_jewel_osds
  - ceph.healthy:
  - print: "**** done ceph.healthy"