# ceph/qa/suites/upgrade/luminous-p2p/point-to-point-upgrade.yaml
meta:
- desc: |
    Run ceph on two nodes, using one of them as a client,
    with a separate client-only node.
    Use xfs beneath the osds.
    Install the ceph/luminous v12.2.2 point release and run an initial workload.
    Upgrade to the ceph/luminous v12.2.5 point release,
    then run workload and upgrade-sequence in parallel.
    Upgrade to the latest ceph/luminous branch,
    then run workload and upgrade-sequence in parallel.
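# The log-whitelist entries below keep expected, transient health warnings
# (degraded PGs, down OSDs, full pools, and similar) that appear while daemons
# restart during the upgrade from being counted as failures for the run.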
overrides:
  ceph:
    log-whitelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    - FS_DEGRADED
    - POOL_APP_NOT_ENABLED
    - CACHE_POOL_NO_HIT_SET
    - POOL_FULL
    - SMALLER_PG
    - pool\(s\) full
    - OSD_DOWN
    - missing hit_sets
    - CACHE_POOL_NEAR_FULL
    - PG_AVAILABILITY
    - PG_DEGRADED
    - application not enabled
    fs: xfs
    conf:
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
        mon warn on pool no app: false
      osd:
        osd map max advance: 1000
        osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
          replica_log rgw sdk statelog timeindex user version"
        osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
          replica_log rgw sdk statelog timeindex user version"
      client:
        rgw_crypt_require_ssl: false
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
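# Each sub-list under roles maps to one test node: the first carries mon.a,
# mds.a, osd.0-2 and mgr.x; the second carries mon.b, mon.c, osd.3-5 and
# client.0; the third is the client-only node running client.1.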
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
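# Tasks run in order. The bare names used by the sequential/parallel tasks
# below (workload, workload_luminous, upgrade-sequence_luminous) refer to the
# named blocks defined at the bottom of this file.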
tasks:
- print: "**** v12.2.2 about to install"
- install:
    tag: v12.2.2
    # the commented-out line below is left over from the jewel test and can be removed
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v12.2.2 install"
- ceph:
    fs: xfs
    add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
  - workload
- print: "**** done workload v12.2.2"

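# install.upgrade upgrades the packages on the nodes hosting the listed roles
# (here the nodes carrying mon.a and mon.b), so the client-only node with
# client.1 stays on the previously installed v12.2.2 packages.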
#### upgrade to v12.2.5
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      tag: v12.2.5
    mon.b:
      tag: v12.2.5
    # Note that client.1 IS NOT upgraded at this point
- parallel:
  - workload_luminous
  - upgrade-sequence_luminous
- print: "**** done parallel luminous v12.2.5"
#### upgrade to latest luminous
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      branch: luminous
    mon.b:
      branch: luminous
    # Note that client.1 IS NOT upgraded at this point
- parallel:
  - workload_luminous
  - upgrade-sequence_luminous
- print: "**** done parallel luminous branch"

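# Definitions referenced from the task list above: workload runs blogbench on
# client.0; workload_luminous pins a v12.2.2 workunit on the never-upgraded
# client.1 and also exercises rgw and s3tests via client.0.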
#######################
workload:
  sequential:
  - workunit:
      clients:
        client.0:
        - suites/blogbench.sh
workload_luminous:
  full_sequential:
  - workunit:
      tag: v12.2.2
      clients:
        client.1:
        - rados/test.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
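      # The gtest filter above means "all cls_rbd tests except
      # TestClsRbd.snapshots_namespaces" ('*:-<name>' excludes <name>),
      # presumably skipped here for cross-version compatibility.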
  - print: "**** done rados/test.sh & cls workload_luminous"
  - sequential:
    - rgw: [client.0]
    - print: "**** done rgw workload_luminous"
    - s3tests:
        client.0:
          force-branch: ceph-luminous
          rgw_server: client.0
          scan_for_encryption_keys: false
    - print: "**** done s3tests workload_luminous"
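# upgrade-sequence_luminous restarts the newly installed daemons one at a time
# (mds first, then each osd, then each mon), sleeping between restarts to give
# the cluster time to settle before the next daemon goes down.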
upgrade-sequence_luminous:
  sequential:
  - print: "**** done branch: luminous install.upgrade"
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart: [osd.5]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - print: "**** done ceph.restart all luminous branch mds/osd/mon"