meta:
- desc: |
   Run ceph on two nodes, using one of them as a client,
   with a separate client-only node.
   Use xfs beneath the osds.
   Install ceph/luminous v12.2.2 point version;
   run workload and upgrade-sequence in parallel.
   Install ceph/luminous v12.2.5 point version;
   run workload and upgrade-sequence in parallel.
   Install ceph/luminous v12.2.7 point version;
   run workload and upgrade-sequence in parallel.
   Install ceph/luminous v12.2.8 point version;
   run workload and upgrade-sequence in parallel.
   Install ceph/luminous latest version;
   run workload and upgrade-sequence in parallel.
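# "Point-to-point" here means the cluster is stepped through each published
# luminous point release in order (v12.2.2 -> v12.2.5 -> v12.2.7 -> v12.2.8
# -> tip of luminous) rather than jumping straight to the latest build.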
overrides:
  ceph:
    log-whitelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    - FS_DEGRADED
    - POOL_APP_NOT_ENABLED
    - CACHE_POOL_NO_HIT_SET
    - POOL_FULL
    - SMALLER_PG
    - pool\(s\) full
    - OSD_DOWN
    - missing hit_sets
    - CACHE_POOL_NEAR_FULL
    - PG_AVAILABILITY
    - PG_DEGRADED
    - application not enabled
    - overall HEALTH_
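    # These whitelist entries cover warnings and errors that are expected
    # while daemons restart mid-upgrade (degraded PGs, down OSDs, transient
    # HEALTH_WARN states); anything not matched here would still fail the run.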
    fs: xfs
    conf:
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
        mon warn on pool no app: false
      osd:
        osd map max advance: 1000
        osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
                              replica_log rgw sdk statelog timeindex user version"
        osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
                                 replica_log rgw sdk statelog timeindex user version"
      client:
        rgw_crypt_require_ssl: false
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
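        # The testkey-* values are not real secrets: each decodes from base64
        # to a harmless scrap of text. They serve as canned KMS keys for the
        # s3tests encryption cases below, which is presumably also why
        # scan_for_encryption_keys is disabled there.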
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
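# Three 30 GB volumes per instance, presumably one per OSD on each of the
# two OSD-bearing nodes above.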
tasks:
- print: "**** v12.2.2 about to install"
- install:
    tag: v12.2.2
    # the line below can be removed; it is left over from the jewel test
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v12.2.2 install"
- ceph:
    fs: xfs
    add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
  - workload
- print: "**** done workload v12.2.2"
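# The workload, workload_luminous, and upgrade-sequence_luminous names used
# by the sequential/parallel tasks in this file are defined as top-level
# blocks at the bottom.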

#### upgrade to v12.2.5
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      tag: v12.2.5
    mon.b:
      tag: v12.2.5
    # Note that client.1 IS NOT upgraded at this point
- parallel:
  - workload_luminous
  - upgrade-sequence_luminous
- print: "**** done parallel luminous v12.2.5"
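# install.upgrade replaces the packages on every node hosting one of the
# listed roles, so the client.0 node is upgraded along with mon.b while the
# client.1 node keeps its v12.2.2 packages; restarting daemons onto the new
# binaries is left to upgrade-sequence_luminous.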

#### upgrade to v12.2.7
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      tag: v12.2.7
    mon.b:
      tag: v12.2.7
    # Note that client.1 IS NOT upgraded at this point
- parallel:
  - workload_luminous
  - upgrade-sequence_luminous
- print: "**** done parallel luminous v12.2.7"

#### upgrade to v12.2.8
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      tag: v12.2.8
    mon.b:
      tag: v12.2.8
    # Note that client.1 IS NOT upgraded at this point
- parallel:
  - workload_luminous
  - upgrade-sequence_luminous
- print: "**** done parallel luminous v12.2.8"

#### upgrade to latest luminous
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
    mon.b:
    # Note that client.1 IS NOT upgraded at this point
- parallel:
  - workload_luminous
  - upgrade-sequence_luminous
- print: "**** done parallel luminous branch"
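# No tag is given above, so install.upgrade falls back to the default for
# the run, i.e. the latest luminous build under test.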

#######################
workload:
  sequential:
  - workunit:
      clients:
        client.0:
        - suites/blogbench.sh
workload_luminous:
  full_sequential:
  - workunit:
      tag: v12.2.2
      clients:
        client.1:
        - rados/test.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
  - print: "**** done rados/test.sh & cls workload_luminous"
  - sequential:
    - rgw: [client.0]
    - print: "**** done rgw workload_luminous"
    - s3tests:
        client.0:
          force-branch: ceph-luminous
          rgw_server: client.0
          scan_for_encryption_keys: false
    - print: "**** done s3tests workload_luminous"
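# workload_luminous pins the workunit to tag v12.2.2 so the same old-client
# tests run against every intermediate version; the CLS_RBD_GTEST_FILTER
# above skips TestClsRbd.snapshots_namespaces, presumably because that case
# cannot pass against the mixed versions in this run. s3tests is likewise
# pinned to the ceph-luminous branch to match the RGW under test.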
upgrade-sequence_luminous:
  sequential:
  - print: "**** done branch: luminous install.upgrade"
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart: [osd.5]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - print: "**** done ceph.restart all luminous branch mds/osd/mon"
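# Daemons are restarted one at a time (mds first, then each osd, then the
# mons last), with sleeps in between so the cluster can settle before the
# next restart.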