meta:
- desc: |
   Run ceph on two nodes, using one of them as a client,
   with a separate client-only node.
   Use xfs beneath the osds.
   Install ceph/quincy v17.2.0, then upgrade through the v17.2.x point
   releases (currently v17.2.5; every point release should be tested),
   running workload and upgrade-sequence in parallel for each upgrade.
   Finally upgrade to the latest quincy build and run workload and
   upgrade-sequence in parallel once more.
   Overall upgrade path: quincy-latest.point-1 => quincy-latest.point => quincy-latest
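# Health warnings and log messages listed under log-ignorelist below are
# expected at points during the upgrade and should not fail the run.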
overrides:
  ceph:
    log-ignorelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    - FS_DEGRADED
    - POOL_APP_NOT_ENABLED
    - CACHE_POOL_NO_HIT_SET
    - POOL_FULL
    - SMALLER_PG
    - pool\(s\) full
    - OSD_DOWN
    - missing hit_sets
    - CACHE_POOL_NEAR_FULL
    - PG_AVAILABILITY
    - PG_DEGRADED
    - application not enabled
    - cache pools at or near target size
    - filesystem is degraded
    - OBJECT_MISPLACED
    ### ref: https://tracker.ceph.com/issues/40251
    #removed see ^ - failed to encode map

    fs: xfs

    conf:
      global:
        mon_warn_on_pool_no_app: false
        mon_mds_skip_sanity: true
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
      osd:
        osd map max advance: 1000
        osd_class_default_list: "*"
        osd_class_load_list: "*"
      client:
        rgw_crypt_require_ssl: false
        rgw crypt s3 kms backend: testing
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
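# Node layout: the first node carries mon.a, mds.a, osd.0-2 and mgr.x, the
# second carries mon.b, mon.c, osd.3-5 and client.0, and the third is the
# separate client-only node (client.1).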
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
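# Task flow: install v17.2.0 and bring up the cluster, run the blogbench
# workload, upgrade to v17.2.5 while running workload_quincy and
# upgrade-sequence_quincy in parallel, then repeat the parallel step while
# upgrading to the latest quincy build.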
tasks:
- print: "**** done quincy about to install v17.2.0 "
- install:
    tag: v17.2.0
    # the line below can be removed; it is left over from the jewel test
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v17.2.0 install"
- ceph:
    fs: xfs
    add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
  - workload
- print: "**** done workload v17.2.0"


####### upgrade to v17.2.5
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      tag: v17.2.5
    mon.b:
      tag: v17.2.5
- parallel:
  - workload_quincy
  - upgrade-sequence_quincy
- print: "**** done parallel quincy v17.2.5"

#### upgrade to latest quincy
- install.upgrade:
    mon.a:
    mon.b:
- parallel:
  - workload_quincy
  - upgrade-sequence_quincy
- print: "**** done parallel quincy branch"

#######################
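# The top-level entries below are not run directly; they are referenced by
# name from the sequential/parallel steps in the task list above.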
workload:
  sequential:
  - workunit:
      clients:
        client.0:
        - suites/blogbench.sh

workload_quincy:
  full_sequential:
  - workunit:
      branch: quincy
      # tag: v17.2.5
      clients:
        client.1:
        - rados/test.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
  - print: "**** done rados/test.sh & cls workload_quincy"
  - sequential:
    - rgw: [client.0]
    - print: "**** done rgw workload_quincy"
    - s3tests:
        client.0:
          force-branch: ceph-quincy
          rgw_server: client.0
          scan_for_encryption_keys: false
    - print: "**** done s3tests workload_quincy"
    - rbd_fsx:
        clients: [client.0]
        size: 134217728
    - print: "**** done rbd_fsx workload_quincy"

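# Restart order: mds.a first, then osd.0 through osd.5 one at a time with
# short pauses, then mgr.x, and finally mon.a, mon.b and mon.c with
# 60-second pauses between restarts.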
upgrade-sequence_quincy:
  sequential:
  - print: "**** done branch: quincy install.upgrade"
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart: [osd.5]
  - sleep:
      duration: 60
  - ceph.restart: [mgr.x]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - print: "**** done ceph.restart all quincy branch mds/osd/mon"