# Point-to-point upgrade suite for ceph/nautilus.
#
# Run ceph on two nodes, using one of them as a client, with a separate
# client-only node. Install the v14.2.2 point release first, run a workload,
# then upgrade to the latest nautilus branch while running workload and
# upgrade-sequence in parallel.
meta:
- desc: |
   Run ceph on two nodes, using one of them as a client,
   with a separate client-only node.
   Use xfs beneath the osds.
   install ceph/nautilus v14.2.2 point version
   run workload and upgrade-sequence in parallel
   (every point reslease should be tested)
   run workload and upgrade-sequence in parallel
   install ceph/nautilus latest version
   run workload and upgrade-sequence in parallel
overrides:
  ceph:
    # Health warnings and log messages that are expected during the
    # upgrade and must not fail the run.
    log-whitelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    - FS_DEGRADED
    - POOL_APP_NOT_ENABLED
    - CACHE_POOL_NO_HIT_SET
    - POOL_FULL
    - SMALLER_PG
    - pool\(s\) full
    - OSD_DOWN
    - missing hit_sets
    - CACHE_POOL_NEAR_FULL
    - PG_AVAILABILITY
    - PG_DEGRADED
    - application not enabled
    - cache pools at or near target size
    - filesystem is degraded
    - OBJECT_MISPLACED
    ### ref: https://tracker.ceph.com/issues/40251
    #removed see ^ - failed to encode map

    fs: xfs

    conf:
      global:
        mon_warn_on_pool_no_app: false
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
      osd:
        osd map max advance: 1000
        osd_class_default_list: "*"
        osd_class_load_list: "*"
      client:
        rgw_crypt_require_ssl: false
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
tasks:
# v14.2.0 removed per http://tracker.ceph.com/issues/40251
- print: "**** done nautilus v14.2.2 about to install"
- install:
    tag: v14.2.2
    # line below can be removed its from jewel test
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v14.2.2 install"
- ceph:
    fs: xfs
    add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
  - workload
- print: "**** done workload v14.2.2"

# v14.2.1 removed per http://tracker.ceph.com/issues/40251
# v14.2.2

####### upgrade to v14.2.?? PLACEHOLDER
#- install.upgrade:
#    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
#    mon.a:
#      tag: v14.2.??
#    mon.b:
#      tag: v14.2.??
#    # Note that client.a IS NOT upgraded at this point
#- parallel:
#  - workload_nautilus
#  - upgrade-sequence_nautilus
#- print: "**** done parallel nautilus v14.2.??"

#### upgrade to latest nautilus
- install.upgrade:
    mon.a:
    mon.b:
- parallel:
  - workload_nautilus
  - upgrade-sequence_nautilus
- print: "**** done parallel nautilus branch"

#######################
# Sub-task definitions referenced by name from the tasks list above.
workload:
  sequential:
  - workunit:
      clients:
        client.0:
        - suites/blogbench.sh
workload_nautilus:
  full_sequential:
  - workunit:
      branch: nautilus
      #tag: v14.2.0
      clients:
        client.1:
        - rados/test.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
  - print: "**** done rados/test.sh & cls workload_nautilus"
  - sequential:
    - rgw: [client.0]
    - print: "**** done rgw workload_nautilus"
    - s3tests:
        client.0:
          force-branch: ceph-nautilus
          rgw_server: client.0
          scan_for_encryption_keys: false
    - print: "**** done s3tests workload_nautilus"
    - rbd_fsx:
        clients: [client.0]
        size: 134217728
    - print: "**** done rbd_fsx workload_nautilus"

# Restart daemons one at a time (mds, then osds, then mgr, then mons),
# sleeping between restarts so the cluster can settle.
upgrade-sequence_nautilus:
  sequential:
  - print: "**** done branch: nautilus install.upgrade"
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart: [osd.5]
  - sleep:
      duration: 60
  - ceph.restart: [mgr.x]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - print: "**** done ceph.restart all nautilus branch mds/osd/mon"