1 | """ |
2 | Ceph cluster task. | |
3 | ||
4 | Handle the setup, starting, and clean-up of a Ceph cluster. | |
5 | """ | |
from copy import deepcopy
from io import BytesIO
from io import StringIO

import argparse
import configobj
import contextlib
import errno
import logging
import os
import json
import time
import gevent
import re
import socket
import yaml

from paramiko import SSHException
from tasks.ceph_manager import CephManager, write_conf, get_valgrind_args
from tarfile import ReadError
from tasks.cephfs.filesystem import MDSCluster, Filesystem
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology import exceptions
from teuthology.orchestra import run
from tasks import ceph_client as cclient
from teuthology.orchestra.daemon import DaemonGroup
from tasks.daemonwatchdog import DaemonWatchdog

CEPH_ROLE_TYPES = ['mon', 'mgr', 'osd', 'mds', 'rgw']
DATA_PATH = '/var/lib/ceph/{type_}/{cluster}-{id_}'

log = logging.getLogger(__name__)


def generate_caps(type_):
    """
    Each call will return the next capability for each system type
    (essentially a subset of possible role values).  Valid types are osd,
    mgr, mds and client.
    """
    defaults = dict(
        osd=dict(
            mon='allow profile osd',
            mgr='allow profile osd',
            osd='allow *',
        ),
        mgr=dict(
            mon='allow profile mgr',
            osd='allow *',
            mds='allow *',
        ),
        mds=dict(
            mon='allow *',
            mgr='allow *',
            osd='allow *',
            mds='allow',
        ),
        client=dict(
            mon='allow rw',
            mgr='allow r',
            osd='allow rwx',
            mds='allow',
        ),
    )
    for subsystem, capability in defaults[type_].items():
        yield '--cap'
        yield subsystem
        yield capability


def update_archive_setting(ctx, key, value):
    """
    Record an archive location (e.g. a log directory) in the job's
    info.yaml file.
    """
    if ctx.archive is None:
        return
    with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
        info_yaml = yaml.safe_load(info_file)
        info_file.seek(0)
        if 'archive' in info_yaml:
            info_yaml['archive'][key] = value
        else:
            info_yaml['archive'] = {key: value}
        yaml.safe_dump(info_yaml, info_file, default_flow_style=False)


@contextlib.contextmanager
def ceph_crash(ctx, config):
    """
    Gather crash dumps from /var/lib/ceph/crash
    """

    # Add crash directory to job's archive
    update_archive_setting(ctx, 'crash', '/var/lib/ceph/crash')

    try:
        yield

    finally:
        if ctx.archive is not None:
            log.info('Archiving crash dumps...')
            path = os.path.join(ctx.archive, 'remote')
            try:
                os.makedirs(path)
            except OSError:
                pass
            for remote in ctx.cluster.remotes.keys():
                sub = os.path.join(path, remote.shortname)
                try:
                    os.makedirs(sub)
                except OSError:
                    pass
                try:
                    teuthology.pull_directory(remote, '/var/lib/ceph/crash',
                                              os.path.join(sub, 'crash'))
                except ReadError:
                    pass


@contextlib.contextmanager
def ceph_log(ctx, config):
    """
    Create /var/log/ceph log directory that is open to everyone.
    Add valgrind and profiling-logger directories.

    :param ctx: Context
    :param config: Configuration
    """
    log.info('Making ceph log dir writeable by non-root...')
    run.wait(
        ctx.cluster.run(
            args=[
                'sudo',
                'chmod',
                '777',
                '/var/log/ceph',
            ],
            wait=False,
        )
    )
    log.info('Disabling ceph logrotate...')
    run.wait(
        ctx.cluster.run(
            args=[
                'sudo',
                'rm', '-f', '--',
                '/etc/logrotate.d/ceph',
            ],
            wait=False,
        )
    )
    log.info('Creating extra log directories...')
    run.wait(
        ctx.cluster.run(
            args=[
                'sudo',
                'install', '-d', '-m0777', '--',
                '/var/log/ceph/valgrind',
                '/var/log/ceph/profiling-logger',
            ],
            wait=False,
        )
    )

    # Add logs directory to job's info log file
    update_archive_setting(ctx, 'log', '/var/log/ceph')

    class Rotater(object):
        stop_event = gevent.event.Event()

        def invoke_logrotate(self):
            # 1) install ceph-test.conf in /etc/logrotate.d
            # 2) continuously loop over logrotate invocation with ceph-test.conf
            while not self.stop_event.is_set():
                self.stop_event.wait(timeout=30)
                try:
                    procs = ctx.cluster.run(
                        args=['sudo', 'logrotate', '/etc/logrotate.d/ceph-test.conf'],
                        wait=False,
                        stderr=StringIO()
                    )
                    run.wait(procs)
                except exceptions.ConnectionLostError as e:
                    # Some tests may power off nodes during test, in which
                    # case we will see connection errors that we should ignore.
                    log.debug("Missed logrotate, node '{0}' is offline".format(
                        e.node))
                except EOFError:
                    # Paramiko sometimes raises this when it fails to
                    # connect to a node during open_session.  As with
                    # ConnectionLostError, we ignore this because nodes
                    # are allowed to get power cycled during tests.
                    log.debug("Missed logrotate, EOFError")
                except SSHException:
                    log.debug("Missed logrotate, SSHException")
                except run.CommandFailedError as e:
                    for p in procs:
                        if p.finished and p.exitstatus != 0:
                            err = p.stderr.getvalue()
                            if 'error: error renaming temp state file' in err:
                                log.info('ignoring transient state error: %s', e)
                            else:
                                raise
                except socket.error as e:
                    if e.errno in (errno.EHOSTUNREACH, errno.ECONNRESET):
                        log.debug("Missed logrotate, host unreachable")
                    else:
                        raise

        def begin(self):
            self.thread = gevent.spawn(self.invoke_logrotate)

        def end(self):
            self.stop_event.set()
            self.thread.get()

    def write_rotate_conf(ctx, daemons):
        testdir = teuthology.get_testdir(ctx)
        remote_logrotate_conf = '%s/logrotate.ceph-test.conf' % testdir
        rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
        with open(rotate_conf_path) as f:
            conf = ""
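            # Each pass through the loop below re-reads the logrotate
            # template (rewinding it with f.seek) and appends one stanza,
            # formatted with the daemon type and its maximum log size.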
            for daemon, size in daemons.items():
                log.info('writing logrotate stanza for {}'.format(daemon))
                conf += f.read().format(daemon_type=daemon,
                                        max_size=size)
                f.seek(0, 0)

        for remote in ctx.cluster.remotes.keys():
            remote.write_file(remote_logrotate_conf, BytesIO(conf.encode()))
            remote.sh(
                f'sudo mv {remote_logrotate_conf} /etc/logrotate.d/ceph-test.conf && '
                'sudo chmod 0644 /etc/logrotate.d/ceph-test.conf && '
                'sudo chown root.root /etc/logrotate.d/ceph-test.conf')
            remote.chcon('/etc/logrotate.d/ceph-test.conf',
                         'system_u:object_r:etc_t:s0')

    if ctx.config.get('log-rotate'):
        daemons = ctx.config.get('log-rotate')
        log.info('Setting up log rotation with ' + str(daemons))
        write_rotate_conf(ctx, daemons)
        logrotater = Rotater()
        logrotater.begin()
    try:
        yield

    finally:
        if ctx.config.get('log-rotate'):
            log.info('Shutting down logrotate')
            logrotater.end()
            ctx.cluster.sh('sudo rm /etc/logrotate.d/ceph-test.conf')
        if ctx.archive is not None and \
                not (ctx.config.get('archive-on-error') and ctx.summary['success']):
            # and logs
            log.info('Compressing logs...')
            run.wait(
                ctx.cluster.run(
                    args=[
                        'sudo',
                        'find',
                        '/var/log/ceph',
                        '-name',
                        '*.log',
                        '-print0',
                        run.Raw('|'),
                        'sudo',
                        'xargs',
                        '-0',
                        '--no-run-if-empty',
                        '--',
                        'gzip',
                        '--',
                    ],
                    wait=False,
                ),
            )

            log.info('Archiving logs...')
            path = os.path.join(ctx.archive, 'remote')
            try:
                os.makedirs(path)
            except OSError:
                pass
            for remote in ctx.cluster.remotes.keys():
                sub = os.path.join(path, remote.shortname)
                try:
                    os.makedirs(sub)
                except OSError:
                    pass
                teuthology.pull_directory(remote, '/var/log/ceph',
                                          os.path.join(sub, 'log'))


def assign_devs(roles, devs):
    """
    Create a dictionary of devs indexed by roles

    :param roles: List of roles
    :param devs: Corresponding list of devices.
    :returns: Dictionary of devs indexed by roles.
    """
    return dict(zip(roles, devs))


@contextlib.contextmanager
def valgrind_post(ctx, config):
    """
    After the tests run, look through all the valgrind logs.  Exceptions are raised
    if textual errors occurred in the logs, or if valgrind exceptions were detected in
    the logs.

    :param ctx: Context
    :param config: Configuration
    """
    try:
        yield
    finally:
        lookup_procs = list()
        log.info('Checking for errors in any valgrind logs...')
        for remote in ctx.cluster.remotes.keys():
            # look at valgrind logs for each node
            proc = remote.run(
                args="sudo zgrep '<kind>' /var/log/ceph/valgrind/* "
                     # include a second file so that we always get
                     # a filename prefix on the output
                     "/dev/null | sort | uniq",
                wait=False,
                check_status=False,
                stdout=StringIO(),
            )
            lookup_procs.append((proc, remote))

        valgrind_exception = None
        for (proc, remote) in lookup_procs:
            proc.wait()
            out = proc.stdout.getvalue()
            for line in out.split('\n'):
                if line == '':
                    continue
                try:
                    (file, kind) = line.split(':')
                except Exception:
                    log.error('failed to split line %s', line)
                    raise
                log.debug('file %s kind %s', file, kind)
                if (file.find('mds') >= 0) and kind.find('Lost') > 0:
                    continue
                log.error('saw valgrind issue %s in %s', kind, file)
                valgrind_exception = Exception('saw valgrind issues')

        if config.get('expect_valgrind_errors'):
            if not valgrind_exception:
                raise Exception('expected valgrind issues and found none')
        else:
            if valgrind_exception:
                raise valgrind_exception


@contextlib.contextmanager
def crush_setup(ctx, config):
    cluster_name = config['cluster']
    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()

    profile = config.get('crush_tunables', 'default')
    log.info('Setting crush tunables to %s', profile)
    mon_remote.run(
        args=['sudo', 'ceph', '--cluster', cluster_name,
              'osd', 'crush', 'tunables', profile])
    yield


@contextlib.contextmanager
def setup_manager(ctx, config):
    first_mon = teuthology.get_first_mon(ctx, config, config['cluster'])
    (mon,) = ctx.cluster.only(first_mon).remotes.keys()
    if not hasattr(ctx, 'managers'):
        ctx.managers = {}
    ctx.managers[config['cluster']] = CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager.' + config['cluster']),
        cluster=config['cluster'],
    )
    yield

@contextlib.contextmanager
def create_rbd_pool(ctx, config):
    cluster_name = config['cluster']
    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
    log.info('Waiting for OSDs to come up')
    teuthology.wait_until_osds_up(
        ctx,
        cluster=ctx.cluster,
        remote=mon_remote,
        ceph_cluster=cluster_name,
    )
    if config.get('create_rbd_pool', True):
        log.info('Creating RBD pool')
        mon_remote.run(
            args=['sudo', 'ceph', '--cluster', cluster_name,
                  'osd', 'pool', 'create', 'rbd', '8'])
        mon_remote.run(
            args=[
                'sudo', 'ceph', '--cluster', cluster_name,
                'osd', 'pool', 'application', 'enable',
                'rbd', 'rbd', '--yes-i-really-mean-it'
            ],
            check_status=False)
    yield

@contextlib.contextmanager
def cephfs_setup(ctx, config):
    cluster_name = config['cluster']

    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
    mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
    # If there are any MDSs, then create a filesystem for them to use
    # Do this last because requires mon cluster to be up and running
    if mdss.remotes:
        log.info('Setting up CephFS filesystem(s)...')
        cephfs_config = config.get('cephfs', {})
        fs_configs = cephfs_config.pop('fs', [{'name': 'cephfs'}])
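        # Each entry in the 'fs' list is overlaid on the shared cephfs
        # settings below (deepcopy + deep_merge), so per-filesystem
        # options win over the shared ones.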

        # wait for standbys to become available (slow due to valgrind, perhaps)
        mdsc = MDSCluster(ctx)
        mds_count = len(list(teuthology.all_roles_of_type(ctx.cluster, 'mds')))
        with contextutil.safe_while(sleep=2, tries=150) as proceed:
            while proceed():
                if len(mdsc.get_standby_daemons()) >= mds_count:
                    break

        fss = []
        for fs_config in fs_configs:
            assert isinstance(fs_config, dict)
            name = fs_config.pop('name')
            temp = deepcopy(cephfs_config)
            teuthology.deep_merge(temp, fs_config)
            fs = Filesystem(ctx, fs_config=temp, name=name, create=True)
            fss.append(fs)

        yield

        for fs in fss:
            fs.destroy()
    else:
        yield

@contextlib.contextmanager
def watchdog_setup(ctx, config):
    ctx.ceph[config['cluster']].thrashers = []
    ctx.ceph[config['cluster']].watchdog = DaemonWatchdog(ctx, config, ctx.ceph[config['cluster']].thrashers)
    ctx.ceph[config['cluster']].watchdog.start()
    yield

def get_mons(roles, ips, cluster_name,
             mon_bind_msgr2=False,
             mon_bind_addrvec=False):
    """
    Get monitors and their associated addresses
    """
    mons = {}
    v1_ports = {}
    v2_ports = {}
    is_mon = teuthology.is_type('mon', cluster_name)
    for idx, roles in enumerate(roles):
        for role in roles:
            if not is_mon(role):
                continue
            if ips[idx] not in v1_ports:
                v1_ports[ips[idx]] = 6789
            else:
                v1_ports[ips[idx]] += 1
            if mon_bind_msgr2:
                if ips[idx] not in v2_ports:
                    v2_ports[ips[idx]] = 3300
                    addr = '{ip}'.format(ip=ips[idx])
                else:
                    assert mon_bind_addrvec
                    v2_ports[ips[idx]] += 1
                    addr = '[v2:{ip}:{port2},v1:{ip}:{port1}]'.format(
                        ip=ips[idx],
                        port2=v2_ports[ips[idx]],
                        port1=v1_ports[ips[idx]],
                    )
            elif mon_bind_addrvec:
                addr = '[v1:{ip}:{port}]'.format(
                    ip=ips[idx],
                    port=v1_ports[ips[idx]],
                )
            else:
                addr = '{ip}:{port}'.format(
                    ip=ips[idx],
                    port=v1_ports[ips[idx]],
                )
            mons[role] = addr
    assert mons
    return mons

def skeleton_config(ctx, roles, ips, mons, cluster='ceph'):
    """
    Returns a ConfigObj that is prefilled with a skeleton config.

    Use conf[section][key]=value or conf.merge to change it.

    Use conf.write to write it out, override .filename first if you want.
    """
    path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template')
    conf = configobj.ConfigObj(path, file_error=True)
    mon_hosts = []
    for role, addr in mons.items():
        mon_cluster, _, _ = teuthology.split_role(role)
        if mon_cluster != cluster:
            continue
        name = teuthology.ceph_role(role)
        conf.setdefault(name, {})
        mon_hosts.append(addr)
    conf.setdefault('global', {})
    conf['global']['mon host'] = ','.join(mon_hosts)
    # set up standby mds's
    is_mds = teuthology.is_type('mds', cluster)
    for roles_subset in roles:
        for role in roles_subset:
            if is_mds(role):
                name = teuthology.ceph_role(role)
                conf.setdefault(name, {})
    return conf

def create_simple_monmap(ctx, remote, conf, mons,
                         path=None,
                         mon_bind_addrvec=False):
    """
    Writes a simple monmap based on current ceph.conf into path, or
    <testdir>/monmap by default.

    Assumes ceph_conf is up to date.

    Assumes mon sections are named "mon.*", with the dot.

    :return the FSID (as a string) of the newly created monmap
    """

    addresses = list(mons.items())
    assert addresses, "There are no monitors in config!"
    log.debug('Ceph mon addresses: %s', addresses)

    try:
        log.debug('writing out conf {c}'.format(c=conf))
    except Exception:
        log.debug('my conf logging attempt failed')
    testdir = teuthology.get_testdir(ctx)
    tmp_conf_path = '{tdir}/ceph.tmp.conf'.format(tdir=testdir)
    conf_fp = BytesIO()
    conf.write(conf_fp)
    conf_fp.seek(0)
    teuthology.write_file(remote, tmp_conf_path, conf_fp)
    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'monmaptool',
        '-c',
        '{conf}'.format(conf=tmp_conf_path),
        '--create',
        '--clobber',
    ]
    if mon_bind_addrvec:
        args.extend(['--enable-all-features'])
    for (role, addr) in addresses:
        _, _, n = teuthology.split_role(role)
        if mon_bind_addrvec and (',' in addr or 'v' in addr or ':' in addr):
            args.extend(('--addv', n, addr))
        else:
            args.extend(('--add', n, addr))
    if not path:
        path = '{tdir}/monmap'.format(tdir=testdir)
    args.extend([
        '--print',
        path
    ])

    monmap_output = remote.sh(args)
    fsid = re.search("generated fsid (.+)$",
                     monmap_output, re.MULTILINE).group(1)
    teuthology.delete_file(remote, tmp_conf_path)
    return fsid


def maybe_redirect_stderr(config, type_, args, log_path):
    if type_ == 'osd' and \
            config.get('flavor', 'default') == 'crimson':
        # teuthworker uses ubuntu:ubuntu to access the test nodes
        create_log_cmd = \
            f'sudo install -b -o ubuntu -g ubuntu /dev/null {log_path}'
        return create_log_cmd, args + [run.Raw('2>>'), log_path]
    else:
        return None, args


7c673cae FG |
608 | @contextlib.contextmanager |
609 | def cluster(ctx, config): | |
610 | """ | |
611 | Handle the creation and removal of a ceph cluster. | |
612 | ||
613 | On startup: | |
614 | Create directories needed for the cluster. | |
615 | Create remote journals for all osds. | |
616 | Create and set keyring. | |
11fdf7f2 | 617 | Copy the monmap to the test systems. |
7c673cae FG |
618 | Setup mon nodes. |
619 | Setup mds nodes. | |
620 | Mkfs osd nodes. | |
621 | Add keyring information to monmaps | |
622 | Mkfs mon nodes. | |
623 | ||
624 | On exit: | |
11fdf7f2 | 625 | If errors occurred, extract a failure message and store in ctx.summary. |
7c673cae FG |
626 | Unmount all test files and temporary journaling files. |
627 | Save the monitor information and archive all ceph logs. | |
628 | Cleanup the keyring setup, and remove all monitor map and data files left over. | |
629 | ||
630 | :param ctx: Context | |
631 | :param config: Configuration | |
632 | """ | |
633 | if ctx.config.get('use_existing_cluster', False) is True: | |
634 | log.info("'use_existing_cluster' is true; skipping cluster creation") | |
635 | yield | |

    testdir = teuthology.get_testdir(ctx)
    cluster_name = config['cluster']
    data_dir = '{tdir}/{cluster}.data'.format(tdir=testdir, cluster=cluster_name)
    log.info('Creating ceph cluster %s...', cluster_name)
    log.info('config %s', config)
    log.info('ctx.config %s', ctx.config)
    run.wait(
        ctx.cluster.run(
            args=[
                'install', '-d', '-m0755', '--',
                data_dir,
            ],
            wait=False,
        )
    )

    run.wait(
        ctx.cluster.run(
            args=[
                'sudo',
                'install', '-d', '-m0777', '--', '/var/run/ceph',
            ],
            wait=False,
        )
    )

    devs_to_clean = {}
    remote_to_roles_to_devs = {}
    osds = ctx.cluster.only(teuthology.is_type('osd', cluster_name))
    for remote, roles_for_host in osds.remotes.items():
        devs = teuthology.get_scratch_devices(remote)
        roles_to_devs = assign_devs(
            teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name), devs
        )
        devs_to_clean[remote] = []
        log.info('osd dev map: {}'.format(roles_to_devs))
        assert roles_to_devs, \
            "remote {} has osd roles, but no osd devices were specified!".format(remote.hostname)
        remote_to_roles_to_devs[remote] = roles_to_devs
        log.info("remote_to_roles_to_devs: {}".format(remote_to_roles_to_devs))
    for osd_role, dev_name in remote_to_roles_to_devs.items():
        assert dev_name, "{} has no associated device!".format(osd_role)

    log.info('Generating config...')
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [role_list for (remote, role_list) in remotes_and_roles]
    ips = [host for (host, port) in
           (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
    mons = get_mons(
        roles, ips, cluster_name,
        mon_bind_msgr2=config.get('mon_bind_msgr2'),
        mon_bind_addrvec=config.get('mon_bind_addrvec'),
    )
    conf = skeleton_config(
        ctx, roles=roles, ips=ips, mons=mons, cluster=cluster_name,
    )
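    # 'conf' in the task config maps ini-style sections to key/value
    # overrides, e.g. {'global': {'osd pool default size': '2'}}; each
    # entry below is layered on top of the skeleton ceph.conf.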
    for section, keys in config['conf'].items():
        for key, value in keys.items():
            log.info("[%s] %s = %s" % (section, key, value))
            if section not in conf:
                conf[section] = {}
            conf[section][key] = value

    if not hasattr(ctx, 'ceph'):
        ctx.ceph = {}
    ctx.ceph[cluster_name] = argparse.Namespace()
    ctx.ceph[cluster_name].conf = conf
    ctx.ceph[cluster_name].mons = mons

    default_keyring = '/etc/ceph/{cluster}.keyring'.format(cluster=cluster_name)
    keyring_path = config.get('keyring_path', default_keyring)

    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    firstmon = teuthology.get_first_mon(ctx, config, cluster_name)

    log.info('Setting up %s...' % firstmon)
    ctx.cluster.only(firstmon).run(
        args=[
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            coverage_dir,
            'ceph-authtool',
            '--create-keyring',
            keyring_path,
        ],
    )
    ctx.cluster.only(firstmon).run(
        args=[
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            coverage_dir,
            'ceph-authtool',
            '--gen-key',
            '--name=mon.',
            keyring_path,
        ],
    )
    ctx.cluster.only(firstmon).run(
        args=[
            'sudo',
            'chmod',
            '0644',
            keyring_path,
        ],
    )
    (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
    monmap_path = '{tdir}/{cluster}.monmap'.format(tdir=testdir,
                                                   cluster=cluster_name)
    fsid = create_simple_monmap(
        ctx,
        remote=mon0_remote,
        conf=conf,
        mons=mons,
        path=monmap_path,
        mon_bind_addrvec=config.get('mon_bind_addrvec'),
    )
    ctx.ceph[cluster_name].fsid = fsid
    if 'global' not in conf:
        conf['global'] = {}
    conf['global']['fsid'] = fsid

    default_conf_path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster_name)
    conf_path = config.get('conf_path', default_conf_path)
    log.info('Writing %s for FSID %s...' % (conf_path, fsid))
    write_conf(ctx, conf_path, cluster_name)

    log.info('Creating admin key on %s...' % firstmon)
    ctx.cluster.only(firstmon).run(
        args=[
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            coverage_dir,
            'ceph-authtool',
            '--gen-key',
            '--name=client.admin',
            '--cap', 'mon', 'allow *',
            '--cap', 'osd', 'allow *',
            '--cap', 'mds', 'allow *',
            '--cap', 'mgr', 'allow *',
            keyring_path,
        ],
    )

    log.info('Copying monmap to all nodes...')
    keyring = mon0_remote.read_file(keyring_path)
    monmap = mon0_remote.read_file(monmap_path)

    for rem in ctx.cluster.remotes.keys():
        # copy mon key and initial monmap
        log.info('Sending monmap to node {remote}'.format(remote=rem))
        rem.write_file(keyring_path, keyring, mode='0644', sudo=True)
        rem.write_file(monmap_path, monmap)

    log.info('Setting up mon nodes...')
    mons = ctx.cluster.only(teuthology.is_type('mon', cluster_name))

    if not config.get('skip_mgr_daemons', False):
        log.info('Setting up mgr nodes...')
        mgrs = ctx.cluster.only(teuthology.is_type('mgr', cluster_name))
        for remote, roles_for_host in mgrs.remotes.items():
            for role in teuthology.cluster_roles_of_type(roles_for_host, 'mgr',
                                                         cluster_name):
                _, _, id_ = teuthology.split_role(role)
                mgr_dir = DATA_PATH.format(
                    type_='mgr', cluster=cluster_name, id_=id_)
                remote.run(
                    args=[
                        'sudo',
                        'mkdir',
                        '-p',
                        mgr_dir,
                        run.Raw('&&'),
                        'sudo',
                        'adjust-ulimits',
                        'ceph-coverage',
                        coverage_dir,
                        'ceph-authtool',
                        '--create-keyring',
                        '--gen-key',
                        '--name=mgr.{id}'.format(id=id_),
                        mgr_dir + '/keyring',
                    ],
                )

    log.info('Setting up mds nodes...')
    mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
    for remote, roles_for_host in mdss.remotes.items():
        for role in teuthology.cluster_roles_of_type(roles_for_host, 'mds',
                                                     cluster_name):
            _, _, id_ = teuthology.split_role(role)
            mds_dir = DATA_PATH.format(
                type_='mds', cluster=cluster_name, id_=id_)
            remote.run(
                args=[
                    'sudo',
                    'mkdir',
                    '-p',
                    mds_dir,
                    run.Raw('&&'),
                    'sudo',
                    'adjust-ulimits',
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-authtool',
                    '--create-keyring',
                    '--gen-key',
                    '--name=mds.{id}'.format(id=id_),
                    mds_dir + '/keyring',
                ],
            )
            remote.run(args=[
                'sudo', 'chown', '-R', 'ceph:ceph', mds_dir
            ])

    cclient.create_keyring(ctx, cluster_name)
    log.info('Running mkfs on osd nodes...')

    if not hasattr(ctx, 'disk_config'):
        ctx.disk_config = argparse.Namespace()
    if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev'):
        ctx.disk_config.remote_to_roles_to_dev = {}
    if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_mount_options'):
        ctx.disk_config.remote_to_roles_to_dev_mount_options = {}
    if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_fstype'):
        ctx.disk_config.remote_to_roles_to_dev_fstype = {}

    teuthology.deep_merge(ctx.disk_config.remote_to_roles_to_dev, remote_to_roles_to_devs)

    log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev)))
    for remote, roles_for_host in osds.remotes.items():
        roles_to_devs = remote_to_roles_to_devs[remote]

        for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
            _, _, id_ = teuthology.split_role(role)
            mnt_point = DATA_PATH.format(
                type_='osd', cluster=cluster_name, id_=id_)
            remote.run(
                args=[
                    'sudo',
                    'mkdir',
                    '-p',
                    mnt_point,
                ])
            log.info('roles_to_devs: {}'.format(roles_to_devs))
            log.info('role: {}'.format(role))
            if roles_to_devs.get(role):
                dev = roles_to_devs[role]
                fs = config.get('fs')
                package = None
                mkfs_options = config.get('mkfs_options')
                mount_options = config.get('mount_options')
                if fs == 'btrfs':
                    # package = 'btrfs-tools'
                    if mount_options is None:
                        mount_options = ['noatime', 'user_subvol_rm_allowed']
                    if mkfs_options is None:
                        mkfs_options = ['-m', 'single',
                                        '-l', '32768',
                                        '-n', '32768']
                if fs == 'xfs':
                    # package = 'xfsprogs'
                    if mount_options is None:
                        mount_options = ['noatime']
                    if mkfs_options is None:
                        mkfs_options = ['-f', '-i', 'size=2048']
                if fs == 'ext4' or fs == 'ext3':
                    if mount_options is None:
                        mount_options = ['noatime', 'user_xattr']

                if mount_options is None:
                    mount_options = []
                if mkfs_options is None:
                    mkfs_options = []
                mkfs = ['mkfs.%s' % fs] + mkfs_options
                log.info('%s on %s on %s' % (mkfs, dev, remote))
                if package is not None:
                    remote.sh('sudo apt-get install -y %s' % package)

                try:
                    remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
                except run.CommandFailedError:
                    # Newer btrfs-tools doesn't prompt for overwrite, use -f
                    if '-f' not in mount_options:
                        mkfs_options.append('-f')
                        mkfs = ['mkfs.%s' % fs] + mkfs_options
                        log.info('%s on %s on %s' % (mkfs, dev, remote))
                    remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])

                log.info('mount %s on %s -o %s' % (dev, remote,
                                                   ','.join(mount_options)))
                remote.run(
                    args=[
                        'sudo',
                        'mount',
                        '-t', fs,
                        '-o', ','.join(mount_options),
                        dev,
                        mnt_point,
                    ]
                )
                remote.run(
                    args=[
                        'sudo', '/sbin/restorecon', mnt_point,
                    ],
                    check_status=False,
                )
                if remote not in ctx.disk_config.remote_to_roles_to_dev_mount_options:
                    ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {}
                ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][role] = mount_options
                if remote not in ctx.disk_config.remote_to_roles_to_dev_fstype:
                    ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
                ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role] = fs
                devs_to_clean[remote].append(mnt_point)

        for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
            _, _, id_ = teuthology.split_role(role)
            try:
                args = ['sudo',
                        'MALLOC_CHECK_=3',
                        'adjust-ulimits',
                        'ceph-coverage', coverage_dir,
                        'ceph-osd',
                        '--no-mon-config',
                        '--cluster', cluster_name,
                        '--mkfs',
                        '--mkkey',
                        '-i', id_,
                        '--monmap', monmap_path]
                log_path = f'/var/log/ceph/{cluster_name}-osd.{id_}.log'
                create_log_cmd, args = \
                    maybe_redirect_stderr(config, 'osd', args, log_path)
                if create_log_cmd:
                    remote.sh(create_log_cmd)
                remote.run(args=args)
            except run.CommandFailedError:
                # try without --no-mon-config.. this may be an upgrade test
                remote.run(
                    args=[
                        'sudo',
                        'MALLOC_CHECK_=3',
                        'adjust-ulimits',
                        'ceph-coverage',
                        coverage_dir,
                        'ceph-osd',
                        '--cluster',
                        cluster_name,
                        '--mkfs',
                        '--mkkey',
                        '-i', id_,
                        '--monmap', monmap_path,
                    ],
                )
            mnt_point = DATA_PATH.format(
                type_='osd', cluster=cluster_name, id_=id_)
            remote.run(args=[
                'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
            ])

    log.info('Reading keys from all nodes...')
    keys_fp = BytesIO()
    keys = []
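    # Gather every daemon and client keyring into one buffer; the whole
    # buffer is later piped through 'tee -a' into the keyring on each mon.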
    for remote, roles_for_host in ctx.cluster.remotes.items():
        for type_ in ['mgr', 'mds', 'osd']:
            if type_ == 'mgr' and config.get('skip_mgr_daemons', False):
                continue
            for role in teuthology.cluster_roles_of_type(roles_for_host, type_, cluster_name):
                _, _, id_ = teuthology.split_role(role)
                data = remote.read_file(
                    os.path.join(
                        DATA_PATH.format(
                            type_=type_, id_=id_, cluster=cluster_name),
                        'keyring',
                    ),
                    sudo=True,
                )
                keys.append((type_, id_, data))
                keys_fp.write(data)
    for remote, roles_for_host in ctx.cluster.remotes.items():
        for role in teuthology.cluster_roles_of_type(roles_for_host, 'client', cluster_name):
            _, _, id_ = teuthology.split_role(role)
            data = remote.read_file(
                '/etc/ceph/{cluster}.client.{id}.keyring'.format(id=id_, cluster=cluster_name)
            )
            keys.append(('client', id_, data))
            keys_fp.write(data)

    log.info('Adding keys to all mons...')
    writes = mons.run(
        args=[
            'sudo', 'tee', '-a',
            keyring_path,
        ],
        stdin=run.PIPE,
        wait=False,
        stdout=BytesIO(),
    )
    keys_fp.seek(0)
    teuthology.feed_many_stdins_and_close(keys_fp, writes)
    run.wait(writes)
    for type_, id_, data in keys:
        run.wait(
            mons.run(
                args=[
                    'sudo',
                    'adjust-ulimits',
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-authtool',
                    keyring_path,
                    '--name={type}.{id}'.format(
                        type=type_,
                        id=id_,
                    ),
                ] + list(generate_caps(type_)),
                wait=False,
            ),
        )

    log.info('Running mkfs on mon nodes...')
    for remote, roles_for_host in mons.remotes.items():
        for role in teuthology.cluster_roles_of_type(roles_for_host, 'mon', cluster_name):
            _, _, id_ = teuthology.split_role(role)
            mnt_point = DATA_PATH.format(
                type_='mon', id_=id_, cluster=cluster_name)
            remote.run(
                args=[
                    'sudo',
                    'mkdir',
                    '-p',
                    mnt_point,
                ],
            )
            remote.run(
                args=[
                    'sudo',
                    'adjust-ulimits',
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-mon',
                    '--cluster', cluster_name,
                    '--mkfs',
                    '-i', id_,
                    '--monmap', monmap_path,
                    '--keyring', keyring_path,
                ],
            )
            remote.run(args=[
                'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
            ])

    run.wait(
        mons.run(
            args=[
                'rm',
                '--',
                monmap_path,
            ],
            wait=False,
        ),
    )

    try:
        yield
    except Exception:
        # we need to know this below
        ctx.summary['success'] = False
        raise
    finally:
        (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()

        log.info('Checking cluster log for badness...')

        def first_in_ceph_log(pattern, excludes):
            """
            Find the first occurrence of the pattern specified in the Ceph log.
            Returns None if none found.

            :param pattern: Pattern scanned for.
            :param excludes: Patterns to ignore.
            :return: First line of text (or None if not found)
            """
            args = [
                'sudo',
                'egrep', pattern,
                '/var/log/ceph/{cluster}.log'.format(cluster=cluster_name),
            ]
            for exclude in excludes:
                args.extend([run.Raw('|'), 'egrep', '-v', exclude])
            args.extend([
                run.Raw('|'), 'head', '-n', '1',
            ])
            stdout = mon0_remote.sh(args)
            return stdout or None

        if first_in_ceph_log(r'\[ERR\]|\[WRN\]|\[SEC\]',
                             config['log_ignorelist']) is not None:
            log.warning('Found errors (ERR|WRN|SEC) in cluster log')
            ctx.summary['success'] = False
            # use the most severe problem as the failure reason
            if 'failure_reason' not in ctx.summary:
                for pattern in [r'\[SEC\]', r'\[ERR\]', r'\[WRN\]']:
                    match = first_in_ceph_log(pattern, config['log_ignorelist'])
                    if match is not None:
                        ctx.summary['failure_reason'] = \
                            '"{match}" in cluster log'.format(
                                match=match.rstrip('\n'),
                            )
                        break

        for remote, dirs in devs_to_clean.items():
            for dir_ in dirs:
                log.info('Unmounting %s on %s' % (dir_, remote))
                try:
                    remote.run(
                        args=[
                            'sync',
                            run.Raw('&&'),
                            'sudo',
                            'umount',
                            '-f',
                            dir_
                        ]
                    )
                except Exception as e:
                    remote.run(args=[
                        'sudo',
                        run.Raw('PATH=/usr/sbin:$PATH'),
                        'lsof',
                        run.Raw(';'),
                        'ps', 'auxf',
                    ])
                    raise e

        if ctx.archive is not None and \
                not (ctx.config.get('archive-on-error') and ctx.summary['success']):

            # archive mon data, too
            log.info('Archiving mon data...')
            path = os.path.join(ctx.archive, 'data')
            try:
                os.makedirs(path)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    pass
                else:
                    raise
            for remote, roles in mons.remotes.items():
                for role in roles:
                    is_mon = teuthology.is_type('mon', cluster_name)
                    if is_mon(role):
                        _, _, id_ = teuthology.split_role(role)
                        mon_dir = DATA_PATH.format(
                            type_='mon', id_=id_, cluster=cluster_name)
                        teuthology.pull_directory_tarball(
                            remote,
                            mon_dir,
                            path + '/' + role + '.tgz')

        log.info('Cleaning ceph cluster...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'sudo',
                    'rm',
                    '-rf',
                    '--',
                    conf_path,
                    keyring_path,
                    data_dir,
                    monmap_path,
                    run.Raw('{tdir}/../*.pid'.format(tdir=testdir)),
                ],
                wait=False,
            ),
        )


def osd_scrub_pgs(ctx, config):
    """
    Scrub pgs when we exit.

    First make sure all pgs are active and clean.
    Next scrub all osds.
    Then periodically check until all pgs have scrub time stamps that
    indicate the last scrub completed.  Time out if no progress is made
    here after two minutes.
    """
    retries = 40
    delays = 20
    cluster_name = config['cluster']
    manager = ctx.managers[cluster_name]
    for _ in range(retries):
        stats = manager.get_pg_stats()
        unclean = [stat['pgid'] for stat in stats if 'active+clean' not in stat['state']]
        split_merge = []
        osd_dump = manager.get_osd_dump_json()
        try:
            split_merge = [i['pool_name'] for i in osd_dump['pools'] if i['pg_num'] != i['pg_num_target']]
        except KeyError:
            # we don't support pg_num_target before nautilus
            pass
        if not unclean and not split_merge:
            break
        waiting_on = []
        if unclean:
            waiting_on.append(f'{unclean} to go clean')
        if split_merge:
            waiting_on.append(f'{split_merge} to split/merge')
        waiting_on = ' and '.join(waiting_on)
        log.info('Waiting for all PGs to be active+clean and split+merged, waiting on %s', waiting_on)
        time.sleep(delays)
    else:
        raise RuntimeError("Scrubbing terminated -- not all pgs were active and clean.")
    check_time_now = time.localtime()
    time.sleep(1)
    all_roles = teuthology.all_roles(ctx.cluster)
    for role in teuthology.cluster_roles_of_type(all_roles, 'osd', cluster_name):
        log.info("Scrubbing {osd}".format(osd=role))
        _, _, id_ = teuthology.split_role(role)
        # allow this to fail; in certain cases the OSD might not be up
        # at this point.  we will catch all pgs below.
        try:
            manager.raw_cluster_cmd('tell', 'osd.' + id_, 'config', 'set',
                                    'osd_debug_deep_scrub_sleep', '0')
            manager.raw_cluster_cmd('osd', 'deep-scrub', id_)
        except run.CommandFailedError:
            pass
    prev_good = 0
    gap_cnt = 0
    loop = True
    while loop:
        stats = manager.get_pg_stats()
        timez = [(stat['pgid'], stat['last_scrub_stamp']) for stat in stats]
        loop = False
        thiscnt = 0
        re_scrub = []
        for (pgid, tmval) in timez:
            t = tmval[0:tmval.find('.')].replace(' ', 'T')
            pgtm = time.strptime(t, '%Y-%m-%dT%H:%M:%S')
            if pgtm > check_time_now:
                thiscnt += 1
            else:
                log.info('pgid %s last_scrub_stamp %s %s <= %s', pgid, tmval, pgtm, check_time_now)
                loop = True
                re_scrub.append(pgid)
        if thiscnt > prev_good:
            prev_good = thiscnt
            gap_cnt = 0
        else:
            gap_cnt += 1
            if gap_cnt % 6 == 0:
                for pgid in re_scrub:
                    # re-request scrub every so often in case the earlier
                    # request was missed.  do not do it every time because
                    # the scrub may be in progress or not reported yet and
                    # we will starve progress.
                    manager.raw_cluster_cmd('pg', 'deep-scrub', pgid)
            if gap_cnt > retries:
                raise RuntimeError('Exiting scrub checking -- not all pgs scrubbed.')
        if loop:
            log.info('Still waiting for all pgs to be scrubbed.')
            time.sleep(delays)


@contextlib.contextmanager
def run_daemon(ctx, config, type_):
    """
    Run daemons for a role type.  Handle the startup and termination of a daemon.
    On startup -- set coverages, cpu_profile, valgrind values for all remotes,
    and a max_mds value for one mds.
    On cleanup -- Stop all existing daemons of this type.

    :param ctx: Context
    :param config: Configuration
    :param type_: Role type
    """
    cluster_name = config['cluster']
    log.info('Starting %s daemons in cluster %s...', type_, cluster_name)
    testdir = teuthology.get_testdir(ctx)
    daemons = ctx.cluster.only(teuthology.is_type(type_, cluster_name))

    # check whether any daemons of this type are configured
    if daemons is None:
        return
    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    daemon_signal = 'kill'
    if config.get('coverage') or config.get('valgrind') is not None:
        daemon_signal = 'term'

    # create osds in order.  (this only matters for pre-luminous, which might
    # be jewel/hammer, which doesn't take an id_ argument to legacy 'osd create').
    osd_uuids = {}
    for remote, roles_for_host in daemons.remotes.items():
        is_type_ = teuthology.is_type(type_, cluster_name)
        for role in roles_for_host:
            if not is_type_(role):
                continue
            _, _, id_ = teuthology.split_role(role)

            if type_ == 'osd':
                datadir = '/var/lib/ceph/osd/{cluster}-{id}'.format(
                    cluster=cluster_name, id=id_)
                osd_uuid = remote.read_file(
                    datadir + '/fsid', sudo=True).decode().strip()
                osd_uuids[id_] = osd_uuid
    for osd_id in range(len(osd_uuids)):
        id_ = str(osd_id)
        osd_uuid = osd_uuids.get(id_)
        try:
            remote.run(
                args=[
                    'sudo', 'ceph', '--cluster', cluster_name,
                    'osd', 'new', osd_uuid, id_,
                ]
            )
        except Exception:
            # fallback to pre-luminous (jewel)
            remote.run(
                args=[
                    'sudo', 'ceph', '--cluster', cluster_name,
                    'osd', 'create', osd_uuid,
                ]
            )
        if config.get('add_osds_to_crush'):
            remote.run(
                args=[
                    'sudo', 'ceph', '--cluster', cluster_name,
                    'osd', 'crush', 'create-or-move', 'osd.' + id_,
                    '1.0', 'host=localhost', 'root=default',
                ]
            )

    for remote, roles_for_host in daemons.remotes.items():
        is_type_ = teuthology.is_type(type_, cluster_name)
        for role in roles_for_host:
            if not is_type_(role):
                continue
            _, _, id_ = teuthology.split_role(role)

            run_cmd = [
                'sudo',
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'daemon-helper',
                daemon_signal,
            ]
            run_cmd_tail = [
                'ceph-%s' % (type_),
                '-f',
                '--cluster', cluster_name,
                '-i', id_]

            if type_ in config.get('cpu_profile', []):
                profile_path = '/var/log/ceph/profiling-logger/%s.prof' % (role)
                run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])

            vc = config.get('valgrind')
            if vc is not None:
                valgrind_args = None
                if type_ in vc:
                    valgrind_args = vc[type_]
                if role in vc:
                    valgrind_args = vc[role]
                exit_on_first_error = vc.get('exit_on_first_error', True)
                run_cmd = get_valgrind_args(testdir, role, run_cmd, valgrind_args,
                                            exit_on_first_error=exit_on_first_error)

            run_cmd.extend(run_cmd_tail)
            log_path = f'/var/log/ceph/{cluster_name}-{type_}.{id_}.log'
            create_log_cmd, run_cmd = \
                maybe_redirect_stderr(config, type_, run_cmd, log_path)
            if create_log_cmd:
                remote.sh(create_log_cmd)
            # always register mgr; don't necessarily start
            ctx.daemons.register_daemon(
                remote, type_, id_,
                cluster=cluster_name,
                args=run_cmd,
                logger=log.getChild(role),
                stdin=run.PIPE,
                wait=False
            )
            if type_ != 'mgr' or not config.get('skip_mgr_daemons', False):
                role = cluster_name + '.' + type_
                ctx.daemons.get_daemon(type_, id_, cluster_name).restart()

    # kludge: run any pre-manager commands
    if type_ == 'mon':
        for cmd in config.get('pre-mgr-commands', []):
            firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
            (remote,) = ctx.cluster.only(firstmon).remotes.keys()
            remote.run(args=cmd.split(' '))

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, type_, cluster_name)


def healthy(ctx, config):
    """
    Wait for all OSDs to be up, and for the ceph health monitor to return HEALTH_OK.

    :param ctx: Context
    :param config: Configuration
    """
    config = config if isinstance(config, dict) else dict()
    cluster_name = config.get('cluster', 'ceph')
    log.info('Waiting until %s daemons up and pgs clean...', cluster_name)
    manager = ctx.managers[cluster_name]
    try:
        manager.wait_for_mgr_available(timeout=30)
    except (run.CommandFailedError, AssertionError) as e:
        log.info('ignoring mgr wait error, probably testing upgrade: %s', e)

    manager.wait_for_all_osds_up(timeout=300)

    try:
        manager.flush_all_pg_stats()
    except (run.CommandFailedError, Exception) as e:
        log.info('ignoring flush pg stats error, probably testing upgrade: %s', e)
    manager.wait_for_clean()

    if config.get('wait-for-healthy', True):
        log.info('Waiting until ceph cluster %s is healthy...', cluster_name)
        manager.wait_until_healthy(timeout=300)

    if ctx.cluster.only(teuthology.is_type('mds', cluster_name)).remotes:
        # Some MDSs exist, wait for them to be healthy
        ceph_fs = Filesystem(ctx)  # TODO: make Filesystem cluster-aware
        ceph_fs.wait_for_daemons(timeout=300)


def wait_for_mon_quorum(ctx, config):
    """
    Check remote ceph status until all monitors are up.

    :param ctx: Context
    :param config: Configuration
    """
    if isinstance(config, dict):
        mons = config['daemons']
        cluster_name = config.get('cluster', 'ceph')
    else:
        assert isinstance(config, list)
        mons = config
        cluster_name = 'ceph'
    firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
    (remote,) = ctx.cluster.only(firstmon).remotes.keys()
    with contextutil.safe_while(sleep=10, tries=60,
                                action='wait for monitor quorum') as proceed:
        while proceed():
            quorum_status = remote.sh('sudo ceph quorum_status',
                                      logger=log.getChild('quorum_status'))
            j = json.loads(quorum_status)
            q = j.get('quorum_names', [])
            log.debug('Quorum: %s', q)
            if sorted(q) == sorted(mons):
                break


def created_pool(ctx, config):
    """
    Add new pools to the dictionary of pools that the ceph-manager
    knows about.
    """
    for new_pool in config:
        if new_pool not in ctx.managers['ceph'].pools:
            ctx.managers['ceph'].pools[new_pool] = ctx.managers['ceph'].get_pool_int_property(
                new_pool, 'pg_num')
1514 | ||
1515 | ||
11fdf7f2 | 1516 | @contextlib.contextmanager |
9f95a23c | 1517 | def suppress_mon_health_to_clog(ctx, config): |
11fdf7f2 | 1518 | """ |
9f95a23c | 1519 | Set the option, and then restore it to its original value |
11fdf7f2 TL |
1520 | |
1521 | Note: due to the way tasks are executed/nested, it is not recommended to | |
1522 | use this method as a standalone task; otherwise, it will likely | |
1523 | restore the tweaked option at the /end/ of the 'tasks' block. | |
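
When triggered, the tweak is equivalent to running (a sketch of the
commands issued through the cluster manager below)::

    ceph config set mon mon_health_to_clog false
    # ... wrapped tasks run here ...
    ceph config rm mon mon_health_to_clog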
1524 | """ | |
9f95a23c | 1525 | if config.get('mon-health-to-clog', 'true') == 'false': |
9f95a23c TL |
1526 | cluster = config.get('cluster', 'ceph') |
1527 | manager = ctx.managers[cluster] | |
1528 | manager.raw_cluster_command( | |
1529 | 'config', 'set', 'mon', 'mon_health_to_clog', 'false' | |
1530 | ) | |
1531 | yield | |
1532 | manager.raw_cluster_command( | |
1533 | 'config', 'rm', 'mon', 'mon_health_to_clog' | |
1534 | ) | |
11fdf7f2 | 1535 | else: |
9f95a23c | 1536 | yield |
11fdf7f2 | 1537 | |
7c673cae FG |
1538 | @contextlib.contextmanager |
1539 | def restart(ctx, config): | |
1540 | """ | |
1541 | restart ceph daemons | |
1542 | ||
1543 | For example:: | |
1544 | tasks: | |
1545 | - ceph.restart: [all] | |
1546 | ||
1547 | For example:: | |
1548 | tasks: | |
1549 | - ceph.restart: [osd.0, mon.1, mds.*] | |
1550 | ||
1551 | or:: | |
1552 | ||
1553 | tasks: | |
1554 | - ceph.restart: | |
1555 | daemons: [osd.0, mon.1] | |
1556 | wait-for-healthy: false | |
1557 | wait-for-osds-up: true | |
1558 | ||
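
Monitor health-to-clog chatter can be muted for the duration of the
restart via the option consumed by suppress_mon_health_to_clog (a
sketch; note the quoted string value, which is what the option check
compares against)::

    tasks:
    - ceph.restart:
        daemons: [mon.a]
        mon-health-to-clog: 'false'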
1559 | :param ctx: Context | |
1560 | :param config: Configuration | |
1561 | """ | |
1562 | if config is None: | |
1563 | config = {} | |
1564 | elif isinstance(config, list): | |
1565 | config = {'daemons': config} | |
1566 | ||
1567 | daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True) | |
1568 | clusters = set() | |
7c673cae | 1569 | |
9f95a23c | 1570 | with suppress_mon_health_to_clog(ctx, config): |
11fdf7f2 TL |
1571 | for role in daemons: |
1572 | cluster, type_, id_ = teuthology.split_role(role) | |
9f95a23c TL |
1573 | ctx.daemons.get_daemon(type_, id_, cluster).stop() |
1574 | if type_ == 'osd': | |
1575 | ctx.managers[cluster].mark_down_osd(id_) | |
11fdf7f2 TL |
1576 | ctx.daemons.get_daemon(type_, id_, cluster).restart() |
1577 | clusters.add(cluster) | |
f67539c2 | 1578 | |
7c673cae FG |
1579 | if config.get('wait-for-healthy', True): |
1580 | for cluster in clusters: | |
1581 | healthy(ctx=ctx, config=dict(cluster=cluster)) | |
1582 | if config.get('wait-for-osds-up', False): | |
1583 | for cluster in clusters: | |
9f95a23c | 1584 | ctx.managers[cluster].wait_for_all_osds_up() |
7c673cae FG |
1585 | yield |
1586 | ||
1587 | ||
1588 | @contextlib.contextmanager | |
1589 | def stop(ctx, config): | |
1590 | """ | |
1591 | Stop ceph daemons | |
1592 | ||
1593 | For example:: | |
1594 | tasks: | |
1595 | - ceph.stop: [mds.*] | |
1596 | ||
1597 | tasks: | |
1598 | - ceph.stop: [osd.0, osd.2] | |
1599 | ||
1600 | tasks: | |
1601 | - ceph.stop: | |
1602 | daemons: [osd.0, osd.2] | |
1603 | ||
1604 | """ | |
1605 | if config is None: | |
1606 | config = {} | |
1607 | elif isinstance(config, list): | |
1608 | config = {'daemons': config} | |
1609 | ||
1610 | daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True) | |
9f95a23c TL |
1611 | clusters = set() |
1612 | ||
7c673cae FG |
1613 | for role in daemons: |
1614 | cluster, type_, id_ = teuthology.split_role(role) | |
1615 | ctx.daemons.get_daemon(type_, id_, cluster).stop() | |
9f95a23c TL |
1616 | clusters.add(cluster) |
1617 | ||
1618 | ||
1619 | for cluster in clusters: | |
1620 | ctx.ceph[cluster].watchdog.stop() | |
1621 | ctx.ceph[cluster].watchdog.join() | |
7c673cae FG |
1622 | |
1623 | yield | |
1624 | ||
1625 | ||
1626 | @contextlib.contextmanager | |
1627 | def wait_for_failure(ctx, config): | |
1628 | """ | |
1629 | Wait for a failure of a ceph daemon | |
1630 | ||
1631 | For example:: | |
1632 | tasks: | |
1633 | - ceph.wait_for_failure: [mds.*] | |
1634 | ||
1635 | tasks: | |
1636 | - ceph.wait_for_failure: [osd.0, osd.2] | |
1637 | ||
1638 | tasks: | |
1639 | - ceph.wait_for_failure: | |
1640 | daemons: [osd.0, osd.2] | |
1641 | ||
1642 | """ | |
1643 | if config is None: | |
1644 | config = {} | |
1645 | elif isinstance(config, list): | |
1646 | config = {'daemons': config} | |
1647 | ||
1648 | daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True) | |
1649 | for role in daemons: | |
1650 | cluster, type_, id_ = teuthology.split_role(role) | |
1651 | try: | |
1652 | ctx.daemons.get_daemon(type_, id_, cluster).wait() | |
1653 | except Exception: | |
1654 | log.info('Saw expected daemon failure. Continuing.') | |
1656 | else: | |
1657 | raise RuntimeError('daemon %s did not fail' % role) | |
1658 | ||
1659 | yield | |
1660 | ||
1661 | ||
1662 | def validate_config(ctx, config): | |
1663 | """ | |
1664 | Perform some simple validation on task configuration. | |
1665 | Raises exceptions.ConfigError if an error is found. | |
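
For example, a roles layout like the following would fail validation,
because one host carries osds from two clusters (a sketch; the role
names are placeholders)::

    roles:
    - [mon.a, osd.0, backup.osd.0]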
1666 | """ | |
1667 | # check for osds from multiple clusters on the same host | |
1668 | for remote, roles_for_host in ctx.cluster.remotes.items(): | |
1669 | last_cluster = None | |
1670 | last_role = None | |
1671 | for role in roles_for_host: | |
1672 | role_cluster, role_type, _ = teuthology.split_role(role) | |
1673 | if role_type != 'osd': | |
1674 | continue | |
1675 | if last_cluster and last_cluster != role_cluster: | |
1676 | msg = "Host should not have osds (%s and %s) from multiple clusters" % ( | |
1677 | last_role, role) | |
1678 | raise exceptions.ConfigError(msg) | |
1679 | last_cluster = role_cluster | |
1680 | last_role = role | |
1681 | ||
1682 | ||
1683 | @contextlib.contextmanager | |
1684 | def task(ctx, config): | |
1685 | """ | |
1686 | Set up and tear down a Ceph cluster. | |
1687 | ||
1688 | For example:: | |
1689 | ||
1690 | tasks: | |
1691 | - ceph: | |
1692 | - interactive: | |
1693 | ||
1694 | You can also specify what branch to run:: | |
1695 | ||
1696 | tasks: | |
1697 | - ceph: | |
1698 | branch: foo | |
1699 | ||
1700 | Or a tag:: | |
1701 | ||
1702 | tasks: | |
1703 | - ceph: | |
1704 | tag: v0.42.13 | |
1705 | ||
1706 | Or a sha1:: | |
1707 | ||
1708 | tasks: | |
1709 | - ceph: | |
1710 | sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed | |
1711 | ||
1712 | Or a local source dir:: | |
1713 | ||
1714 | tasks: | |
1715 | - ceph: | |
1716 | path: /home/sage/ceph | |
1717 | ||
1718 | To capture code coverage data, use:: | |
1719 | ||
1720 | tasks: | |
1721 | - ceph: | |
1722 | coverage: true | |
1723 | ||
1724 | To use btrfs, ext4, or xfs on the target's scratch disks, use:: | |
1725 | ||
1726 | tasks: | |
1727 | - ceph: | |
1728 | fs: xfs | |
1729 | mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1] | |
1730 | mount_options: [nobarrier, inode64] | |
1731 | ||
f91f0fd5 TL |
1732 | To change cephfs's default max_mds (1), use:: |
1733 | ||
1734 | tasks: | |
1735 | - ceph: | |
1736 | cephfs: | |
1737 | max_mds: 2 | |
1738 | ||
f67539c2 TL |
1739 | To change the max_mds of a specific filesystem, use:: |
1740 | ||
1741 | tasks: | |
1742 | - ceph: | |
1743 | cephfs: | |
1744 | max_mds: 2 | |
1745 | fs: | |
1746 | - name: a | |
1747 | max_mds: 3 | |
1748 | - name: b | |
1749 | ||
1750 | In the above example, filesystem 'a' will have 'max_mds' 3, | |
1751 | and filesystem 'b' will have 'max_mds' 2. | |
1752 | ||
f91f0fd5 TL |
1753 | To change the mdsmap's default session_timeout (60 seconds), use:: |
1754 | ||
1755 | tasks: | |
1756 | - ceph: | |
1757 | cephfs: | |
1758 | session_timeout: 300 | |
1759 | ||
7c673cae FG |
1760 | Note, this will cause the task to check the /scratch_devs file on each node |
1761 | for available devices. If no such file is found, /dev/sdb will be used. | |
1762 | ||
1763 | To run some daemons under valgrind, include their names | |
1764 | and the tool/args to use in a valgrind section:: | |
1765 | ||
1766 | tasks: | |
1767 | - ceph: | |
1768 | valgrind: | |
1769 | mds.1: --tool=memcheck | |
1770 | osd.1: [--tool=memcheck, --leak-check=no] | |
1771 | ||
1772 | Those nodes which are using memcheck or another valgrind tool will | |
1773 | get checked for bad results. | |
1774 | ||
1775 | To adjust or modify config options, use:: | |
1776 | ||
1777 | tasks: | |
1778 | - ceph: | |
1779 | conf: | |
1780 | section: | |
1781 | key: value | |
1782 | ||
1783 | For example:: | |
1784 | ||
1785 | tasks: | |
1786 | - ceph: | |
1787 | conf: | |
1788 | mds.0: | |
1789 | some option: value | |
1790 | other key: other value | |
1791 | client.0: | |
1792 | debug client: 10 | |
1793 | debug ms: 1 | |
1794 | ||
1795 | By default, the cluster log is checked for errors and warnings, | |
1796 | and the run marked failed if any appear. You can ignore log | |
1797 | entries by giving a list of egrep-compatible regexes, e.g.:: | |
1798 | ||
1799 | tasks: | |
1800 | - ceph: | |
cd265ab1 | 1801 | log-ignorelist: ['foo.*bar', 'bad message'] |
7c673cae FG |
1802 | |
1803 | To run multiple ceph clusters, use multiple ceph tasks, and roles | |
1804 | with a cluster name prefix, e.g. cluster1.client.0. Roles with no | |
1805 | cluster use the default cluster name, 'ceph'. OSDs from separate | |
1806 | clusters must be on separate hosts. Clients and non-osd daemons | |
1807 | from multiple clusters may be colocated. For each cluster, add an | |
1808 | instance of the ceph task with the cluster name specified, e.g.:: | |
1809 | ||
1810 | roles: | |
1811 | - [mon.a, osd.0, osd.1] | |
1812 | - [backup.mon.a, backup.osd.0, backup.osd.1] | |
1813 | - [client.0, backup.client.0] | |
1814 | tasks: | |
1815 | - ceph: | |
1816 | cluster: ceph | |
1817 | - ceph: | |
1818 | cluster: backup | |
1819 | ||
1820 | :param ctx: Context | |
1821 | :param config: Configuration | |
1822 | ||
1823 | """ | |
1824 | if config is None: | |
1825 | config = {} | |
1826 | assert isinstance(config, dict), \ | |
1827 | "task ceph only supports a dictionary for configuration" | |
1828 | ||
1829 | overrides = ctx.config.get('overrides', {}) | |
1830 | teuthology.deep_merge(config, overrides.get('ceph', {})) | |
1831 | ||
1832 | first_ceph_cluster = False | |
1833 | if not hasattr(ctx, 'daemons'): | |
1834 | first_ceph_cluster = True | |
1835 | ctx.daemons = DaemonGroup() | |
1836 | ||
1837 | testdir = teuthology.get_testdir(ctx) | |
1838 | if config.get('coverage'): | |
1839 | coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) | |
1840 | log.info('Creating coverage directory...') | |
1841 | run.wait( | |
1842 | ctx.cluster.run( | |
1843 | args=[ | |
1844 | 'install', '-d', '-m0755', '--', | |
1845 | coverage_dir, | |
1846 | ], | |
1847 | wait=False, | |
1848 | ) | |
1849 | ) | |
1850 | ||
1851 | if 'cluster' not in config: | |
1852 | config['cluster'] = 'ceph' | |
1853 | ||
1854 | validate_config(ctx, config) | |
1855 | ||
1856 | subtasks = [] | |
1857 | if first_ceph_cluster: | |
1858 | # these tasks handle general log setup and parsing on all hosts, | |
1859 | # so they should only be run once | |
1860 | subtasks = [ | |
1861 | lambda: ceph_log(ctx=ctx, config=None), | |
11fdf7f2 | 1862 | lambda: ceph_crash(ctx=ctx, config=None), |
7c673cae FG |
1863 | lambda: valgrind_post(ctx=ctx, config=config), |
1864 | ] | |
1865 | ||
1866 | subtasks += [ | |
1867 | lambda: cluster(ctx=ctx, config=dict( | |
1868 | conf=config.get('conf', {}), | |
1869 | fs=config.get('fs', 'xfs'), | |
1870 | mkfs_options=config.get('mkfs_options', None), | |
1871 | mount_options=config.get('mount_options', None), | |
7c673cae | 1872 | skip_mgr_daemons=config.get('skip_mgr_daemons', False), |
cd265ab1 | 1873 | log_ignorelist=config.get('log-ignorelist', []), |
7c673cae FG |
1874 | cpu_profile=set(config.get('cpu_profile', []),), |
1875 | cluster=config['cluster'], | |
11fdf7f2 TL |
1876 | mon_bind_msgr2=config.get('mon_bind_msgr2', True), |
1877 | mon_bind_addrvec=config.get('mon_bind_addrvec', True), | |
7c673cae FG |
1878 | )), |
1879 | lambda: run_daemon(ctx=ctx, config=config, type_='mon'), | |
1880 | lambda: run_daemon(ctx=ctx, config=config, type_='mgr'), | |
1881 | lambda: crush_setup(ctx=ctx, config=config), | |
1882 | lambda: run_daemon(ctx=ctx, config=config, type_='osd'), | |
a4b75251 | 1883 | lambda: setup_manager(ctx=ctx, config=config), |
224ce89b | 1884 | lambda: create_rbd_pool(ctx=ctx, config=config), |
7c673cae | 1885 | lambda: run_daemon(ctx=ctx, config=config, type_='mds'), |
f91f0fd5 | 1886 | lambda: cephfs_setup(ctx=ctx, config=config), |
9f95a23c | 1887 | lambda: watchdog_setup(ctx=ctx, config=config), |
7c673cae FG |
1888 | ] |
1889 | ||
1890 | with contextutil.nested(*subtasks): | |
7c673cae FG |
1891 | try: |
1892 | if config.get('wait-for-healthy', True): | |
1893 | healthy(ctx=ctx, config=dict(cluster=config['cluster'])) | |
1894 | ||
1895 | yield | |
1896 | finally: | |
11fdf7f2 TL |
1897 | # set pg_num_targets back to actual pg_num, so we don't have to |
1898 | # wait for pending merges (which can take a while!) | |
1899 | ctx.managers[config['cluster']].stop_pg_num_changes() | |
1900 | ||
7c673cae | 1901 | if config.get('wait-for-scrub', True): |
f67539c2 TL |
1902 | # wait for pgs to become active+clean in case any |
1903 | # recoveries were triggered since the last health check | |
1904 | ctx.managers[config['cluster']].wait_for_clean() | |
7c673cae | 1905 | osd_scrub_pgs(ctx, config) |
224ce89b WB |
1906 | |
1907 | # stop logging health to clog during shutdown, or else we generate | |
1908 | # a bunch of scary messages unrelated to our actual run. | |
1909 | firstmon = teuthology.get_first_mon(ctx, config, config['cluster']) | |
1910 | (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() | |
1911 | mon0_remote.run( | |
1912 | args=[ | |
1913 | 'sudo', | |
1914 | 'ceph', | |
1915 | '--cluster', config['cluster'], | |
9f95a23c TL |
1916 | 'config', 'set', 'global', |
1917 | 'mon_health_to_clog', 'false', | |
1918 | ], | |
1919 | check_status=False, | |
224ce89b | 1920 | ) |