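"""CLI for the orchestrator manager module: implements the `ceph orch ...`
and `ceph device ...` commands on top of the configured orchestrator backend."""
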
import datetime
import errno
import json
from typing import List, Set, Optional, Iterator, cast
import re
import ast

import yaml
import six

from prettytable import PrettyTable

from ceph.deployment.inventory import Device
from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from ceph.deployment.service_spec import PlacementSpec, ServiceSpec

from mgr_util import format_bytes, to_pretty_timedelta

from mgr_module import MgrModule, HandleCommandResult

from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
    raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
    NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
    RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
    ServiceDescription, DaemonDescription, IscsiServiceSpec, json_to_generic_spec, GenericSpec


def nice_delta(now, t, suffix=''):
    if t:
        return to_pretty_timedelta(now - t) + suffix
    else:
        return '-'

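# to_format() has no 'plain' branch and returns None for unrecognized formats;
# callers only invoke it after checking `format != 'plain'`.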
def to_format(what, format):
    if format == 'json':
        return json.dumps(what, sort_keys=True)
    elif format == 'json-pretty':
        return json.dumps(what, indent=2, sort_keys=True)
    elif format == 'yaml':
        return yaml.safe_dump_all(what, default_flow_style=False)


@six.add_metaclass(CLICommandMeta)
class OrchestratorCli(OrchestratorClientMixin, MgrModule):
    MODULE_OPTIONS = [
        {
            'name': 'orchestrator',
            'type': 'str',
            'default': None,
            'desc': 'Orchestrator backend',
            'enum_allowed': ['cephadm', 'rook',
                             'test_orchestrator'],
            'runtime': True,
        },
    ]
    NATIVE_OPTIONS = []  # type: List[dict]

    def __init__(self, *args, **kwargs):
        super(OrchestratorCli, self).__init__(*args, **kwargs)
        self.ident = set()  # type: Set[str]
        self.fault = set()  # type: Set[str]
        self._load()
        self._refresh_health()

    def _load(self):
        active = self.get_store('active_devices')
        if active:
            decoded = json.loads(active)
            self.ident = set(decoded.get('ident', []))
            self.fault = set(decoded.get('fault', []))
        self.log.debug('ident {}, fault {}'.format(self.ident, self.fault))

    def _save(self):
        encoded = json.dumps({
            'ident': list(self.ident),
            'fault': list(self.fault),
            })
        self.set_store('active_devices', encoded)

    def _refresh_health(self):
        h = {}
        if self.ident:
            h['DEVICE_IDENT_ON'] = {
                'severity': 'warning',
                'summary': '%d devices have ident light turned on' % len(
                    self.ident),
                'detail': ['{} ident light enabled'.format(d) for d in self.ident]
            }
        if self.fault:
            h['DEVICE_FAULT_ON'] = {
                'severity': 'warning',
                'summary': '%d devices have fault light turned on' % len(
                    self.fault),
                'detail': ['{} fault light enabled'.format(d) for d in self.fault]
            }
        self.set_health_checks(h)

    def _get_device_locations(self, dev_id):
        # type: (str) -> List[DeviceLightLoc]
        locs = [d['location'] for d in self.get('devices')['devices'] if d['devid'] == dev_id]
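        # Each matching device may expose several enclosure/slot locations;
        # sum(locs, []) flattens those per-device lists into one flat list.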
        return [DeviceLightLoc(**l) for l in sum(locs, [])]

    @_cli_read_command(
        prefix='device ls-lights',
        desc='List currently active device indicator lights')
    def _device_ls(self):
        return HandleCommandResult(
            stdout=json.dumps({
                'ident': list(self.ident),
                'fault': list(self.fault)
            }, indent=4, sort_keys=True))

    def light_on(self, fault_ident, devid):
        # type: (str, str) -> HandleCommandResult
        assert fault_ident in ("fault", "ident")
        locs = self._get_device_locations(devid)
        if not locs:
            return HandleCommandResult(stderr='device {} not found'.format(devid),
                                       retval=-errno.ENOENT)

        getattr(self, fault_ident).add(devid)
        self._save()
        self._refresh_health()
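        # blink_device_light() returns an orchestrator Completion; wait for it
        # to finish before reporting the result back to the CLI.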
        completion = self.blink_device_light(fault_ident, True, locs)
        self._orchestrator_wait([completion])
        return HandleCommandResult(stdout=str(completion.result))

    def light_off(self, fault_ident, devid, force):
        # type: (str, str, bool) -> HandleCommandResult
        assert fault_ident in ("fault", "ident")
        locs = self._get_device_locations(devid)
        if not locs:
            return HandleCommandResult(stderr='device {} not found'.format(devid),
                                       retval=-errno.ENOENT)

        try:
            completion = self.blink_device_light(fault_ident, False, locs)
            self._orchestrator_wait([completion])

            if devid in getattr(self, fault_ident):
                getattr(self, fault_ident).remove(devid)
                self._save()
                self._refresh_health()
            return HandleCommandResult(stdout=str(completion.result))

        except:
            # There are several reasons the try: block might fail:
            # 1. the device no longer exists
            # 2. the device is no longer known to Ceph
            # 3. the host is not reachable
            if force and devid in getattr(self, fault_ident):
                getattr(self, fault_ident).remove(devid)
                self._save()
                self._refresh_health()
            raise

    @_cli_write_command(
        prefix='device light',
        cmd_args='name=enable,type=CephChoices,strings=on|off '
                 'name=devid,type=CephString '
                 'name=light_type,type=CephChoices,strings=ident|fault,req=false '
                 'name=force,type=CephBool,req=false',
        desc='Enable or disable the device light. Default type is `ident`\n'
             'Usage: device light (on|off) <devid> [ident|fault] [--force]')
    def _device_light(self, enable, devid, light_type=None, force=False):
        # type: (str, str, Optional[str], bool) -> HandleCommandResult
        light_type = light_type or 'ident'
        on = enable == 'on'
        if on:
            return self.light_on(light_type, devid)
        else:
            return self.light_off(light_type, devid, force)

    def _select_orchestrator(self):
        return self.get_module_option("orchestrator")

    @_cli_write_command(
        'orch host add',
        'name=hostname,type=CephString,req=true '
        'name=addr,type=CephString,req=false '
        'name=labels,type=CephString,n=N,req=false',
        'Add a host')
    def _add_host(self, hostname: str, addr: Optional[str] = None, labels: Optional[List[str]] = None):
        s = HostSpec(hostname=hostname, addr=addr, labels=labels)
        completion = self.add_host(s)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch host rm',
        "name=hostname,type=CephString,req=true",
        'Remove a host')
    def _remove_host(self, hostname):
        completion = self.remove_host(hostname)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch host set-addr',
        'name=hostname,type=CephString '
        'name=addr,type=CephString',
        'Update a host address')
    def _update_set_addr(self, hostname, addr):
        completion = self.update_host_addr(hostname, addr)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_read_command(
        'orch host ls',
        'name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false',
        'List hosts')
    def _get_hosts(self, format='plain'):
        completion = self.get_hosts()
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        if format != 'plain':
            hosts = [host.to_json()
                     for host in completion.result]
            output = to_format(hosts, format)
        else:
            table = PrettyTable(
                ['HOST', 'ADDR', 'LABELS', 'STATUS'],
                border=False)
            table.align = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for host in sorted(completion.result, key=lambda h: h.hostname):
                table.add_row((host.hostname, host.addr, ' '.join(host.labels), host.status))
            output = table.get_string()
        return HandleCommandResult(stdout=output)

    @_cli_write_command(
        'orch host label add',
        'name=hostname,type=CephString '
        'name=label,type=CephString',
        'Add a host label')
    def _host_label_add(self, hostname, label):
        completion = self.add_host_label(hostname, label)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch host label rm',
        'name=hostname,type=CephString '
        'name=label,type=CephString',
        'Remove a host label')
    def _host_label_rm(self, hostname, label):
        completion = self.remove_host_label(hostname, label)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_read_command(
        'orch device ls',
        "name=hostname,type=CephString,n=N,req=false "
        "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false "
        "name=refresh,type=CephBool,req=false",
        'List devices on a host')
    def _list_devices(self, hostname=None, format='plain', refresh=False):
        # type: (Optional[List[str]], str, bool) -> HandleCommandResult
        """
        Provide information about storage devices present in cluster hosts

        Note: this does not have to be completely synchronous. Slightly out of
        date hardware inventory is fine as long as hardware ultimately appears
        in the output of this command.
        """
        nf = InventoryFilter(hosts=hostname) if hostname else None

        completion = self.get_inventory(host_filter=nf, refresh=refresh)

        self._orchestrator_wait([completion])
        raise_if_exception(completion)

        if format != 'plain':
            data = [n.to_json() for n in completion.result]
            return HandleCommandResult(stdout=to_format(data, format))
        else:
            out = []

            table = PrettyTable(
                ['HOST', 'PATH', 'TYPE', 'SIZE', 'DEVICE', 'AVAIL',
                 'REJECT REASONS'],
                border=False)
            table.align = 'l'
            table._align['SIZE'] = 'r'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for host_ in completion.result:  # type: InventoryHost
                for d in host_.devices.devices:  # type: Device
                    table.add_row(
                        (
                            host_.name,
                            d.path,
                            d.human_readable_type,
                            format_bytes(d.sys_api.get('size', 0), 5),
                            d.device_id,
                            d.available,
                            ', '.join(d.rejected_reasons)
                        )
                    )
            out.append(table.get_string())
            return HandleCommandResult(stdout='\n'.join(out))

    @_cli_write_command(
        'orch device zap',
        'name=hostname,type=CephString '
        'name=path,type=CephString '
        'name=force,type=CephBool,req=false',
        'Zap (erase!) a device so it can be re-used')
    def _zap_device(self, hostname, path, force=False):
        if not force:
            raise OrchestratorError('must pass --force to PERMANENTLY ERASE DEVICE DATA')
        completion = self.zap_device(hostname, path)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_read_command(
        'orch ls',
        "name=service_type,type=CephString,req=false "
        "name=service_name,type=CephString,req=false "
        "name=export,type=CephBool,req=false "
        "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false "
        "name=refresh,type=CephBool,req=false",
        'List services known to orchestrator')
    def _list_services(self, host=None, service_type=None, service_name=None, export=False, format='plain', refresh=False):

        if export and format == 'plain':
            format = 'yaml'

        completion = self.describe_service(service_type,
                                           service_name,
                                           refresh=refresh)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        services: List[ServiceDescription] = completion.result

        def ukn(s):
            return '<unknown>' if s is None else s

        # Sort the list for display
        services.sort(key=lambda s: (ukn(s.spec.service_name())))

        if len(services) == 0:
            return HandleCommandResult(stdout="No services reported")
        elif format != 'plain':
            if export:
                data = [s.spec.to_json() for s in services]
            else:
                data = [s.to_json() for s in services]
            return HandleCommandResult(stdout=to_format(data, format))
        else:
            now = datetime.datetime.utcnow()
            table = PrettyTable(
                ['NAME', 'RUNNING', 'REFRESHED', 'AGE',
                 'PLACEMENT',
                 'IMAGE NAME', 'IMAGE ID',
                 ],
                border=False)
            table.align['NAME'] = 'l'
            table.align['RUNNING'] = 'r'
            table.align['REFRESHED'] = 'l'
            table.align['AGE'] = 'l'
            table.align['IMAGE NAME'] = 'l'
            table.align['IMAGE ID'] = 'l'
            table.align['PLACEMENT'] = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for s in services:
                if not s.spec:
                    pl = '<no spec>'
                elif s.spec.unmanaged:
                    pl = '<unmanaged>'
                else:
                    pl = s.spec.placement.pretty_str()
                table.add_row((
                    s.spec.service_name(),
                    '%d/%d' % (s.running, s.size),
                    nice_delta(now, s.last_refresh, ' ago'),
                    nice_delta(now, s.created),
                    pl,
                    ukn(s.container_image_name),
                    ukn(s.container_image_id)[0:12],
                ))

            return HandleCommandResult(stdout=table.get_string())

    @_cli_read_command(
        'orch ps',
        "name=hostname,type=CephString,req=false "
        "name=service_name,type=CephString,req=false "
        "name=daemon_type,type=CephString,req=false "
        "name=daemon_id,type=CephString,req=false "
        "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false "
        "name=refresh,type=CephBool,req=false",
        'List daemons known to orchestrator')
    def _list_daemons(self, hostname=None, service_name=None, daemon_type=None, daemon_id=None, format='plain', refresh=False):
        completion = self.list_daemons(service_name,
                                       daemon_type,
                                       daemon_id=daemon_id,
                                       host=hostname,
                                       refresh=refresh)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        daemons: List[DaemonDescription] = completion.result

        def ukn(s):
            return '<unknown>' if s is None else s
        # Sort the list for display
        daemons.sort(key=lambda s: (ukn(s.daemon_type), ukn(s.hostname), ukn(s.daemon_id)))

        if len(daemons) == 0:
            return HandleCommandResult(stdout="No daemons reported")
        elif format != 'plain':
            data = [s.to_json() for s in daemons]
            return HandleCommandResult(stdout=to_format(data, format))
        else:
            now = datetime.datetime.utcnow()
            table = PrettyTable(
                ['NAME', 'HOST', 'STATUS', 'REFRESHED', 'AGE',
                 'VERSION', 'IMAGE NAME', 'IMAGE ID', 'CONTAINER ID'],
                border=False)
            table.align = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for s in sorted(daemons, key=lambda s: s.name()):
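                # Prefer the backend-provided status text; otherwise map the
                # numeric status code to a human-readable label.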
                if s.status_desc:
                    status = s.status_desc
                else:
                    status = {
                        -1: 'error',
                        0: 'stopped',
                        1: 'running',
                        None: '<unknown>'
                    }[s.status]
                if s.status == 1 and s.started:
                    status += ' (%s)' % to_pretty_timedelta(now - s.started)

                table.add_row((
                    s.name(),
                    ukn(s.hostname),
                    status,
                    nice_delta(now, s.last_refresh, ' ago'),
                    nice_delta(now, s.created),
                    ukn(s.version),
                    ukn(s.container_image_name),
                    ukn(s.container_image_id)[0:12],
                    ukn(s.container_id)))

            return HandleCommandResult(stdout=table.get_string())

    def set_unmanaged_flag(self,
                           unmanaged_flag: bool,
                           service_type: str = 'osd',
                           service_name=None
                           ) -> HandleCommandResult:
        # setting unmanaged for $service_name
        completion = self.describe_service(service_name=service_name, service_type=service_type)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        services: List[ServiceDescription] = completion.result
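        # Flip the unmanaged flag on every matching spec and re-apply the specs.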
        specs = list()
        for service in services:
            spec = service.spec
            spec.unmanaged = unmanaged_flag
            specs.append(spec)
        completion = self.apply(cast(List[GenericSpec], specs))
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        if specs:
            return HandleCommandResult(stdout=f"Changed <unmanaged> flag to <{unmanaged_flag}> for "
                                              f"{[spec.service_name() for spec in specs]}")
        else:
            return HandleCommandResult(stdout=f"No specs found with the <service_name> -> {service_name}")

    @_cli_write_command(
        'orch osd spec',
        'name=service_name,type=CephString,req=false '
        'name=preview,type=CephBool,req=false '
        'name=unmanaged,type=CephBool,req=false '
        "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false",
        'Common operations on an OSDSpec. Allows previewing and changing the unmanaged flag.')
    def _misc_osd(self,
                  preview: bool = False,
                  service_name: Optional[str] = None,
                  unmanaged=None,
                  format: Optional[str] = 'plain',
                  ) -> HandleCommandResult:
        usage = """
usage:
  ceph orch osd spec --preview
  ceph orch osd spec --unmanaged=true|false
  ceph orch osd spec --service-name <service_name> --preview
  ceph orch osd spec --service-name <service_name> --unmanaged=true|false (defaults to false)

Restrictions:

  Mutexes:
  * --preview, --unmanaged

  Although it's possible to set these at the same time, we will lack a proper response to each
  action, possibly shadowing any failures.

Description:

  * --service-name
    If the flag is omitted, all existing OSDSpecs are targeted.
    Needs either --unmanaged or --preview.

  * --unmanaged
    Applies the <unmanaged> flag to the targeted --service-name.
    If --service-name is omitted, target all OSDSpecs

Examples:

  # ceph orch osd spec --preview

  Queries all available OSDSpecs for previews

  # ceph orch osd spec --service-name my-osdspec-name --preview

  Queries only the specified <my-osdspec-name> for previews

  # ceph orch osd spec --unmanaged=true

  Changes the unmanaged flag of all available OSDSpecs to true

  # ceph orch osd spec --service-name my-osdspec-name --unmanaged=true

  Changes the unmanaged flag of <my-osdspec-name> to true
"""

        def print_preview(previews, format_to):
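            # `previews` maps hostname -> list of per-OSDSpec preview dicts, as
            # produced by preview_osdspecs() and parsed with ast.literal_eval().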
            if format != 'plain':
                return to_format(previews, format_to)
            else:
                table = PrettyTable(
                    ['NAME', 'HOST', 'DATA', 'DB', 'WAL'],
                    border=False)
                table.align = 'l'
                table.left_padding_width = 0
                table.right_padding_width = 1
                for host, data in previews.items():
                    for spec in data:
                        if spec.get('error'):
                            return spec.get('message')
                        dg_name = spec.get('osdspec')
                        for osd in spec.get('data', {}).get('osds', []):
                            db_path = '-'
                            wal_path = '-'
                            block_db = osd.get('block.db', {}).get('path')
                            block_wal = osd.get('block.wal', {}).get('path')
                            block_data = osd.get('data', {}).get('path', '')
                            if not block_data:
                                continue
                            if block_db:
                                db_path = spec.get('data', {}).get('vg', {}).get('devices', [])
                            if block_wal:
                                wal_path = spec.get('data', {}).get('wal_vg', {}).get('devices', [])
                            table.add_row((dg_name, host, block_data, db_path, wal_path))
                ret = table.get_string()
                if not ret:
                    ret = "No preview available"
                return ret

        if preview and (unmanaged is not None):
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        if service_name:
            if preview:
                completion = self.preview_osdspecs(osdspec_name=service_name)
                self._orchestrator_wait([completion])
                raise_if_exception(completion)
                out = completion.result_str()
                return HandleCommandResult(stdout=print_preview(ast.literal_eval(out), format))
            if unmanaged is not None:
                return self.set_unmanaged_flag(service_name=service_name, unmanaged_flag=unmanaged)

            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        if preview:
            completion = self.preview_osdspecs()
            self._orchestrator_wait([completion])
            raise_if_exception(completion)
            out = completion.result_str()
            return HandleCommandResult(stdout=print_preview(ast.literal_eval(out), format))

        if unmanaged is not None:
            return self.set_unmanaged_flag(unmanaged_flag=unmanaged)

        return HandleCommandResult(-errno.EINVAL, stderr=usage)

    @_cli_write_command(
        'orch apply osd',
        'name=all_available_devices,type=CephBool,req=false '
        'name=unmanaged,type=CephBool,req=false '
        "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false",
        'Create OSD daemon(s) using a drive group spec')
    def _apply_osd(self,
                   all_available_devices: bool = False,
                   format: Optional[str] = 'plain',
                   unmanaged=None,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Apply DriveGroupSpecs to create OSDs"""
        usage = """
usage:
  ceph orch apply osd -i <json_file/yaml_file>
  ceph orch apply osd --all-available-devices
  ceph orch apply osd --all-available-devices --unmanaged=true|false

Restrictions:

  Mutexes:
  * -i, --all-available-devices
  * -i, --unmanaged (this would overwrite the osdspec loaded from a file)

  Parameters:

  * --unmanaged
    Only works with --all-available-devices.

Description:

  * -i
    An inbuf object like a file or a json/yaml blob containing a valid OSDSpec

  * --all-available-devices
    The simplest OSDSpec there is. Takes all devices marked as 'available'
    and creates standalone OSDs on them.

  * --unmanaged
    Sets the unmanaged flag for --all-available-devices (default is False)

Examples:

  # ceph orch apply osd -i <file.yml|json>

  Applies one or more OSDSpecs found in <file>

  # ceph orch apply osd --all-available-devices --unmanaged=true

  Creates and applies a simple OSDSpec with the unmanaged flag set to <true>
"""

        if inbuf and all_available_devices:
            # mutually exclusive
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        if not inbuf and not all_available_devices:
            # one parameter must be present
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        if inbuf:
            if unmanaged is not None:
                return HandleCommandResult(-errno.EINVAL, stderr=usage)
            try:
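                # The input buffer may contain several YAML documents; each one
                # is parsed into its own DriveGroupSpec.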
                drivegroups = yaml.load_all(inbuf)
                dg_specs = [DriveGroupSpec.from_json(dg) for dg in drivegroups]
                # This acts weird when abstracted to a function
                completion = self.apply_drivegroups(dg_specs)
                self._orchestrator_wait([completion])
                raise_if_exception(completion)
                return HandleCommandResult(stdout=completion.result_str())
            except ValueError as e:
                msg = 'Failed to read JSON/YAML input: {}'.format(str(e)) + usage
                return HandleCommandResult(-errno.EINVAL, stderr=msg)
        if all_available_devices:
            if unmanaged is None:
                unmanaged = False
            dg_specs = [
                DriveGroupSpec(
                    service_id='all-available-devices',
                    placement=PlacementSpec(host_pattern='*'),
                    data_devices=DeviceSelection(all=True),
                    unmanaged=unmanaged
                )
            ]
            # This acts weird when abstracted to a function
            completion = self.apply_drivegroups(dg_specs)
            self._orchestrator_wait([completion])
            raise_if_exception(completion)
            return HandleCommandResult(stdout=completion.result_str())

        return HandleCommandResult(-errno.EINVAL, stderr=usage)

    @_cli_write_command(
        'orch daemon add osd',
        "name=svc_arg,type=CephString,req=false",
        'Create an OSD service. Use --svc_arg=host:drives')
    def _daemon_add_osd(self, svc_arg=None):
        # type: (Optional[str]) -> HandleCommandResult
        """Create one or more OSDs"""

        usage = """
Usage:
  ceph orch daemon add osd host:device1,device2,...
"""
        if not svc_arg:
            return HandleCommandResult(-errno.EINVAL, stderr=usage)
        try:
            host_name, block_device = svc_arg.split(":")
            block_devices = block_device.split(',')
            devs = DeviceSelection(paths=block_devices)
            drive_group = DriveGroupSpec(placement=PlacementSpec(host_pattern=host_name), data_devices=devs)
        except (TypeError, KeyError, ValueError):
            msg = "Invalid host:device spec: '{}'".format(svc_arg) + usage
            return HandleCommandResult(-errno.EINVAL, stderr=msg)

        completion = self.create_osds(drive_group)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch osd rm',
        "name=svc_id,type=CephString,n=N "
        "name=replace,type=CephBool,req=false "
        "name=force,type=CephBool,req=false",
        'Remove OSD services')
    def _osd_rm(self, svc_id: List[str],
                replace: bool = False,
                force: bool = False) -> HandleCommandResult:
        completion = self.remove_osds(svc_id, replace, force)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch osd rm status',
        desc='status of OSD removal operation')
    def _osd_rm_status(self) -> HandleCommandResult:
        completion = self.remove_osds_status()
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        report = completion.result
        if not report:
            return HandleCommandResult(stdout="No OSD remove/replace operations reported")
        table = PrettyTable(
            ['NAME', 'HOST', 'PGS', 'STARTED_AT'],
            border=False)
        table.align = 'l'
        table.left_padding_width = 0
        table.right_padding_width = 1
        # TODO: re-add sorted and sort by pg_count
        for osd in report:
            table.add_row((osd.fullname, osd.nodename, osd.pg_count_str, osd.started_at))

        return HandleCommandResult(stdout=table.get_string())

    @_cli_write_command(
        'orch daemon add',
        'name=daemon_type,type=CephChoices,strings=mon|mgr|rbd-mirror|crash|alertmanager|grafana|node-exporter|prometheus,req=false '
        'name=placement,type=CephString,req=false',
        'Add daemon(s)')
    def _daemon_add_misc(self,
                         daemon_type: Optional[str] = None,
                         placement: Optional[str] = None,
                         inbuf: Optional[str] = None) -> HandleCommandResult:
        usage = f"""Usage:
  ceph orch daemon add -i <json_file>
  ceph orch daemon add {daemon_type or '<daemon_type>'} <placement>"""
        if inbuf:
            if daemon_type or placement:
                raise OrchestratorValidationError(usage)
            spec = ServiceSpec.from_json(yaml.safe_load(inbuf))
        else:
            spec = PlacementSpec.from_string(placement)
            assert daemon_type
            spec = ServiceSpec(daemon_type, placement=spec)

        daemon_type = spec.service_type

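        # Dispatch to the add_<daemon_type>() call matching the requested type.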
        if daemon_type == 'mon':
            completion = self.add_mon(spec)
        elif daemon_type == 'mgr':
            completion = self.add_mgr(spec)
        elif daemon_type == 'rbd-mirror':
            completion = self.add_rbd_mirror(spec)
        elif daemon_type == 'crash':
            completion = self.add_crash(spec)
        elif daemon_type == 'alertmanager':
            completion = self.add_alertmanager(spec)
        elif daemon_type == 'grafana':
            completion = self.add_grafana(spec)
        elif daemon_type == 'node-exporter':
            completion = self.add_node_exporter(spec)
        elif daemon_type == 'prometheus':
            completion = self.add_prometheus(spec)
        elif daemon_type == 'mds':
            completion = self.add_mds(spec)
        elif daemon_type == 'rgw':
            completion = self.add_rgw(spec)
        elif daemon_type == 'nfs':
            completion = self.add_nfs(spec)
        elif daemon_type == 'iscsi':
            completion = self.add_iscsi(spec)
        else:
            raise OrchestratorValidationError(f'unknown daemon type `{daemon_type}`')

        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch daemon add mds',
        'name=fs_name,type=CephString '
        'name=placement,type=CephString,req=false',
        'Start MDS daemon(s)')
    def _mds_add(self,
                 fs_name: str,
                 placement: Optional[str] = None,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = ServiceSpec(
            service_type='mds',
            service_id=fs_name,
            placement=PlacementSpec.from_string(placement),
        )

        completion = self.add_mds(spec)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch daemon add rgw',
        'name=realm_name,type=CephString '
        'name=zone_name,type=CephString '
        'name=subcluster,type=CephString,req=false '
        'name=port,type=CephInt,req=false '
        'name=ssl,type=CephBool,req=false '
        'name=placement,type=CephString,req=false',
        'Start RGW daemon(s)')
    def _rgw_add(self,
                 realm_name: str,
                 zone_name: str,
                 subcluster: Optional[str] = None,
                 port: Optional[int] = None,
                 ssl: bool = False,
                 placement: Optional[str] = None,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = RGWSpec(
            rgw_realm=realm_name,
            rgw_zone=zone_name,
            subcluster=subcluster,
            rgw_frontend_port=port,
            ssl=ssl,
            placement=PlacementSpec.from_string(placement),
        )

        completion = self.add_rgw(spec)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch daemon add nfs',
        "name=svc_id,type=CephString "
        "name=pool,type=CephString "
        "name=namespace,type=CephString,req=false "
        'name=placement,type=CephString,req=false',
        'Start NFS daemon(s)')
    def _nfs_add(self,
                 svc_id: str,
                 pool: str,
                 namespace: Optional[str] = None,
                 placement: Optional[str] = None,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = NFSServiceSpec(
            service_id=svc_id,
            pool=pool,
            namespace=namespace,
            placement=PlacementSpec.from_string(placement),
        )

        spec.validate_add()
        completion = self.add_nfs(spec)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch daemon add iscsi',
        'name=pool,type=CephString '
        'name=api_user,type=CephString '
        'name=api_password,type=CephString '
        'name=trusted_ip_list,type=CephString,req=false '
        'name=placement,type=CephString,req=false',
        'Start iscsi daemon(s)')
    def _iscsi_add(self,
                   pool: str,
                   api_user: str,
                   api_password: str,
                   trusted_ip_list: Optional[str] = None,
                   placement: Optional[str] = None,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = IscsiServiceSpec(
            service_id='iscsi',
            pool=pool,
            api_user=api_user,
            api_password=api_password,
            trusted_ip_list=trusted_ip_list,
            placement=PlacementSpec.from_string(placement),
        )

        completion = self.add_iscsi(spec)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch',
        "name=action,type=CephChoices,strings=start|stop|restart|redeploy|reconfig "
        "name=service_name,type=CephString",
        'Start, stop, restart, redeploy, or reconfig an entire service (i.e. all daemons)')
    def _service_action(self, action, service_name):
        completion = self.service_action(action, service_name)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch daemon',
        "name=action,type=CephChoices,strings=start|stop|restart|redeploy|reconfig "
        "name=name,type=CephString",
        'Start, stop, restart, redeploy, or reconfig a specific daemon')
    def _daemon_action(self, action, name):
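        # Daemon names have the form '<daemon_type>.<daemon_id>'.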
        if '.' not in name:
            raise OrchestratorError('%s is not a valid daemon name' % name)
        (daemon_type, daemon_id) = name.split('.', 1)
        completion = self.daemon_action(action, daemon_type, daemon_id)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch daemon rm',
        "name=names,type=CephString,n=N "
        'name=force,type=CephBool,req=false',
        'Remove specific daemon(s)')
    def _daemon_rm(self, names, force=False):
        for name in names:
            if '.' not in name:
                raise OrchestratorError('%s is not a valid daemon name' % name)
            (daemon_type) = name.split('.')[0]
            if not force and daemon_type in ['osd', 'mon', 'prometheus']:
                raise OrchestratorError('must pass --force to REMOVE daemon with potentially PRECIOUS DATA for %s' % name)
        completion = self.remove_daemons(names)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch rm',
        'name=service_name,type=CephString '
        'name=force,type=CephBool,req=false',
        'Remove a service')
    def _service_rm(self, service_name, force=False):
        if service_name in ['mon', 'mgr'] and not force:
            raise OrchestratorError('The mon and mgr services cannot be removed')
        completion = self.remove_service(service_name)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch apply',
        'name=service_type,type=CephChoices,strings=mon|mgr|rbd-mirror|crash|alertmanager|grafana|node-exporter|prometheus,req=false '
        'name=placement,type=CephString,req=false '
        'name=unmanaged,type=CephBool,req=false',
        'Update the size or placement for a service or apply a large yaml spec')
    def _apply_misc(self,
                    service_type: Optional[str] = None,
                    placement: Optional[str] = None,
                    unmanaged: bool = False,
                    inbuf: Optional[str] = None) -> HandleCommandResult:
        usage = """Usage:
  ceph orch apply -i <yaml spec>
  ceph orch apply <service_type> <placement> [--unmanaged]
"""
        if inbuf:
            if service_type or placement or unmanaged:
                raise OrchestratorValidationError(usage)
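            # The input may contain multiple YAML documents; each one is turned
            # into a spec object via json_to_generic_spec().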
            content: Iterator = yaml.load_all(inbuf)
            specs: List[GenericSpec] = [json_to_generic_spec(s) for s in content]

        else:
            placementspec = PlacementSpec.from_string(placement)
            assert service_type
            specs = [ServiceSpec(service_type, placement=placementspec, unmanaged=unmanaged)]

        completion = self.apply(specs)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch apply mds',
        'name=fs_name,type=CephString '
        'name=placement,type=CephString,req=false '
        'name=unmanaged,type=CephBool,req=false',
        'Update the number of MDS instances for the given fs_name')
    def _apply_mds(self,
                   fs_name: str,
                   placement: Optional[str] = None,
                   unmanaged: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = ServiceSpec(
            service_type='mds',
            service_id=fs_name,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged)

        completion = self.apply_mds(spec)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch apply rgw',
        'name=realm_name,type=CephString '
        'name=zone_name,type=CephString '
        'name=subcluster,type=CephString,req=false '
        'name=port,type=CephInt,req=false '
        'name=ssl,type=CephBool,req=false '
        'name=placement,type=CephString,req=false '
        'name=unmanaged,type=CephBool,req=false',
        'Update the number of RGW instances for the given zone')
    def _apply_rgw(self,
                   realm_name: str,
                   zone_name: str,
                   subcluster: Optional[str] = None,
                   port: Optional[int] = None,
                   ssl: bool = False,
                   placement: Optional[str] = None,
                   unmanaged: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = RGWSpec(
            rgw_realm=realm_name,
            rgw_zone=zone_name,
            subcluster=subcluster,
            rgw_frontend_port=port,
            ssl=ssl,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
        )

        completion = self.apply_rgw(spec)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch apply nfs',
        'name=svc_id,type=CephString '
        'name=pool,type=CephString '
        'name=namespace,type=CephString,req=false '
        'name=placement,type=CephString,req=false '
        'name=unmanaged,type=CephBool,req=false',
        'Scale an NFS service')
    def _apply_nfs(self,
                   svc_id: str,
                   pool: str,
                   namespace: Optional[str] = None,
                   placement: Optional[str] = None,
                   unmanaged: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = NFSServiceSpec(
            service_id=svc_id,
            pool=pool,
            namespace=namespace,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
        )

        completion = self.apply_nfs(spec)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch apply iscsi',
        'name=pool,type=CephString '
        'name=api_user,type=CephString '
        'name=api_password,type=CephString '
        'name=trusted_ip_list,type=CephString,req=false '
        'name=placement,type=CephString,req=false '
        'name=unmanaged,type=CephBool,req=false',
        'Scale an iSCSI service')
    def _apply_iscsi(self,
                     pool: str,
                     api_user: str,
                     api_password: str,
                     trusted_ip_list: Optional[str] = None,
                     placement: Optional[str] = None,
                     unmanaged: bool = False,
                     inbuf: Optional[str] = None) -> HandleCommandResult:
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = IscsiServiceSpec(
            service_id='iscsi',
            pool=pool,
            api_user=api_user,
            api_password=api_password,
            trusted_ip_list=trusted_ip_list,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
        )

        completion = self.apply_iscsi(spec)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch set backend',
        "name=module_name,type=CephString,req=true",
        'Select orchestrator module backend')
    def _set_backend(self, module_name):
        """
        We implement a setter command instead of just having the user
        modify the setting directly, so that we can validate they're setting
        it to a module that really exists and is enabled.

        There isn't a mechanism for ensuring they don't *disable* the module
        later, but this is better than nothing.
        """
        mgr_map = self.get("mgr_map")

        if module_name is None or module_name == "":
            self.set_module_option("orchestrator", None)
            return HandleCommandResult()

        for module in mgr_map['available_modules']:
            if module['name'] != module_name:
                continue

            if not module['can_run']:
                continue

            enabled = module['name'] in mgr_map['modules']
            if not enabled:
                return HandleCommandResult(-errno.EINVAL,
                                           stderr="Module '{module_name}' is not enabled. \n Run "
                                                  "`ceph mgr module enable {module_name}` "
                                                  "to enable.".format(module_name=module_name))

            try:
                is_orchestrator = self.remote(module_name,
                                              "is_orchestrator_module")
            except NameError:
                is_orchestrator = False

            if not is_orchestrator:
                return HandleCommandResult(-errno.EINVAL,
                                           stderr="'{0}' is not an orchestrator module".format(module_name))

            self.set_module_option("orchestrator", module_name)

            return HandleCommandResult()

        return HandleCommandResult(-errno.EINVAL, stderr="Module '{0}' not found".format(module_name))

    @_cli_write_command(
        'orch pause',
        desc='Pause orchestrator background work')
    def _pause(self):
        self.pause()
        return HandleCommandResult()

    @_cli_write_command(
        'orch resume',
        desc='Resume orchestrator background work (if paused)')
    def _resume(self):
        self.resume()
        return HandleCommandResult()

    @_cli_write_command(
        'orch cancel',
        desc='cancels ongoing operations')
    def _cancel(self):
        """
        ProgressReferences might get stuck. Let's unstick them.
        """
        self.cancel_completions()
        return HandleCommandResult()

    @_cli_read_command(
        'orch status',
        desc='Report configured backend and its status')
    def _status(self):
        o = self._select_orchestrator()
        if o is None:
            raise NoOrchestrator()

        avail, why = self.available()
        if avail is None:
            # The module does not report its availability
            return HandleCommandResult(stdout="Backend: {0}".format(o))
        else:
            return HandleCommandResult(stdout="Backend: {0}\nAvailable: {1}{2}".format(
                o, avail,
                " ({0})".format(why) if not avail else ""
            ))

    def self_test(self):
        old_orch = self._select_orchestrator()
        self._set_backend('')
        assert self._select_orchestrator() is None
        self._set_backend(old_orch)

        e1 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "ZeroDivisionError")
        try:
            raise_if_exception(e1)
            assert False
        except ZeroDivisionError as e:
            assert e.args == ('hello', 'world')

        e2 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "OrchestratorError")
        try:
            raise_if_exception(e2)
            assert False
        except OrchestratorError as e:
            assert e.args == ('hello', 'world')

        c = TrivialReadCompletion(result=True)
        assert c.has_result

    @staticmethod
    def _upgrade_check_image_name(image, ceph_version):
        """
        >>> OrchestratorCli._upgrade_check_image_name('v15.2.0', None)
        Traceback (most recent call last):
        orchestrator._interface.OrchestratorValidationError: Error: unable to pull image name `v15.2.0`.
          Maybe you meant `--ceph-version 15.2.0`?

        """
        if image and re.match(r'^v?\d+\.\d+\.\d+$', image) and ceph_version is None:
            ver = image[1:] if image.startswith('v') else image
            s = f"Error: unable to pull image name `{image}`.\n" \
                f"  Maybe you meant `--ceph-version {ver}`?"
            raise OrchestratorValidationError(s)

    @_cli_write_command(
        'orch upgrade check',
        'name=image,type=CephString,req=false '
        'name=ceph_version,type=CephString,req=false',
        desc='Check service versions vs available and target containers')
    def _upgrade_check(self, image=None, ceph_version=None):
        self._upgrade_check_image_name(image, ceph_version)
        completion = self.upgrade_check(image=image, version=ceph_version)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch upgrade status',
        desc='Check the status of any ongoing upgrade operation')
    def _upgrade_status(self):
        completion = self.upgrade_status()
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        r = {
            'target_image': completion.result.target_image,
            'in_progress': completion.result.in_progress,
            'services_complete': completion.result.services_complete,
            'message': completion.result.message,
        }
        out = json.dumps(r, indent=4)
        return HandleCommandResult(stdout=out)

    @_cli_write_command(
        'orch upgrade start',
        'name=image,type=CephString,req=false '
        'name=ceph_version,type=CephString,req=false',
        desc='Initiate upgrade')
    def _upgrade_start(self, image=None, ceph_version=None):
        self._upgrade_check_image_name(image, ceph_version)
        completion = self.upgrade_start(image, ceph_version)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch upgrade pause',
        desc='Pause an in-progress upgrade')
    def _upgrade_pause(self):
        completion = self.upgrade_pause()
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch upgrade resume',
        desc='Resume paused upgrade')
    def _upgrade_resume(self):
        completion = self.upgrade_resume()
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch upgrade stop',
        desc='Stop an in-progress upgrade')
    def _upgrade_stop(self):
        completion = self.upgrade_stop()
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())