# ceph/src/pybind/mgr/orchestrator/module.py (ceph pacific 16.2.5)
import enum
import errno
import json
from typing import List, Set, Optional, Iterator, cast, Dict, Any, Union, Sequence
import re
import datetime

import yaml
from prettytable import PrettyTable

from ceph.deployment.inventory import Device
from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
from ceph.deployment.hostspec import SpecValidationError
from ceph.utils import datetime_now

from mgr_util import to_pretty_timedelta, format_dimless, format_bytes
from mgr_module import MgrModule, HandleCommandResult, Option

from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
    raise_if_exception, _cli_write_command, OrchestratorError, \
    NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
    RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
    ServiceDescription, DaemonDescription, IscsiServiceSpec, json_to_generic_spec, \
    GenericSpec, DaemonDescriptionStatus


def nice_delta(now: datetime.datetime, t: Optional[datetime.datetime], suffix: str = '') -> str:
    if t:
        return to_pretty_timedelta(now - t) + suffix
    else:
        return '-'


def nice_bytes(v: Optional[int]) -> str:
    if not v:
        return '-'
    return format_bytes(v, 5)

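# Illustrative examples (not part of the module): assuming mgr_util's
# to_pretty_timedelta() and format_bytes() behave as in this release, the
# helpers above render roughly like this:
#
#   nice_delta(now, now - datetime.timedelta(minutes=5), ' ago')  # -> '5m ago'
#   nice_delta(now, None)                                         # -> '-'
#   nice_bytes(123456789)                                         # -> '117M' (approximate)
#   nice_bytes(None)                                              # -> '-'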

class Format(enum.Enum):
    plain = 'plain'
    json = 'json'
    json_pretty = 'json-pretty'
    yaml = 'yaml'


class ServiceType(enum.Enum):
    mon = 'mon'
    mgr = 'mgr'
    rbd_mirror = 'rbd-mirror'
    cephfs_mirror = 'cephfs-mirror'
    crash = 'crash'
    alertmanager = 'alertmanager'
    grafana = 'grafana'
    node_exporter = 'node-exporter'
    prometheus = 'prometheus'
    mds = 'mds'
    rgw = 'rgw'
    nfs = 'nfs'
    iscsi = 'iscsi'
    cephadm_exporter = 'cephadm-exporter'


class ServiceAction(enum.Enum):
    start = 'start'
    stop = 'stop'
    restart = 'restart'
    redeploy = 'redeploy'
    reconfig = 'reconfig'


class DaemonAction(enum.Enum):
    start = 'start'
    stop = 'stop'
    restart = 'restart'
    reconfig = 'reconfig'


def to_format(what: Any, format: Format, many: bool, cls: Any) -> Any:
    def to_json_1(obj: Any) -> Any:
        if hasattr(obj, 'to_json'):
            return obj.to_json()
        return obj

    def to_json_n(objs: List) -> List:
        return [to_json_1(o) for o in objs]

    to_json = to_json_n if many else to_json_1

    if format == Format.json:
        return json.dumps(to_json(what), sort_keys=True)
    elif format == Format.json_pretty:
        return json.dumps(to_json(what), indent=2, sort_keys=True)
    elif format == Format.yaml:
        # Fun with subinterpreters again: PyYAML depends on object identity,
        # and `what` may originate from a different subinterpreter, so we
        # have to copy things here.
        if cls:
            flat = to_json(what)
            copy = [cls.from_json(o) for o in flat] if many else cls.from_json(flat)
        else:
            copy = what

        def to_yaml_1(obj: Any) -> Any:
            if hasattr(obj, 'yaml_representer'):
                return obj
            return to_json_1(obj)

        def to_yaml_n(objs: list) -> list:
            return [to_yaml_1(o) for o in objs]

        to_yaml = to_yaml_n if many else to_yaml_1

        if many:
            return yaml.dump_all(to_yaml(copy), default_flow_style=False)
        return yaml.dump(to_yaml(copy), default_flow_style=False)
    else:
        raise OrchestratorError(f'unsupported format type: {format}')

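# A minimal usage sketch for to_format() (illustrative, not part of the
# module): any object exposing to_json()/from_json(), e.g. HostSpec, can be
# rendered in each Format. The exact output depends on the spec class.
#
#   hosts = [HostSpec(hostname='host1'), HostSpec(hostname='host2')]
#   to_format(hosts, Format.json, many=True, cls=HostSpec)   # compact JSON list
#   to_format(hosts, Format.yaml, many=True, cls=HostSpec)   # one YAML doc per host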

def generate_preview_tables(data: Any, osd_only: bool = False) -> str:
    error = [x.get('error') for x in data if x.get('error')]
    if error:
        return json.dumps(error)
    warning = [x.get('warning') for x in data if x.get('warning')]
    osd_table = preview_table_osd(data)
    service_table = preview_table_services(data)

    if osd_only:
        tables = f"""
{''.join(warning)}

################
OSDSPEC PREVIEWS
################
{osd_table}
"""
        return tables
    else:
        tables = f"""
{''.join(warning)}

####################
SERVICESPEC PREVIEWS
####################
{service_table}

################
OSDSPEC PREVIEWS
################
{osd_table}
"""
        return tables

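# Shape of the preview `data` consumed above (a sketch inferred from the
# table builders below; field availability may vary by orchestrator backend):
#
#   [
#       {'service_type': 'osd',
#        'data': {'host1': [{'osdspec': 'example_drive_group',
#                            'data': [{'data': '/dev/sdb',
#                                      'block_db': '-', 'block_wal': '-'}]}]}},
#       {'service_type': 'mon', 'service_name': 'mon',
#        'add': ['host2'], 'remove': []},
#   ]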

def preview_table_osd(data: List) -> str:
    table = PrettyTable(header_style='upper', title='OSDSPEC PREVIEWS', border=True)
    table.field_names = "service name host data db wal".split()
    table.align = 'l'
    table.left_padding_width = 0
    table.right_padding_width = 2
    for osd_data in data:
        if osd_data.get('service_type') != 'osd':
            continue
        for host, specs in osd_data.get('data').items():
            for spec in specs:
                if spec.get('error'):
                    return spec.get('message')
                dg_name = spec.get('osdspec')
                for osd in spec.get('data', []):
                    db_path = osd.get('block_db', '-')
                    wal_path = osd.get('block_wal', '-')
                    block_data = osd.get('data', '')
                    if not block_data:
                        continue
                    table.add_row(('osd', dg_name, host, block_data, db_path, wal_path))
    return table.get_string()


def preview_table_services(data: List) -> str:
    table = PrettyTable(header_style='upper', title="SERVICESPEC PREVIEW", border=True)
    table.field_names = 'SERVICE NAME ADD_TO REMOVE_FROM'.split()
    table.align = 'l'
    table.left_padding_width = 0
    table.right_padding_width = 2
    for item in data:
        if item.get('warning'):
            continue
        if item.get('service_type') != 'osd':
            table.add_row((item.get('service_type'), item.get('service_name'),
                           " ".join(item.get('add')), " ".join(item.get('remove'))))
    return table.get_string()


class OrchestratorCli(OrchestratorClientMixin, MgrModule,
                      metaclass=CLICommandMeta):
    MODULE_OPTIONS = [
        Option(
            'orchestrator',
            type='str',
            default=None,
            desc='Orchestrator backend',
            enum_allowed=['cephadm', 'rook', 'test_orchestrator'],
            runtime=True,
        )
    ]
    NATIVE_OPTIONS = []  # type: List[dict]

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(OrchestratorCli, self).__init__(*args, **kwargs)
        self.ident = set()  # type: Set[str]
        self.fault = set()  # type: Set[str]
        self._load()
        self._refresh_health()

    def _load(self) -> None:
        active = self.get_store('active_devices')
        if active:
            decoded = json.loads(active)
            self.ident = set(decoded.get('ident', []))
            self.fault = set(decoded.get('fault', []))
        self.log.debug('ident {}, fault {}'.format(self.ident, self.fault))

    def _save(self) -> None:
        encoded = json.dumps({
            'ident': list(self.ident),
            'fault': list(self.fault),
        })
        self.set_store('active_devices', encoded)

    def _refresh_health(self) -> None:
        h = {}
        if self.ident:
            h['DEVICE_IDENT_ON'] = {
                'severity': 'warning',
                'summary': '%d devices have ident light turned on' % len(
                    self.ident),
                'detail': ['{} ident light enabled'.format(d) for d in self.ident]
            }
        if self.fault:
            h['DEVICE_FAULT_ON'] = {
                'severity': 'warning',
                'summary': '%d devices have fault light turned on' % len(
                    self.fault),
                'detail': ['{} fault light enabled'.format(d) for d in self.fault]
            }
        self.set_health_checks(h)

    def _get_device_locations(self, dev_id):
        # type: (str) -> List[DeviceLightLoc]
        locs = [d['location'] for d in self.get('devices')['devices'] if d['devid'] == dev_id]
        return [DeviceLightLoc(**loc) for loc in sum(locs, [])]

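    # Sketch of the `devices` payload consumed above (illustrative; the exact
    # fields come from the mgr's device tracking and may differ by release).
    # Each device carries a list of locations that map onto DeviceLightLoc:
    #
    #   {'devices': [
    #       {'devid': 'SEAGATE_ST12000NM0027_Z1Z...',
    #        'location': [{'host': 'host1', 'dev': 'sdb', 'path': '/dev/sdb'}]},
    #   ]}
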
    @_cli_read_command(prefix='device ls-lights')
    def _device_ls(self) -> HandleCommandResult:
        """List currently active device indicator lights"""
        return HandleCommandResult(
            stdout=json.dumps({
                'ident': list(self.ident),
                'fault': list(self.fault)
            }, indent=4, sort_keys=True))

    def light_on(self, fault_ident, devid):
        # type: (str, str) -> HandleCommandResult
        assert fault_ident in ("fault", "ident")
        locs = self._get_device_locations(devid)
        if not locs:  # an unknown devid yields an empty list, not None
            return HandleCommandResult(stderr='device {} not found'.format(devid),
                                       retval=-errno.ENOENT)

        getattr(self, fault_ident).add(devid)
        self._save()
        self._refresh_health()
        completion = self.blink_device_light(fault_ident, True, locs)
        return HandleCommandResult(stdout=str(completion.result))

    def light_off(self, fault_ident, devid, force):
        # type: (str, str, bool) -> HandleCommandResult
        assert fault_ident in ("fault", "ident")
        locs = self._get_device_locations(devid)
        if not locs:  # an unknown devid yields an empty list, not None
            return HandleCommandResult(stderr='device {} not found'.format(devid),
                                       retval=-errno.ENOENT)

        try:
            completion = self.blink_device_light(fault_ident, False, locs)

            if devid in getattr(self, fault_ident):
                getattr(self, fault_ident).remove(devid)
                self._save()
                self._refresh_health()
            return HandleCommandResult(stdout=str(completion.result))

        except Exception:
            # There are several reasons the try: block might fail:
            # 1. the device no longer exists
            # 2. the device is no longer known to Ceph
            # 3. the host is not reachable
            if force and devid in getattr(self, fault_ident):
                getattr(self, fault_ident).remove(devid)
                self._save()
                self._refresh_health()
            raise

    class DeviceLightEnable(enum.Enum):
        on = 'on'
        off = 'off'

    class DeviceLightType(enum.Enum):
        ident = 'ident'
        fault = 'fault'

    @_cli_write_command(prefix='device light')
    def _device_light(self,
                      enable: DeviceLightEnable,
                      devid: str,
                      light_type: DeviceLightType = DeviceLightType.ident,
                      force: bool = False) -> HandleCommandResult:
319 """
320 Enable or disable the device light. Default type is `ident`
321 'Usage: device light (on|off) <devid> [ident|fault] [--force]'
322 """""
        if enable == self.DeviceLightEnable.on:
            return self.light_on(light_type.value, devid)
        else:
            return self.light_off(light_type.value, devid, force)

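    # Example invocations (the device id is illustrative; see `ceph device ls`
    # for real ids):
    #
    #   ceph device light on SEAGATE_ST12000NM0027_Z1Z... ident
    #   ceph device light off SEAGATE_ST12000NM0027_Z1Z... fault --force
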
    def _select_orchestrator(self) -> str:
        return cast(str, self.get_module_option("orchestrator"))

    @_cli_write_command('orch host add')
    def _add_host(self,
                  hostname: str,
                  addr: Optional[str] = None,
                  labels: Optional[List[str]] = None,
                  maintenance: Optional[bool] = False) -> HandleCommandResult:
        """Add a host"""
        _status = 'maintenance' if maintenance else ''

        # split multiple labels passed in with --labels=label1,label2
        if labels and len(labels) == 1:
            labels = labels[0].split(',')

        s = HostSpec(hostname=hostname, addr=addr, labels=labels, status=_status)

        return self._apply_misc([s], False, Format.plain)

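    # Example invocations (hostname, address, and labels are illustrative):
    #
    #   ceph orch host add host1
    #   ceph orch host add host1 10.0.0.1 --labels=mon,mgr
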
    @_cli_write_command('orch host rm')
    def _remove_host(self, hostname: str) -> HandleCommandResult:
        """Remove a host"""
        completion = self.remove_host(hostname)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch host set-addr')
    def _update_set_addr(self, hostname: str, addr: str) -> HandleCommandResult:
        """Update a host address"""
        completion = self.update_host_addr(hostname, addr)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_read_command('orch host ls')
    def _get_hosts(self, format: Format = Format.plain) -> HandleCommandResult:
        """List hosts"""
        completion = self.get_hosts()
        hosts = raise_if_exception(completion)

        if format != Format.plain:
            output = to_format(hosts, format, many=True, cls=HostSpec)
        else:
            table = PrettyTable(
                ['HOST', 'ADDR', 'LABELS', 'STATUS'],
                border=False)
            table.align = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for host in sorted(hosts, key=lambda h: h.hostname):
                table.add_row((host.hostname, host.addr, ' '.join(
                    host.labels), host.status.capitalize()))
            output = table.get_string()
        return HandleCommandResult(stdout=output)

    @_cli_write_command('orch host label add')
    def _host_label_add(self, hostname: str, label: str) -> HandleCommandResult:
        """Add a host label"""
        completion = self.add_host_label(hostname, label)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch host label rm')
    def _host_label_rm(self, hostname: str, label: str) -> HandleCommandResult:
        """Remove a host label"""
        completion = self.remove_host_label(hostname, label)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch host ok-to-stop')
    def _host_ok_to_stop(self, hostname: str) -> HandleCommandResult:
399 """Check if the specified host can be safely stopped without reducing availability"""""
        completion = self.host_ok_to_stop(hostname)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch host maintenance enter')
    def _host_maintenance_enter(self, hostname: str, force: bool = False) -> HandleCommandResult:
        """
        Prepare a host for maintenance by shutting down and disabling all Ceph daemons (cephadm only)
        """
        completion = self.enter_host_maintenance(hostname, force=force)
        raise_if_exception(completion)

        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command(
        'orch host maintenance exit')
    def _host_maintenance_exit(self, hostname: str) -> HandleCommandResult:
        """
        Return a host from maintenance, restarting all Ceph daemons (cephadm only)
        """
        completion = self.exit_host_maintenance(hostname)
        raise_if_exception(completion)

        return HandleCommandResult(stdout=completion.result_str())

    @_cli_read_command('orch device ls')
    def _list_devices(self,
                      hostname: Optional[List[str]] = None,
                      format: Format = Format.plain,
                      refresh: bool = False,
                      wide: bool = False) -> HandleCommandResult:
        """
        List devices on a host
        """
        # Provide information about storage devices present in cluster hosts
        #
        # Note: this does not have to be completely synchronous. Slightly out of
        # date hardware inventory is fine as long as hardware ultimately appears
        # in the output of this command.
        nf = InventoryFilter(hosts=hostname) if hostname else None

        completion = self.get_inventory(host_filter=nf, refresh=refresh)

        inv_hosts = raise_if_exception(completion)

        if format != Format.plain:
            return HandleCommandResult(stdout=to_format(inv_hosts,
                                                        format,
                                                        many=True,
                                                        cls=InventoryHost))
        else:
            display_map = {
                "Unsupported": "N/A",
                "N/A": "N/A",
                "On": "On",
                "Off": "Off",
                True: "Yes",
                False: "No",
            }

            out = []
            if wide:
                table = PrettyTable(
                    ['Hostname', 'Path', 'Type', 'Transport', 'RPM', 'Vendor', 'Model',
                     'Serial', 'Size', 'Health', 'Ident', 'Fault', 'Available',
                     'Reject Reasons'],
                    border=False)
            else:
                table = PrettyTable(
                    ['Hostname', 'Path', 'Type', 'Serial', 'Size',
                     'Health', 'Ident', 'Fault', 'Available'],
                    border=False)
            table.align = 'l'
            table._align['Size'] = 'r'  # right-align the Size column
            table.left_padding_width = 0
            table.right_padding_width = 2
            for host_ in sorted(inv_hosts, key=lambda h: h.name):  # type: InventoryHost
                for d in host_.devices.devices:  # type: Device

                    led_ident = 'N/A'
                    led_fail = 'N/A'
                    if d.lsm_data.get('ledSupport', None):
                        led_ident = d.lsm_data['ledSupport']['IDENTstatus']
                        led_fail = d.lsm_data['ledSupport']['FAILstatus']

                    if d.device_id is not None:
                        fallback_serial = d.device_id.split('_')[-1]
                    else:
                        fallback_serial = ""

                    if wide:
                        table.add_row(
                            (
                                host_.name,
                                d.path,
                                d.human_readable_type,
                                d.lsm_data.get('transport', 'Unknown'),
                                d.lsm_data.get('rpm', 'Unknown'),
                                d.sys_api.get('vendor') or 'N/A',
                                d.sys_api.get('model') or 'N/A',
                                d.lsm_data.get('serialNum', fallback_serial),
                                format_dimless(d.sys_api.get('size', 0), 5),
                                d.lsm_data.get('health', 'Unknown'),
                                display_map[led_ident],
                                display_map[led_fail],
                                display_map[d.available],
                                ', '.join(d.rejected_reasons)
                            )
                        )
                    else:
                        table.add_row(
                            (
                                host_.name,
                                d.path,
                                d.human_readable_type,
                                d.lsm_data.get('serialNum', fallback_serial),
                                format_dimless(d.sys_api.get('size', 0), 5),
                                d.lsm_data.get('health', 'Unknown'),
                                display_map[led_ident],
                                display_map[led_fail],
                                display_map[d.available]
                            )
                        )
            out.append(table.get_string())
            return HandleCommandResult(stdout='\n'.join(out))

    @_cli_write_command('orch device zap')
    def _zap_device(self, hostname: str, path: str, force: bool = False) -> HandleCommandResult:
        """
        Zap (erase!) a device so it can be re-used
        """
        if not force:
            raise OrchestratorError('must pass --force to PERMANENTLY ERASE DEVICE DATA')
        completion = self.zap_device(hostname, path)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_read_command('orch ls')
    def _list_services(self,
                       service_type: Optional[str] = None,
                       service_name: Optional[str] = None,
                       export: bool = False,
                       format: Format = Format.plain,
                       refresh: bool = False) -> HandleCommandResult:
        """
        List services known to orchestrator
        """
        if export and format == Format.plain:
            format = Format.yaml

        completion = self.describe_service(service_type,
                                           service_name,
                                           refresh=refresh)

        services = raise_if_exception(completion)

        def ukn(s: Optional[str]) -> str:
            return '<unknown>' if s is None else s

        # Sort the list for display
        services.sort(key=lambda s: (ukn(s.spec.service_name())))

        if len(services) == 0:
            return HandleCommandResult(stdout="No services reported")
        elif format != Format.plain:
            if export:
                data = [s.spec for s in services if s.deleted is None]
                return HandleCommandResult(stdout=to_format(data, format, many=True, cls=ServiceSpec))
            else:
                return HandleCommandResult(stdout=to_format(services, format, many=True, cls=ServiceDescription))
        else:
            now = datetime_now()
            table = PrettyTable(
                [
                    'NAME', 'PORTS',
                    'RUNNING', 'REFRESHED', 'AGE',
                    'PLACEMENT',
                ],
                border=False)
            table.align['NAME'] = 'l'
            table.align['PORTS'] = 'l'
            table.align['RUNNING'] = 'r'
            table.align['REFRESHED'] = 'l'
            table.align['AGE'] = 'l'
            table.align['PLACEMENT'] = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for s in services:
                if not s.spec:
                    pl = '<no spec>'
                elif s.spec.unmanaged:
                    pl = '<unmanaged>'
                else:
                    pl = s.spec.placement.pretty_str()
                if s.deleted:
                    refreshed = '<deleting>'
                else:
                    refreshed = nice_delta(now, s.last_refresh, ' ago')

                table.add_row((
                    s.spec.service_name(),
                    s.get_port_summary(),
                    '%d/%d' % (s.running, s.size),
                    refreshed,
                    nice_delta(now, s.created),
                    pl,
                ))

            return HandleCommandResult(stdout=table.get_string())

    @_cli_read_command('orch ps')
    def _list_daemons(self,
                      hostname: Optional[str] = None,
                      service_name: Optional[str] = None,
                      daemon_type: Optional[str] = None,
                      daemon_id: Optional[str] = None,
                      format: Format = Format.plain,
                      refresh: bool = False) -> HandleCommandResult:
        """
        List daemons known to orchestrator
        """
        completion = self.list_daemons(service_name,
                                       daemon_type,
                                       daemon_id=daemon_id,
                                       host=hostname,
                                       refresh=refresh)

        daemons = raise_if_exception(completion)

        def ukn(s: Optional[str]) -> str:
            return '<unknown>' if s is None else s
        # Sort the list for display
        daemons.sort(key=lambda s: (ukn(s.daemon_type), ukn(s.hostname), ukn(s.daemon_id)))

        if format != Format.plain:
            return HandleCommandResult(stdout=to_format(daemons, format, many=True, cls=DaemonDescription))
        else:
            if len(daemons) == 0:
                return HandleCommandResult(stdout="No daemons reported")

            now = datetime_now()
            table = PrettyTable(
                ['NAME', 'HOST', 'PORTS',
                 'STATUS', 'REFRESHED', 'AGE',
                 'MEM USE', 'MEM LIM',
                 'VERSION', 'IMAGE ID', 'CONTAINER ID'],
                border=False)
            table.align = 'l'
            table._align['REFRESHED'] = 'r'
            table._align['AGE'] = 'r'
            table._align['MEM USE'] = 'r'
            table._align['MEM LIM'] = 'r'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for s in sorted(daemons, key=lambda s: s.name()):
                if s.status_desc:
                    status = s.status_desc
                else:
                    status = {
                        DaemonDescriptionStatus.error: 'error',
                        DaemonDescriptionStatus.stopped: 'stopped',
                        DaemonDescriptionStatus.running: 'running',
                        None: '<unknown>'
                    }[s.status]
                if s.status == DaemonDescriptionStatus.running and s.started:
                    status += ' (%s)' % to_pretty_timedelta(now - s.started)

                table.add_row((
                    s.name(),
                    ukn(s.hostname),
                    s.get_port_summary(),
                    status,
                    nice_delta(now, s.last_refresh, ' ago'),
                    nice_delta(now, s.created),
                    nice_bytes(s.memory_usage),
                    nice_bytes(s.memory_request),
                    ukn(s.version),
                    ukn(s.container_image_id)[0:12],
                    ukn(s.container_id)))

            remove_column = 'CONTAINER ID'
            if table.get_string(fields=[remove_column], border=False,
                                header=False).count('<unknown>') == len(daemons):
                try:
                    table.del_column(remove_column)
                except AttributeError as e:
                    # del_column method was introduced in prettytable 2.0
                    if str(e) != "del_column":
                        raise

                    table.field_names.remove(remove_column)
                    table._rows = [row[:-1] for row in table._rows]

            return HandleCommandResult(stdout=table.get_string())

    @_cli_write_command('orch apply osd')
    def _apply_osd(self,
                   all_available_devices: bool = False,
                   format: Format = Format.plain,
                   unmanaged: Optional[bool] = None,
                   dry_run: bool = False,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """
        Create OSD daemon(s) using a drive group spec
        """
        # Apply DriveGroupSpecs to create OSDs
        usage = """
usage:
  ceph orch apply osd -i <json_file/yaml_file> [--dry-run]
  ceph orch apply osd --all-available-devices [--dry-run] [--unmanaged]

Restrictions:

  Mutexes:
  * -i, --all-available-devices
  * -i, --unmanaged (this would overwrite the osdspec loaded from a file)

  Parameters:

  * --unmanaged
     Only works with --all-available-devices.

Description:

  * -i
    An inbuf object like a file or a json/yaml blob containing a valid OSDSpec

  * --all-available-devices
    The simplest OSDSpec there is. Takes all devices marked as 'available'
    and creates standalone OSDs on them.

  * --unmanaged
    Sets the unmanaged flag for --all-available-devices (default is False)

Examples:

   # ceph orch apply osd -i <file.yml|json>

   Applies one or more OSDSpecs found in <file>

   # ceph orch apply osd --all-available-devices --unmanaged=true

   Creates and applies a simple OSDSpec with the unmanaged flag set to <true>
"""

        if inbuf and all_available_devices:
            # mutually exclusive
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        if not inbuf and not all_available_devices:
            # one parameter must be present
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        if inbuf:
            if unmanaged is not None:
                return HandleCommandResult(-errno.EINVAL, stderr=usage)

            try:
                drivegroups = [_dg for _dg in yaml.safe_load_all(inbuf)]
            except yaml.scanner.ScannerError as e:
                msg = f"Invalid YAML received: {str(e)}"
                self.log.exception(e)
                return HandleCommandResult(-errno.EINVAL, stderr=msg)

            dg_specs = []
            for dg in drivegroups:
                spec = DriveGroupSpec.from_json(dg)
                if dry_run:
                    spec.preview_only = True
                dg_specs.append(spec)

            return self._apply_misc(dg_specs, dry_run, format, no_overwrite)

        if all_available_devices:
            if unmanaged is None:
                unmanaged = False
            dg_specs = [
                DriveGroupSpec(
                    service_id='all-available-devices',
                    placement=PlacementSpec(host_pattern='*'),
                    data_devices=DeviceSelection(all=True),
                    unmanaged=unmanaged,
                    preview_only=dry_run
                )
            ]
            return self._apply_misc(dg_specs, dry_run, format, no_overwrite)

        return HandleCommandResult(-errno.EINVAL, stderr=usage)

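    # A minimal OSDSpec that could be fed to `ceph orch apply osd -i <file>`
    # (illustrative values; see the DriveGroupSpec documentation for the full
    # schema):
    #
    #   service_type: osd
    #   service_id: example_drive_group
    #   placement:
    #     host_pattern: '*'
    #   data_devices:
    #     rotational: 1
    #   db_devices:
    #     rotational: 0
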
    @_cli_write_command('orch daemon add osd')
    def _daemon_add_osd(self, svc_arg: Optional[str] = None) -> HandleCommandResult:
        """Create OSD daemon(s) on a host, e.g. --svc_arg=host:device1,device2"""
        # Create one or more OSDs

        usage = """
Usage:
  ceph orch daemon add osd host:device1,device2,...
"""
        if not svc_arg:
            return HandleCommandResult(-errno.EINVAL, stderr=usage)
        try:
            host_name, block_device = svc_arg.split(":")
            block_devices = block_device.split(',')
            devs = DeviceSelection(paths=block_devices)
            drive_group = DriveGroupSpec(placement=PlacementSpec(
                host_pattern=host_name), data_devices=devs)
        except (TypeError, KeyError, ValueError):
            msg = "Invalid host:device spec: '{}'".format(svc_arg) + usage
            return HandleCommandResult(-errno.EINVAL, stderr=msg)

        completion = self.create_osds(drive_group)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch osd rm')
    def _osd_rm_start(self,
                      osd_id: List[str],
                      replace: bool = False,
                      force: bool = False) -> HandleCommandResult:
        """Remove OSD daemons"""
        completion = self.remove_osds(osd_id, replace=replace, force=force)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch osd rm stop')
    def _osd_rm_stop(self, osd_id: List[str]) -> HandleCommandResult:
        """Cancel ongoing OSD removal operation"""
        completion = self.stop_remove_osds(osd_id)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch osd rm status')
    def _osd_rm_status(self, format: Format = Format.plain) -> HandleCommandResult:
        """Status of OSD removal operation"""
        completion = self.remove_osds_status()
        raise_if_exception(completion)
        report = completion.result

        if not report:
            return HandleCommandResult(stdout="No OSD remove/replace operations reported")

        if format != Format.plain:
            out = to_format(report, format, many=True, cls=None)
        else:
            table = PrettyTable(
                ['OSD_ID', 'HOST', 'STATE', 'PG_COUNT', 'REPLACE', 'FORCE', 'DRAIN_STARTED_AT'],
                border=False)
            table.align = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for osd in sorted(report, key=lambda o: o.osd_id):
                table.add_row([osd.osd_id, osd.hostname, osd.drain_status_human(),
                               osd.get_pg_count(), osd.replace, osd.force, osd.drain_started_at])
            out = table.get_string()

        return HandleCommandResult(stdout=out)

    @_cli_write_command('orch daemon add')
    def daemon_add_misc(self,
                        daemon_type: Optional[ServiceType] = None,
                        placement: Optional[str] = None,
                        inbuf: Optional[str] = None) -> HandleCommandResult:
        """Add daemon(s)"""
        usage = f"""Usage:
  ceph orch daemon add -i <json_file>
  ceph orch daemon add {daemon_type or '<daemon_type>'} <placement>"""
        if inbuf:
            if daemon_type or placement:
                raise OrchestratorValidationError(usage)
            spec = ServiceSpec.from_json(yaml.safe_load(inbuf))
        else:
            if not placement or not daemon_type:
                raise OrchestratorValidationError(usage)
            placement_spec = PlacementSpec.from_string(placement)
            spec = ServiceSpec(daemon_type.value, placement=placement_spec)

        return self._daemon_add_misc(spec)

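    # PlacementSpec.from_string() turns a compact placement string into a
    # PlacementSpec; a few illustrative forms (hostnames/labels are examples):
    #
    #   '3'            -> any 3 hosts
    #   'host1 host2'  -> these specific hosts
    #   'label:mon'    -> all hosts carrying the 'mon' label
    #   '*'            -> all hosts
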
    def _daemon_add_misc(self, spec: ServiceSpec) -> HandleCommandResult:
        completion = self.add_daemon(spec)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch daemon add mds')
    def _mds_add(self,
                 fs_name: str,
                 placement: Optional[str] = None,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        """Start MDS daemon(s)"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = ServiceSpec(
            service_type='mds',
            service_id=fs_name,
            placement=PlacementSpec.from_string(placement),
        )
        return self._daemon_add_misc(spec)

    @_cli_write_command('orch daemon add rgw')
    def _rgw_add(self,
                 svc_id: str,
                 port: Optional[int] = None,
                 ssl: bool = False,
                 placement: Optional[str] = None,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        """Start RGW daemon(s)"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = RGWSpec(
            service_id=svc_id,
            rgw_frontend_port=port,
            ssl=ssl,
            placement=PlacementSpec.from_string(placement),
        )
        return self._daemon_add_misc(spec)

    @_cli_write_command('orch daemon add nfs')
    def _nfs_add(self,
                 svc_id: str,
                 pool: str,
                 namespace: Optional[str] = None,
                 placement: Optional[str] = None,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        """Start NFS daemon(s)"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = NFSServiceSpec(
            service_id=svc_id,
            pool=pool,
            namespace=namespace,
            placement=PlacementSpec.from_string(placement),
        )
        return self._daemon_add_misc(spec)

    @_cli_write_command('orch daemon add iscsi')
    def _iscsi_add(self,
                   pool: str,
                   api_user: str,
                   api_password: str,
                   trusted_ip_list: Optional[str] = None,
                   placement: Optional[str] = None,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Start iscsi daemon(s)"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = IscsiServiceSpec(
            service_id='iscsi',
            pool=pool,
            api_user=api_user,
            api_password=api_password,
            trusted_ip_list=trusted_ip_list,
            placement=PlacementSpec.from_string(placement),
        )
        return self._daemon_add_misc(spec)

    @_cli_write_command('orch')
    def _service_action(self, action: ServiceAction, service_name: str) -> HandleCommandResult:
        """Start, stop, restart, redeploy, or reconfig an entire service (i.e. all daemons)"""
        completion = self.service_action(action.value, service_name)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch daemon')
    def _daemon_action(self, action: DaemonAction, name: str) -> HandleCommandResult:
969 """Start, stop, restart, (redeploy,) or reconfig a specific daemon"""
        if '.' not in name:
            raise OrchestratorError('%s is not a valid daemon name' % name)
        completion = self.daemon_action(action.value, name)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch daemon redeploy')
    def _daemon_action_redeploy(self, name: str, image: Optional[str] = None) -> HandleCommandResult:
978 """Redeploy a daemon (with a specifc image)"""
        if '.' not in name:
            raise OrchestratorError('%s is not a valid daemon name' % name)
        completion = self.daemon_action("redeploy", name, image=image)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch daemon rm')
    def _daemon_rm(self,
                   names: List[str],
                   force: Optional[bool] = False) -> HandleCommandResult:
        """Remove specific daemon(s)"""
        for name in names:
            if '.' not in name:
                raise OrchestratorError('%s is not a valid daemon name' % name)
            daemon_type = name.split('.')[0]
            if not force and daemon_type in ['osd', 'mon', 'prometheus']:
                raise OrchestratorError(
                    'must pass --force to REMOVE daemon with potentially PRECIOUS DATA for %s' % name)
        completion = self.remove_daemons(names)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch rm')
    def _service_rm(self,
                    service_name: str,
                    force: bool = False) -> HandleCommandResult:
        """Remove a service"""
        if service_name in ['mon', 'mgr'] and not force:
            raise OrchestratorError('The mon and mgr services cannot be removed')
        completion = self.remove_service(service_name)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch apply')
    def apply_misc(self,
                   service_type: Optional[ServiceType] = None,
                   placement: Optional[str] = None,
                   dry_run: bool = False,
                   format: Format = Format.plain,
                   unmanaged: bool = False,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Update the size or placement for a service or apply a large yaml spec"""
        usage = """Usage:
  ceph orch apply -i <yaml spec> [--dry-run]
  ceph orch apply <service_type> [--placement=<placement_string>] [--unmanaged]
"""
        if inbuf:
            if service_type or placement or unmanaged:
                raise OrchestratorValidationError(usage)
            content: Iterator = yaml.safe_load_all(inbuf)
            specs: List[Union[ServiceSpec, HostSpec]] = []
            for s in content:
                spec = json_to_generic_spec(s)

                # validate the config (we need MgrModule for that)
                if isinstance(spec, ServiceSpec) and spec.config:
                    for k, v in spec.config.items():
                        try:
                            self.get_foreign_ceph_option('mon', k)
                        except KeyError:
                            raise SpecValidationError(f'Invalid config option {k} in spec')

                if dry_run and not isinstance(spec, HostSpec):
                    spec.preview_only = dry_run
                specs.append(spec)
        else:
            placementspec = PlacementSpec.from_string(placement)
            if not service_type:
                raise OrchestratorValidationError(usage)
            specs = [ServiceSpec(service_type.value, placement=placementspec,
                                 unmanaged=unmanaged, preview_only=dry_run)]
        return self._apply_misc(specs, dry_run, format, no_overwrite)

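    # A sketch of the multi-document YAML that `ceph orch apply -i` accepts
    # (illustrative service ids and placements):
    #
    #   service_type: mon
    #   placement:
    #     count: 3
    #   ---
    #   service_type: mds
    #   service_id: cephfs
    #   placement:
    #     label: mds
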
    def _apply_misc(self, specs: Sequence[GenericSpec], dry_run: bool, format: Format, no_overwrite: bool = False) -> HandleCommandResult:
        completion = self.apply(specs, no_overwrite)
        raise_if_exception(completion)
        out = completion.result_str()
        if dry_run:
            completion = self.plan(specs)
            raise_if_exception(completion)
            data = completion.result
            if format == Format.plain:
                out = generate_preview_tables(data)
            else:
                out = to_format(data, format, many=True, cls=None)
        return HandleCommandResult(stdout=out)

    @_cli_write_command('orch apply mds')
    def _apply_mds(self,
                   fs_name: str,
                   placement: Optional[str] = None,
                   dry_run: bool = False,
                   unmanaged: bool = False,
                   format: Format = Format.plain,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Update the number of MDS instances for the given fs_name"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = ServiceSpec(
            service_type='mds',
            service_id=fs_name,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
            preview_only=dry_run)
        return self._apply_misc([spec], dry_run, format, no_overwrite)

    @_cli_write_command('orch apply rgw')
    def _apply_rgw(self,
                   svc_id: str,
                   realm: Optional[str] = None,
                   zone: Optional[str] = None,
                   port: Optional[int] = None,
                   ssl: bool = False,
                   placement: Optional[str] = None,
                   dry_run: bool = False,
                   format: Format = Format.plain,
                   unmanaged: bool = False,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Update the number of RGW instances for the given zone"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        if realm and not zone:
            raise OrchestratorValidationError(
                'Cannot add RGW: Realm specified but no zone specified')
        if zone and not realm:
            raise OrchestratorValidationError(
                'Cannot add RGW: Zone specified but no realm specified')

        spec = RGWSpec(
            service_id=svc_id,
            rgw_realm=realm,
            rgw_zone=zone,
            rgw_frontend_port=port,
            ssl=ssl,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
            preview_only=dry_run
        )

        return self._apply_misc([spec], dry_run, format, no_overwrite)

    @_cli_write_command('orch apply nfs')
    def _apply_nfs(self,
                   svc_id: str,
                   placement: Optional[str] = None,
                   format: Format = Format.plain,
                   pool: Optional[str] = None,
                   namespace: Optional[str] = None,
                   port: Optional[int] = None,
                   dry_run: bool = False,
                   unmanaged: bool = False,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Scale an NFS service"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = NFSServiceSpec(
            service_id=svc_id,
            pool=pool,
            namespace=namespace,
            port=port,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
            preview_only=dry_run
        )

        return self._apply_misc([spec], dry_run, format, no_overwrite)

    @_cli_write_command('orch apply iscsi')
    def _apply_iscsi(self,
                     pool: str,
                     api_user: str,
                     api_password: str,
                     trusted_ip_list: Optional[str] = None,
                     placement: Optional[str] = None,
                     unmanaged: bool = False,
                     dry_run: bool = False,
                     format: Format = Format.plain,
                     no_overwrite: bool = False,
                     inbuf: Optional[str] = None) -> HandleCommandResult:
        """Scale an iSCSI service"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = IscsiServiceSpec(
            service_id=pool,
            pool=pool,
            api_user=api_user,
            api_password=api_password,
            trusted_ip_list=trusted_ip_list,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
            preview_only=dry_run
        )

        return self._apply_misc([spec], dry_run, format, no_overwrite)

    @_cli_write_command('orch set backend')
    def _set_backend(self, module_name: Optional[str] = None) -> HandleCommandResult:
        """
        Select orchestrator module backend
        """
        # We implement a setter command instead of just having the user
        # modify the setting directly, so that we can validate they're setting
        # it to a module that really exists and is enabled.

        # There isn't a mechanism for ensuring they don't *disable* the module
        # later, but this is better than nothing.
        mgr_map = self.get("mgr_map")

        if module_name is None or module_name == "":
            self.set_module_option("orchestrator", None)
            return HandleCommandResult()

        for module in mgr_map['available_modules']:
            if module['name'] != module_name:
                continue

            if not module['can_run']:
                continue

            enabled = module['name'] in mgr_map['modules']
            if not enabled:
                return HandleCommandResult(-errno.EINVAL,
                                           stderr="Module '{module_name}' is not enabled. \n Run "
                                                  "`ceph mgr module enable {module_name}` "
                                                  "to enable.".format(module_name=module_name))

            try:
                is_orchestrator = self.remote(module_name,
                                              "is_orchestrator_module")
            except NameError:
                is_orchestrator = False

            if not is_orchestrator:
                return HandleCommandResult(-errno.EINVAL,
                                           stderr="'{0}' is not an orchestrator module".format(module_name))

            self.set_module_option("orchestrator", module_name)

            return HandleCommandResult()

        return HandleCommandResult(-errno.EINVAL, stderr="Module '{0}' not found".format(module_name))

    @_cli_write_command('orch pause')
    def _pause(self) -> HandleCommandResult:
        """Pause orchestrator background work"""
        self.pause()
        return HandleCommandResult()

    @_cli_write_command('orch resume')
    def _resume(self) -> HandleCommandResult:
        """Resume orchestrator background work (if paused)"""
        self.resume()
        return HandleCommandResult()

    @_cli_write_command('orch cancel')
    def _cancel(self) -> HandleCommandResult:
        """
        Cancel ongoing background operations
        """
        self.cancel_completions()
        return HandleCommandResult()

    @_cli_read_command('orch status')
    def _status(self,
                detail: bool = False,
                format: Format = Format.plain) -> HandleCommandResult:
        """Report configured backend and its status"""
        o = self._select_orchestrator()
        if o is None:
            raise NoOrchestrator()

        avail, why, module_details = self.available()
        result: Dict[str, Any] = {
            "available": avail,
            "backend": o,
        }

        if avail:
            result.update(module_details)
        else:
            result['reason'] = why

        if format != Format.plain:
            output = to_format(result, format, many=False, cls=None)
        else:
            output = "Backend: {0}".format(result['backend'])
            output += f"\nAvailable: {'Yes' if result['available'] else 'No'}"
            if 'reason' in result:
                output += ' ({0})'.format(result['reason'])
            if 'paused' in result:
                output += f"\nPaused: {'Yes' if result['paused'] else 'No'}"
            if 'workers' in result and detail:
                output += f"\nHost Parallelism: {result['workers']}"
        return HandleCommandResult(stdout=output)

    def self_test(self) -> None:
        old_orch = self._select_orchestrator()
        self._set_backend('')
        assert self._select_orchestrator() is None
        self._set_backend(old_orch)

        e1 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "ZeroDivisionError")
        try:
            raise_if_exception(e1)
            assert False
        except ZeroDivisionError as e:
            assert e.args == ('hello, world',)

        e2 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "OrchestratorError")
        try:
            raise_if_exception(e2)
            assert False
        except OrchestratorError as e:
            assert e.args == ('hello, world',)

    @staticmethod
    def _upgrade_check_image_name(image: Optional[str], ceph_version: Optional[str]) -> None:
        """
        >>> OrchestratorCli._upgrade_check_image_name('v15.2.0', None)
        Traceback (most recent call last):
        orchestrator._interface.OrchestratorValidationError: Error: unable to pull image name `v15.2.0`.
          Maybe you meant `--ceph-version 15.2.0`?

        """
        if image and re.match(r'^v?\d+\.\d+\.\d+$', image) and ceph_version is None:
            ver = image[1:] if image.startswith('v') else image
            s = f"Error: unable to pull image name `{image}`.\n" \
                f"  Maybe you meant `--ceph-version {ver}`?"
            raise OrchestratorValidationError(s)

    @_cli_write_command('orch upgrade check')
    def _upgrade_check(self,
                       image: Optional[str] = None,
                       ceph_version: Optional[str] = None) -> HandleCommandResult:
        """Check service versions vs available and target containers"""
        self._upgrade_check_image_name(image, ceph_version)
        completion = self.upgrade_check(image=image, version=ceph_version)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch upgrade status')
    def _upgrade_status(self) -> HandleCommandResult:
1329 """Check service versions vs available and target containers"""
        completion = self.upgrade_status()
        status = raise_if_exception(completion)
        r = {
            'target_image': status.target_image,
            'in_progress': status.in_progress,
            'services_complete': status.services_complete,
            'progress': status.progress,
            'message': status.message,
        }
        out = json.dumps(r, indent=4)
        return HandleCommandResult(stdout=out)

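    # Sample `ceph orch upgrade status` output while an upgrade is running
    # (values are illustrative):
    #
    #   {
    #       "target_image": "quay.io/ceph/ceph:v16.2.5",
    #       "in_progress": true,
    #       "services_complete": ["mgr", "mon"],
    #       "progress": "6/42 ceph daemons upgraded",
    #       "message": "Currently upgrading osd daemons"
    #   }
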
    @_cli_write_command('orch upgrade start')
    def _upgrade_start(self,
                       image: Optional[str] = None,
                       ceph_version: Optional[str] = None) -> HandleCommandResult:
        """Initiate upgrade"""
        self._upgrade_check_image_name(image, ceph_version)
        completion = self.upgrade_start(image, ceph_version)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch upgrade pause')
    def _upgrade_pause(self) -> HandleCommandResult:
        """Pause an in-progress upgrade"""
        completion = self.upgrade_pause()
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch upgrade resume')
    def _upgrade_resume(self) -> HandleCommandResult:
        """Resume paused upgrade"""
        completion = self.upgrade_resume()
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch upgrade stop')
    def _upgrade_stop(self) -> HandleCommandResult:
        """Stop an in-progress upgrade"""
        completion = self.upgrade_stop()
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())