import enum
import errno
import json
from typing import List, Set, Optional, Iterator, cast, Dict, Any, Union, Sequence
import re
import datetime

import yaml
from prettytable import PrettyTable

from ceph.deployment.inventory import Device
from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
from ceph.deployment.hostspec import SpecValidationError
from ceph.utils import datetime_now

from mgr_util import to_pretty_timedelta, format_dimless
from mgr_module import MgrModule, HandleCommandResult, Option

from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
    raise_if_exception, _cli_write_command, OrchestratorError, \
    NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
    RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
    ServiceDescription, DaemonDescription, IscsiServiceSpec, json_to_generic_spec, \
    GenericSpec, DaemonDescriptionStatus


def nice_delta(now: datetime.datetime, t: Optional[datetime.datetime], suffix: str = '') -> str:
    if t:
        return to_pretty_timedelta(now - t) + suffix
    else:
        return '-'


class Format(enum.Enum):
    plain = 'plain'
    json = 'json'
    json_pretty = 'json-pretty'
    yaml = 'yaml'


class ServiceType(enum.Enum):
    mon = 'mon'
    mgr = 'mgr'
    rbd_mirror = 'rbd-mirror'
    cephfs_mirror = 'cephfs-mirror'
    crash = 'crash'
    alertmanager = 'alertmanager'
    grafana = 'grafana'
    node_exporter = 'node-exporter'
    prometheus = 'prometheus'
    mds = 'mds'
    rgw = 'rgw'
    nfs = 'nfs'
    iscsi = 'iscsi'
    cephadm_exporter = 'cephadm-exporter'


class ServiceAction(enum.Enum):
    start = 'start'
    stop = 'stop'
    restart = 'restart'
    redeploy = 'redeploy'
    reconfig = 'reconfig'


class DaemonAction(enum.Enum):
    start = 'start'
    stop = 'stop'
    restart = 'restart'
    reconfig = 'reconfig'

def to_format(what: Any, format: Format, many: bool, cls: Any) -> Any:
    def to_json_1(obj: Any) -> Any:
        if hasattr(obj, 'to_json'):
            return obj.to_json()
        return obj

    def to_json_n(objs: List) -> List:
        return [to_json_1(o) for o in objs]

    to_json = to_json_n if many else to_json_1

    if format == Format.json:
        return json.dumps(to_json(what), sort_keys=True)
    elif format == Format.json_pretty:
        return json.dumps(to_json(what), indent=2, sort_keys=True)
    elif format == Format.yaml:
        # fun with subinterpreters again. pyyaml depends on object identity.
        # as `what` originates from a different subinterpreter, we have to
        # copy things here.
        if cls:
            flat = to_json(what)
            copy = [cls.from_json(o) for o in flat] if many else cls.from_json(flat)
        else:
            copy = what

        def to_yaml_1(obj: Any) -> Any:
            if hasattr(obj, 'yaml_representer'):
                return obj
            return to_json_1(obj)

        def to_yaml_n(objs: list) -> list:
            return [to_yaml_1(o) for o in objs]

        to_yaml = to_yaml_n if many else to_yaml_1

        if many:
            return yaml.dump_all(to_yaml(copy), default_flow_style=False)
        return yaml.dump(to_yaml(copy), default_flow_style=False)
    else:
        raise OrchestratorError(f'unsupported format type: {format}')

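# A usage sketch (illustrative only) for objects that implement the
# to_json()/from_json() convention, e.g.
#
#   to_format(hosts, Format.json_pretty, many=True, cls=HostSpec)
#   to_format(status_dict, Format.yaml, many=False, cls=None)
#
# `cls` is only consulted for YAML output, where each object is round-tripped
# through cls.from_json() to avoid the subinterpreter identity issue above.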

def generate_preview_tables(data: Any, osd_only: bool = False) -> str:
    error = [x.get('error') for x in data if x.get('error')]
    if error:
        return json.dumps(error)
    warning = [x.get('warning') for x in data if x.get('warning')]
    osd_table = preview_table_osd(data)
    service_table = preview_table_services(data)

    if osd_only:
        tables = f"""
{''.join(warning)}

################
OSDSPEC PREVIEWS
################
{osd_table}
"""
        return tables
    else:
        tables = f"""
{''.join(warning)}

####################
SERVICESPEC PREVIEWS
####################
{service_table}

################
OSDSPEC PREVIEWS
################
{osd_table}
"""
        return tables


def preview_table_osd(data: List) -> str:
    table = PrettyTable(header_style='upper', title='OSDSPEC PREVIEWS', border=True)
    table.field_names = "service name host data db wal".split()
    table.align = 'l'
    table.left_padding_width = 0
    table.right_padding_width = 2
    for osd_data in data:
        if osd_data.get('service_type') != 'osd':
            continue
        for host, specs in osd_data.get('data').items():
            for spec in specs:
                if spec.get('error'):
                    return spec.get('message')
                dg_name = spec.get('osdspec')
                for osd in spec.get('data', []):
                    db_path = osd.get('block_db', '-')
                    wal_path = osd.get('block_wal', '-')
                    block_data = osd.get('data', '')
                    if not block_data:
                        continue
                    table.add_row(('osd', dg_name, host, block_data, db_path, wal_path))
    return table.get_string()


def preview_table_services(data: List) -> str:
    table = PrettyTable(header_style='upper', title="SERVICESPEC PREVIEW", border=True)
    table.field_names = 'SERVICE NAME ADD_TO REMOVE_FROM'.split()
    table.align = 'l'
    table.left_padding_width = 0
    table.right_padding_width = 2
    for item in data:
        if item.get('warning'):
            continue
        if item.get('service_type') != 'osd':
            table.add_row((item.get('service_type'), item.get('service_name'),
                           " ".join(item.get('add')), " ".join(item.get('remove'))))
    return table.get_string()

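# For reference, each preview item consumed above is a dict shaped roughly
# like the following (a sketch inferred from the lookups in this file, not a
# formal schema):
#
#   {'service_type': 'osd', 'service_name': ..., 'data': {...},
#    'add': [...], 'remove': [...], 'error': ..., 'warning': ...}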


class OrchestratorCli(OrchestratorClientMixin, MgrModule,
                      metaclass=CLICommandMeta):
    MODULE_OPTIONS = [
        Option(
            'orchestrator',
            type='str',
            default=None,
            desc='Orchestrator backend',
            enum_allowed=['cephadm', 'rook', 'test_orchestrator'],
            runtime=True,
        )
    ]
    NATIVE_OPTIONS = []  # type: List[dict]

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(OrchestratorCli, self).__init__(*args, **kwargs)
        self.ident = set()  # type: Set[str]
        self.fault = set()  # type: Set[str]
        self._load()
        self._refresh_health()

    def _load(self) -> None:
        active = self.get_store('active_devices')
        if active:
            decoded = json.loads(active)
            self.ident = set(decoded.get('ident', []))
            self.fault = set(decoded.get('fault', []))
        self.log.debug('ident {}, fault {}'.format(self.ident, self.fault))

    def _save(self) -> None:
        encoded = json.dumps({
            'ident': list(self.ident),
            'fault': list(self.fault),
        })
        self.set_store('active_devices', encoded)

    def _refresh_health(self) -> None:
        h = {}
        if self.ident:
            h['DEVICE_IDENT_ON'] = {
                'severity': 'warning',
                'summary': '%d devices have ident light turned on' % len(
                    self.ident),
                'detail': ['{} ident light enabled'.format(d) for d in self.ident]
            }
        if self.fault:
            h['DEVICE_FAULT_ON'] = {
                'severity': 'warning',
                'summary': '%d devices have fault light turned on' % len(
                    self.fault),
                'detail': ['{} fault light enabled'.format(d) for d in self.fault]
            }
        self.set_health_checks(h)

    def _get_device_locations(self, dev_id):
        # type: (str) -> List[DeviceLightLoc]
        locs = [d['location'] for d in self.get('devices')['devices'] if d['devid'] == dev_id]
        return [DeviceLightLoc(**loc) for loc in sum(locs, [])]

    @_cli_read_command(prefix='device ls-lights')
    def _device_ls(self) -> HandleCommandResult:
        """List currently active device indicator lights"""
        return HandleCommandResult(
            stdout=json.dumps({
                'ident': list(self.ident),
                'fault': list(self.fault)
            }, indent=4, sort_keys=True))

    def light_on(self, fault_ident, devid):
        # type: (str, str) -> HandleCommandResult
        assert fault_ident in ("fault", "ident")
        locs = self._get_device_locations(devid)
        if locs is None:
            return HandleCommandResult(stderr='device {} not found'.format(devid),
                                       retval=-errno.ENOENT)

        getattr(self, fault_ident).add(devid)
        self._save()
        self._refresh_health()
        completion = self.blink_device_light(fault_ident, True, locs)
        return HandleCommandResult(stdout=str(completion.result))

    def light_off(self, fault_ident, devid, force):
        # type: (str, str, bool) -> HandleCommandResult
        assert fault_ident in ("fault", "ident")
        locs = self._get_device_locations(devid)
        if locs is None:
            return HandleCommandResult(stderr='device {} not found'.format(devid),
                                       retval=-errno.ENOENT)

        try:
            completion = self.blink_device_light(fault_ident, False, locs)

            if devid in getattr(self, fault_ident):
                getattr(self, fault_ident).remove(devid)
                self._save()
                self._refresh_health()
            return HandleCommandResult(stdout=str(completion.result))

        except Exception:
            # There are several reasons the try: block might fail:
            # 1. the device no longer exists
            # 2. the device is no longer known to Ceph
            # 3. the host is not reachable
            if force and devid in getattr(self, fault_ident):
                getattr(self, fault_ident).remove(devid)
                self._save()
                self._refresh_health()
            raise

    class DeviceLightEnable(enum.Enum):
        on = 'on'
        off = 'off'

    class DeviceLightType(enum.Enum):
        ident = 'ident'
        fault = 'fault'

    @_cli_write_command(prefix='device light')
    def _device_light(self,
                      enable: DeviceLightEnable,
                      devid: str,
                      light_type: DeviceLightType = DeviceLightType.ident,
                      force: bool = False) -> HandleCommandResult:
        """
        Enable or disable the device light. Default type is `ident`.

        Usage: device light (on|off) <devid> [ident|fault] [--force]
        """
        if enable == self.DeviceLightEnable.on:
            return self.light_on(light_type.value, devid)
        else:
            return self.light_off(light_type.value, devid, force)

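    # Illustrative invocations, following the usage line above (<devid> is a
    # placeholder for a real device id):
    #
    #   ceph device light on <devid>                 # ident LED (the default)
    #   ceph device light on <devid> fault           # fault LED
    #   ceph device light off <devid> fault --force  # clear state even if the blink call fails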
    def _select_orchestrator(self) -> str:
        return cast(str, self.get_module_option("orchestrator"))

    @_cli_write_command('orch host add')
    def _add_host(self,
                  hostname: str,
                  addr: Optional[str] = None,
                  labels: Optional[List[str]] = None,
                  maintenance: Optional[bool] = False) -> HandleCommandResult:
        """Add a host"""
        _status = 'maintenance' if maintenance else ''

        # split multiple labels passed in with --labels=label1,label2
        if labels and len(labels) == 1:
            labels = labels[0].split(',')

        s = HostSpec(hostname=hostname, addr=addr, labels=labels, status=_status)

        return self._apply_misc([s], False, Format.plain)

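    # For example (hostname and address are placeholders):
    #
    #   ceph orch host add node1 10.0.0.1 --labels=mon,mgr
    #
    # A single comma-separated --labels argument is split into individual
    # labels by the code above.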
    @_cli_write_command('orch host rm')
    def _remove_host(self, hostname: str) -> HandleCommandResult:
        """Remove a host"""
        completion = self.remove_host(hostname)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch host set-addr')
    def _update_set_addr(self, hostname: str, addr: str) -> HandleCommandResult:
        """Update a host address"""
        completion = self.update_host_addr(hostname, addr)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_read_command('orch host ls')
    def _get_hosts(self, format: Format = Format.plain) -> HandleCommandResult:
        """List hosts"""
        completion = self.get_hosts()
        hosts = raise_if_exception(completion)

        if format != Format.plain:
            output = to_format(hosts, format, many=True, cls=HostSpec)
        else:
            table = PrettyTable(
                ['HOST', 'ADDR', 'LABELS', 'STATUS'],
                border=False)
            table.align = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for host in sorted(hosts, key=lambda h: h.hostname):
                table.add_row((host.hostname, host.addr, ' '.join(
                    host.labels), host.status.capitalize()))
            output = table.get_string()
        return HandleCommandResult(stdout=output)

    @_cli_write_command('orch host label add')
    def _host_label_add(self, hostname: str, label: str) -> HandleCommandResult:
        """Add a host label"""
        completion = self.add_host_label(hostname, label)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch host label rm')
    def _host_label_rm(self, hostname: str, label: str) -> HandleCommandResult:
        """Remove a host label"""
        completion = self.remove_host_label(hostname, label)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch host ok-to-stop')
    def _host_ok_to_stop(self, hostname: str) -> HandleCommandResult:
        """Check if the specified host can be safely stopped without reducing availability"""
        completion = self.host_ok_to_stop(hostname)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch host maintenance enter')
    def _host_maintenance_enter(self, hostname: str, force: bool = False) -> HandleCommandResult:
        """
        Prepare a host for maintenance by shutting down and disabling all Ceph daemons (cephadm only)
        """
        completion = self.enter_host_maintenance(hostname, force=force)
        raise_if_exception(completion)

        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch host maintenance exit')
    def _host_maintenance_exit(self, hostname: str) -> HandleCommandResult:
        """
        Return a host from maintenance, restarting all Ceph daemons (cephadm only)
        """
        completion = self.exit_host_maintenance(hostname)
        raise_if_exception(completion)

        return HandleCommandResult(stdout=completion.result_str())

    @_cli_read_command('orch device ls')
    def _list_devices(self,
                      hostname: Optional[List[str]] = None,
                      format: Format = Format.plain,
                      refresh: bool = False,
                      wide: bool = False) -> HandleCommandResult:
        """
        List devices on a host
        """
        # Provide information about storage devices present in cluster hosts
        #
        # Note: this does not have to be completely synchronous. Slightly out of
        # date hardware inventory is fine as long as hardware ultimately appears
        # in the output of this command.
        nf = InventoryFilter(hosts=hostname) if hostname else None

        completion = self.get_inventory(host_filter=nf, refresh=refresh)

        inv_hosts = raise_if_exception(completion)

        if format != Format.plain:
            return HandleCommandResult(stdout=to_format(inv_hosts,
                                                        format,
                                                        many=True,
                                                        cls=InventoryHost))
        else:
            display_map = {
                "Unsupported": "N/A",
                "N/A": "N/A",
                "On": "On",
                "Off": "Off",
                True: "Yes",
                False: "No",
            }

            out = []
            if wide:
                table = PrettyTable(
                    ['Hostname', 'Path', 'Type', 'Transport', 'RPM', 'Vendor', 'Model',
                     'Serial', 'Size', 'Health', 'Ident', 'Fault', 'Available',
                     'Reject Reasons'],
                    border=False)
            else:
                table = PrettyTable(
                    ['Hostname', 'Path', 'Type', 'Serial', 'Size',
                     'Health', 'Ident', 'Fault', 'Available'],
                    border=False)
            table.align = 'l'
            table._align['Size'] = 'r'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for host_ in sorted(inv_hosts, key=lambda h: h.name):  # type: InventoryHost
                for d in host_.devices.devices:  # type: Device

                    led_ident = 'N/A'
                    led_fail = 'N/A'
                    if d.lsm_data.get('ledSupport', None):
                        led_ident = d.lsm_data['ledSupport']['IDENTstatus']
                        led_fail = d.lsm_data['ledSupport']['FAILstatus']

                    if d.device_id is not None:
                        fallback_serial = d.device_id.split('_')[-1]
                    else:
                        fallback_serial = ""

                    if wide:
                        table.add_row(
                            (
                                host_.name,
                                d.path,
                                d.human_readable_type,
                                d.lsm_data.get('transport', 'Unknown'),
                                d.lsm_data.get('rpm', 'Unknown'),
                                d.sys_api.get('vendor') or 'N/A',
                                d.sys_api.get('model') or 'N/A',
                                d.lsm_data.get('serialNum', fallback_serial),
                                format_dimless(d.sys_api.get('size', 0), 5),
                                d.lsm_data.get('health', 'Unknown'),
                                display_map[led_ident],
                                display_map[led_fail],
                                display_map[d.available],
                                ', '.join(d.rejected_reasons)
                            )
                        )
                    else:
                        table.add_row(
                            (
                                host_.name,
                                d.path,
                                d.human_readable_type,
                                d.lsm_data.get('serialNum', fallback_serial),
                                format_dimless(d.sys_api.get('size', 0), 5),
                                d.lsm_data.get('health', 'Unknown'),
                                display_map[led_ident],
                                display_map[led_fail],
                                display_map[d.available]
                            )
                        )
            out.append(table.get_string())
            return HandleCommandResult(stdout='\n'.join(out))

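    # Example invocations (a sketch):
    #
    #   ceph orch device ls                       # summary table for all hosts
    #   ceph orch device ls --wide --refresh      # extra columns, force a rescan
    #   ceph orch device ls --format=json-pretty  # machine-readable output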
    @_cli_write_command('orch device zap')
    def _zap_device(self, hostname: str, path: str, force: bool = False) -> HandleCommandResult:
        """
        Zap (erase!) a device so it can be re-used
        """
        if not force:
            raise OrchestratorError('must pass --force to PERMANENTLY ERASE DEVICE DATA')
        completion = self.zap_device(hostname, path)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_read_command('orch ls')
    def _list_services(self,
                       service_type: Optional[str] = None,
                       service_name: Optional[str] = None,
                       export: bool = False,
                       format: Format = Format.plain,
                       refresh: bool = False) -> HandleCommandResult:
        """
        List services known to orchestrator
        """
        if export and format == Format.plain:
            format = Format.yaml

        completion = self.describe_service(service_type,
                                           service_name,
                                           refresh=refresh)

        services = raise_if_exception(completion)

        def ukn(s: Optional[str]) -> str:
            return '<unknown>' if s is None else s

        # Sort the list for display
        services.sort(key=lambda s: (ukn(s.spec.service_name())))

        if len(services) == 0:
            return HandleCommandResult(stdout="No services reported")
        elif format != Format.plain:
            if export:
                data = [s.spec for s in services if s.deleted is None]
                return HandleCommandResult(stdout=to_format(data, format, many=True, cls=ServiceSpec))
            else:
                return HandleCommandResult(stdout=to_format(services, format, many=True, cls=ServiceDescription))
        else:
            now = datetime_now()
            table = PrettyTable(
                [
                    'NAME', 'PORTS',
                    'RUNNING', 'REFRESHED', 'AGE',
                    'PLACEMENT',
                ],
                border=False)
            table.align['NAME'] = 'l'
            table.align['PORTS'] = 'l'
            table.align['RUNNING'] = 'r'
            table.align['REFRESHED'] = 'l'
            table.align['AGE'] = 'l'
            table.align['PLACEMENT'] = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for s in services:
                if not s.spec:
                    pl = '<no spec>'
                elif s.spec.unmanaged:
                    pl = '<unmanaged>'
                else:
                    pl = s.spec.placement.pretty_str()
                if s.deleted:
                    refreshed = '<deleting>'
                else:
                    refreshed = nice_delta(now, s.last_refresh, ' ago')

                table.add_row((
                    s.spec.service_name(),
                    s.get_port_summary(),
                    '%d/%d' % (s.running, s.size),
                    refreshed,
                    nice_delta(now, s.created),
                    pl,
                ))

            return HandleCommandResult(stdout=table.get_string())

    @_cli_read_command('orch ps')
    def _list_daemons(self,
                      hostname: Optional[str] = None,
                      service_name: Optional[str] = None,
                      daemon_type: Optional[str] = None,
                      daemon_id: Optional[str] = None,
                      format: Format = Format.plain,
                      refresh: bool = False) -> HandleCommandResult:
        """
        List daemons known to orchestrator
        """
        completion = self.list_daemons(service_name,
                                       daemon_type,
                                       daemon_id=daemon_id,
                                       host=hostname,
                                       refresh=refresh)

        daemons = raise_if_exception(completion)

        def ukn(s: Optional[str]) -> str:
            return '<unknown>' if s is None else s

        # Sort the list for display
        daemons.sort(key=lambda s: (ukn(s.daemon_type), ukn(s.hostname), ukn(s.daemon_id)))

        if format != Format.plain:
            return HandleCommandResult(stdout=to_format(daemons, format, many=True, cls=DaemonDescription))
        else:
            if len(daemons) == 0:
                return HandleCommandResult(stdout="No daemons reported")

            now = datetime_now()
            table = PrettyTable(
                ['NAME', 'HOST', 'PORTS',
                 'STATUS', 'REFRESHED', 'AGE',
                 'VERSION', 'IMAGE ID', 'CONTAINER ID'],
                border=False)
            table.align = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for s in sorted(daemons, key=lambda s: s.name()):
                if s.status_desc:
                    status = s.status_desc
                else:
                    status = {
                        DaemonDescriptionStatus.error: 'error',
                        DaemonDescriptionStatus.stopped: 'stopped',
                        DaemonDescriptionStatus.running: 'running',
                        None: '<unknown>'
                    }[s.status]
                if s.status == DaemonDescriptionStatus.running and s.started:
                    status += ' (%s)' % to_pretty_timedelta(now - s.started)

                table.add_row((
                    s.name(),
                    ukn(s.hostname),
                    s.get_port_summary(),
                    status,
                    nice_delta(now, s.last_refresh, ' ago'),
                    nice_delta(now, s.created),
                    ukn(s.version),
                    ukn(s.container_image_id)[0:12],
                    ukn(s.container_id)))

            # Hide the CONTAINER ID column entirely when no daemon reports one.
            remove_column = 'CONTAINER ID'
            if table.get_string(fields=[remove_column], border=False,
                                header=False).count('<unknown>') == len(daemons):
                try:
                    table.del_column(remove_column)
                except AttributeError as e:
                    # del_column() was introduced in prettytable 2.0; older
                    # releases raise AttributeError('del_column') from
                    # PrettyTable.__getattr__, so fall back to editing the
                    # table internals directly.
                    if str(e) != "del_column":
                        raise
                    table.field_names.remove(remove_column)
                    table._rows = [row[:-1] for row in table._rows]

            return HandleCommandResult(stdout=table.get_string())

    @_cli_write_command('orch apply osd')
    def _apply_osd(self,
                   all_available_devices: bool = False,
                   format: Format = Format.plain,
                   unmanaged: Optional[bool] = None,
                   dry_run: bool = False,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """
        Create OSD daemon(s) using a drive group spec
        """
        # Apply DriveGroupSpecs to create OSDs
        usage = """
usage:
  ceph orch apply osd -i <json_file/yaml_file> [--dry-run]
  ceph orch apply osd --all-available-devices [--dry-run] [--unmanaged]

Restrictions:

  Mutexes:
  * -i, --all-available-devices
  * -i, --unmanaged (this would overwrite the osdspec loaded from a file)

  Parameters:

  * --unmanaged
    Only works with --all-available-devices.

Description:

  * -i
    An inbuf object like a file or a json/yaml blob containing a valid OSDSpec

  * --all-available-devices
    The simplest OSDSpec there is. Takes all devices marked as 'available'
    and creates standalone OSDs on them.

  * --unmanaged
    Sets the unmanaged flag for --all-available-devices (default is False)

Examples:

  # ceph orch apply osd -i <file.yml|json>

  Applies one or more OSDSpecs found in <file>

  # ceph orch apply osd --all-available-devices --unmanaged=true

  Creates and applies a simple OSDSpec with the unmanaged flag set to <true>
"""

        if inbuf and all_available_devices:
            # mutually exclusive
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        if not inbuf and not all_available_devices:
            # one parameter must be present
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        if inbuf:
            if unmanaged is not None:
                return HandleCommandResult(-errno.EINVAL, stderr=usage)

            try:
                drivegroups = [_dg for _dg in yaml.safe_load_all(inbuf)]
            except yaml.scanner.ScannerError as e:
                msg = f"Invalid YAML received: {str(e)}"
                self.log.exception(e)
                return HandleCommandResult(-errno.EINVAL, stderr=msg)

            dg_specs = []
            for dg in drivegroups:
                spec = DriveGroupSpec.from_json(dg)
                if dry_run:
                    spec.preview_only = True
                dg_specs.append(spec)

            return self._apply_misc(dg_specs, dry_run, format, no_overwrite)

        if all_available_devices:
            if unmanaged is None:
                unmanaged = False
            dg_specs = [
                DriveGroupSpec(
                    service_id='all-available-devices',
                    placement=PlacementSpec(host_pattern='*'),
                    data_devices=DeviceSelection(all=True),
                    unmanaged=unmanaged,
                    preview_only=dry_run
                )
            ]
            return self._apply_misc(dg_specs, dry_run, format, no_overwrite)

        return HandleCommandResult(-errno.EINVAL, stderr=usage)

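    # A minimal OSDSpec document that could be passed via -i (a sketch; the
    # service_id and placement values are placeholders):
    #
    #   service_type: osd
    #   service_id: example_drive_group
    #   placement:
    #     host_pattern: '*'
    #   data_devices:
    #     all: true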
    @_cli_write_command('orch daemon add osd')
    def _daemon_add_osd(self, svc_arg: Optional[str] = None) -> HandleCommandResult:
        """Create an OSD service. Pass the drives as --svc_arg=host:drives"""
        # Create one or more OSDs

        usage = """
Usage:
  ceph orch daemon add osd host:device1,device2,...
"""
        if not svc_arg:
            return HandleCommandResult(-errno.EINVAL, stderr=usage)
        try:
            host_name, block_device = svc_arg.split(":")
            block_devices = block_device.split(',')
            devs = DeviceSelection(paths=block_devices)
            drive_group = DriveGroupSpec(placement=PlacementSpec(
                host_pattern=host_name), data_devices=devs)
        except (TypeError, KeyError, ValueError):
            msg = "Invalid host:device spec: '{}'".format(svc_arg) + usage
            return HandleCommandResult(-errno.EINVAL, stderr=msg)

        completion = self.create_osds(drive_group)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch osd rm')
    def _osd_rm_start(self,
                      svc_id: List[str],
                      replace: bool = False,
                      force: bool = False) -> HandleCommandResult:
        """Remove OSD services"""
        completion = self.remove_osds(svc_id, replace=replace, force=force)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch osd rm stop')
    def _osd_rm_stop(self, svc_id: List[str]) -> HandleCommandResult:
        """Cancel a pending OSD removal"""
        completion = self.stop_remove_osds(svc_id)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch osd rm status')
    def _osd_rm_status(self, format: Format = Format.plain) -> HandleCommandResult:
        """Status of OSD removal operation"""
        completion = self.remove_osds_status()
        raise_if_exception(completion)
        report = completion.result

        if not report:
            return HandleCommandResult(stdout="No OSD remove/replace operations reported")

        if format != Format.plain:
            out = to_format(report, format, many=True, cls=None)
        else:
            table = PrettyTable(
                ['OSD_ID', 'HOST', 'STATE', 'PG_COUNT', 'REPLACE', 'FORCE', 'DRAIN_STARTED_AT'],
                border=False)
            table.align = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for osd in sorted(report, key=lambda o: o.osd_id):
                table.add_row([osd.osd_id, osd.hostname, osd.drain_status_human(),
                               osd.get_pg_count(), osd.replace, osd.force, osd.drain_started_at])
            out = table.get_string()

        return HandleCommandResult(stdout=out)

    @_cli_write_command('orch daemon add')
    def daemon_add_misc(self,
                        daemon_type: Optional[ServiceType] = None,
                        placement: Optional[str] = None,
                        inbuf: Optional[str] = None) -> HandleCommandResult:
        """Add daemon(s)"""
        usage = f"""Usage:
  ceph orch daemon add -i <json_file>
  ceph orch daemon add {daemon_type or '<daemon_type>'} <placement>"""
        if inbuf:
            if daemon_type or placement:
                raise OrchestratorValidationError(usage)
            spec = ServiceSpec.from_json(yaml.safe_load(inbuf))
        else:
            if not placement or not daemon_type:
                raise OrchestratorValidationError(usage)
            placement_spec = PlacementSpec.from_string(placement)
            spec = ServiceSpec(daemon_type.value, placement=placement_spec)

        return self._daemon_add_misc(spec)

    def _daemon_add_misc(self, spec: ServiceSpec) -> HandleCommandResult:
        completion = self.add_daemon(spec)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

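    # For instance (daemon type, host and file name are placeholders):
    #
    #   ceph orch daemon add mgr 'host1'
    #   ceph orch daemon add -i daemon_spec.yaml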
    @_cli_write_command('orch daemon add mds')
    def _mds_add(self,
                 fs_name: str,
                 placement: Optional[str] = None,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        """Start MDS daemon(s)"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = ServiceSpec(
            service_type='mds',
            service_id=fs_name,
            placement=PlacementSpec.from_string(placement),
        )
        return self._daemon_add_misc(spec)

    @_cli_write_command('orch daemon add rgw')
    def _rgw_add(self,
                 svc_id: str,
                 port: Optional[int] = None,
                 ssl: bool = False,
                 placement: Optional[str] = None,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        """Start RGW daemon(s)"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = RGWSpec(
            service_id=svc_id,
            rgw_frontend_port=port,
            ssl=ssl,
            placement=PlacementSpec.from_string(placement),
        )
        return self._daemon_add_misc(spec)

    @_cli_write_command('orch daemon add nfs')
    def _nfs_add(self,
                 svc_id: str,
                 pool: str,
                 namespace: Optional[str] = None,
                 placement: Optional[str] = None,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        """Start NFS daemon(s)"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = NFSServiceSpec(
            service_id=svc_id,
            pool=pool,
            namespace=namespace,
            placement=PlacementSpec.from_string(placement),
        )
        return self._daemon_add_misc(spec)

    @_cli_write_command('orch daemon add iscsi')
    def _iscsi_add(self,
                   pool: str,
                   api_user: str,
                   api_password: str,
                   trusted_ip_list: Optional[str] = None,
                   placement: Optional[str] = None,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Start iSCSI daemon(s)"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = IscsiServiceSpec(
            service_id='iscsi',
            pool=pool,
            api_user=api_user,
            api_password=api_password,
            trusted_ip_list=trusted_ip_list,
            placement=PlacementSpec.from_string(placement),
        )
        return self._daemon_add_misc(spec)

    @_cli_write_command('orch')
    def _service_action(self, action: ServiceAction, service_name: str) -> HandleCommandResult:
        """Start, stop, restart, redeploy, or reconfig an entire service (i.e. all daemons)"""
        completion = self.service_action(action.value, service_name)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch daemon')
    def _daemon_action(self, action: DaemonAction, name: str) -> HandleCommandResult:
        """Start, stop, restart, or reconfig a specific daemon"""
        if '.' not in name:
            raise OrchestratorError('%s is not a valid daemon name' % name)
        completion = self.daemon_action(action.value, name)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch daemon redeploy')
    def _daemon_action_redeploy(self, name: str, image: Optional[str] = None) -> HandleCommandResult:
        """Redeploy a daemon (with a specific image)"""
        if '.' not in name:
            raise OrchestratorError('%s is not a valid daemon name' % name)
        completion = self.daemon_action("redeploy", name, image=image)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch daemon rm')
    def _daemon_rm(self,
                   names: List[str],
                   force: Optional[bool] = False) -> HandleCommandResult:
        """Remove specific daemon(s)"""
        for name in names:
            if '.' not in name:
                raise OrchestratorError('%s is not a valid daemon name' % name)
            daemon_type = name.split('.')[0]
            if not force and daemon_type in ['osd', 'mon', 'prometheus']:
                raise OrchestratorError(
                    'must pass --force to REMOVE daemon with potentially PRECIOUS DATA for %s' % name)
        completion = self.remove_daemons(names)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch rm')
    def _service_rm(self,
                    service_name: str,
                    force: bool = False) -> HandleCommandResult:
        """Remove a service"""
        if service_name in ['mon', 'mgr'] and not force:
            raise OrchestratorError('The mon and mgr services cannot be removed')
        completion = self.remove_service(service_name)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch apply')
    def apply_misc(self,
                   service_type: Optional[ServiceType] = None,
                   placement: Optional[str] = None,
                   dry_run: bool = False,
                   format: Format = Format.plain,
                   unmanaged: bool = False,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Update the size or placement for a service or apply a large yaml spec"""
        usage = """Usage:
  ceph orch apply -i <yaml spec> [--dry-run]
  ceph orch apply <service_type> [--placement=<placement_string>] [--unmanaged]
"""
        if inbuf:
            if service_type or placement or unmanaged:
                raise OrchestratorValidationError(usage)
            content: Iterator = yaml.safe_load_all(inbuf)
            specs: List[Union[ServiceSpec, HostSpec]] = []
            for s in content:
                spec = json_to_generic_spec(s)

                # validate the config (we need MgrModule for that)
                if isinstance(spec, ServiceSpec) and spec.config:
                    for k, v in spec.config.items():
                        try:
                            self.get_foreign_ceph_option('mon', k)
                        except KeyError:
                            raise SpecValidationError(f'Invalid config option {k} in spec')

                if dry_run and not isinstance(spec, HostSpec):
                    spec.preview_only = dry_run
                specs.append(spec)
        else:
            placementspec = PlacementSpec.from_string(placement)
            if not service_type:
                raise OrchestratorValidationError(usage)
            specs = [ServiceSpec(service_type.value, placement=placementspec,
                                 unmanaged=unmanaged, preview_only=dry_run)]
        return self._apply_misc(specs, dry_run, format, no_overwrite)

    def _apply_misc(self,
                    specs: Sequence[GenericSpec],
                    dry_run: bool,
                    format: Format,
                    no_overwrite: bool = False) -> HandleCommandResult:
        completion = self.apply(specs, no_overwrite)
        raise_if_exception(completion)
        out = completion.result_str()
        if dry_run:
            completion = self.plan(specs)
            raise_if_exception(completion)
            data = completion.result
            if format == Format.plain:
                out = generate_preview_tables(data)
            else:
                out = to_format(data, format, many=True, cls=None)
        return HandleCommandResult(stdout=out)

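    # `orch apply -i` accepts a multi-document YAML stream; a sketch with
    # placeholder values:
    #
    #   service_type: mon
    #   placement:
    #     count: 3
    #   ---
    #   service_type: rgw
    #   service_id: myrealm.myzone
    #   placement:
    #     label: rgw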
    @_cli_write_command('orch apply mds')
    def _apply_mds(self,
                   fs_name: str,
                   placement: Optional[str] = None,
                   dry_run: bool = False,
                   unmanaged: bool = False,
                   format: Format = Format.plain,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Update the number of MDS instances for the given fs_name"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = ServiceSpec(
            service_type='mds',
            service_id=fs_name,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
            preview_only=dry_run)
        return self._apply_misc([spec], dry_run, format, no_overwrite)

    @_cli_write_command('orch apply rgw')
    def _apply_rgw(self,
                   svc_id: str,
                   realm: Optional[str] = None,
                   zone: Optional[str] = None,
                   port: Optional[int] = None,
                   ssl: bool = False,
                   placement: Optional[str] = None,
                   dry_run: bool = False,
                   format: Format = Format.plain,
                   unmanaged: bool = False,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Update the number of RGW instances for the given zone"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        if realm and not zone:
            raise OrchestratorValidationError(
                'Cannot add RGW: Realm specified but no zone specified')
        if zone and not realm:
            raise OrchestratorValidationError(
                'Cannot add RGW: Zone specified but no realm specified')

        spec = RGWSpec(
            service_id=svc_id,
            rgw_realm=realm,
            rgw_zone=zone,
            rgw_frontend_port=port,
            ssl=ssl,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
            preview_only=dry_run
        )

        return self._apply_misc([spec], dry_run, format, no_overwrite)

    @_cli_write_command('orch apply nfs')
    def _apply_nfs(self,
                   svc_id: str,
                   pool: str,
                   namespace: Optional[str] = None,
                   placement: Optional[str] = None,
                   format: Format = Format.plain,
                   dry_run: bool = False,
                   unmanaged: bool = False,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Scale an NFS service"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = NFSServiceSpec(
            service_id=svc_id,
            pool=pool,
            namespace=namespace,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
            preview_only=dry_run
        )

        return self._apply_misc([spec], dry_run, format, no_overwrite)

    @_cli_write_command('orch apply iscsi')
    def _apply_iscsi(self,
                     pool: str,
                     api_user: str,
                     api_password: str,
                     trusted_ip_list: Optional[str] = None,
                     placement: Optional[str] = None,
                     unmanaged: bool = False,
                     dry_run: bool = False,
                     format: Format = Format.plain,
                     no_overwrite: bool = False,
                     inbuf: Optional[str] = None) -> HandleCommandResult:
        """Scale an iSCSI service"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')

        spec = IscsiServiceSpec(
            service_id=pool,
            pool=pool,
            api_user=api_user,
            api_password=api_password,
            trusted_ip_list=trusted_ip_list,
            placement=PlacementSpec.from_string(placement),
            unmanaged=unmanaged,
            preview_only=dry_run
        )

        return self._apply_misc([spec], dry_run, format, no_overwrite)

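    # Illustrative invocations of the typed apply wrappers above (ids, counts
    # and pool names are placeholders):
    #
    #   ceph orch apply mds myfs --placement="3"
    #   ceph orch apply rgw myrgw --realm=myrealm --zone=myzone --port=8000
    #   ceph orch apply nfs mynfs mypool --placement="2"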
    @_cli_write_command('orch set backend')
    def _set_backend(self, module_name: Optional[str] = None) -> HandleCommandResult:
        """
        Select orchestrator module backend
        """
        # We implement a setter command instead of just having the user
        # modify the setting directly, so that we can validate they're setting
        # it to a module that really exists and is enabled.

        # There isn't a mechanism for ensuring they don't *disable* the module
        # later, but this is better than nothing.
        mgr_map = self.get("mgr_map")

        if module_name is None or module_name == "":
            self.set_module_option("orchestrator", None)
            return HandleCommandResult()

        for module in mgr_map['available_modules']:
            if module['name'] != module_name:
                continue

            if not module['can_run']:
                continue

            enabled = module['name'] in mgr_map['modules']
            if not enabled:
                return HandleCommandResult(-errno.EINVAL,
                                           stderr="Module '{module_name}' is not enabled. \n Run "
                                                  "`ceph mgr module enable {module_name}` "
                                                  "to enable.".format(module_name=module_name))

            try:
                is_orchestrator = self.remote(module_name,
                                              "is_orchestrator_module")
            except NameError:
                is_orchestrator = False

            if not is_orchestrator:
                return HandleCommandResult(-errno.EINVAL,
                                           stderr="'{0}' is not an orchestrator module".format(module_name))

            self.set_module_option("orchestrator", module_name)

            return HandleCommandResult()

        return HandleCommandResult(-errno.EINVAL, stderr="Module '{0}' not found".format(module_name))

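    # e.g. `ceph orch set backend cephadm` (after `ceph mgr module enable
    # cephadm`); passing an empty module name clears the configured backend.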
    @_cli_write_command('orch pause')
    def _pause(self) -> HandleCommandResult:
        """Pause orchestrator background work"""
        self.pause()
        return HandleCommandResult()

    @_cli_write_command('orch resume')
    def _resume(self) -> HandleCommandResult:
        """Resume orchestrator background work (if paused)"""
        self.resume()
        return HandleCommandResult()

    @_cli_write_command('orch cancel')
    def _cancel(self) -> HandleCommandResult:
        """
        Cancel ongoing operations

        ProgressReferences might get stuck; this unsticks them.
        """
        self.cancel_completions()
        return HandleCommandResult()

    @_cli_read_command('orch status')
    def _status(self,
                detail: bool = False,
                format: Format = Format.plain) -> HandleCommandResult:
        """Report configured backend and its status"""
        o = self._select_orchestrator()
        if o is None:
            raise NoOrchestrator()

        avail, why, module_details = self.available()
        result: Dict[str, Any] = {
            "available": avail,
            "backend": o,
        }

        if avail:
            result.update(module_details)
        else:
            result['reason'] = why

        if format != Format.plain:
            output = to_format(result, format, many=False, cls=None)
        else:
            output = "Backend: {0}".format(result['backend'])
            output += f"\nAvailable: {'Yes' if result['available'] else 'No'}"
            if 'reason' in result:
                output += ' ({0})'.format(result['reason'])
            if 'paused' in result:
                output += f"\nPaused: {'Yes' if result['paused'] else 'No'}"
            if 'workers' in result and detail:
                output += f"\nHost Parallelism: {result['workers']}"
        return HandleCommandResult(stdout=output)

    def self_test(self) -> None:
        old_orch = self._select_orchestrator()
        self._set_backend('')
        assert self._select_orchestrator() is None
        self._set_backend(old_orch)

        e1 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "ZeroDivisionError")
        try:
            raise_if_exception(e1)
            assert False
        except ZeroDivisionError as e:
            assert e.args == ('hello, world',)

        e2 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "OrchestratorError")
        try:
            raise_if_exception(e2)
            assert False
        except OrchestratorError as e:
            assert e.args == ('hello, world',)

    @staticmethod
    def _upgrade_check_image_name(image: Optional[str], ceph_version: Optional[str]) -> None:
        """
        >>> OrchestratorCli._upgrade_check_image_name('v15.2.0', None)
        Traceback (most recent call last):
        orchestrator._interface.OrchestratorValidationError: Error: unable to pull image name `v15.2.0`.
          Maybe you meant `--ceph-version 15.2.0`?

        """
        if image and re.match(r'^v?\d+\.\d+\.\d+$', image) and ceph_version is None:
            ver = image[1:] if image.startswith('v') else image
            s = f"Error: unable to pull image name `{image}`.\n" \
                f"  Maybe you meant `--ceph-version {ver}`?"
            raise OrchestratorValidationError(s)

    @_cli_write_command('orch upgrade check')
    def _upgrade_check(self,
                       image: Optional[str] = None,
                       ceph_version: Optional[str] = None) -> HandleCommandResult:
        """Check service versions vs available and target containers"""
        self._upgrade_check_image_name(image, ceph_version)
        completion = self.upgrade_check(image=image, version=ceph_version)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch upgrade status')
    def _upgrade_status(self) -> HandleCommandResult:
        """Report the status of an in-progress upgrade"""
        completion = self.upgrade_status()
        status = raise_if_exception(completion)
        r = {
            'target_image': status.target_image,
            'in_progress': status.in_progress,
            'services_complete': status.services_complete,
            'progress': status.progress,
            'message': status.message,
        }
        out = json.dumps(r, indent=4)
        return HandleCommandResult(stdout=out)

    @_cli_write_command('orch upgrade start')
    def _upgrade_start(self,
                       image: Optional[str] = None,
                       ceph_version: Optional[str] = None) -> HandleCommandResult:
        """Initiate upgrade"""
        self._upgrade_check_image_name(image, ceph_version)
        completion = self.upgrade_start(image, ceph_version)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch upgrade pause')
    def _upgrade_pause(self) -> HandleCommandResult:
        """Pause an in-progress upgrade"""
        completion = self.upgrade_pause()
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch upgrade resume')
    def _upgrade_resume(self) -> HandleCommandResult:
        """Resume paused upgrade"""
        completion = self.upgrade_resume()
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())

    @_cli_write_command('orch upgrade stop')
    def _upgrade_stop(self) -> HandleCommandResult:
        """Stop an in-progress upgrade"""
        completion = self.upgrade_stop()
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
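
    # A typical upgrade session built from the commands above (the version is
    # a placeholder):
    #
    #   ceph orch upgrade check --ceph-version 16.2.5
    #   ceph orch upgrade start --ceph-version 16.2.5
    #   ceph orch upgrade status
    #   ceph orch upgrade pause   # or resume / stop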