from __future__ import print_function
import argparse
import logging
import os
from textwrap import dedent
from ceph_volume.util import system, disk, merge_dict
from ceph_volume.util.device import Device
from ceph_volume.util.arg_validators import valid_osd_id
from ceph_volume import decorators, terminal, process
from ceph_volume.api import lvm as api
from ceph_volume.systemd import systemctl


logger = logging.getLogger(__name__)
mlogger = terminal.MultiLogger(__name__)

def get_cluster_name(osd_id, osd_fsid):
    """
    From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
    system that match those tag values, then return cluster_name for the first
    one.
    """
    lv_tags = {}
    lv_tags['ceph.osd_id'] = osd_id
    lv_tags['ceph.osd_fsid'] = osd_fsid

    lvs = api.get_lvs(tags=lv_tags)
    if not lvs:
        mlogger.error(
            'Unable to find any LV for source OSD: id:{} fsid:{}'.format(
                osd_id, osd_fsid))
        raise SystemExit('Unexpected error, terminating')
    return next(iter(lvs)).tags["ceph.cluster_name"]
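
# Illustrative example (hypothetical tag values): if the OSD's LVs carry
# ceph.cluster_name=ceph, then get_cluster_name('1', fsid) returns 'ceph'.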

def get_osd_path(osd_id, osd_fsid):
    return '/var/lib/ceph/osd/{}-{}'.format(
        get_cluster_name(osd_id, osd_fsid), osd_id)
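
# Illustrative example (hypothetical values): with cluster name 'ceph' and
# OSD id '1', get_osd_path('1', fsid) returns '/var/lib/ceph/osd/ceph-1'.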

def find_associated_devices(osd_id, osd_fsid):
    """
    From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
    system that match those tag values, further detect if any partitions are
    part of the OSD, and then return the set of LVs and partitions (if any).
    """
    lv_tags = {}
    lv_tags['ceph.osd_id'] = osd_id
    lv_tags['ceph.osd_fsid'] = osd_fsid

    lvs = api.get_lvs(tags=lv_tags)
    if not lvs:
        mlogger.error(
            'Unable to find any LV for source OSD: id:{} fsid:{}'.format(
                osd_id, osd_fsid))
        raise SystemExit('Unexpected error, terminating')

    devices = set(ensure_associated_lvs(lvs, lv_tags))
    return [(Device(path), type) for path, type in devices if path]
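
# Illustrative shape of the result (hypothetical paths): a list of
# (Device, type) pairs, e.g. [(Device('/dev/vg/block_lv'), 'block'),
# (Device('/dev/vg/db_lv'), 'db')].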

def ensure_associated_lvs(lvs, lv_tags):
    """
    Go through each LV and verify whether its backing devices (journal, wal,
    block) are LVs or partitions, so that they can be accurately reported.
    """
    # look for many LVs for each backing type, because it is possible to
    # receive a filtering for osd.1, and have multiple failed deployments
    # leaving many journals with osd.1 - usually, only a single LV will be
    # returned

    block_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'block'}))
    db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
    wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
    backing_devices = [(block_lvs, 'block'), (db_lvs, 'db'),
                       (wal_lvs, 'wal')]

    verified_devices = []

    for lv in lvs:
        # go through each lv and append it, otherwise query `blkid` to find
        # a physical device. Do this for each type (journal, db, wal)
        # regardless of whether it was processed for the previous LV, so that
        # bad devices with the same ID can be caught
        for ceph_lvs, type in backing_devices:

            if ceph_lvs:
                verified_devices.extend([(l.lv_path, type) for l in ceph_lvs])
                continue

            # must be a disk partition; by querying blkid with the uuid we
            # ensure that the device path is always correct
            try:
                device_uuid = lv.tags['ceph.{}_uuid'.format(type)]
            except KeyError:
                # Bluestore will not have ceph.journal_uuid, and Filestore
                # will not have ceph.db_uuid
                continue

            osd_device = disk.get_device_from_partuuid(device_uuid)
            if not osd_device:
                # if the osd_device is not found by the partuuid, then it is
                # not possible to ensure this device exists anymore, so skip it
                continue
            verified_devices.append((osd_device, type))

    return verified_devices
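
# Illustrative shape of the result (hypothetical paths): a mixed list such as
# [('/dev/vg/block_lv', 'block'), ('/dev/sdb1', 'db')], where LV members come
# from lv_path and partition members from blkid's PARTUUID lookup.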

class VolumeTagTracker(object):
    def __init__(self, devices, target_lv):
        self.target_lv = target_lv
        self.data_device = self.db_device = self.wal_device = None
        for device, type in devices:
            if type == 'block':
                self.data_device = device
            elif type == 'db':
                self.db_device = device
            elif type == 'wal':
                self.wal_device = device
        if not self.data_device:
            mlogger.error('Data device not found')
            raise SystemExit(
                "Unexpected error, terminating")
        if not self.data_device.is_lv:
            mlogger.error('Data device isn\'t LVM')
            raise SystemExit(
                "Unexpected error, terminating")

        self.old_target_tags = self.target_lv.tags.copy()
        self.old_data_tags = (
            self.data_device.lv_api.tags.copy()
            if self.data_device.is_lv else None)
        self.old_db_tags = (
            self.db_device.lv_api.tags.copy()
            if self.db_device and self.db_device.is_lv else None)
        self.old_wal_tags = (
            self.wal_device.lv_api.tags.copy()
            if self.wal_device and self.wal_device.is_lv else None)

    def update_tags_when_lv_create(self, create_type):
        tags = {}
        if not self.data_device.is_lv:
            mlogger.warning(
                'Data device is not LVM, will not update LVM tags')
        else:
            tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid
            tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path
            self.data_device.lv_api.set_tags(tags)

            tags = self.data_device.lv_api.tags.copy()
            tags["ceph.type"] = create_type
            self.target_lv.set_tags(tags)

        aux_dev = None
        if create_type == "db" and self.wal_device:
            aux_dev = self.wal_device
        elif create_type == "wal" and self.db_device:
            aux_dev = self.db_device
        else:
            return
        if not aux_dev.is_lv:
            mlogger.warning(
                '{} device is not LVM, will not update LVM tags'.format(
                    create_type.upper()))
        else:
            tags = {}
            tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid
            tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path
            aux_dev.lv_api.set_tags(tags)

    def remove_lvs(self, source_devices, target_type):
        remaining_devices = [self.data_device, self.db_device, self.wal_device]

        outdated_tags = []
        for device, type in source_devices:
            if type == "block" or type == target_type:
                continue
            remaining_devices.remove(device)
            if device.is_lv:
                outdated_tags.append("ceph.{}_uuid".format(type))
                outdated_tags.append("ceph.{}_device".format(type))
                device.lv_api.clear_tags()
        if len(outdated_tags) > 0:
            for d in remaining_devices:
                if d and d.is_lv:
                    d.lv_api.clear_tags(outdated_tags)

    def replace_lvs(self, source_devices, target_type):
        remaining_devices = [self.data_device]
        if self.db_device:
            remaining_devices.append(self.db_device)
        if self.wal_device:
            remaining_devices.append(self.wal_device)

        outdated_tags = []
        for device, type in source_devices:
            if type == "block":
                continue
            remaining_devices.remove(device)
            if device.is_lv:
                outdated_tags.append("ceph.{}_uuid".format(type))
                outdated_tags.append("ceph.{}_device".format(type))
                device.lv_api.clear_tags()

        new_tags = {}
        new_tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid
        new_tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path

        for d in remaining_devices:
            if d and d.is_lv:
                if len(outdated_tags) > 0:
                    d.lv_api.clear_tags(outdated_tags)
                d.lv_api.set_tags(new_tags)

        if not self.data_device.is_lv:
            mlogger.warning(
                'Data device is not LVM, will not properly update target '
                'LVM tags')
        else:
            tags = self.data_device.lv_api.tags.copy()

            tags["ceph.type"] = target_type
            tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid
            tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path
            self.target_lv.set_tags(tags)
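
    # Note on the two methods above: remove_lvs() is used when migrating onto
    # an already-attached volume (sources being removed just have their tags
    # cleared), while replace_lvs() is used when attaching a new target volume
    # (the remaining volumes are additionally pointed at the new LV via the
    # ceph.<type>_uuid and ceph.<type>_device tags).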

    def undo(self):
        mlogger.info(
            'Undoing lv tag set')
        if self.data_device:
            if self.old_data_tags:
                self.data_device.lv_api.set_tags(self.old_data_tags)
            else:
                self.data_device.lv_api.clear_tags()
        if self.db_device:
            if self.old_db_tags:
                self.db_device.lv_api.set_tags(self.old_db_tags)
            else:
                self.db_device.lv_api.clear_tags()
        if self.wal_device:
            if self.old_wal_tags:
                self.wal_device.lv_api.set_tags(self.old_wal_tags)
            else:
                self.wal_device.lv_api.clear_tags()
        if self.old_target_tags:
            self.target_lv.set_tags(self.old_target_tags)
        else:
            self.target_lv.clear_tags()
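
# Usage pattern (as used by the commands below): snapshot the tags on
# construction, mutate them for the operation at hand, and roll everything
# back via undo() if the operation fails. A minimal sketch (hypothetical
# variables):
#
#   tag_tracker = VolumeTagTracker(devices, target_lv)
#   try:
#       tag_tracker.replace_lvs(source_devices, target_type)
#       ...  # run ceph-bluestore-tool
#   except:
#       tag_tracker.undo()
#       raise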

class Migrate(object):

    help = 'Migrate BlueFS data from one LVM device to another'

    def __init__(self, argv):
        self.argv = argv
        self.osd_id = None

    def get_source_devices(self, devices, target_type=""):
        ret = []
        for device, type in devices:
            if type == target_type:
                continue
            if type == 'block':
                if 'data' not in self.args.from_:
                    continue
            elif type == 'db':
                if 'db' not in self.args.from_:
                    continue
            elif type == 'wal':
                if 'wal' not in self.args.from_:
                    continue
            ret.append([device, type])
        if not ret:
            mlogger.error('Source device list is empty')
            raise SystemExit(
                'Unable to migrate to: {}'.format(self.args.target))
        return ret

    # ceph-bluestore-tool uses the following replacement rules
    # (in order of precedence, stopping at the first match):
    # - if the source list has a DB volume, the target device replaces it.
    # - if the source list has a WAL volume, the target device replaces it.
    # - if the source list has only the slow volume, the operation is not
    #   permitted and requires explicit allocation via the new-db/new-wal
    #   commands.
    def get_target_type_by_source(self, devices):
        ret = None
        for device, type in devices:
            if type == 'db':
                return 'db'
            elif type == 'wal':
                ret = 'wal'
        return ret
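
    # Illustrative examples (hypothetical device objects): given sources
    # [(dev1, 'block'), (dev2, 'wal')] this returns 'wal'; given
    # [(dev1, 'block'), (dev2, 'db'), (dev3, 'wal')] it returns 'db',
    # since DB takes precedence over WAL.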

    def get_filename_by_type(self, type):
        filename = 'block'
        if type == 'db' or type == 'wal':
            filename += '.' + type
        return filename
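
    # Illustrative mapping: 'db' -> 'block.db', 'wal' -> 'block.wal', and any
    # other type (e.g. 'block') -> 'block'.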

    def get_source_args(self, osd_path, devices):
        ret = []
        for device, type in devices:
            ret = ret + ["--devs-source", os.path.join(
                osd_path, self.get_filename_by_type(type))]
        return ret
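
    # Illustrative result (hypothetical OSD path): for sources of type 'block'
    # and 'db' this yields ['--devs-source', '/var/lib/ceph/osd/ceph-1/block',
    # '--devs-source', '/var/lib/ceph/osd/ceph-1/block.db'].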

    @decorators.needs_root
    def migrate_to_new(self, osd_id, osd_fsid, devices, target_lv):
        source_devices = self.get_source_devices(devices)
        target_type = self.get_target_type_by_source(source_devices)
        if not target_type:
            mlogger.error(
                "Unable to determine new volume type,"
                " please use the new-db or new-wal command first.")
            raise SystemExit(
                "Unable to migrate to: {}".format(self.args.target))

        target_path = target_lv.lv_path

        try:
            tag_tracker = VolumeTagTracker(devices, target_lv)
            # we need to update the lvm tags for all the remaining volumes
            # and clear the tags for the ones being removed

            # ceph-bluestore-tool removes the source volume(s) other than the
            # block one and attaches the target one after a successful
            # migration
            tag_tracker.replace_lvs(source_devices, target_type)

            osd_path = get_osd_path(osd_id, osd_fsid)
            source_args = self.get_source_args(osd_path, source_devices)
            mlogger.info("Migrate to new, Source: {} Target: {}".format(
                source_args, target_path))
            stdout, stderr, exit_code = process.call([
                'ceph-bluestore-tool',
                '--path',
                osd_path,
                '--dev-target',
                target_path,
                '--command',
                'bluefs-bdev-migrate'] +
                source_args)
            if exit_code != 0:
                mlogger.error(
                    'Failed to migrate device, error code: {}'.format(
                        exit_code))
                raise SystemExit(
                    'Failed to migrate to: {}'.format(self.args.target))
            else:
                system.chown(os.path.join(osd_path, "block.{}".format(
                    target_type)))
                terminal.success('Migration successful.')
        except:
            tag_tracker.undo()
            raise

        return
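
    # For reference, the invocation built above has the form (hypothetical
    # paths):
    #   ceph-bluestore-tool --path /var/lib/ceph/osd/ceph-1 \
    #       --dev-target /dev/vg/new_db --command bluefs-bdev-migrate \
    #       --devs-source /var/lib/ceph/osd/ceph-1/block.db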

    @decorators.needs_root
    def migrate_to_existing(self, osd_id, osd_fsid, devices, target_lv):
        target_type = target_lv.tags["ceph.type"]
        if target_type == "wal":
            mlogger.error("Migrate to WAL is not supported")
            raise SystemExit(
                "Unable to migrate to: {}".format(self.args.target))
        target_filename = self.get_filename_by_type(target_type)
        if target_filename == "":
            mlogger.error(
                "Target Logical Volume doesn't have a proper volume type "
                "(ceph.type LVM tag): {}".format(target_type))
            raise SystemExit(
                "Unable to migrate to: {}".format(self.args.target))

        osd_path = get_osd_path(osd_id, osd_fsid)
        source_devices = self.get_source_devices(devices, target_type)
        target_path = os.path.join(osd_path, target_filename)
        tag_tracker = VolumeTagTracker(devices, target_lv)

        try:
            # ceph-bluestore-tool removes the source volume(s) other than
            # the block and target ones after a successful migration
            tag_tracker.remove_lvs(source_devices, target_type)
            source_args = self.get_source_args(osd_path, source_devices)
            mlogger.info("Migrate to existing, Source: {} Target: {}".format(
                source_args, target_path))
            stdout, stderr, exit_code = process.call([
                'ceph-bluestore-tool',
                '--path',
                osd_path,
                '--dev-target',
                target_path,
                '--command',
                'bluefs-bdev-migrate'] +
                source_args)
            if exit_code != 0:
                mlogger.error(
                    'Failed to migrate device, error code: {}'.format(
                        exit_code))
                raise SystemExit(
                    'Failed to migrate to: {}'.format(self.args.target))
            else:
                terminal.success('Migration successful.')
        except:
            tag_tracker.undo()
            raise

        return

    @decorators.needs_root
    def migrate_osd(self):
        if self.args.osd_id and not self.args.no_systemd:
            osd_is_running = systemctl.osd_is_active(self.args.osd_id)
            if osd_is_running:
                mlogger.error('OSD is running, stop it with: '
                              'systemctl stop ceph-osd@{}'.format(
                                  self.args.osd_id))
                raise SystemExit(
                    'Unable to migrate devices associated with OSD ID: {}'
                    .format(self.args.osd_id))

        target_lv = api.get_lv_by_fullname(self.args.target)
        if not target_lv:
            mlogger.error(
                'Target path "{}" is not a Logical Volume'.format(
                    self.args.target))
            raise SystemExit(
                'Unable to migrate to: {}'.format(self.args.target))
        devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid)
        if not target_lv.used_by_ceph:
            self.migrate_to_new(self.args.osd_id, self.args.osd_fsid,
                                devices,
                                target_lv)
        else:
            if (target_lv.tags['ceph.osd_id'] != self.args.osd_id or
                    target_lv.tags['ceph.osd_fsid'] != self.args.osd_fsid):
                mlogger.error(
                    'Target Logical Volume isn\'t used by the specified OSD: '
                    '{} FSID: {}'.format(self.args.osd_id,
                                         self.args.osd_fsid))
                raise SystemExit(
                    'Unable to migrate to: {}'.format(self.args.target))

            self.migrate_to_existing(self.args.osd_id, self.args.osd_fsid,
                                     devices,
                                     target_lv)
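
    # Dispatch summary for migrate_osd(): if the target LV is not yet used by
    # ceph, it is attached as a brand-new volume (migrate_to_new); otherwise
    # its ceph.osd_id/ceph.osd_fsid tags must match the given OSD and the data
    # is migrated onto the already-attached volume (migrate_to_existing).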

    def make_parser(self, prog, sub_command_help):
        parser = argparse.ArgumentParser(
            prog=prog,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )

        parser.add_argument(
            '--osd-id',
            required=True,
            help='Specify an OSD ID to detect associated devices for migration',
            type=valid_osd_id
        )

        parser.add_argument(
            '--osd-fsid',
            required=True,
            help='Specify an OSD FSID to detect associated devices for migration',
        )
        parser.add_argument(
            '--target',
            required=True,
            help='Specify target Logical Volume (LV) to migrate data to',
        )
        parser.add_argument(
            '--from',
            nargs='*',
            dest='from_',
            required=True,
            choices=['data', 'db', 'wal'],
            help='Copy BlueFS data from the given source device(s)',
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip checking OSD systemd unit',
        )
        return parser

    def main(self):
        sub_command_help = dedent("""
        Moves BlueFS data from the source volume(s) to the target one; the
        source volumes (except the main, i.e. data or block, one) are removed
        on success. Only LVM volumes are permitted as the target, either one
        already attached to the OSD or a new one. In the latter case the new
        volume is attached to the OSD, replacing one of the source devices.
        The following replacement rules apply (in order of precedence, stop
        on the first match):
        * if the source list has a DB volume - the target device replaces it.
        * if the source list has a WAL volume - the target device replaces it.
        * if the source list has only the slow volume - the operation is not
          permitted and requires explicit allocation via the new-db/new-wal
          commands.

        Example calls for supported scenarios:

        Moves BlueFS data from main device to LV already attached as DB:

        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/db

        Moves BlueFS data from shared main device to LV which will be attached
        as a new DB:

        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/new_db

        Moves BlueFS data from DB device to new LV, DB is replaced:

        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db --target vgname/new_db

        Moves BlueFS data from main and DB devices to new LV, DB is replaced:

        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db --target vgname/new_db

        Moves BlueFS data from main, DB and WAL devices to new LV, WAL is
        removed and DB is replaced:

        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db wal --target vgname/new_db

        Moves BlueFS data from main, DB and WAL devices to main device, WAL
        and DB are removed:

        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db wal --target vgname/data

        """)

        parser = self.make_parser('ceph-volume lvm migrate', sub_command_help)

        if len(self.argv) == 0:
            print(sub_command_help)
            return

        self.args = parser.parse_args(self.argv)

        self.migrate_osd()

class NewVolume(object):
    def __init__(self, create_type, argv):
        self.create_type = create_type
        self.argv = argv

    def make_parser(self, prog, sub_command_help):
        parser = argparse.ArgumentParser(
            prog=prog,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )

        parser.add_argument(
            '--osd-id',
            required=True,
            help='Specify an OSD ID to attach new volume to',
            type=valid_osd_id,
        )

        parser.add_argument(
            '--osd-fsid',
            required=True,
            help='Specify an OSD FSID to attach new volume to',
        )
        parser.add_argument(
            '--target',
            required=True,
            help='Specify target Logical Volume (LV) to attach',
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip checking OSD systemd unit',
        )
        return parser

    @decorators.needs_root
    def make_new_volume(self, osd_id, osd_fsid, devices, target_lv):
        osd_path = get_osd_path(osd_id, osd_fsid)
        mlogger.info(
            'Making new volume at {} for OSD: {} ({})'.format(
                target_lv.lv_path, osd_id, osd_path))
        tag_tracker = VolumeTagTracker(devices, target_lv)

        try:
            tag_tracker.update_tags_when_lv_create(self.create_type)

            stdout, stderr, exit_code = process.call([
                'ceph-bluestore-tool',
                '--path',
                osd_path,
                '--dev-target',
                target_lv.lv_path,
                '--command',
                'bluefs-bdev-new-{}'.format(self.create_type)
            ])
            if exit_code != 0:
                mlogger.error(
                    'Failed to attach new volume, error code: {}'.format(
                        exit_code))
                raise SystemExit(
                    "Failed to attach new volume: {}".format(
                        self.args.target))
            else:
                system.chown(os.path.join(osd_path, "block.{}".format(
                    self.create_type)))
                terminal.success('New volume attached.')
        except:
            tag_tracker.undo()
            raise
        return
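
    # For reference, the invocation built above has the form (hypothetical
    # paths, with create_type 'wal' as an example):
    #   ceph-bluestore-tool --path /var/lib/ceph/osd/ceph-1 \
    #       --dev-target /dev/vg/new_wal --command bluefs-bdev-new-wal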

    @decorators.needs_root
    def new_volume(self):
        if self.args.osd_id and not self.args.no_systemd:
            osd_is_running = systemctl.osd_is_active(self.args.osd_id)
            if osd_is_running:
                mlogger.error('OSD is running, stop it with:'
                              ' systemctl stop ceph-osd@{}'.format(
                                  self.args.osd_id))
                raise SystemExit(
                    'Unable to attach new volume for OSD: {}'.format(
                        self.args.osd_id))

        target_lv = api.get_lv_by_fullname(self.args.target)
        if not target_lv:
            mlogger.error(
                'Target path {} is not a Logical Volume'.format(
                    self.args.target))
            raise SystemExit(
                'Unable to attach new volume: {}'.format(self.args.target))
        if target_lv.used_by_ceph:
            mlogger.error(
                'Target Logical Volume is already used by ceph: {}'.format(
                    self.args.target))
            raise SystemExit(
                'Unable to attach new volume: {}'.format(self.args.target))
        else:
            devices = find_associated_devices(self.args.osd_id,
                                              self.args.osd_fsid)
            self.make_new_volume(
                self.args.osd_id,
                self.args.osd_fsid,
                devices,
                target_lv)

class NewWAL(NewVolume):

    help = 'Allocate new WAL volume for OSD at specified Logical Volume'

    def __init__(self, argv):
        super(NewWAL, self).__init__("wal", argv)

    def main(self):
        sub_command_help = dedent("""
        Attaches the given logical volume to the given OSD as a WAL volume.
        Logical volume format is vg/lv. Fails if the OSD already has a WAL
        attached.

        Example:

        Attach vgname/lvname as a WAL volume to OSD 1

        ceph-volume lvm new-wal --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_wal
        """)
        parser = self.make_parser('ceph-volume lvm new-wal', sub_command_help)

        if len(self.argv) == 0:
            print(sub_command_help)
            return

        self.args = parser.parse_args(self.argv)

        self.new_volume()

class NewDB(NewVolume):

    help = 'Allocate new DB volume for OSD at specified Logical Volume'

    def __init__(self, argv):
        super(NewDB, self).__init__("db", argv)

    def main(self):
        sub_command_help = dedent("""
        Attaches the given logical volume to the given OSD as a DB volume.
        Logical volume format is vg/lv. Fails if the OSD already has a DB
        attached.

        Example:

        Attach vgname/lvname as a DB volume to OSD 1

        ceph-volume lvm new-db --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_db
        """)

        parser = self.make_parser('ceph-volume lvm new-db', sub_command_help)
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        self.args = parser.parse_args(self.argv)

        self.new_volume()