from __future__ import print_function
import argparse
import logging
import os
from textwrap import dedent
from ceph_volume.util import system, disk, merge_dict
from ceph_volume.util.device import Device
from ceph_volume.util.arg_validators import valid_osd_id
from ceph_volume.util import encryption as encryption_utils
from ceph_volume import decorators, terminal, process
from ceph_volume.api import lvm as api
from ceph_volume.systemd import systemctl


logger = logging.getLogger(__name__)
mlogger = terminal.MultiLogger(__name__)

def get_cluster_name(osd_id, osd_fsid):
    """
    From an ``osd_id`` and/or an ``osd_fsid``, find all the LVs in the
    system that match those tag values, then return the cluster_name for
    the first one.
    """
    lv_tags = {}
    lv_tags['ceph.osd_id'] = osd_id
    lv_tags['ceph.osd_fsid'] = osd_fsid

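    # these ceph.* tags are plain LVM tags, so an equivalent ad-hoc query
    # from a shell would be along the lines of `lvs -o lv_path,lv_tags`
    # filtered on ceph.osd_id/ceph.osd_fsid (illustrative only)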
    lvs = api.get_lvs(tags=lv_tags)
    if not lvs:
        mlogger.error(
            'Unable to find any LV for source OSD: id:{} fsid:{}'.format(
                osd_id, osd_fsid))
        raise SystemExit('Unexpected error, terminating')
    return next(iter(lvs)).tags["ceph.cluster_name"]

def get_osd_path(osd_id, osd_fsid):
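    # for the default cluster name this yields e.g. '/var/lib/ceph/osd/ceph-1'
    # (illustrative; the cluster name is looked up from the LV tags)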
    return '/var/lib/ceph/osd/{}-{}'.format(
        get_cluster_name(osd_id, osd_fsid), osd_id)

def find_associated_devices(osd_id, osd_fsid):
    """
    From an ``osd_id`` and/or an ``osd_fsid``, find all the LVs in the
    system that match those tag values, further detect if any partitions are
    part of the OSD, and then return the set of LVs and partitions (if any).
    """
    lv_tags = {}
    lv_tags['ceph.osd_id'] = osd_id
    lv_tags['ceph.osd_fsid'] = osd_fsid

    lvs = api.get_lvs(tags=lv_tags)
    if not lvs:
        mlogger.error(
            'Unable to find any LV for source OSD: id:{} fsid:{}'.format(
                osd_id, osd_fsid))
        raise SystemExit('Unexpected error, terminating')

    devices = set(ensure_associated_lvs(lvs, lv_tags))
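    # illustrative shape of the result for a typical OSD with a DB volume:
    #   [(Device('/dev/vg/osd-block'), 'block'),
    #    (Device('/dev/vg/osd-db'), 'db')]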
    return [(Device(path), type) for path, type in devices if path]

def ensure_associated_lvs(lvs, lv_tags):
    """
    Go through each LV and determine whether its backing devices (block,
    db, wal) are LVs or partitions, so that they can be accurately reported.
    """
    # look for many LVs for each backing type, because it is possible to
    # receive a filtering for osd.1, and have multiple failed deployments
    # leaving many journals with osd.1 - usually, only a single LV will be
    # returned

    block_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'block'}))
    db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
    wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
    backing_devices = [(block_lvs, 'block'), (db_lvs, 'db'),
                       (wal_lvs, 'wal')]

    verified_devices = []

    for lv in lvs:
        # go through each lv and append it, otherwise query `blkid` to find
        # a physical device. Do this for each type (block, db, wal)
        # regardless of whether they have been processed in the previous LV,
        # so that bad devices with the same ID can be caught
        for ceph_lvs, type in backing_devices:

            if ceph_lvs:
                verified_devices.extend([(l.lv_path, type) for l in ceph_lvs])
                continue

            # must be a disk partition; by querying blkid by the uuid we are
            # ensuring that the device path is always correct
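            # (roughly `blkid -t PARTUUID="<device_uuid>" -o device` on the
            # command line; the actual lookup below is done via
            # disk.get_device_from_partuuid)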
            try:
                device_uuid = lv.tags['ceph.{}_uuid'.format(type)]
            except KeyError:
                # Bluestore will not have ceph.journal_uuid, and Filestore
                # will not have ceph.db_uuid
                continue

            osd_device = disk.get_device_from_partuuid(device_uuid)
            if not osd_device:
                # if the osd_device is not found by the partuuid, then it is
                # not possible to ensure this device exists anymore, so skip it
                continue
            verified_devices.append((osd_device, type))

    return verified_devices

class VolumeTagTracker(object):
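    """
    Track the LVM tags of the OSD's data/db/wal volumes and of the target
    LV: snapshot the original tags, apply updates as a migration or volume
    attach proceeds, and restore the snapshot via undo() on failure.
    """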
    def __init__(self, devices, target_lv):
        self.target_lv = target_lv
        self.data_device = self.db_device = self.wal_device = None
        for device, type in devices:
            if type == 'block':
                self.data_device = device
            elif type == 'db':
                self.db_device = device
            elif type == 'wal':
                self.wal_device = device
        if not self.data_device:
            mlogger.error('Data device not found')
            raise SystemExit(
                "Unexpected error, terminating")
        if not self.data_device.is_lv:
            mlogger.error('Data device isn\'t LVM')
            raise SystemExit(
                "Unexpected error, terminating")

        self.old_target_tags = self.target_lv.tags.copy()
        self.old_data_tags = (
            self.data_device.lv_api.tags.copy()
            if self.data_device.is_lv else None)
        self.old_db_tags = (
            self.db_device.lv_api.tags.copy()
            if self.db_device and self.db_device.is_lv else None)
        self.old_wal_tags = (
            self.wal_device.lv_api.tags.copy()
            if self.wal_device and self.wal_device.is_lv else None)

    def update_tags_when_lv_create(self, create_type):
        tags = {}
        if not self.data_device.is_lv:
            mlogger.warning(
                'Data device is not LVM, will not update LVM tags')
        else:
            tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid
            tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path
            self.data_device.lv_api.set_tags(tags)

            tags = self.data_device.lv_api.tags.copy()
            tags["ceph.type"] = create_type
            self.target_lv.set_tags(tags)

        aux_dev = None
        if create_type == "db" and self.wal_device:
            aux_dev = self.wal_device
        elif create_type == "wal" and self.db_device:
            aux_dev = self.db_device
        else:
            return
        if not aux_dev.is_lv:
            mlogger.warning(
                '{} device is not LVM, will not update LVM tags'.format(
                    create_type.upper()))
        else:
            tags = {}
            tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid
            tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path
            aux_dev.lv_api.set_tags(tags)

    def remove_lvs(self, source_devices, target_type):
        remaining_devices = [self.data_device, self.db_device, self.wal_device]

        outdated_tags = []
        for device, type in source_devices:
            if type == "block" or type == target_type:
                continue
            remaining_devices.remove(device)
            if device.is_lv:
                outdated_tags.append("ceph.{}_uuid".format(type))
                outdated_tags.append("ceph.{}_device".format(type))
                device.lv_api.clear_tags()
        if len(outdated_tags) > 0:
            for d in remaining_devices:
                if d and d.is_lv:
                    d.lv_api.clear_tags(outdated_tags)

    def replace_lvs(self, source_devices, target_type):
        remaining_devices = [self.data_device]
        if self.db_device:
            remaining_devices.append(self.db_device)
        if self.wal_device:
            remaining_devices.append(self.wal_device)

        outdated_tags = []
        for device, type in source_devices:
            if type == "block":
                continue
            remaining_devices.remove(device)
            if device.is_lv:
                outdated_tags.append("ceph.{}_uuid".format(type))
                outdated_tags.append("ceph.{}_device".format(type))
                device.lv_api.clear_tags()

        new_tags = {}
        new_tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid
        new_tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path

        for d in remaining_devices:
            if d and d.is_lv:
                if len(outdated_tags) > 0:
                    d.lv_api.clear_tags(outdated_tags)
                d.lv_api.set_tags(new_tags)

        if not self.data_device.is_lv:
            mlogger.warning(
                'Data device is not LVM, will not properly update target LVM tags')
        else:
            tags = self.data_device.lv_api.tags.copy()

            tags["ceph.type"] = target_type
            tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid
            tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path
            self.target_lv.set_tags(tags)

    def undo(self):
        mlogger.info('Undoing lv tag set')
        if self.data_device:
            if self.old_data_tags:
                self.data_device.lv_api.set_tags(self.old_data_tags)
            else:
                self.data_device.lv_api.clear_tags()
        if self.db_device:
            if self.old_db_tags:
                self.db_device.lv_api.set_tags(self.old_db_tags)
            else:
                self.db_device.lv_api.clear_tags()
        if self.wal_device:
            if self.old_wal_tags:
                self.wal_device.lv_api.set_tags(self.old_wal_tags)
            else:
                self.wal_device.lv_api.clear_tags()
        if self.old_target_tags:
            self.target_lv.set_tags(self.old_target_tags)
        else:
            self.target_lv.clear_tags()

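# Typical usage pattern, mirroring the callers below (a minimal sketch;
# `devices` comes from find_associated_devices() and `target_lv` from
# api.get_lv_by_fullname()):
#
#   tag_tracker = VolumeTagTracker(devices, target_lv)
#   try:
#       tag_tracker.replace_lvs(source_devices, target_type)
#       ...  # run ceph-bluestore-tool
#   except:
#       tag_tracker.undo()  # roll the LVM tags back
#       raise
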
class Migrate(object):

    help = 'Migrate BlueFS data from one LVM device to another'

    def __init__(self, argv):
        self.argv = argv
        self.osd_id = None

    def get_source_devices(self, devices, target_type=""):
        ret = []
        for device, type in devices:
            if type == target_type:
                continue
            if type == 'block':
                if 'data' not in self.args.from_:
                    continue
            elif type == 'db':
                if 'db' not in self.args.from_:
                    continue
            elif type == 'wal':
                if 'wal' not in self.args.from_:
                    continue
            ret.append([device, type])
        if ret == []:
            mlogger.error('Source device list is empty')
            raise SystemExit(
                'Unable to migrate to : {}'.format(self.args.target))
        return ret

    # ceph-bluestore-tool uses the following replacement rules
    # (in the order of precedence, stop on the first match):
    # if the source list has a DB volume - the target device replaces it.
    # if the source list has a WAL volume - the target device replaces it.
    # if the source list has the slow volume only - the operation is not
    # permitted and requires explicit allocation via the new-db/new-wal
    # command.
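    # For example (illustrative): `--from data db wal` selects 'db' as the
    # target type, `--from data wal` selects 'wal', and `--from data` alone
    # selects nothing, so the caller aborts with a hint to use new-db/new-wal.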
    def get_target_type_by_source(self, devices):
        ret = None
        for device, type in devices:
            if type == 'db':
                return 'db'
            elif type == 'wal':
                ret = 'wal'
        return ret

    def get_filename_by_type(self, type):
        filename = 'block'
        if type == 'db' or type == 'wal':
            filename += '.' + type
        return filename

    def get_source_args(self, osd_path, devices):
        ret = []
        for device, type in devices:
            ret = ret + ["--devs-source", os.path.join(
                osd_path, self.get_filename_by_type(type))]
        return ret

    def close_encrypted(self, source_devices):
        # close source device(s) if they're encrypted and have been removed
        for device, type in source_devices:
            if (type == 'db' or type == 'wal'):
                logger.info("closing dmcrypt volume {}"
                            .format(device.lv_api.lv_uuid))
                encryption_utils.dmcrypt_close(
                    mapping=device.lv_api.lv_uuid, skip_path_check=True)

    @decorators.needs_root
    def migrate_to_new(self, osd_id, osd_fsid, devices, target_lv):
        source_devices = self.get_source_devices(devices)
        target_type = self.get_target_type_by_source(source_devices)
        if not target_type:
            mlogger.error(
                "Unable to determine new volume type, please use the"
                " new-db or new-wal command first.")
            raise SystemExit(
                "Unable to migrate to : {}".format(self.args.target))

        target_path = target_lv.lv_path
        tag_tracker = VolumeTagTracker(devices, target_lv)
        # prepare and encrypt the target if the data volume is encrypted
        if tag_tracker.data_device.lv_api.encrypted:
            secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
            mlogger.info('Preparing dmcrypt for {}, uuid {}'.format(
                target_lv.lv_path, target_lv.lv_uuid))
            target_path = encryption_utils.prepare_dmcrypt(
                key=secret, device=target_path, mapping=target_lv.lv_uuid)
        try:
            # we need to update the lvm tags for all the remaining volumes
            # and clear them for the ones which are to be removed

            # ceph-bluestore-tool removes source volume(s) other than the
            # block one and attaches the target one after successful migration
            tag_tracker.replace_lvs(source_devices, target_type)

            osd_path = get_osd_path(osd_id, osd_fsid)
            source_args = self.get_source_args(osd_path, source_devices)
            mlogger.info("Migrate to new, Source: {} Target: {}".format(
                source_args, target_path))
            stdout, stderr, exit_code = process.call([
                'ceph-bluestore-tool',
                '--path',
                osd_path,
                '--dev-target',
                target_path,
                '--command',
                'bluefs-bdev-migrate'] +
                source_args)
            if exit_code != 0:
                mlogger.error(
                    'Failed to migrate device, error code:{}'.format(
                        exit_code))
                raise SystemExit(
                    'Failed to migrate to : {}'.format(self.args.target))

            system.chown(os.path.join(osd_path, "block.{}".format(
                target_type)))
            if tag_tracker.data_device.lv_api.encrypted:
                self.close_encrypted(source_devices)
            terminal.success('Migration successful.')

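        # a bare `except` is used deliberately: it also catches the
        # SystemExit raised above, so the LVM tag changes are rolled back
        # before re-raising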
        except:
            tag_tracker.undo()
            raise

        return

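    # the resulting invocation looks like (illustrative values):
    #   ceph-bluestore-tool --path /var/lib/ceph/osd/ceph-1 \
    #       --devs-source /var/lib/ceph/osd/ceph-1/block.db \
    #       --dev-target /dev/vgname/new_db --command bluefs-bdev-migrate
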
    @decorators.needs_root
    def migrate_to_existing(self, osd_id, osd_fsid, devices, target_lv):
        target_type = target_lv.tags["ceph.type"]
        if target_type == "wal":
            mlogger.error("Migrate to WAL is not supported")
            raise SystemExit(
                "Unable to migrate to : {}".format(self.args.target))
        target_filename = self.get_filename_by_type(target_type)
        if target_filename == "":
            mlogger.error(
                "Target Logical Volume doesn't have a proper volume type "
                "(ceph.type LVM tag): {}".format(target_type))
            raise SystemExit(
                "Unable to migrate to : {}".format(self.args.target))

        osd_path = get_osd_path(osd_id, osd_fsid)
        source_devices = self.get_source_devices(devices, target_type)
        target_path = os.path.join(osd_path, target_filename)
        tag_tracker = VolumeTagTracker(devices, target_lv)

        try:
            # ceph-bluestore-tool removes source volume(s) other than the
            # block and target ones after successful migration
            tag_tracker.remove_lvs(source_devices, target_type)
            source_args = self.get_source_args(osd_path, source_devices)
            mlogger.info("Migrate to existing, Source: {} Target: {}".format(
                source_args, target_path))
            stdout, stderr, exit_code = process.call([
                'ceph-bluestore-tool',
                '--path',
                osd_path,
                '--dev-target',
                target_path,
                '--command',
                'bluefs-bdev-migrate'] +
                source_args)
            if exit_code != 0:
                mlogger.error(
                    'Failed to migrate device, error code:{}'.format(
                        exit_code))
                raise SystemExit(
                    'Failed to migrate to : {}'.format(self.args.target))
            if tag_tracker.data_device.lv_api.encrypted:
                self.close_encrypted(source_devices)
            terminal.success('Migration successful.')
        except:
            tag_tracker.undo()
            raise

        return

    @decorators.needs_root
    def migrate_osd(self):
        if self.args.osd_id and not self.args.no_systemd:
            osd_is_running = systemctl.osd_is_active(self.args.osd_id)
            if osd_is_running:
                mlogger.error('OSD is running, stop it with: '
                              'systemctl stop ceph-osd@{}'.format(
                                  self.args.osd_id))
                raise SystemExit(
                    'Unable to migrate devices associated with OSD ID: {}'
                    .format(self.args.osd_id))

        target_lv = api.get_lv_by_fullname(self.args.target)
        if not target_lv:
            mlogger.error(
                'Target path "{}" is not a Logical Volume'.format(
                    self.args.target))
            raise SystemExit(
                'Unable to migrate to : {}'.format(self.args.target))
        devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid)
        if not target_lv.used_by_ceph:
            self.migrate_to_new(self.args.osd_id, self.args.osd_fsid,
                                devices,
                                target_lv)
        else:
            if (target_lv.tags['ceph.osd_id'] != self.args.osd_id or
                    target_lv.tags['ceph.osd_fsid'] != self.args.osd_fsid):
                mlogger.error(
                    'Target Logical Volume isn\'t used by the specified OSD: '
                    '{} FSID: {}'.format(self.args.osd_id,
                                         self.args.osd_fsid))
                raise SystemExit(
                    'Unable to migrate to : {}'.format(self.args.target))

            self.migrate_to_existing(self.args.osd_id, self.args.osd_fsid,
                                     devices,
                                     target_lv)

    def make_parser(self, prog, sub_command_help):
        parser = argparse.ArgumentParser(
            prog=prog,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )

        parser.add_argument(
            '--osd-id',
            required=True,
            help='Specify an OSD ID to detect associated devices for migration',
            type=valid_osd_id
        )

        parser.add_argument(
            '--osd-fsid',
            required=True,
            help='Specify an OSD FSID to detect associated devices for migration',
        )
        parser.add_argument(
            '--target',
            required=True,
            help='Specify the target Logical Volume (LV) to migrate data to',
        )
        parser.add_argument(
            '--from',
            nargs='*',
            dest='from_',
            required=True,
            choices=['data', 'db', 'wal'],
            help='Copy BlueFS data from the given source device(s)',
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip checking OSD systemd unit',
        )
        return parser

    def main(self):
        sub_command_help = dedent("""
        Moves BlueFS data from source volume(s) to the target one; source
        volumes (except the main, i.e. data or block, one) are removed on
        success. Only LVM volumes are permitted as the target, either one
        already attached or a new logical volume. In the latter case it is
        attached to the OSD, replacing one of the source devices. The
        following replacement rules apply (in the order of precedence, stop
        on the first match):
        * if the source list has a DB volume - the target device replaces it.
        * if the source list has a WAL volume - the target device replaces it.
        * if the source list has the slow volume only - the operation is not
          permitted and requires explicit allocation via the new-db/new-wal
          command.

        Example calls for supported scenarios:

          Moves BlueFS data from main device to LV already attached as DB:

            ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/db

          Moves BlueFS data from shared main device to LV which will be attached
          as a new DB:

            ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/new_db

          Moves BlueFS data from DB device to new LV, DB is replaced:

            ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db --target vgname/new_db

          Moves BlueFS data from main and DB devices to new LV, DB is replaced:

            ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db --target vgname/new_db

          Moves BlueFS data from main, DB and WAL devices to new LV, WAL is
          removed and DB is replaced:

            ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db wal --target vgname/new_db

          Moves BlueFS data from DB and WAL devices to main device, WAL
          and DB are removed:

            ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db wal --target vgname/data

        """)

        parser = self.make_parser('ceph-volume lvm migrate', sub_command_help)

        if len(self.argv) == 0:
            print(sub_command_help)
            return

        self.args = parser.parse_args(self.argv)

        self.migrate_osd()

class NewVolume(object):
    def __init__(self, create_type, argv):
        self.create_type = create_type
        self.argv = argv

    def make_parser(self, prog, sub_command_help):
        parser = argparse.ArgumentParser(
            prog=prog,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )

        parser.add_argument(
            '--osd-id',
            required=True,
            help='Specify an OSD ID to attach the new volume to',
            type=valid_osd_id,
        )

        parser.add_argument(
            '--osd-fsid',
            required=True,
            help='Specify an OSD FSID to attach the new volume to',
        )
        parser.add_argument(
            '--target',
            required=True,
            help='Specify the target Logical Volume (LV) to attach',
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip checking OSD systemd unit',
        )
        return parser

    @decorators.needs_root
    def make_new_volume(self, osd_id, osd_fsid, devices, target_lv):
        osd_path = get_osd_path(osd_id, osd_fsid)
        mlogger.info(
            'Making new volume at {} for OSD: {} ({})'.format(
                target_lv.lv_path, osd_id, osd_path))
        target_path = target_lv.lv_path
        tag_tracker = VolumeTagTracker(devices, target_lv)
        # prepare and encrypt the target if the data volume is encrypted
        if tag_tracker.data_device.lv_api.encrypted:
            secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
            mlogger.info('Preparing dmcrypt for {}, uuid {}'.format(
                target_lv.lv_path, target_lv.lv_uuid))
            target_path = encryption_utils.prepare_dmcrypt(
                key=secret, device=target_path, mapping=target_lv.lv_uuid)

        try:
            tag_tracker.update_tags_when_lv_create(self.create_type)

            stdout, stderr, exit_code = process.call([
                'ceph-bluestore-tool',
                '--path',
                osd_path,
                '--dev-target',
                target_path,
                '--command',
                'bluefs-bdev-new-{}'.format(self.create_type)
            ])
            if exit_code != 0:
                mlogger.error(
                    'Failed to attach new volume, error code:{}'.format(
                        exit_code))
                raise SystemExit(
                    "Failed to attach new volume: {}".format(
                        self.args.target))
            else:
                system.chown(os.path.join(osd_path, "block.{}".format(
                    self.create_type)))
                terminal.success('New volume attached.')
        except:
            tag_tracker.undo()
            raise
        return

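    # the resulting invocation looks like (illustrative values):
    #   ceph-bluestore-tool --path /var/lib/ceph/osd/ceph-1 \
    #       --dev-target /dev/vgname/new_db --command bluefs-bdev-new-db
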
    @decorators.needs_root
    def new_volume(self):
        if self.args.osd_id and not self.args.no_systemd:
            osd_is_running = systemctl.osd_is_active(self.args.osd_id)
            if osd_is_running:
                mlogger.error('OSD is running, stop it with:'
                              ' systemctl stop ceph-osd@{}'.format(
                                  self.args.osd_id))
                raise SystemExit(
                    'Unable to attach new volume for OSD: {}'.format(
                        self.args.osd_id))

        target_lv = api.get_lv_by_fullname(self.args.target)
        if not target_lv:
            mlogger.error(
                'Target path {} is not a Logical Volume'.format(
                    self.args.target))
            raise SystemExit(
                'Unable to attach new volume : {}'.format(self.args.target))
        if target_lv.used_by_ceph:
            mlogger.error(
                'Target Logical Volume is already used by ceph: {}'.format(
                    self.args.target))
            raise SystemExit(
                'Unable to attach new volume : {}'.format(self.args.target))
        else:
            devices = find_associated_devices(self.args.osd_id,
                                              self.args.osd_fsid)
            self.make_new_volume(
                self.args.osd_id,
                self.args.osd_fsid,
                devices,
                target_lv)

class NewWAL(NewVolume):

    help = 'Allocate a new WAL volume for an OSD at the specified Logical Volume'

    def __init__(self, argv):
        super(NewWAL, self).__init__("wal", argv)

    def main(self):
        sub_command_help = dedent("""
        Attaches the given logical volume to the given OSD as a WAL volume.
        Logical volume format is vg/lv. Fails if the OSD already has an
        attached WAL.

        Example:

          Attach vgname/new_wal as a WAL volume to OSD 1

            ceph-volume lvm new-wal --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_wal
        """)
        parser = self.make_parser('ceph-volume lvm new-wal', sub_command_help)

        if len(self.argv) == 0:
            print(sub_command_help)
            return

        self.args = parser.parse_args(self.argv)

        self.new_volume()

class NewDB(NewVolume):

    help = 'Allocate a new DB volume for an OSD at the specified Logical Volume'

    def __init__(self, argv):
        super(NewDB, self).__init__("db", argv)

    def main(self):
        sub_command_help = dedent("""
        Attaches the given logical volume to the given OSD as a DB volume.
        Logical volume format is vg/lv. Fails if the OSD already has an
        attached DB.

        Example:

          Attach vgname/new_db as a DB volume to OSD 1

            ceph-volume lvm new-db --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_db
        """)

        parser = self.make_parser('ceph-volume lvm new-db', sub_command_help)
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        self.args = parser.parse_args(self.argv)

        self.new_volume()
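
# These classes back the `ceph-volume lvm migrate`, `new-wal` and `new-db`
# subcommands (wired up by the lvm subcommand mapper); a rough sketch of
# driving one directly, with illustrative arguments:
#
#   Migrate(['--osd-id', '1', '--osd-fsid', '<uuid>',
#            '--from', 'db', '--target', 'vgname/new_db']).main()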