from __future__ import print_function
import json
import logging
-import uuid
from textwrap import dedent
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import encryption as encryption_utils
# get the latest monmap
prepare_utils.get_monmap(osd_id)
# prepare the osd filesystem
- prepare_utils.osd_mkfs_filestore(osd_id, fsid)
+ prepare_utils.osd_mkfs_filestore(osd_id, fsid, cephx_secret)
# write the OSD keyring if it doesn't exist already
prepare_utils.write_keyring(osd_id, cephx_secret)
if secrets.get('dmcrypt_key'):
"""
if disk.is_partition(arg) or disk.is_device(arg):
# we must create a vg, and then a single lv
- vg_name = "ceph-%s" % str(uuid.uuid4())
- api.create_vg(vg_name, arg)
+ vg = api.create_vg(arg)
lv_name = "osd-%s-%s" % (device_type, osd_fsid)
return api.create_lv(
lv_name,
- vg_name, # the volume group
+ vg.name, # the volume group
tags={'ceph.type': device_type})
else:
error = [
raise RuntimeError('no data logical volume found with: %s' % arg)
- def safe_prepare(self, args):
+ def safe_prepare(self, args=None):
"""
An intermediate step between `main()` and `prepare()` so that we can
capture the `self.osd_id` in case we need to rollback
+
+ :param args: Injected args, usually from `lvm create` which compounds
+ both `prepare` and `create`
"""
+ if args is not None:
+ self.args = args
try:
- self.prepare(args)
+ self.prepare()
except Exception:
logger.exception('lvm prepare was unable to complete')
logger.info('will rollback OSD ID creation')
- rollback_osd(args, self.osd_id)
+ rollback_osd(self.args, self.osd_id)
raise
- terminal.success("ceph-volume lvm prepare successful for: %s" % args.data)
+ terminal.success("ceph-volume lvm prepare successful for: %s" % self.args.data)
+
+ def get_cluster_fsid(self):
+ """
+ Allows using --cluster-fsid as an argument, but can fall back to reading
+ from ceph.conf if that is unset (the default behavior).
+ """
+ if self.args.cluster_fsid:
+ return self.args.cluster_fsid
+ else:
+ return conf.ceph.get('global', 'fsid')
@decorators.needs_root
- def prepare(self, args):
+ def prepare(self):
# FIXME we don't allow re-using a keyring, we always generate one for the
# OSD, this needs to be fixed. This could either be a file (!) or a string
# (!!) or some flags that we would need to compound into a dict so that we
# can convert to JSON (!!!)
secrets = {'cephx_secret': prepare_utils.create_key()}
cephx_lockbox_secret = ''
- encrypted = 1 if args.dmcrypt else 0
+ encrypted = 1 if self.args.dmcrypt else 0
cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()
if encrypted:
secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key()
secrets['cephx_lockbox_secret'] = cephx_lockbox_secret
- cluster_fsid = conf.ceph.get('global', 'fsid')
- osd_fsid = args.osd_fsid or system.generate_uuid()
- crush_device_class = args.crush_device_class
+ cluster_fsid = self.get_cluster_fsid()
+
+ osd_fsid = self.args.osd_fsid or system.generate_uuid()
+ crush_device_class = self.args.crush_device_class
if crush_device_class:
secrets['crush_device_class'] = crush_device_class
# reuse a given ID if it exists, otherwise create a new ID
- self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets), osd_id=args.osd_id)
+ self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets), osd_id=self.args.osd_id)
tags = {
'ceph.osd_fsid': osd_fsid,
'ceph.osd_id': self.osd_id,
'ceph.cluster_name': conf.cluster,
'ceph.crush_device_class': crush_device_class,
}
- if args.filestore:
- if not args.journal:
+ if self.args.filestore:
+ if not self.args.journal:
raise RuntimeError('--journal is required when using --filestore')
- data_lv = self.get_lv(args.data)
+ data_lv = self.get_lv(self.args.data)
if not data_lv:
- data_lv = self.prepare_device(args.data, 'data', cluster_fsid, osd_fsid)
+ data_lv = self.prepare_device(self.args.data, 'data', cluster_fsid, osd_fsid)
tags['ceph.data_device'] = data_lv.lv_path
tags['ceph.data_uuid'] = data_lv.lv_uuid
tags['ceph.encrypted'] = encrypted
tags['ceph.vdo'] = api.is_vdo(data_lv.lv_path)
- journal_device, journal_uuid, tags = self.setup_device('journal', args.journal, tags)
+ journal_device, journal_uuid, tags = self.setup_device(
+ 'journal', self.args.journal, tags
+ )
tags['ceph.type'] = 'data'
data_lv.set_tags(tags)
self.osd_id,
osd_fsid,
)
- elif args.bluestore:
- block_lv = self.get_lv(args.data)
+ elif self.args.bluestore:
+ block_lv = self.get_lv(self.args.data)
if not block_lv:
- block_lv = self.prepare_device(args.data, 'block', cluster_fsid, osd_fsid)
+ block_lv = self.prepare_device(self.args.data, 'block', cluster_fsid, osd_fsid)
tags['ceph.block_device'] = block_lv.lv_path
tags['ceph.block_uuid'] = block_lv.lv_uuid
tags['ceph.encrypted'] = encrypted
tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path)
- wal_device, wal_uuid, tags = self.setup_device('wal', args.block_wal, tags)
- db_device, db_uuid, tags = self.setup_device('db', args.block_db, tags)
+ wal_device, wal_uuid, tags = self.setup_device('wal', self.args.block_wal, tags)
+ db_device, db_uuid, tags = self.setup_device('db', self.args.block_db, tags)
tags['ceph.type'] = 'block'
block_lv.set_tags(tags)
Once the OSD is ready, an ad-hoc systemd unit will be enabled so that
it can later get activated and the OSD daemon can get started.
- Most basic Usage looks like (journal will be collocated from the same volume group):
-
- ceph-volume lvm prepare --data {volume group name}
-
Encryption is supported via dmcrypt and the --dmcrypt flag.
- Example calls for supported scenarios:
-
- Dedicated volume group for Journal(s)
- -------------------------------------
-
- Existing logical volume (lv) or device:
-
- ceph-volume lvm prepare --filestore --data {vg/lv} --journal /path/to/device
-
- Or:
-
- ceph-volume lvm prepare --filestore --data {vg/lv} --journal {vg/lv}
-
- Existing block device, that will be made a group and logical volume:
-
- ceph-volume lvm prepare --filestore --data /path/to/device --journal {vg/lv}
-
- Bluestore
- ---------
-
- Existing logical volume (lv):
+ Existing logical volume (lv):
- ceph-volume lvm prepare --bluestore --data {vg/lv}
+ ceph-volume lvm prepare --data {vg/lv}
- Existing block device, that will be made a group and logical volume:
+ Existing block device, that will be made a group and logical volume:
- ceph-volume lvm prepare --bluestore --data /path/to/device
+ ceph-volume lvm prepare --data /path/to/device
- Optionally, can consume db and wal devices or logical volumes:
+ Optionally, can consume db and wal partitions or logical volumes:
- ceph-volume lvm prepare --bluestore --data {vg/lv} --block.wal {device} --block-db {vg/lv}
+ ceph-volume lvm prepare --data {vg/lv} --block.wal {partition} --block.db {vg/lv}
""")
parser = prepare_parser(
prog='ceph-volume lvm prepare',
print(sub_command_help)
return
exclude_group_options(parser, argv=self.argv, groups=['filestore', 'bluestore'])
- args = parser.parse_args(self.argv)
+ self.args = parser.parse_args(self.argv)
+ # the unfortunate mix of one superset for both filestore and bluestore
+ # makes this validation cumbersome
+ if self.args.filestore:
+ if not self.args.journal:
+ raise SystemExit('--journal is required when using --filestore')
# Default to bluestore here since defaulting it in add_argument may
# cause both to be True
- if not args.bluestore and not args.filestore:
- args.bluestore = True
- self.safe_prepare(args)
+ if not self.args.bluestore and not self.args.filestore:
+ self.args.bluestore = True
+ self.safe_prepare(self.args)