# ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
# (mirrored at git.proxmox.com)
from __future__ import print_function
import json
import os
from textwrap import dedent
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import system, disk
from ceph_volume import conf, decorators, terminal
from ceph_volume.api import lvm as api
from .common import prepare_parser
def prepare_filestore(device, journal, secrets, id_=None, fsid=None):
    """
    Prepare a filestore OSD: register it with the cluster, then format,
    mount, and populate its data device.

    :param device: The name of the volume group or lvm to work with
    :param journal: similar to device but can also be a regular/plain disk
    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
    :param id_: The OSD id
    :param fsid: The OSD fsid, also known as the OSD UUID
    """
    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
    json_secrets = json.dumps(secrets)

    # allow re-using an existing fsid, in case prepare failed
    fsid = fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = id_ or prepare_utils.create_id(fsid, json_secrets)

    # lay the OSD down step by step: directory, filesystem, mount, journal
    prepare_utils.create_path(osd_id)
    prepare_utils.format_device(device)
    # mount the data device
    prepare_utils.mount_osd(device, osd_id)
    # symlink the journal into the OSD directory
    prepare_utils.link_journal(journal, osd_id)
    # get the latest monmap
    prepare_utils.get_monmap(osd_id)
    # prepare the osd filesystem
    prepare_utils.osd_mkfs(osd_id, fsid)
    # write the OSD keyring if it doesn't exist already
    prepare_utils.write_keyring(osd_id, cephx_secret)
def prepare_bluestore(args=None):
    """
    Prepare a bluestore OSD. Not implemented yet.

    :param args: parsed arguments from ``Prepare.prepare`` (currently unused;
        accepted with a default because the caller passes them in)
    :raises NotImplementedError: always, until bluestore support lands
    """
    # Bug fix: the original `raise NotImplemented()` tried to call the
    # non-callable ``NotImplemented`` sentinel (meant for binary dunder
    # methods), which raises TypeError instead of signalling "not
    # implemented". ``NotImplementedError`` is the correct exception.
    raise NotImplementedError('bluestore support is not yet implemented')
47 class Prepare(object):
49 help = 'Format an LVM device and associate it with an OSD'
51 def __init__(self
, argv
):
54 def get_journal_ptuuid(self
, argument
):
55 uuid
= disk
.get_partuuid(argument
)
57 terminal
.error('blkid could not detect a PARTUUID for device: %s' % argument
)
58 raise RuntimeError('unable to use device for a journal')
61 def get_journal_lv(self
, argument
):
63 Perform some parsing of the value of ``--journal`` so that the process
64 can determine correctly if it got a device path or an lv
65 :param argument: The value of ``--journal``, that will need to be split
66 to retrieve the actual lv
69 vg_name
, lv_name
= argument
.split('/')
70 except (ValueError, AttributeError):
72 return api
.get_lv(lv_name
=lv_name
, vg_name
=vg_name
)
74 @decorators.needs_root
75 def prepare(self
, args
):
76 # FIXME we don't allow re-using a keyring, we always generate one for the
77 # OSD, this needs to be fixed. This could either be a file (!) or a string
78 # (!!) or some flags that we would need to compound into a dict so that we
79 # can convert to JSON (!!!)
80 secrets
= {'cephx_secret': prepare_utils
.create_key()}
82 cluster_fsid
= conf
.ceph
.get('global', 'fsid')
83 fsid
= args
.osd_fsid
or system
.generate_uuid()
84 #osd_id = args.osd_id or prepare_utils.create_id(fsid)
85 # allow re-using an id, in case a prepare failed
86 osd_id
= args
.osd_id
or prepare_utils
.create_id(fsid
, json
.dumps(secrets
))
87 vg_name
, lv_name
= args
.data
.split('/')
89 data_lv
= api
.get_lv(lv_name
=lv_name
, vg_name
=vg_name
)
91 # we must have either an existing data_lv or a newly created, so lets make
92 # sure that the tags are correct
94 raise RuntimeError('no data logical volume found with: %s' % args
.data
)
97 raise RuntimeError('--journal is required when using --filestore')
99 journal_lv
= self
.get_journal_lv(args
.journal
)
101 journal_device
= journal_lv
.lv_path
102 journal_uuid
= journal_lv
.lv_uuid
103 # we can only set tags on an lv, the pv (if any) can't as we
104 # aren't making it part of an lvm group (vg)
105 journal_lv
.set_tags({
106 'ceph.type': 'journal',
107 'ceph.osd_fsid': fsid
,
108 'ceph.osd_id': osd_id
,
109 'ceph.cluster_fsid': cluster_fsid
,
110 'ceph.journal_device': journal_device
,
111 'ceph.journal_uuid': journal_uuid
,
112 'ceph.data_device': data_lv
.lv_path
,
113 'ceph.data_uuid': data_lv
.lv_uuid
,
117 elif os
.path
.isfile(args
.journal
):
119 journal_device
= args
.journal
121 # otherwise assume this is a regular disk partition
123 journal_uuid
= self
.get_journal_ptuuid(args
.journal
)
124 journal_device
= args
.journal
128 'ceph.osd_fsid': fsid
,
129 'ceph.osd_id': osd_id
,
130 'ceph.cluster_fsid': cluster_fsid
,
131 'ceph.journal_device': journal_device
,
132 'ceph.journal_uuid': journal_uuid
,
133 'ceph.data_device': data_lv
.lv_path
,
134 'ceph.data_uuid': data_lv
.lv_uuid
,
145 prepare_bluestore(args
)
148 sub_command_help
= dedent("""
149 Prepare an OSD by assigning an ID and FSID, registering them with the
150 cluster with an ID and FSID, formatting and mounting the volume, and
151 finally by adding all the metadata to the logical volumes using LVM
152 tags, so that it can later be discovered.
154 Once the OSD is ready, an ad-hoc systemd unit will be enabled so that
155 it can later get activated and the OSD daemon can get started.
157 Most basic Usage looks like (journal will be collocated from the same volume group):
159 ceph-volume lvm prepare --data {volume group name}
162 Example calls for supported scenarios:
164 Dedicated volume group for Journal(s)
165 -------------------------------------
167 Existing logical volume (lv) or device:
169 ceph-volume lvm prepare --data {logical volume} --journal /path/to/{lv}|{device}
173 ceph-volume lvm prepare --data {data volume group} --journal {journal volume group}
175 Collocated (same group) for data and journal
176 --------------------------------------------
178 ceph-volume lvm prepare --data {volume group}
181 parser
= prepare_parser(
182 prog
='ceph-volume lvm prepare',
183 description
=sub_command_help
,
185 if len(self
.argv
) == 0:
186 print(sub_command_help
)
188 args
= parser
.parse_args(self
.argv
)