]> git.proxmox.com Git - ceph.git/blame - ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
bump version to 12.2.1-pve3
[ceph.git] / ceph / src / ceph-volume / ceph_volume / devices / lvm / prepare.py
CommitLineData
d2e6a577
FG
1from __future__ import print_function
2import json
3import os
4from textwrap import dedent
5from ceph_volume.util import prepare as prepare_utils
181888fb
FG
6from ceph_volume.util import system, disk
7from ceph_volume import conf, decorators, terminal
d2e6a577
FG
8from . import api
9from .common import prepare_parser
10
11
d2e6a577
FG
def prepare_filestore(device, journal, secrets, id_=None, fsid=None):
    """
    Set up a filestore OSD: format and mount the data device, link the
    journal, and build the OSD filesystem and keyring.

    :param device: The name of the volume group or lvm to work with
    :param journal: similar to device but can also be a regular/plain disk
    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
    :param id_: The OSD id
    :param fsid: The OSD fsid, also known as the OSD UUID
    """
    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
    json_secrets = json.dumps(secrets)

    # allow re-using an existing fsid and id, in case a prepare failed
    fsid = fsid or system.generate_uuid()
    osd_id = id_ or prepare_utils.create_id(fsid, json_secrets)

    prepare_utils.create_path(osd_id)            # create the osd directory
    prepare_utils.format_device(device)          # format the data device
    prepare_utils.mount_osd(device, osd_id)      # ...and mount it
    prepare_utils.link_journal(journal, osd_id)  # symlink the journal
    prepare_utils.get_monmap(osd_id)             # get the latest monmap
    prepare_utils.osd_mkfs(osd_id, fsid)         # prepare the osd filesystem
    # write the OSD keyring if it doesn't exist already
    prepare_utils.write_keyring(osd_id, cephx_secret)
41
42
def prepare_bluestore(args=None):
    """
    Set up a bluestore OSD (not implemented yet).

    :param args: The parsed CLI arguments (currently unused); accepted so the
                 ``Prepare.prepare`` caller, which passes ``args``, works.
    :raises NotImplementedError: always — bluestore support is pending.
    """
    # Must raise NotImplementedError: ``NotImplemented`` is a non-callable
    # singleton (meant for binary dunder methods), and ``NotImplemented()``
    # raises a confusing TypeError instead of signalling missing support.
    raise NotImplementedError()
45
46
class Prepare(object):

    help = 'Format an LVM device and associate it with an OSD'

    def __init__(self, argv):
        self.argv = argv

    def get_journal_ptuuid(self, argument):
        """
        Detect the PARTUUID of a raw device or partition passed in as
        ``--journal``, failing loudly when blkid cannot find one.

        :param argument: The device path to inspect
        :raises RuntimeError: when no PARTUUID can be detected
        """
        uuid = disk.get_partuuid(argument)
        if not uuid:
            terminal.error('blkid could not detect a PARTUUID for device: %s' % argument)
            raise RuntimeError('unable to use device for a journal')
        return uuid

    def get_journal_lv(self, argument):
        """
        Perform some parsing of the value of ``--journal`` so that the process
        can determine correctly if it got a device path or an lv.

        :param argument: The value of ``--journal``, that will need to be split
                         to retrieve the actual lv
        :return: the matching lv object, or ``None`` when the argument is not
                 in ``vg/lv`` form
        """
        try:
            vg_name, lv_name = argument.split('/')
        except (ValueError, AttributeError):
            # not a string, or not exactly "vg/lv": not a logical volume
            return None
        return api.get_lv(lv_name=lv_name, vg_name=vg_name)

    @decorators.needs_root
    def prepare(self, args):
        """
        Prepare an OSD from the parsed CLI arguments: generate (or re-use) an
        id and fsid, resolve the journal, tag the logical volumes, and hand
        off to the objectstore-specific prepare function.
        """
        # FIXME we don't allow re-using a keyring, we always generate one for the
        # OSD, this needs to be fixed. This could either be a file (!) or a string
        # (!!) or some flags that we would need to compound into a dict so that we
        # can convert to JSON (!!!)
        secrets = {'cephx_secret': prepare_utils.create_key()}

        cluster_fsid = conf.ceph.get('global', 'fsid')
        # allow re-using an fsid and id, in case a prepare failed
        fsid = args.osd_fsid or system.generate_uuid()
        osd_id = args.osd_id or prepare_utils.create_id(fsid, json.dumps(secrets))
        vg_name, lv_name = args.data.split('/')
        if args.filestore:
            data_lv = api.get_lv(lv_name=lv_name, vg_name=vg_name)

            # we must have either an existing data_lv or a newly created, so lets make
            # sure that the tags are correct
            if not data_lv:
                raise RuntimeError('no data logical volume found with: %s' % args.data)

            if not args.journal:
                raise RuntimeError('--journal is required when using --filestore')

            journal_lv = self.get_journal_lv(args.journal)
            if journal_lv:
                journal_device = journal_lv.lv_path
                journal_uuid = journal_lv.lv_uuid
            # allow a file
            elif os.path.isfile(args.journal):
                journal_uuid = ''
                journal_device = args.journal
            # otherwise assume this is a regular disk partition
            else:
                journal_uuid = self.get_journal_ptuuid(args.journal)
                journal_device = args.journal

            # the journal lv (when one is used) and the data lv carry the same
            # set of tags, differing only in ``ceph.type``
            tags = {
                'ceph.osd_fsid': fsid,
                'ceph.osd_id': osd_id,
                'ceph.cluster_fsid': cluster_fsid,
                'ceph.journal_device': journal_device,
                'ceph.journal_uuid': journal_uuid,
                'ceph.data_device': data_lv.lv_path,
                'ceph.data_uuid': data_lv.lv_uuid,
            }
            if journal_lv:
                # we can only set tags on an lv, the pv (if any) can't as we
                # aren't making it part of an lvm group (vg)
                journal_lv.set_tags(dict(tags, **{'ceph.type': 'journal'}))

            data_lv.set_tags(dict(tags, **{'ceph.type': 'data'}))

            prepare_filestore(
                data_lv.lv_path,
                journal_device,
                secrets,
                id_=osd_id,
                fsid=fsid,
            )
        elif args.bluestore:
            prepare_bluestore(args)

    def main(self):
        """Parse ``self.argv`` and run ``prepare``; print help when empty."""
        sub_command_help = dedent("""
        Prepare an OSD by assigning an ID and FSID, registering them with the
        cluster with an ID and FSID, formatting and mounting the volume, and
        finally by adding all the metadata to the logical volumes using LVM
        tags, so that it can later be discovered.

        Once the OSD is ready, an ad-hoc systemd unit will be enabled so that
        it can later get activated and the OSD daemon can get started.

        Most basic Usage looks like (journal will be collocated from the same volume group):

            ceph-volume lvm prepare --data {volume group name}


        Example calls for supported scenarios:

        Dedicated volume group for Journal(s)
        -------------------------------------

        Existing logical volume (lv) or device:

            ceph-volume lvm prepare --data {logical volume} --journal /path/to/{lv}|{device}

        Or:

            ceph-volume lvm prepare --data {data volume group} --journal {journal volume group}

        Collocated (same group) for data and journal
        --------------------------------------------

            ceph-volume lvm prepare --data {volume group}

        """)
        parser = prepare_parser(
            prog='ceph-volume lvm prepare',
            description=sub_command_help,
        )
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        args = parser.parse_args(self.argv)
        self.prepare(args)