]> git.proxmox.com Git - ceph.git/blob - ceph/src/ceph-volume/ceph_volume/util/prepare.py
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / ceph-volume / ceph_volume / util / prepare.py
1 """
2 These utilities for prepare provide all the pieces needed to prepare a device
3 but also a compounded ("single call") helper to do them in order. Some plugins
4 may want to change some part of the process, while others might want to consume
5 the single-call helper
6 """
7 import os
8 import logging
9 import json
10 from ceph_volume import process, conf, __release__, terminal
11 from ceph_volume.util import system, constants, str_to_int, disk
12
13 logger = logging.getLogger(__name__)
14 mlogger = terminal.MultiLogger(__name__)
15
16
def create_key():
    """
    Generate and return a fresh auth key using ``ceph-authtool
    --gen-print-key``.

    :raises RuntimeError: if ``ceph-authtool`` exits non-zero
    """
    out, _, returncode = process.call(
        ['ceph-authtool', '--gen-print-key'],
        show_command=True)
    if returncode != 0:
        raise RuntimeError('Unable to generate a new auth key')
    # process.call returns stdout as a list of lines; collapse to one string
    return ' '.join(out).strip()
24
25
def write_keyring(osd_id, secret, keyring_name='keyring', name=None):
    """
    Write a keyring file for an OSD with the ``ceph-authtool`` utility,
    placing it at the conventional OSD path and chowning it to the ceph user.

    :param osd_id: The ID for the OSD to be used
    :param secret: The key to be added as (as a string)
    :param name: Defaults to 'osd.{ID}' but can be used to add other client
                 names, specifically for 'lockbox' type of keys
    :param keyring_name: Alternative keyring name, for supporting other
                         types of keys like for lockbox
    """
    keyring_path = '/var/lib/ceph/osd/%s-%s/%s' % (conf.cluster, osd_id, keyring_name)
    # fall back to the conventional entity name when none was given
    entity = name if name else 'osd.%s' % str(osd_id)
    process.run(
        [
            'ceph-authtool', keyring_path,
            '--create-keyring',
            '--name', entity,
            '--add-key', secret
        ])
    system.chown(keyring_path)
49
50
def get_journal_size(lv_format=True):
    """
    Fetch the journal size configured in ceph.conf (``osd_journal_size``,
    in megabytes) and return it either as a ``Size`` object or as a string
    ready for ``lv_create``.

    :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size
                      would result in '5G', otherwise it will return a
                      ``Size`` object.
    :raises RuntimeError: when the configured size is smaller than 2GB
    """
    configured = conf.ceph.get_safe('osd', 'osd_journal_size', '5120')
    logger.debug('osd_journal_size set to %s' % configured)
    size = disk.Size(mb=str_to_int(configured))

    # journals smaller than 2GB are refused outright
    if size < disk.Size(gb=2):
        mlogger.error('Refusing to continue with configured size for journal')
        raise RuntimeError('journal sizes must be larger than 2GB, detected: %s' % size)
    if lv_format:
        return '%sG' % size.gb.as_int()
    return size
71
72
def get_block_db_size(lv_format=True):
    """
    Helper to retrieve the size (defined in bytes in ceph.conf) to create
    the block.db logical volume, it "translates" the string into a float value,
    then converts that into gigabytes, and finally (optionally) it formats it
    back as a string so that it can be used for creating the LV.

    :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size
                      would result in '5G', otherwise it will return a
                      ``Size`` object.
    :raises RuntimeError: when the configured size is smaller than 2GB

    .. note: Configuration values are in bytes, unlike journals which
             are defined in megabytes
    """
    conf_db_size = None
    try:
        conf_db_size = conf.ceph.get_safe('osd', 'bluestore_block_db_size', None)
    except RuntimeError:
        # a missing or unreadable ceph.conf is not fatal here, fall through
        # to the "no size configured" path below
        logger.exception("failed to load ceph configuration, will use defaults")

    if not conf_db_size:
        logger.debug(
            'block.db has no size configuration, will fallback to using as much as possible'
        )
        return None
    logger.debug('bluestore_block_db_size set to %s' % conf_db_size)
    # the configuration value is in bytes
    db_size = disk.Size(b=str_to_int(conf_db_size))

    # refuse undersized block.db volumes
    if db_size < disk.Size(gb=2):
        mlogger.error('Refusing to continue with configured size for block.db')
        raise RuntimeError('block.db sizes must be larger than 2GB, detected: %s' % db_size)
    if lv_format:
        return '%sG' % db_size.gb.as_int()
    return db_size
106
def get_block_wal_size(lv_format=True):
    """
    Helper to retrieve the size (defined in bytes in ceph.conf) to create
    the block.wal logical volume, it "translates" the string into a float value,
    then converts that into gigabytes, and finally (optionally) it formats it
    back as a string so that it can be used for creating the LV.

    :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size
                      would result in '5G', otherwise it will return a
                      ``Size`` object.
    :raises RuntimeError: when the configured size is smaller than 2GB

    .. note: Configuration values are in bytes, unlike journals which
             are defined in megabytes
    """
    conf_wal_size = None
    try:
        conf_wal_size = conf.ceph.get_safe('osd', 'bluestore_block_wal_size', None)
    except RuntimeError:
        # a missing or unreadable ceph.conf is not fatal here, fall through
        # to the "no size configured" path below
        logger.exception("failed to load ceph configuration, will use defaults")

    if not conf_wal_size:
        logger.debug(
            'block.wal has no size configuration, will fallback to using as much as possible'
        )
        return None
    logger.debug('bluestore_block_wal_size set to %s' % conf_wal_size)
    # the configuration value is in bytes
    wal_size = disk.Size(b=str_to_int(conf_wal_size))

    # refuse undersized block.wal volumes
    if wal_size < disk.Size(gb=2):
        mlogger.error('Refusing to continue with configured size for block.wal')
        raise RuntimeError('block.wal sizes must be larger than 2GB, detected: %s' % wal_size)
    if lv_format:
        return '%sG' % wal_size.gb.as_int()
    return wal_size
140
141
def create_id(fsid, json_secrets, osd_id=None):
    """
    Ask the monitor for a (possibly reused) OSD id via ``ceph osd new``.

    :param fsid: The osd fsid to create, always required
    :param json_secrets: a json-ready object with whatever secrets are wanted
                         to be passed to the monitor
    :param osd_id: Reuse an existing ID from an OSD that's been destroyed, if the
                   id does not exist in the cluster a new ID will be created
    :raises RuntimeError: when the requested id cannot be reused, or when the
                          monitor command fails
    """
    keyring_path = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
    command = [
        'ceph',
        '--cluster', conf.cluster,
        '--name', 'client.bootstrap-osd',
        '--keyring', keyring_path,
        '-i', '-',
        'osd', 'new', fsid
    ]
    if osd_id is not None:
        # only pass the id along when the cluster reports it reusable
        if not osd_id_available(osd_id):
            raise RuntimeError("The osd ID {} is already in use or does not exist.".format(osd_id))
        command.append(osd_id)
    stdout, stderr, returncode = process.call(
        command,
        stdin=json_secrets,
        show_command=True
    )
    if returncode != 0:
        raise RuntimeError('Unable to create a new OSD id')
    return ' '.join(stdout).strip()
172
173
def osd_id_available(osd_id):
    """
    Report whether ``osd_id`` can be reused: it must exist in the cluster's
    osd tree and have a 'destroyed' status. Returns True if it is, False if
    it isn't.

    :param osd_id: The osd ID to check
    :raises RuntimeError: when the ``ceph osd tree`` call fails
    """
    if osd_id is None:
        return False
    keyring_path = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
    stdout, stderr, returncode = process.call(
        [
            'ceph',
            '--cluster', conf.cluster,
            '--name', 'client.bootstrap-osd',
            '--keyring', keyring_path,
            'osd',
            'tree',
            '-f', 'json',
        ],
        show_command=True
    )
    if returncode != 0:
        raise RuntimeError('Unable check if OSD id exists: %s' % osd_id)

    tree = json.loads(''.join(stdout).strip())
    # ids in the tree are ints; compare as strings to accept either form
    matches = [node for node in tree['nodes'] if str(node['id']) == str(osd_id)]
    return bool(matches) and matches[0].get('status') == "destroyed"
205
206
def mount_tmpfs(path):
    """Mount a tmpfs filesystem at ``path`` and restore its SELinux context."""
    command = ['mount', '-t', 'tmpfs', 'tmpfs', path]
    process.run(command)

    # Restore SELinux context
    system.set_context(path)
217
218
def create_osd_path(osd_id, tmpfs=False):
    """Create the OSD data directory, optionally mounting tmpfs on top of it."""
    path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    system.mkdir_p(path)
    if tmpfs:
        mount_tmpfs(path)
224
225
def format_device(device):
    """
    Create an XFS filesystem on ``device`` (xfs is the only supported type),
    honoring any ``osd_mkfs_options_xfs`` from ceph.conf and always forcing
    the format with ``-f``.
    """
    # get the mkfs options if any for xfs,
    # fallback to the default options defined in constants.mkfs
    flags = conf.ceph.get_list(
        'osd',
        'osd_mkfs_options_xfs',
        default=constants.mkfs.get('xfs'),
        split=' ',
    )

    # always force
    if '-f' not in flags:
        flags.insert(0, '-f')

    process.run(['mkfs', '-t', 'xfs'] + flags + [device])
246
247
248 def _normalize_mount_flags(flags, extras=None):
249 """
250 Mount flag options have to be a single string, separated by a comma. If the
251 flags are separated by spaces, or with commas and spaces in ceph.conf, the
252 mount options will be passed incorrectly.
253
254 This will help when parsing ceph.conf values return something like::
255
256 ["rw,", "exec,"]
257
258 Or::
259
260 [" rw ,", "exec"]
261
262 :param flags: A list of flags, or a single string of mount flags
263 :param extras: Extra set of mount flags, useful when custom devices like VDO need
264 ad-hoc mount configurations
265 """
266 # Instead of using set(), we append to this new list here, because set()
267 # will create an arbitrary order on the items that is made worst when
268 # testing with tools like tox that includes a randomizer seed. By
269 # controlling the order, it is easier to correctly assert the expectation
270 unique_flags = []
271 if isinstance(flags, list):
272 if extras:
273 flags.extend(extras)
274
275 # ensure that spaces and commas are removed so that they can join
276 # correctly, remove duplicates
277 for f in flags:
278 if f and f not in unique_flags:
279 unique_flags.append(f.strip().strip(','))
280 return ','.join(unique_flags)
281
282 # split them, clean them, and join them back again
283 flags = flags.strip().split(' ')
284 if extras:
285 flags.extend(extras)
286
287 # remove possible duplicates
288 for f in flags:
289 if f and f not in unique_flags:
290 unique_flags.append(f.strip().strip(','))
291 flags = ','.join(unique_flags)
292 # Before returning, split them again, since strings can be mashed up
293 # together, preventing removal of duplicate entries
294 return ','.join(set(flags.split(',')))
295
296
def mount_osd(device, osd_id, **kw):
    """
    Mount ``device`` as xfs onto the OSD directory for ``osd_id``, using the
    mount flags from ceph.conf (``osd_mount_options_xfs``) and restoring the
    SELinux context afterwards. A ``is_vdo='1'`` keyword adds 'discard'.
    """
    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    # VDO devices need the discard option appended to the flags
    extras = ['discard'] if kw.get('is_vdo', '0') == '1' else []
    flags = conf.ceph.get_list(
        'osd',
        'osd_mount_options_xfs',
        default=constants.mount.get('xfs'),
        split=' ',
    )
    command = [
        'mount', '-t', 'xfs', '-o',
        _normalize_mount_flags(flags, extras=extras),
        device,
        destination,
    ]
    process.run(command)

    # Restore SELinux context
    system.set_context(destination)
319
320
def _link_device(device, device_type, osd_id):
    """
    Symlink ``device`` (an absolute path) into the OSD directory under the
    destination name ``device_type``, like 'journal' or 'block', chowning
    the source first.
    """
    destination = '/var/lib/ceph/osd/%s-%s/%s' % (
        conf.cluster,
        osd_id,
        device_type
    )
    system.chown(device)
    process.run(['ln', '-s', device, destination])
336
337
def link_journal(journal_device, osd_id):
    """Symlink the journal device into the OSD directory as 'journal'."""
    _link_device(journal_device, device_type='journal', osd_id=osd_id)
340
341
def link_block(block_device, osd_id):
    """Symlink the block device into the OSD directory as 'block'."""
    _link_device(block_device, device_type='block', osd_id=osd_id)
344
345
def link_wal(wal_device, osd_id):
    """Symlink the WAL device into the OSD directory as 'block.wal'."""
    _link_device(wal_device, device_type='block.wal', osd_id=osd_id)
348
349
def link_db(db_device, osd_id):
    """Symlink the DB device into the OSD directory as 'block.db'."""
    _link_device(db_device, device_type='block.db', osd_id=osd_id)
352
353
def get_monmap(osd_id):
    """
    Retrieve the monmap into the OSD directory, so it can later be used to
    tell the monitor(s) about the new OSD. A call will look like::

        ceph --cluster ceph --name client.bootstrap-osd \
             --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring \
             mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
    """
    osd_path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    keyring_path = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
    destination = os.path.join(osd_path, 'activate.monmap')

    command = [
        'ceph',
        '--cluster', conf.cluster,
        '--name', 'client.bootstrap-osd',
        '--keyring', keyring_path,
        'mon', 'getmap', '-o', destination,
    ]
    process.run(command)
374
375
def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False):
    """
    Run ``ceph-osd --mkfs`` for a bluestore OSD. A normal call looks like::

        ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
            --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
            --osd-data /var/lib/ceph/osd/ceph-0 \
            --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
            --keyring /var/lib/ceph/osd/ceph-0/keyring \
            --setuser ceph --setgroup ceph

    When ``keyring`` is passed it is fed to ceph-osd through stdin (with
    ``--keyfile -``); ``wal`` and ``db`` are optional device paths for the
    block.wal and block.db volumes.
    """
    osd_path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    monmap_path = os.path.join(osd_path, 'activate.monmap')

    system.chown(osd_path)

    command = [
        'ceph-osd',
        '--cluster', conf.cluster,
        '--osd-objectstore', 'bluestore',
        '--mkfs',
        '-i', osd_id,
        '--monmap', monmap_path,
    ]

    if keyring is not None:
        # the keyring contents go through stdin
        command += ['--keyfile', '-']

    if wal:
        command += ['--bluestore-block-wal-path', wal]
        system.chown(wal)

    if db:
        command += ['--bluestore-block-db-path', db]
        system.chown(db)

    command += [
        '--osd-data', osd_path,
        '--osd-uuid', fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph',
    ]

    _, _, returncode = process.call(command, stdin=keyring, show_command=True)
    if returncode != 0:
        raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
431
432
def osd_mkfs_filestore(osd_id, fsid, keyring):
    """
    Run ``ceph-osd --mkfs`` for a filestore OSD. A normal call looks like::

        ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
            --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
            --osd-data /var/lib/ceph/osd/ceph-0 \
            --osd-journal /var/lib/ceph/osd/ceph-0/journal \
            --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
            --keyring /var/lib/ceph/osd/ceph-0/keyring \
            --setuser ceph --setgroup ceph

    """
    osd_path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    monmap_path = os.path.join(osd_path, 'activate.monmap')
    journal_path = os.path.join(osd_path, 'journal')

    # ensure the ceph user owns the journal and data dir before mkfs runs
    system.chown(journal_path)
    system.chown(osd_path)

    command = [
        'ceph-osd',
        '--cluster', conf.cluster,
        '--osd-objectstore', 'filestore',
        '--mkfs',
        '-i', osd_id,
        '--monmap', monmap_path,
    ]

    if __release__ != 'luminous':
        # goes through stdin
        command += ['--keyfile', '-']

    command += [
        '--osd-data', osd_path,
        '--osd-journal', journal_path,
        '--osd-uuid', fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph',
    ]

    _, _, returncode = process.call(
        command, stdin=keyring, terminal_verbose=True, show_command=True
    )
    if returncode != 0:
        raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))