2 from collections
import namedtuple
5 from textwrap
import dedent
6 from ceph_volume
import terminal
, decorators
7 from ceph_volume
.util
import disk
, prompt_bool
, arg_validators
, templates
8 from ceph_volume
.util
import prepare
10 from .create
import Create
11 from .prepare
import Prepare
# terminal.MultiLogger presumably echoes messages to the terminal as well as
# the log (see ceph_volume.terminal) -- used for user-facing output. TODO confirm.
mlogger = terminal.MultiLogger(__name__)
# standard module-level logger for non-interactive/internal messages
logger = logging.getLogger(__name__)
17 device_list_template
= """
18 * {path: <25} {size: <10} {state}"""
21 def device_formatter(devices
):
23 for path
, details
in devices
:
24 lines
.append(device_list_template
.format(
25 path
=path
, size
=details
['human_readable_size'],
26 state
='solid' if details
['rotational'] == '0' else 'rotational')
def ensure_disjoint_device_lists(data, db=None, wal=None, journal=None):
    """
    Verify that no device appears in more than one of the passed lists.

    Only the pairs checked by the original logic are validated: ``data``
    against each fast-device list, and ``db`` against ``wal``.

    :param data: list of data device paths
    :param db: list of db device paths, defaults to no devices
    :param wal: list of wal device paths, defaults to no devices
    :param journal: list of journal device paths, defaults to no devices
    :raises Exception: if any two checked lists share a device
    """
    # None sentinels instead of mutable default arguments ([]), which are
    # shared across calls and a classic Python pitfall (bugbear B006).
    db = db if db is not None else []
    wal = wal if wal is not None else []
    journal = journal if journal is not None else []
    # check that all device lists are disjoint with each other
    if not all([set(data).isdisjoint(set(db)),
                set(data).isdisjoint(set(wal)),
                set(data).isdisjoint(set(journal)),
                set(db).isdisjoint(set(wal))]):
        raise Exception('Device lists are not disjoint')
41 def separate_devices_from_lvs(devices
):
45 phys
.append(d
) if d
.is_device
else lvm
.append(d
)
49 def get_physical_osds(devices
, args
):
51 Goes through passed physical devices and assigns OSDs
53 data_slots
= args
.osds_per_device
55 data_slots
= max(args
.data_slots
, args
.osds_per_device
)
56 rel_data_size
= 1.0 / data_slots
57 mlogger
.debug('relative data size: {}'.format(rel_data_size
))
61 dev_size
= dev
.vg_size
[0]
62 abs_size
= disk
.Size(b
=int(dev_size
* rel_data_size
))
63 free_size
= dev
.vg_free
[0]
64 for _
in range(args
.osds_per_device
):
65 if abs_size
> free_size
:
67 free_size
-= abs_size
.b
70 osd_id
= args
.osd_ids
.pop()
71 ret
.append(Batch
.OSD(dev
.path
,
76 'dmcrypt' if args
.dmcrypt
else None))
80 def get_lvm_osds(lvs
, args
):
82 Goes through passed LVs and assigns planned osds
90 osd_id
= args
.osd_ids
.pop()
91 osd
= Batch
.OSD("{}/{}".format(lv
.vg_name
, lv
.lv_name
),
93 disk
.Size(b
=int(lv
.lvs
[0].lv_size
)),
96 'dmcrypt' if args
.dmcrypt
else None)
101 def get_physical_fast_allocs(devices
, type_
, fast_slots_per_device
, new_osds
, args
):
102 requested_slots
= getattr(args
, '{}_slots'.format(type_
))
103 if not requested_slots
or requested_slots
< fast_slots_per_device
:
105 mlogger
.info('{}_slots argument is too small, ignoring'.format(type_
))
106 requested_slots
= fast_slots_per_device
108 requested_size
= getattr(args
, '{}_size'.format(type_
), 0)
109 if not requested_size
or requested_size
== 0:
110 # no size argument was specified, check ceph.conf
111 get_size_fct
= getattr(prepare
, 'get_{}_size'.format(type_
))
112 requested_size
= get_size_fct(lv_format
=False)
116 if not dev
.available_lvm
:
118 # any LV present is considered a taken slot
119 occupied_slots
= len(dev
.lvs
)
120 # this only looks at the first vg on device, unsure if there is a better
122 dev_size
= dev
.vg_size
[0]
123 abs_size
= disk
.Size(b
=int(dev_size
/ requested_slots
))
124 free_size
= dev
.vg_free
[0]
125 relative_size
= int(abs_size
) / dev_size
127 if requested_size
<= abs_size
:
128 abs_size
= requested_size
129 relative_size
= int(abs_size
) / dev_size
132 '{} was requested for {}, but only {} can be fulfilled'.format(
134 '{}_size'.format(type_
),
138 while abs_size
<= free_size
and len(ret
) < new_osds
and occupied_slots
< fast_slots_per_device
:
139 free_size
-= abs_size
.b
141 ret
.append((dev
.path
, relative_size
, abs_size
, requested_slots
))
145 def get_lvm_fast_allocs(lvs
):
146 return [("{}/{}".format(d
.vg_name
, d
.lv_name
), 100.0,
147 disk
.Size(b
=int(d
.lvs
[0].lv_size
)), 1) for d
in lvs
if not
153 help = 'Automatically size devices for multi-OSD provisioning with minimal interaction'
156 Automatically size devices ready for OSD provisioning based on default strategies.
160 ceph-volume lvm batch [DEVICE...]
162 Devices can be physical block devices or LVs.
163 Optional reporting on possible outcomes is enabled with --report
165 ceph-volume lvm batch --report [DEVICE...]
168 def __init__(self
, argv
):
169 parser
= argparse
.ArgumentParser(
170 prog
='ceph-volume lvm batch',
171 formatter_class
=argparse
.RawDescriptionHelpFormatter
,
172 description
=self
._help
,
179 type=arg_validators
.ValidBatchDevice(),
181 help='Devices to provision OSDs',
186 type=arg_validators
.ValidBatchDevice(),
188 help='Devices to provision OSDs db volumes',
193 type=arg_validators
.ValidBatchDevice(),
195 help='Devices to provision OSDs wal volumes',
200 type=arg_validators
.ValidBatchDevice(),
202 help='Devices to provision OSDs journal volumes',
207 help=('deploy multi-device OSDs if rotational and non-rotational drives '
208 'are passed in DEVICES'),
213 action
='store_false',
215 help=('deploy standalone OSDs if rotational and non-rotational drives '
216 'are passed in DEVICES'),
221 help='bluestore objectstore (default)',
226 help='filestore objectstore',
231 help='Only report on OSD that would be created and exit',
236 help='Avoid prompting for confirmation when provisioning',
240 help='output format, defaults to "pretty"',
242 choices
=['json', 'json-pretty', 'pretty'],
247 help='Enable device encryption via dm-crypt',
250 '--crush-device-class',
251 dest
='crush_device_class',
252 help='Crush device class to assign this OSD to',
258 help='Skip creating and enabling systemd units and starting OSD services',
264 help='Provision more than 1 (the default) OSD per device',
269 help=('Provision more than 1 (the default) OSD slot per device'
270 ' if more slots then osds-per-device are specified, slots'
271 'will stay unoccupied'),
275 type=disk
.Size
.parse
,
276 help='Set (or override) the "bluestore_block_db_size" value, in bytes'
281 help='Provision slots on DB device, can remain unoccupied'
285 type=disk
.Size
.parse
,
286 help='Set (or override) the "bluestore_block_wal_size" value, in bytes'
291 help='Provision slots on WAL device, can remain unoccupied'
293 def journal_size_in_mb_hack(size
):
294 # TODO give user time to adjust, then remove this
295 if size
and size
[-1].isdigit():
296 mlogger
.warning('DEPRECATION NOTICE')
297 mlogger
.warning('--journal-size as integer is parsed as megabytes')
298 mlogger
.warning('A future release will parse integers as bytes')
299 mlogger
.warning('Add a "M" to explicitly pass a megabyte size')
301 return disk
.Size
.parse(size
)
304 type=journal_size_in_mb_hack
,
305 help='Override the "osd_journal_size" value, in megabytes'
310 help='Provision slots on journal device, can remain unoccupied'
315 help='Only prepare all OSDs, do not activate',
321 help='Reuse existing OSD ids',
323 self
.args
= parser
.parse_args(argv
)
325 for dev_list
in ['', 'db_', 'wal_', 'journal_']:
326 setattr(self
, '{}usable'.format(dev_list
), [])
328 def report(self
, plan
):
329 report
= self
._create
_report
(plan
)
332 def _create_report(self
, plan
):
333 if self
.args
.format
== 'pretty':
335 report
+= templates
.total_osds
.format(total_osds
=len(plan
))
337 report
+= templates
.osd_component_titles
339 report
+= templates
.osd_header
340 report
+= osd
.report()
345 json_report
.append(osd
.report_json())
346 if self
.args
.format
== 'json':
347 return json
.dumps(json_report
)
348 elif self
.args
.format
== 'json-pretty':
349 return json
.dumps(json_report
, indent
=4,
352 def _check_slot_args(self
):
354 checking if -slots args are consistent with other arguments
356 if self
.args
.data_slots
and self
.args
.osds_per_device
:
357 if self
.args
.data_slots
< self
.args
.osds_per_device
:
358 raise ValueError('data_slots is smaller then osds_per_device')
360 def _sort_rotational_disks(self
):
362 Helper for legacy auto behaviour.
363 Sorts drives into rotating and non-rotating, the latter being used for
366 mlogger
.warning('DEPRECATION NOTICE')
367 mlogger
.warning('You are using the legacy automatic disk sorting behavior')
368 mlogger
.warning('The Pacific release will change the default to --no-auto')
371 for d
in self
.args
.devices
:
372 rotating
.append(d
) if d
.rotational
else ssd
.append(d
)
373 if ssd
and not rotating
:
374 # no need for additional sorting, we'll only deploy standalone on ssds
376 self
.args
.devices
= rotating
377 if self
.args
.filestore
:
378 self
.args
.journal_devices
= ssd
380 self
.args
.db_devices
= ssd
382 @decorators.needs_root
384 if not self
.args
.devices
:
385 return self
.parser
.print_help()
387 # Default to bluestore here since defaulting it in add_argument may
388 # cause both to be True
389 if not self
.args
.bluestore
and not self
.args
.filestore
:
390 self
.args
.bluestore
= True
392 if (self
.args
.auto
and not self
.args
.db_devices
and not
393 self
.args
.wal_devices
and not self
.args
.journal_devices
):
394 self
._sort
_rotational
_disks
()
396 self
._check
_slot
_args
()
398 ensure_disjoint_device_lists(self
.args
.devices
,
399 self
.args
.db_devices
,
400 self
.args
.wal_devices
,
401 self
.args
.journal_devices
)
403 plan
= self
.get_plan(self
.args
)
409 if not self
.args
.yes
:
411 terminal
.info('The above OSDs would be created if the operation continues')
412 if not prompt_bool('do you want to proceed? (yes/no)'):
413 terminal
.error('aborting OSD provisioning')
418 def _execute(self
, plan
):
419 defaults
= common
.get_default_args()
424 'crush_device_class',
427 defaults
.update({arg
: getattr(self
.args
, arg
) for arg
in global_args
})
429 args
= osd
.get_args(defaults
)
430 if self
.args
.prepare
:
432 p
.safe_prepare(argparse
.Namespace(**args
))
435 c
.create(argparse
.Namespace(**args
))
438 def get_plan(self
, args
):
440 plan
= self
.get_deployment_layout(args
, args
.devices
, args
.db_devices
,
443 plan
= self
.get_deployment_layout(args
, args
.devices
, args
.journal_devices
)
446 def get_deployment_layout(self
, args
, devices
, fast_devices
=[],
447 very_fast_devices
=[]):
449 The methods here are mostly just organization, error reporting and
450 setting up of (default) args. The heavy lifting code for the deployment
451 layout can be found in the static get_*_osds and get_*_fast_allocs
455 phys_devs
, lvm_devs
= separate_devices_from_lvs(devices
)
456 mlogger
.debug(('passed data devices: {} physical,'
457 ' {} LVM').format(len(phys_devs
), len(lvm_devs
)))
459 plan
.extend(get_physical_osds(phys_devs
, args
))
461 plan
.extend(get_lvm_osds(lvm_devs
, args
))
465 mlogger
.info('All data devices are unavailable')
467 requested_osds
= args
.osds_per_device
* len(phys_devs
) + len(lvm_devs
)
469 fast_type
= 'block_db' if args
.bluestore
else 'journal'
470 fast_allocations
= self
.fast_allocations(fast_devices
,
474 if fast_devices
and not fast_allocations
:
475 mlogger
.info('{} fast devices were passed, but none are available'.format(len(fast_devices
)))
477 if fast_devices
and not len(fast_allocations
) == num_osds
:
478 mlogger
.error('{} fast allocations != {} num_osds'.format(
479 len(fast_allocations
), num_osds
))
482 very_fast_allocations
= self
.fast_allocations(very_fast_devices
,
486 if very_fast_devices
and not very_fast_allocations
:
487 mlogger
.info('{} very fast devices were passed, but none are available'.format(len(very_fast_devices
)))
489 if very_fast_devices
and not len(very_fast_allocations
) == num_osds
:
490 mlogger
.error('{} very fast allocations != {} num_osds'.format(
491 len(very_fast_allocations
), num_osds
))
496 osd
.add_fast_device(*fast_allocations
.pop(),
498 if very_fast_devices
and args
.bluestore
:
499 osd
.add_very_fast_device(*very_fast_allocations
.pop())
502 def fast_allocations(self
, devices
, requested_osds
, new_osds
, type_
):
506 phys_devs
, lvm_devs
= separate_devices_from_lvs(devices
)
507 mlogger
.debug(('passed {} devices: {} physical,'
508 ' {} LVM').format(type_
, len(phys_devs
), len(lvm_devs
)))
510 ret
.extend(get_lvm_fast_allocs(lvm_devs
))
512 # fill up uneven distributions across fast devices: 5 osds and 2 fast
513 # devices? create 3 slots on each device rather then deploying
515 if (requested_osds
- len(lvm_devs
)) % len(phys_devs
):
516 fast_slots_per_device
= int((requested_osds
- len(lvm_devs
)) / len(phys_devs
)) + 1
518 fast_slots_per_device
= int((requested_osds
- len(lvm_devs
)) / len(phys_devs
))
521 ret
.extend(get_physical_fast_allocs(phys_devs
,
523 fast_slots_per_device
,
530 This class simply stores info about to-be-deployed OSDs and provides an
531 easy way to retrieve the necessary create arguments.
533 VolSpec
= namedtuple('VolSpec',
548 self
.data
= self
.VolSpec(path
=data_path
,
554 self
.very_fast
= None
555 self
.encryption
= encryption
557 def add_fast_device(self
, path
, rel_size
, abs_size
, slots
, type_
):
558 self
.fast
= self
.VolSpec(path
=path
,
564 def add_very_fast_device(self
, path
, rel_size
, abs_size
, slots
):
565 self
.very_fast
= self
.VolSpec(path
=path
,
571 def _get_osd_plan(self
):
573 'data': self
.data
.path
,
574 'data_size': self
.data
.abs_size
,
575 'encryption': self
.encryption
,
578 type_
= self
.fast
.type_
.replace('.', '_')
581 type_
: self
.fast
.path
,
582 '{}_size'.format(type_
): self
.fast
.abs_size
,
587 'block_wal': self
.very_fast
.path
,
588 'block_wal_size': self
.very_fast
.abs_size
,
591 plan
.update({'osd_id': self
.id_
})
594 def get_args(self
, defaults
):
595 my_defaults
= defaults
.copy()
596 my_defaults
.update(self
._get
_osd
_plan
())
602 report
+= templates
.osd_reused_id
.format(
605 report
+= templates
.osd_encryption
.format(
607 report
+= templates
.osd_component
.format(
608 _type
=self
.data
.type_
,
610 size
=self
.data
.abs_size
,
611 percent
=self
.data
.rel_size
)
613 report
+= templates
.osd_component
.format(
614 _type
=self
.fast
.type_
,
616 size
=self
.fast
.abs_size
,
617 percent
=self
.fast
.rel_size
)
619 report
+= templates
.osd_component
.format(
620 _type
=self
.very_fast
.type_
,
621 path
=self
.very_fast
.path
,
622 size
=self
.very_fast
.abs_size
,
623 percent
=self
.very_fast
.rel_size
)
626 def report_json(self
):
627 # cast all values to string so that the report can be dumped in to
629 return {k
: str(v
) for k
, v
in self
._get
_osd
_plan
().items()}