from __future__ import print_function
from ceph_volume.util import disk, prepare, str_to_int
from ceph_volume.api import lvm
from . import validators
from .strategies import Strategy
from .strategies import MixedStrategy
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
from ceph_volume.util import templates, system
from ceph_volume.exceptions import SizeAllocationError
class SingleType(Strategy):
    """
    Support for all SSDs, or all HDDS
    """

    def __init__(self, args, data_devs):
        super(SingleType, self).__init__(args, data_devs)
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        # SingleType only deploys standalone OSDs
        return cls(args, devices)

    @staticmethod
    def type():
        return "bluestore.SingleType"

    def report_pretty(self, filtered_devices):
        """
        Print a human readable report of the computed OSD layout, including
        any devices that were filtered out of the run.
        """
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds,
        )
        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            # an OSD id may have been pre-assigned (reused) for this OSD
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario is
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(
            self.data_devs, osds_per_device=self.osds_per_device
        )

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        if self.osd_ids:
            self._validate_osd_ids()

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        osds = self.computed['osds']
        for device in self.data_devs:
            # split each device evenly into osds_per_device parts
            extents = lvm.sizing(device.lvm_size.b, parts=self.osds_per_device)
            for _i in range(self.osds_per_device):
                osd = {'data': {}, 'block.db': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = extents['sizes']
                osd['data']['parts'] = extents['parts']
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(
                    disk.Size(gb=extents['sizes'])
                )
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        ``lvm create``
        """
        # map each device path to the (not yet created) vg that will back it
        osd_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

        # create the vgs first, mapping them to the device path
        for osd in self.computed['osds']:
            vg = osd_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'])
            osd_vgs[osd['data']['path']] = {'vg': vg, 'parts': osd['data']['parts']}

        # create the lvs from the vgs captured in the beginning
        for create in osd_vgs.values():
            lvs = lvm.create_lvs(create['vg'], parts=create['parts'], name_prefix='osd-data')
            vg_name = create['vg'].name
            for lv in lvs:
                command = ['--bluestore', '--data']
                command.append('%s/%s' % (vg_name, lv.name))
                if self.args.dmcrypt:
                    command.append('--dmcrypt')
                if self.args.no_systemd:
                    command.append('--no-systemd')
                if self.args.crush_device_class:
                    command.extend(['--crush-device-class', self.args.crush_device_class])

                # reuse a pre-assigned OSD id when one was provided
                if self.osd_ids:
                    command.extend(['--osd-id', self.osd_ids.pop(0)])

                if self.args.prepare:
                    Prepare(command).main()
                else:
                    Create(command).main()
class MixedType(MixedStrategy):
    """
    Deploy OSDs when fast (solid state) devices are mixed with slower data
    devices: data goes on the slow devices, block.db (and optionally
    block.wal) LVs are carved out of the fast devices.
    """

    def __init__(self, args, data_devs, db_devs, wal_devs=None):
        # NOTE: default was a mutable ``[]``; use a None sentinel instead so
        # the default list is never shared between instances
        wal_devs = [] if wal_devs is None else wal_devs
        super(MixedType, self).__init__(args, data_devs, db_devs, wal_devs)
        self.block_db_size = self.get_block_db_size()
        self.block_wal_size = self.get_block_wal_size()
        self.common_vg = None
        self.common_wal_vg = None
        self.dbs_needed = len(self.data_devs) * self.osds_per_device
        self.wals_needed = self.dbs_needed
        self.use_large_block_db = self.use_large_block_wal = False
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        # rotational devices hold data, non-rotational hold block.db
        data_devs, db_devs = cls.split_devices_rotational(devices)
        return cls(args, data_devs, db_devs)

    @staticmethod
    def type():
        return "bluestore.MixedType"

    def get_block_db_size(self):
        """Return the configured block.db size, or Size(b=0) if unset."""
        if self.args.block_db_size:
            return disk.Size(b=self.args.block_db_size)
        return prepare.get_block_db_size(lv_format=False) or disk.Size(b=0)

    def get_block_wal_size(self):
        """Return the configured block.wal size, or Size(b=0) if unset."""
        if self.args.block_wal_size:
            return disk.Size(b=self.args.block_wal_size)
        return prepare.get_block_wal_size(lv_format=False) or disk.Size(b=0)

    def report_pretty(self, filtered_devices):
        """
        Print a human readable report of the computed OSD layout, including
        fast-device volume group usage and any filtered devices.
        """
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=len(self.data_devs) * self.osds_per_device
        )

        if self.db_or_journal_devs:
            vg_extents = lvm.sizing(self.total_available_db_space.b, parts=self.dbs_needed)
            db_size = str(disk.Size(b=(vg_extents['sizes'])))

            string += templates.ssd_volume_group.format(
                target='block.db',
                total_lv_size=str(self.total_available_db_space),
                total_lvs=vg_extents['parts'] * self.osds_per_device,
                block_lv_size=db_size,
                block_db_devices=', '.join([ssd.abspath for ssd in
                                            self.db_or_journal_devs]),
                lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))),
                total_osds=len(self.data_devs)
            )

        if self.wal_devs:
            wal_vg_extents = lvm.sizing(self.total_available_wal_space.b,
                                        parts=self.wals_needed)
            wal_size = str(disk.Size(b=(wal_vg_extents['sizes'])))
            string += templates.ssd_volume_group.format(
                target='block.wal',
                total_lv_size=str(self.total_available_wal_space),
                total_lvs=wal_vg_extents['parts'] * self.osds_per_device,
                block_lv_size=wal_size,
                block_db_devices=', '.join([dev.abspath for dev in
                                            self.wal_devs]),
                lv_size=self.block_wal_size or str(disk.Size(b=(wal_vg_extents['sizes']))),
                total_osds=len(self.data_devs)
            )

        string += templates.osd_component_titles
        for osd in self.computed['osds']:
            string += templates.osd_header
            # an OSD id may have been pre-assigned (reused) for this OSD
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'])
            if 'block.db' in osd:
                string += templates.osd_component.format(
                    _type='[block.db]',
                    path=osd['block.db']['path'],
                    size=osd['block.db']['human_readable_size'],
                    percent=osd['block.db']['percentage'])
            if 'block.wal' in osd:
                string += templates.osd_component.format(
                    _type='[block.wal]',
                    path=osd['block.wal']['path'],
                    size=osd['block.wal']['human_readable_size'],
                    percent=osd['block.wal']['percentage'])

        print(string)

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, populating
        ``self.computed`` with the result
        """
        osds = self.computed['osds']

        if self.data_devs and self.db_or_journal_devs:
            if not self.common_vg:
                # there isn't a common vg, so a new one must be created with all
                # the blank db devices
                self.computed['vg'] = {
                    'devices': ", ".join([ssd.abspath for ssd in self.blank_db_devs]),
                    'parts': self.dbs_needed,
                    'percentages': self.vg_extents['percentages'],
                    'sizes': self.block_db_size.b.as_int(),
                    'size': self.total_blank_db_dev_size.b.as_int(),
                    'human_readable_sizes': str(self.block_db_size),
                    'human_readable_size': str(self.total_available_db_space),
                }
                vg_name = 'vg/lv'
            else:
                vg_name = self.common_vg.name

        if self.data_devs and self.wal_devs:
            if not self.common_wal_vg:
                # there isn't a common vg, so a new one must be created with all
                # the blank wal devices
                self.computed['wal_vg'] = {
                    'devices': ", ".join([dev.abspath for dev in self.blank_wal_devs]),
                    'parts': self.wals_needed,
                    'percentages': self.wal_vg_extents['percentages'],
                    'sizes': self.block_wal_size.b.as_int(),
                    'size': self.total_blank_wal_dev_size.b.as_int(),
                    'human_readable_sizes': str(self.block_wal_size),
                    'human_readable_size': str(self.total_available_wal_space),
                }
                wal_vg_name = 'vg/lv'
            else:
                wal_vg_name = self.common_wal_vg.name

        for device in self.data_devs:
            for hdd in range(self.osds_per_device):
                osd = {'data': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = device.lvm_size.b / self.osds_per_device
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(
                    disk.Size(b=device.lvm_size.b) / self.osds_per_device
                )

                if self.db_or_journal_devs:
                    osd['block.db'] = {}
                    osd['block.db']['path'] = 'vg: %s' % vg_name
                    osd['block.db']['size'] = int(self.block_db_size.b)
                    osd['block.db']['human_readable_size'] = str(self.block_db_size)
                    osd['block.db']['percentage'] = self.vg_extents['percentages']

                if self.wal_devs:
                    osd['block.wal'] = {}
                    osd['block.wal']['path'] = 'vg: %s' % wal_vg_name
                    osd['block.wal']['size'] = int(self.block_wal_size.b)
                    osd['block.wal']['human_readable_size'] = str(self.block_wal_size)
                    osd['block.wal']['percentage'] = self.wal_vg_extents['percentages']

                # reuse a pre-assigned OSD id when one was provided
                if self.osd_ids:
                    osd['osd_id'] = self.osd_ids.pop(0)

                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        ``lvm create``
        """
        # map each data device path to the (not yet created) vg backing it
        data_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # rely on the vg it is mapped to)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        if self.data_devs and self.db_or_journal_devs:
            blank_db_dev_paths = [d.abspath for d in self.blank_db_devs]

            # no common vg is found, create one with all the blank SSDs
            if not self.common_vg:
                db_vg = lvm.create_vg(blank_db_dev_paths, name_prefix='ceph-block-dbs')
            elif self.common_vg and blank_db_dev_paths:
                # if a common vg exists then extend it with any blank ssds
                db_vg = lvm.extend_vg(self.common_vg, blank_db_dev_paths)
            else:
                # one common vg with nothing else to extend can be used directly,
                # either this is one device with one vg, or multiple devices with the
                # same vg
                db_vg = self.common_vg

            if self.use_large_block_db:
                # make the block.db lvs as large as possible
                vg_free_count = str_to_int(db_vg.vg_free_count)
                db_lv_extents = int(vg_free_count / self.dbs_needed)
            else:
                db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

        if self.data_devs and self.wal_devs:
            blank_wal_dev_paths = [d.abspath for d in self.blank_wal_devs]

            if not self.common_wal_vg:
                wal_vg = lvm.create_vg(blank_wal_dev_paths,
                                       name_prefix='ceph-block-wals')
            elif self.common_wal_vg and blank_wal_dev_paths:
                wal_vg = lvm.extend_vg(self.common_wal_vg, blank_wal_dev_paths)
            else:
                wal_vg = self.common_wal_vg

            if self.use_large_block_wal:
                # make the block.wal lvs as large as possible
                vg_free_count = str_to_int(wal_vg.vg_free_count)
                wal_lv_extents = int(vg_free_count / self.wals_needed)
            else:
                wal_lv_extents = wal_vg.sizing(size=self.block_wal_size.gb.as_int())['extents']

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(parts=self.osds_per_device)['extents']
            data_uuid = system.generate_uuid()
            data_lv = lvm.create_lv(
                'osd-block', data_uuid, vg=data_vg, extents=data_lv_extents)
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            ]
            if 'block.db' in osd:
                db_uuid = system.generate_uuid()
                db_lv = lvm.create_lv(
                    'osd-block-db', db_uuid, vg=db_vg, extents=db_lv_extents)
                command.extend([ '--block.db',
                                '{}/{}'.format(db_lv.vg_name, db_lv.name)])
            if 'block.wal' in osd:
                wal_uuid = system.generate_uuid()
                wal_lv = lvm.create_lv(
                    'osd-block-wal', wal_uuid, vg=wal_vg, extents=wal_lv_extents)
                command.extend(
                    ['--block.wal',
                     '{}/{}'.format(wal_lv.vg_name, wal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            # reuse a pre-assigned OSD id when one was computed
            if 'osd_id' in osd:
                command.extend(['--osd-id', osd['osd_id']])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()

    def validate(self):
        """
        HDDs represent data devices, and solid state devices are for block.db,
        make sure that the number of data devices would have enough LVs and
        those LVs would be large enough to accommodate a block.db
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.data_devs + self.db_or_journal_devs,
                                       osds_per_device=self.osds_per_device)
        validators.minimum_device_size(self.wal_devs,
                                       osds_per_device=self.osds_per_device,
                                       min_size=1)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        if self.data_devs and self.db_or_journal_devs:
            self._validate_db_devs()

        if self.data_devs and self.wal_devs:
            self._validate_wal_devs()

        if self.osd_ids:
            self._validate_osd_ids()

    def _validate_db_devs(self):
        # do not allow non-common VG to continue
        validators.has_common_vg(self.db_or_journal_devs)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg(self.db_or_journal_devs)

        # find how many block.db LVs are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(gb=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # blank devices are those that are not part of any VG yet
        vg_members = {d for d in self.db_or_journal_devs if d.is_lvm_member}
        self.blank_db_devs = set(self.db_or_journal_devs).difference(vg_members)
        self.total_blank_db_dev_size = disk.Size(b=0)
        for blank_db_dev in self.blank_db_devs:
            self.total_blank_db_dev_size += disk.Size(b=blank_db_dev.lvm_size.b)

        self.total_available_db_space = self.total_blank_db_dev_size + common_vg_size

        # If not configured, we default to 0, which is really "use as much as
        # possible" captured by the `else` condition
        if self.block_db_size.gb > 0:
            try:
                self.vg_extents = lvm.sizing(
                    self.total_available_db_space.b, size=self.block_db_size.b * self.osds_per_device
                )
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.db LV"
                raise RuntimeError(
                    msg % (self.total_available_db_space, self.osds_per_device, self.block_db_size)
                )
        else:
            self.vg_extents = lvm.sizing(
                self.total_available_db_space.b, parts=self.dbs_needed
            )

        # validate that number of block.db LVs possible are enough for number of
        # OSDs requested
        if self.total_available_db_space.b == 0:
            msg = "No space left in fast devices to create block.db LVs"
            raise RuntimeError(msg)

        # bluestore_block_db_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.db LVs
        # into the number of block.db LVs needed (i.e. "as large as possible")
        if self.block_db_size.b == 0:
            self.block_db_size = self.total_available_db_space / self.dbs_needed
            self.use_large_block_db = True

        total_dbs_possible = self.total_available_db_space / self.block_db_size

        if self.dbs_needed > total_dbs_possible:
            msg = "Not enough space (%s) to create %s x %s block.db LVs" % (
                self.total_available_db_space, self.dbs_needed, self.block_db_size,
            )
            raise RuntimeError(msg)

    def _validate_wal_devs(self):
        # do not allow non-common VG to continue
        validators.has_common_vg(self.wal_devs)

        # find the common VG to calculate how much is available
        self.common_wal_vg = self.get_common_vg(self.wal_devs)

        # find how many block.wal LVs are possible from the common VG
        if self.common_wal_vg:
            common_vg_size = disk.Size(gb=self.common_wal_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # blank devices are those that are not part of any VG yet
        vg_members = {d for d in self.wal_devs if d.is_lvm_member}
        self.blank_wal_devs = set(self.wal_devs).difference(vg_members)
        self.total_blank_wal_dev_size = disk.Size(b=0)
        for blank_wal_dev in self.blank_wal_devs:
            self.total_blank_wal_dev_size += disk.Size(b=blank_wal_dev.lvm_size.b)

        self.total_available_wal_space = self.total_blank_wal_dev_size + common_vg_size

        # If not configured, we default to 0, which is really "use as much as
        # possible" captured by the `else` condition
        if self.block_wal_size.gb > 0:
            try:
                # BUGFIX: this previously assigned ``self.vg_extents``,
                # clobbering the block.db sizing while ``compute()`` reads
                # ``self.wal_vg_extents`` -- store the wal result instead
                self.wal_vg_extents = lvm.sizing(
                    self.total_available_wal_space.b, size=self.block_wal_size.b * self.osds_per_device
                )
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.wal LV"
                raise RuntimeError(
                    msg % (self.total_available_wal_space,
                           self.osds_per_device, self.block_wal_size)
                )
        else:
            self.wal_vg_extents = lvm.sizing(
                self.total_available_wal_space.b, parts=self.wals_needed
            )

        # validate that number of block.wal LVs possible are enough for number of
        # OSDs requested
        if self.total_available_wal_space.b == 0:
            msg = "No space left in fast devices to create block.wal LVs"
            raise RuntimeError(msg)

        # bluestore_block_wal_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.wal LVs
        # into the number of block.wal LVs needed (i.e. "as large as possible")
        if self.block_wal_size.b == 0:
            self.block_wal_size = self.total_available_wal_space / self.wals_needed
            self.use_large_block_wal = True

        total_wals_possible = self.total_available_wal_space / self.block_wal_size

        if self.wals_needed > total_wals_possible:
            msg = "Not enough space (%s) to create %s x %s block.wal LVs" % (
                self.total_available_wal_space, self.wals_needed,
                self.block_wal_size,
            )
            raise RuntimeError(msg)