1 from __future__
import print_function
2 from ceph_volume
.util
import disk
, prepare
, str_to_int
3 from ceph_volume
.api
import lvm
4 from . import validators
5 from .strategies
import Strategy
6 from .strategies
import MixedStrategy
7 from ceph_volume
.devices
.lvm
.create
import Create
8 from ceph_volume
.devices
.lvm
.prepare
import Prepare
9 from ceph_volume
.util
import templates
, system
10 from ceph_volume
.exceptions
import SizeAllocationError
class SingleType(Strategy):
    """
    Support for all SSDs, or all HDDS
    """

    def __init__(self, args, data_devs):
        super(SingleType, self).__init__(args, data_devs)
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        # SingleType only deploys standalone OSDs, so every device is a data
        # device; there is no rotational/solid-state split to perform here.
        return cls(args, devices)

    @staticmethod
    def type():
        return "bluestore.SingleType"

    def report_pretty(self, filtered_devices):
        """
        Build and print a human-readable report of the computed OSD layout,
        including any devices that were filtered out of the deployment.
        """
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds,
        )

        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            # an OSD that re-uses an existing id gets an extra line noting it
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario is
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(
            self.data_devs, osds_per_device=self.osds_per_device
        )

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        if self.osd_ids:
            self._validate_osd_ids()

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        osds = self.computed['osds']
        for device in self.data_devs:
            # each device is evenly split into osds_per_device chunks
            extents = lvm.sizing(device.lvm_size.b, parts=self.osds_per_device)
            for _ in range(self.osds_per_device):
                osd = {'data': {}, 'block.db': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = extents['sizes']
                osd['data']['parts'] = extents['parts']
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(disk.Size(gb=extents['sizes']))
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        the ceph-volume lvm api.
        """
        # one (path -> vg info) entry per data device; start them all empty
        osd_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

        # create the vgs first, mapping them to the device path
        for osd in self.computed['osds']:
            vg = osd_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'])
            osd_vgs[osd['data']['path']] = {'vg': vg, 'parts': osd['data']['parts']}

        # create the lvs from the vgs captured in the beginning
        for create in osd_vgs.values():
            lvs = lvm.create_lvs(create['vg'], parts=create['parts'], name_prefix='osd-data')
            vg_name = create['vg'].name
            for lv in lvs:
                command = ['--bluestore', '--data']
                command.append('%s/%s' % (vg_name, lv.name))
                if self.args.dmcrypt:
                    command.append('--dmcrypt')
                if self.args.no_systemd:
                    command.append('--no-systemd')
                if self.args.crush_device_class:
                    command.extend(['--crush-device-class', self.args.crush_device_class])

                if self.osd_ids:
                    command.extend(['--osd-id', self.osd_ids.pop(0)])

                if self.args.prepare:
                    Prepare(command).main()
                else:
                    Create(command).main()
class MixedType(MixedStrategy):
    """
    Mixed device scenario: rotational devices hold the data (block), while
    solid state devices are used for block.db (and optionally block.wal) LVs.
    """

    def __init__(self, args, data_devs, db_devs, wal_devs=None):
        # NOTE: the default used to be a mutable `[]`, which is shared across
        # every call; use None and create a fresh list per instance instead.
        super(MixedType, self).__init__(
            args, data_devs, db_devs, wal_devs if wal_devs is not None else [])
        self.block_db_size = self.get_block_db_size()
        self.block_wal_size = self.get_block_wal_size()
        self.system_vgs = lvm.VolumeGroups()
        self.common_vg = None
        self.common_wal_vg = None
        self.dbs_needed = len(self.data_devs) * self.osds_per_device
        self.wals_needed = self.dbs_needed
        self.use_large_block_db = self.use_large_block_wal = False
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        data_devs, db_devs = cls.split_devices_rotational(devices)
        return cls(args, data_devs, db_devs)

    @staticmethod
    def type():
        return "bluestore.MixedType"

    def get_block_db_size(self):
        """Return the configured block.db size, or 0 when unset."""
        if self.args.block_db_size:
            return disk.Size(b=self.args.block_db_size)
        else:
            return prepare.get_block_db_size(lv_format=False) or disk.Size(b=0)

    def get_block_wal_size(self):
        """Return the configured block.wal size, or 0 when unset."""
        if self.args.block_wal_size:
            return disk.Size(b=self.args.block_wal_size)
        else:
            return prepare.get_block_wal_size(lv_format=False) or disk.Size(b=0)

    def report_pretty(self, filtered_devices):
        """
        Build and print a human-readable report of the computed layout: data
        OSDs plus the shared fast-device volume groups for block.db/block.wal.
        """
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=len(self.data_devs) * self.osds_per_device
        )

        if self.db_or_journal_devs:
            vg_extents = lvm.sizing(self.total_available_db_space.b, parts=self.dbs_needed)
            db_size = str(disk.Size(b=(vg_extents['sizes'])))

            string += templates.ssd_volume_group.format(
                target='block.db',
                total_lv_size=str(self.total_available_db_space),
                total_lvs=vg_extents['parts'] * self.osds_per_device,
                block_lv_size=db_size,
                block_db_devices=', '.join([ssd.abspath for ssd in
                                            self.db_or_journal_devs]),
                lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))),
                total_osds=len(self.data_devs)
            )

        if self.wal_devs:
            wal_vg_extents = lvm.sizing(self.total_available_wal_space.b,
                                        parts=self.wals_needed)
            wal_size = str(disk.Size(b=(wal_vg_extents['sizes'])))
            string += templates.ssd_volume_group.format(
                target='block.wal',
                total_lv_size=str(self.total_available_wal_space),
                total_lvs=wal_vg_extents['parts'] * self.osds_per_device,
                block_lv_size=wal_size,
                block_db_devices=', '.join([dev.abspath for dev in
                                            self.wal_devs]),
                lv_size=self.block_wal_size or str(disk.Size(b=(wal_vg_extents['sizes']))),
                total_osds=len(self.data_devs)
            )

        string += templates.osd_component_titles
        for osd in self.computed['osds']:
            string += templates.osd_header
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'])

            if 'block.db' in osd:
                string += templates.osd_component.format(
                    _type='[block.db]',
                    path=osd['block.db']['path'],
                    size=osd['block.db']['human_readable_size'],
                    percent=osd['block.db']['percentage'])

            if 'block.wal' in osd:
                string += templates.osd_component.format(
                    _type='[block.wal]',
                    path=osd['block.wal']['path'],
                    size=osd['block.wal']['human_readable_size'],
                    percent=osd['block.wal']['percentage'])

        print(string)

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, populating
        self.computed with the planned OSDs and the (wal_)vg summaries.
        """
        osds = self.computed['osds']

        if self.data_devs and self.db_or_journal_devs:
            if not self.common_vg:
                # there isn't a common vg, so a new one must be created with all
                # the blank db devs
                self.computed['vg'] = {
                    'devices': ", ".join([ssd.abspath for ssd in self.blank_db_devs]),
                    'parts': self.dbs_needed,
                    'percentages': self.vg_extents['percentages'],
                    'sizes': self.block_db_size.b.as_int(),
                    'size': self.total_blank_db_dev_size.b.as_int(),
                    'human_readable_sizes': str(self.block_db_size),
                    'human_readable_size': str(self.total_available_db_space),
                }
                vg_name = 'vg/lv'
            else:
                vg_name = self.common_vg.name

        if self.data_devs and self.wal_devs:
            if not self.common_wal_vg:
                # there isn't a common vg, so a new one must be created with all
                # the blank wal devs
                self.computed['wal_vg'] = {
                    'devices': ", ".join([dev.abspath for dev in self.blank_wal_devs]),
                    'parts': self.wals_needed,
                    'percentages': self.wal_vg_extents['percentages'],
                    'sizes': self.block_wal_size.b.as_int(),
                    'size': self.total_blank_wal_dev_size.b.as_int(),
                    'human_readable_sizes': str(self.block_wal_size),
                    'human_readable_size': str(self.total_available_wal_space),
                }
                wal_vg_name = 'vg/lv'
            else:
                wal_vg_name = self.common_wal_vg.name

        for device in self.data_devs:
            for hdd in range(self.osds_per_device):
                osd = {'data': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = device.lvm_size.b / self.osds_per_device
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(
                    disk.Size(b=device.lvm_size.b) / self.osds_per_device
                )

                if self.db_or_journal_devs:
                    osd['block.db'] = {}
                    osd['block.db']['path'] = 'vg: %s' % vg_name
                    osd['block.db']['size'] = int(self.block_db_size.b)
                    osd['block.db']['human_readable_size'] = str(self.block_db_size)
                    osd['block.db']['percentage'] = self.vg_extents['percentages']

                if self.wal_devs:
                    osd['block.wal'] = {}
                    osd['block.wal']['path'] = 'vg: %s' % wal_vg_name
                    osd['block.wal']['size'] = int(self.block_wal_size.b)
                    osd['block.wal']['human_readable_size'] = str(self.block_wal_size)
                    osd['block.wal']['percentage'] = self.wal_vg_extents['percentages']

                if self.osd_ids:
                    osd['osd_id'] = self.osd_ids.pop(0)

                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        the ceph-volume lvm api.
        """
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # rely on the vg it is mapped to)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        if self.data_devs and self.db_or_journal_devs:
            blank_db_dev_paths = [d.abspath for d in self.blank_db_devs]

            # no common vg is found, create one with all the blank SSDs
            if not self.common_vg:
                db_vg = lvm.create_vg(blank_db_dev_paths, name_prefix='ceph-block-dbs')
            elif self.common_vg and blank_db_dev_paths:
                # if a common vg exists then extend it with any blank ssds
                db_vg = lvm.extend_vg(self.common_vg, blank_db_dev_paths)
            else:
                # one common vg with nothing else to extend can be used directly,
                # either this is one device with one vg, or multiple devices with the
                # same vg
                db_vg = self.common_vg

            if self.use_large_block_db:
                # make the block.db lvs as large as possible
                vg_free_count = str_to_int(db_vg.vg_free_count)
                db_lv_extents = int(vg_free_count / self.dbs_needed)
            else:
                db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

        if self.data_devs and self.wal_devs:
            blank_wal_dev_paths = [d.abspath for d in self.blank_wal_devs]

            if not self.common_wal_vg:
                wal_vg = lvm.create_vg(blank_wal_dev_paths,
                                       name_prefix='ceph-block-wals')
            elif self.common_wal_vg and blank_wal_dev_paths:
                wal_vg = lvm.extend_vg(self.common_wal_vg, blank_wal_dev_paths)
            else:
                wal_vg = self.common_wal_vg

            if self.use_large_block_wal:
                # make the block.wal lvs as large as possible
                vg_free_count = str_to_int(wal_vg.vg_free_count)
                wal_lv_extents = int(vg_free_count / self.wals_needed)
            else:
                wal_lv_extents = wal_vg.sizing(size=self.block_wal_size.gb.as_int())['extents']

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(parts=self.osds_per_device)['extents']
            data_uuid = system.generate_uuid()
            data_lv = lvm.create_lv(
                'osd-block', data_uuid, vg=data_vg, extents=data_lv_extents)
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            ]
            if 'block.db' in osd:
                db_uuid = system.generate_uuid()
                db_lv = lvm.create_lv(
                    'osd-block-db', db_uuid, vg=db_vg, extents=db_lv_extents)
                command.extend([ '--block.db',
                                '{}/{}'.format(db_lv.vg_name, db_lv.name)])
            if 'block.wal' in osd:
                wal_uuid = system.generate_uuid()
                wal_lv = lvm.create_lv(
                    'osd-block-wal', wal_uuid, vg=wal_vg, extents=wal_lv_extents)
                command.extend(
                    ['--block.wal',
                     '{}/{}'.format(wal_lv.vg_name, wal_lv.name)
                     ])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])
            if 'osd_id' in osd:
                command.extend(['--osd-id', osd['osd_id']])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()

    def validate(self):
        """
        HDDs represent data devices, and solid state devices are for block.db,
        make sure that the number of data devices would have enough LVs and
        those LVs would be large enough to accommodate a block.db
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.data_devs + self.db_or_journal_devs,
                                       osds_per_device=self.osds_per_device)
        validators.minimum_device_size(self.wal_devs,
                                       osds_per_device=self.osds_per_device,
                                       min_size=1)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        if self.data_devs and self.db_or_journal_devs:
            self._validate_db_devs()

        if self.data_devs and self.wal_devs:
            self._validate_wal_devs()

        if self.osd_ids:
            self._validate_osd_ids()

    def _validate_db_devs(self):
        # do not allow non-common VG to continue
        validators.has_common_vg(self.db_or_journal_devs)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg(self.db_or_journal_devs)

        # find how many block.db LVs are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(gb=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # blank SSDs: fast devices that are not part of any VG yet
        vg_members = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
        self.blank_db_devs = set(self.db_or_journal_devs).difference(vg_members)
        self.total_blank_db_dev_size = disk.Size(b=0)
        for blank_db_dev in self.blank_db_devs:
            self.total_blank_db_dev_size += disk.Size(b=blank_db_dev.lvm_size.b)

        self.total_available_db_space = self.total_blank_db_dev_size + common_vg_size

        # If not configured, we default to 0, which is really "use as much as
        # possible" captured by the `else` condition
        if self.block_db_size.gb > 0:
            try:
                self.vg_extents = lvm.sizing(
                    self.total_available_db_space.b, size=self.block_db_size.b * self.osds_per_device
                )
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.db LV"
                raise RuntimeError(
                    msg % (self.total_available_db_space, self.osds_per_device, self.block_db_size)
                )
        else:
            self.vg_extents = lvm.sizing(
                self.total_available_db_space.b, parts=self.dbs_needed
            )

        # validate that number of block.db LVs possible are enough for number of
        # OSDs proposed
        if self.total_available_db_space.b == 0:
            msg = "No space left in fast devices to create block.db LVs"
            raise RuntimeError(msg)

        # bluestore_block_db_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.db LVs
        # into the number of block.db LVs needed (i.e. "as large as possible")
        if self.block_db_size.b == 0:
            self.block_db_size = self.total_available_db_space / self.dbs_needed
            self.use_large_block_db = True

        total_dbs_possible = self.total_available_db_space / self.block_db_size

        if self.dbs_needed > total_dbs_possible:
            msg = "Not enough space (%s) to create %s x %s block.db LVs" % (
                self.total_available_db_space, self.dbs_needed, self.block_db_size,
            )
            raise RuntimeError(msg)

    def _validate_wal_devs(self):
        # do not allow non-common VG to continue
        validators.has_common_vg(self.wal_devs)

        # find the common VG to calculate how much is available
        self.common_wal_vg = self.get_common_vg(self.wal_devs)

        # find how many block.wal LVs are possible from the common VG
        if self.common_wal_vg:
            common_vg_size = disk.Size(gb=self.common_wal_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # blank fast devices that are not part of any VG yet
        vg_members = set([d for d in self.wal_devs if d.is_lvm_member])
        self.blank_wal_devs = set(self.wal_devs).difference(vg_members)
        self.total_blank_wal_dev_size = disk.Size(b=0)
        for blank_wal_dev in self.blank_wal_devs:
            self.total_blank_wal_dev_size += disk.Size(b=blank_wal_dev.lvm_size.b)

        self.total_available_wal_space = self.total_blank_wal_dev_size + common_vg_size

        # If not configured, we default to 0, which is really "use as much as
        # possible" captured by the `else` condition
        if self.block_wal_size.gb > 0:
            try:
                # BUGFIX: this branch used to assign `self.vg_extents` (a
                # copy-paste from _validate_db_devs), leaving wal_vg_extents
                # unset and crashing compute() whenever an explicit
                # block_wal_size was configured.
                self.wal_vg_extents = lvm.sizing(
                    self.total_available_wal_space.b, size=self.block_wal_size.b * self.osds_per_device
                )
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.wal LV"
                raise RuntimeError(
                    msg % (self.total_available_wal_space,
                           self.osds_per_device, self.block_wal_size)
                )
        else:
            self.wal_vg_extents = lvm.sizing(
                self.total_available_wal_space.b, parts=self.wals_needed
            )

        # validate that number of block.wal LVs possible are enough for number of
        # OSDs proposed
        if self.total_available_wal_space.b == 0:
            msg = "No space left in fast devices to create block.wal LVs"
            raise RuntimeError(msg)

        # bluestore_block_wal_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.wal LVs
        # into the number of block.wal LVs needed (i.e. "as large as possible")
        if self.block_wal_size.b == 0:
            self.block_wal_size = self.total_available_wal_space / self.wals_needed
            self.use_large_block_wal = True

        total_wals_possible = self.total_available_wal_space / self.block_wal_size

        if self.wals_needed > total_wals_possible:
            msg = "Not enough space (%s) to create %s x %s block.wal LVs" % (
                self.total_available_wal_space, self.wals_needed,
                self.block_wal_size,
            )
            raise RuntimeError(msg)