# ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py
from __future__ import print_function
from ceph_volume.util import disk, prepare, str_to_int
from ceph_volume.api import lvm
from . import validators
from .strategies import Strategy
from .strategies import MixedStrategy
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
from ceph_volume.util import templates, system
from ceph_volume.exceptions import SizeAllocationError


class SingleType(Strategy):
    """
    Support for all SSDs, or all HDDs
    """

    def __init__(self, args, data_devs):
        super(SingleType, self).__init__(args, data_devs)
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        # SingleType only deploys standalone OSDs
        return cls(args, devices)

    @staticmethod
    def type():
        return "bluestore.SingleType"

    def report_pretty(self, filtered_devices):
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds,
        )
        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario are
        met, and raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(
            self.data_devs, osds_per_device=self.osds_per_device
        )

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        if self.osd_ids:
            self._validate_osd_ids()

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, and return
        a dictionary with the result
        """
        osds = self.computed['osds']
        for device in self.data_devs:
            extents = lvm.sizing(device.lvm_size.b, parts=self.osds_per_device)
            for _i in range(self.osds_per_device):
                osd = {'data': {}, 'block.db': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = extents['sizes']
                osd['data']['parts'] = extents['parts']
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(disk.Size(gb=extents['sizes']))
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

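    # Illustrative sketch (comments only, not executed): for a hypothetical
    # 1TB device with ``osds_per_device=2``, the call above becomes
    #
    #   lvm.sizing(disk.Size(tb=1).b, parts=2)
    #
    # and yields two equally sized parts, so each OSD entry reports a
    # ``percentage`` of 50 and half of the device as its data LV.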
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        ``lvm create``
        """
        osd_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create the vgs first, mapping them to the device path
        for osd in self.computed['osds']:
            vg = osd_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'])
                osd_vgs[osd['data']['path']] = {'vg': vg, 'parts': osd['data']['parts']}

        # create the lvs from the vgs captured in the beginning
        for create in osd_vgs.values():
            lvs = lvm.create_lvs(create['vg'], parts=create['parts'], name_prefix='osd-data')
            vg_name = create['vg'].name
            for lv in lvs:
                command = ['--bluestore', '--data']
                command.append('%s/%s' % (vg_name, lv.name))
                if self.args.dmcrypt:
                    command.append('--dmcrypt')
                if self.args.no_systemd:
                    command.append('--no-systemd')
                if self.args.crush_device_class:
                    command.extend(['--crush-device-class', self.args.crush_device_class])

                if self.osd_ids:
                    command.extend(['--osd-id', self.osd_ids.pop(0)])

                if self.args.prepare:
                    Prepare(command).main()
                else:
                    Create(command).main()

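    # Illustrative argv handed to ``lvm create``/``lvm prepare`` above, with
    # ``--dmcrypt`` set and a reused OSD id (vg/lv names are hypothetical):
    #
    #   ['--bluestore', '--data', 'ceph-<uuid>/osd-data-<uuid>',
    #    '--dmcrypt', '--osd-id', '3']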

class MixedType(MixedStrategy):

    def __init__(self, args, data_devs, db_devs, wal_devs=[]):
        super(MixedType, self).__init__(args, data_devs, db_devs, wal_devs)
        self.block_db_size = self.get_block_db_size()
        self.block_wal_size = self.get_block_wal_size()
        self.system_vgs = lvm.VolumeGroups()
        self.common_vg = None
        self.common_wal_vg = None
        self.dbs_needed = len(self.data_devs) * self.osds_per_device
        self.wals_needed = self.dbs_needed
        self.use_large_block_db = self.use_large_block_wal = False
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        data_devs, db_devs = cls.split_devices_rotational(devices)
        return cls(args, data_devs, db_devs)
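
    # Sketch of the auto-detection above, with hypothetical devices: given
    # /dev/sda and /dev/sdb (rotational) plus /dev/nvme0n1 (solid state),
    # ``split_devices_rotational`` yields data_devs=[sda, sdb] and
    # db_devs=[nvme0n1], so the HDDs carry the data LVs and the SSD carries
    # the block.db LVs.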

    @staticmethod
    def type():
        return "bluestore.MixedType"

    def get_block_db_size(self):
        if self.args.block_db_size:
            return disk.Size(b=self.args.block_db_size)
        else:
            return prepare.get_block_db_size(lv_format=False) or disk.Size(b=0)

    def get_block_wal_size(self):
        if self.args.block_wal_size:
            return disk.Size(b=self.args.block_wal_size)
        else:
            return prepare.get_block_wal_size(lv_format=False) or disk.Size(b=0)
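
    # Resolution order sketched by the two helpers above: an explicit
    # --block-db-size/--block-wal-size argument wins; otherwise the value
    # comes from the ceph.conf options read by ``prepare.get_block_db_size``
    # and ``prepare.get_block_wal_size``; otherwise Size(b=0), which later
    # means "divide the available fast space evenly" (see the validators).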

    def report_pretty(self, filtered_devices):
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=len(self.data_devs) * self.osds_per_device
        )

        if self.db_or_journal_devs:
            vg_extents = lvm.sizing(self.total_available_db_space.b, parts=self.dbs_needed)
            db_size = str(disk.Size(b=(vg_extents['sizes'])))

            string += templates.ssd_volume_group.format(
                target='block.db',
                total_lv_size=str(self.total_available_db_space),
                total_lvs=vg_extents['parts'] * self.osds_per_device,
                block_lv_size=db_size,
                block_db_devices=', '.join([ssd.abspath for ssd in
                                            self.db_or_journal_devs]),
                lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))),
                total_osds=len(self.data_devs)
            )

        if self.wal_devs:
            wal_vg_extents = lvm.sizing(self.total_available_wal_space.b,
                                        parts=self.wals_needed)
            wal_size = str(disk.Size(b=(wal_vg_extents['sizes'])))
            string += templates.ssd_volume_group.format(
                target='block.wal',
                total_lv_size=str(self.total_available_wal_space),
                total_lvs=wal_vg_extents['parts'] * self.osds_per_device,
                block_lv_size=wal_size,
                block_db_devices=', '.join([dev.abspath for dev in
                                            self.wal_devs]),
                lv_size=self.block_wal_size or str(disk.Size(b=(wal_vg_extents['sizes']))),
                total_osds=len(self.data_devs)
            )

        string += templates.osd_component_titles
        for osd in self.computed['osds']:
            string += templates.osd_header
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'])

            if 'block.db' in osd:
                string += templates.osd_component.format(
                    _type='[block.db]',
                    path=osd['block.db']['path'],
                    size=osd['block.db']['human_readable_size'],
                    percent=osd['block.db']['percentage'])

            if 'block.wal' in osd:
                string += templates.osd_component.format(
                    _type='[block.wal]',
                    path=osd['block.wal']['path'],
                    size=osd['block.wal']['human_readable_size'],
                    percent=osd['block.wal']['percentage'])

        print(string)

    def compute(self):
        osds = self.computed['osds']

        if self.data_devs and self.db_or_journal_devs:
            if not self.common_vg:
                # there isn't a common vg, so a new one must be created with
                # all the blank db devs
                self.computed['vg'] = {
                    'devices': ", ".join([ssd.abspath for ssd in self.blank_db_devs]),
                    'parts': self.dbs_needed,
                    'percentages': self.vg_extents['percentages'],
                    'sizes': self.block_db_size.b.as_int(),
                    'size': self.total_blank_db_dev_size.b.as_int(),
                    'human_readable_sizes': str(self.block_db_size),
                    'human_readable_size': str(self.total_available_db_space),
                }
                vg_name = 'vg/lv'
            else:
                vg_name = self.common_vg.name

        if self.data_devs and self.wal_devs:
            if not self.common_wal_vg:
                # there isn't a common vg, so a new one must be created with
                # all the blank wal devs
                self.computed['wal_vg'] = {
                    'devices': ", ".join([dev.abspath for dev in self.blank_wal_devs]),
                    'parts': self.wals_needed,
                    'percentages': self.wal_vg_extents['percentages'],
                    'sizes': self.block_wal_size.b.as_int(),
                    'size': self.total_blank_wal_dev_size.b.as_int(),
                    'human_readable_sizes': str(self.block_wal_size),
                    'human_readable_size': str(self.total_available_wal_space),
                }
                wal_vg_name = 'vg/lv'
            else:
                wal_vg_name = self.common_wal_vg.name

        for device in self.data_devs:
            for hdd in range(self.osds_per_device):
                osd = {'data': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = device.lvm_size.b / self.osds_per_device
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(
                    disk.Size(b=device.lvm_size.b) / self.osds_per_device
                )

                if self.db_or_journal_devs:
                    osd['block.db'] = {}
                    osd['block.db']['path'] = 'vg: %s' % vg_name
                    osd['block.db']['size'] = int(self.block_db_size.b)
                    osd['block.db']['human_readable_size'] = str(self.block_db_size)
                    osd['block.db']['percentage'] = self.vg_extents['percentages']

                if self.wal_devs:
                    osd['block.wal'] = {}
                    osd['block.wal']['path'] = 'vg: %s' % wal_vg_name
                    osd['block.wal']['size'] = int(self.block_wal_size.b)
                    osd['block.wal']['human_readable_size'] = str(self.block_wal_size)
                    osd['block.wal']['percentage'] = self.wal_vg_extents['percentages']

                if self.osd_ids:
                    osd['osd_id'] = self.osd_ids.pop(0)

                osds.append(osd)

        self.computed['changed'] = len(osds) > 0
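
    # Sketch of one resulting ``self.computed['osds']`` entry (values are
    # hypothetical, elided with ...):
    #
    #   {'data': {'path': '/dev/sda', 'size': ..., 'percentage': 50,
    #             'human_readable_size': '...'},
    #    'block.db': {'path': 'vg: ceph-block-dbs-...', 'size': ...,
    #                 'human_readable_size': '...', 'percentage': ...}}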

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        ``lvm create``
        """
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create 1 vg per data device first, mapping it to the device path;
        # when the lvs get created later, each vg can hold as many as needed
        # (or even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        if self.data_devs and self.db_or_journal_devs:
            blank_db_dev_paths = [d.abspath for d in self.blank_db_devs]

            # no common vg is found, create one with all the blank SSDs
            if not self.common_vg:
                db_vg = lvm.create_vg(blank_db_dev_paths, name_prefix='ceph-block-dbs')
            elif self.common_vg and blank_db_dev_paths:
                # if a common vg exists then extend it with any blank ssds
                db_vg = lvm.extend_vg(self.common_vg, blank_db_dev_paths)
            else:
                # one common vg with nothing else to extend can be used
                # directly; either this is one device with one vg, or
                # multiple devices with the same vg
                db_vg = self.common_vg

            if self.use_large_block_db:
                # make the block.db lvs as large as possible
                vg_free_count = str_to_int(db_vg.vg_free_count)
                db_lv_extents = int(vg_free_count / self.dbs_needed)
            else:
                db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

        if self.data_devs and self.wal_devs:
            blank_wal_dev_paths = [d.abspath for d in self.blank_wal_devs]

            if not self.common_wal_vg:
                wal_vg = lvm.create_vg(blank_wal_dev_paths,
                                       name_prefix='ceph-block-wals')
            elif self.common_wal_vg and blank_wal_dev_paths:
                wal_vg = lvm.extend_vg(self.common_wal_vg, blank_wal_dev_paths)
            else:
                wal_vg = self.common_wal_vg

            if self.use_large_block_wal:
                # make the block.wal lvs as large as possible
                vg_free_count = str_to_int(wal_vg.vg_free_count)
                wal_lv_extents = int(vg_free_count / self.wals_needed)
            else:
                wal_lv_extents = wal_vg.sizing(size=self.block_wal_size.gb.as_int())['extents']

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(parts=self.osds_per_device)['extents']
            data_uuid = system.generate_uuid()
            data_lv = lvm.create_lv(
                'osd-block', data_uuid, vg=data_vg, extents=data_lv_extents)
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            ]
            if 'block.db' in osd:
                db_uuid = system.generate_uuid()
                db_lv = lvm.create_lv(
                    'osd-block-db', db_uuid, vg=db_vg, extents=db_lv_extents)
                command.extend(['--block.db',
                                '{}/{}'.format(db_lv.vg_name, db_lv.name)])
            if 'block.wal' in osd:
                wal_uuid = system.generate_uuid()
                wal_lv = lvm.create_lv(
                    'osd-block-wal', wal_uuid, vg=wal_vg, extents=wal_lv_extents)
                command.extend(
                    ['--block.wal',
                     '{}/{}'.format(wal_lv.vg_name, wal_lv.name)
                     ])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])
            if 'osd_id' in osd:
                command.extend(['--osd-id', osd['osd_id']])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
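
    # Illustrative final argv for one OSD with both db and wal LVs (vg/lv
    # names are hypothetical uuid-suffixed values):
    #
    #   ['--bluestore',
    #    '--data', 'ceph-block-<uuid>/osd-block-<uuid>',
    #    '--block.db', 'ceph-block-dbs-<uuid>/osd-block-db-<uuid>',
    #    '--block.wal', 'ceph-block-wals-<uuid>/osd-block-wal-<uuid>']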

    def validate(self):
        """
        HDDs represent data devices, and solid state devices are for
        block.db; make sure that the number of data devices would have
        enough LVs, and that those LVs would be large enough to
        accommodate a block.db
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.data_devs + self.db_or_journal_devs,
                                       osds_per_device=self.osds_per_device)
        validators.minimum_device_size(self.wal_devs,
                                       osds_per_device=self.osds_per_device,
                                       min_size=1)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        if self.data_devs and self.db_or_journal_devs:
            self._validate_db_devs()

        if self.data_devs and self.wal_devs:
            self._validate_wal_devs()

        if self.osd_ids:
            self._validate_osd_ids()

    def _validate_db_devs(self):
        # do not allow non-common VG to continue
        validators.has_common_vg(self.db_or_journal_devs)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg(self.db_or_journal_devs)

        # find how many block.db LVs are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(gb=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # non-VG SSDs
        vg_members = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
        self.blank_db_devs = set(self.db_or_journal_devs).difference(vg_members)
        self.total_blank_db_dev_size = disk.Size(b=0)
        for blank_db_dev in self.blank_db_devs:
            self.total_blank_db_dev_size += disk.Size(b=blank_db_dev.lvm_size.b)

        self.total_available_db_space = self.total_blank_db_dev_size + common_vg_size

        # If not configured, we default to 0, which really means "use as much
        # as possible", captured by the `else` condition
        if self.block_db_size.gb > 0:
            try:
                self.vg_extents = lvm.sizing(
                    self.total_available_db_space.b, size=self.block_db_size.b * self.osds_per_device
                )
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.db LV"
                raise RuntimeError(
                    msg % (self.total_available_db_space, self.osds_per_device, self.block_db_size)
                )
        else:
            self.vg_extents = lvm.sizing(
                self.total_available_db_space.b, parts=self.dbs_needed
            )

        # validate that the number of block.db LVs possible is enough for the
        # number of OSDs proposed
        if self.total_available_db_space.b == 0:
            msg = "No space left in fast devices to create block.db LVs"
            raise RuntimeError(msg)

        # bluestore_block_db_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.db LVs
        # into the number of block.db LVs needed (i.e. "as large as possible")
        if self.block_db_size.b == 0:
            self.block_db_size = self.total_available_db_space / self.dbs_needed
            self.use_large_block_db = True

        total_dbs_possible = self.total_available_db_space / self.block_db_size

        if self.dbs_needed > total_dbs_possible:
            msg = "Not enough space (%s) to create %s x %s block.db LVs" % (
                self.total_available_db_space, self.dbs_needed, self.block_db_size,
            )
            raise RuntimeError(msg)
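
    # Worked sketch of the fallback sizing, with hypothetical numbers: four
    # data devices and osds_per_device=1 give dbs_needed=4; with 200GB of
    # total fast space an unset block_db_size becomes 200GB / 4 = 50GB, and
    # total_dbs_possible = 200GB / 50GB = 4.0, which satisfies the check.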

    def _validate_wal_devs(self):
        # do not allow non-common VG to continue
        validators.has_common_vg(self.wal_devs)

        # find the common VG to calculate how much is available
        self.common_wal_vg = self.get_common_vg(self.wal_devs)

        # find how many block.wal LVs are possible from the common VG
        if self.common_wal_vg:
            common_vg_size = disk.Size(gb=self.common_wal_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # non-VG SSDs
        vg_members = set([d for d in self.wal_devs if d.is_lvm_member])
        self.blank_wal_devs = set(self.wal_devs).difference(vg_members)
        self.total_blank_wal_dev_size = disk.Size(b=0)
        for blank_wal_dev in self.blank_wal_devs:
            self.total_blank_wal_dev_size += disk.Size(b=blank_wal_dev.lvm_size.b)

        self.total_available_wal_space = self.total_blank_wal_dev_size + common_vg_size

        # If not configured, we default to 0, which really means "use as much
        # as possible", captured by the `else` condition
        if self.block_wal_size.gb > 0:
            try:
                self.wal_vg_extents = lvm.sizing(
                    self.total_available_wal_space.b, size=self.block_wal_size.b * self.osds_per_device
                )
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.wal LV"
                raise RuntimeError(
                    msg % (self.total_available_wal_space,
                           self.osds_per_device, self.block_wal_size)
                )
        else:
            self.wal_vg_extents = lvm.sizing(
                self.total_available_wal_space.b, parts=self.wals_needed
            )

        # validate that the number of block.wal LVs possible is enough for
        # the number of OSDs proposed
        if self.total_available_wal_space.b == 0:
            msg = "No space left in fast devices to create block.wal LVs"
            raise RuntimeError(msg)

        # bluestore_block_wal_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.wal LVs
        # into the number of block.wal LVs needed (i.e. "as large as possible")
        if self.block_wal_size.b == 0:
            self.block_wal_size = self.total_available_wal_space / self.wals_needed
            self.use_large_block_wal = True

        total_wals_possible = self.total_available_wal_space / self.block_wal_size

        if self.wals_needed > total_wals_possible:
            msg = "Not enough space (%s) to create %s x %s block.wal LVs" % (
                self.total_available_wal_space, self.wals_needed,
                self.block_wal_size,
            )
            raise RuntimeError(msg)
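# Note on ``disk.Size`` arithmetic relied on above (a sketch, not asserted by
# this module): dividing a Size by an int yields a Size (Size(gb=200) / 4 is
# 50GB), while dividing a Size by a Size yields a plain ratio
# (Size(gb=200) / Size(gb=50) == 4.0), which is what the dbs/wals "possible"
# comparisons use.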