# ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py
from __future__ import print_function
import json

from ceph_volume.api import lvm
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
from ceph_volume.exceptions import SizeAllocationError
from ceph_volume.util import disk, prepare, templates

from . import validators
class SingleType(object):
    """
    Support for all SSDs, or all HDDS

    Sizes the data devices (one or more OSDs per device), records the plan in
    ``self.computed``, and can either report it (``report_json`` /
    ``report_pretty``) or carry it out (``execute``).

    NOTE(review): this block was reconstructed from a line-mangled listing;
    lines the capture dropped (e.g. ``def`` lines, ``else:`` branches) were
    restored to match the surrounding visible statements — confirm against the
    upstream repository.
    """

    def __init__(self, devices, args):
        self.args = args
        self.osds_per_device = args.osds_per_device
        self.devices = devices
        # TODO: add --fast-devices and --slow-devices so these can be customized
        self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
        self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
        self.computed = {'osds': [], 'vgs': [], 'filtered_devices': args.filtered_devices}
        if self.devices:
            self.validate()
            self.compute()
        else:
            # nothing to do without devices; make that explicit for reporting
            self.computed["changed"] = False

    @staticmethod
    def type():
        """Identifier used when reporting which strategy was chosen."""
        return "bluestore.SingleType"

    @property
    def total_osds(self):
        """Total OSD count: one device class is present, multiplied by --osds-per-device."""
        if self.hdds:
            return len(self.hdds) * self.osds_per_device
        else:
            return len(self.ssds) * self.osds_per_device

    def report_json(self):
        """Print the computed plan as pretty, stable (sorted keys) JSON."""
        print(json.dumps(self.computed, indent=4, sort_keys=True))

    def report_pretty(self):
        """Print a human-readable report of the computed OSD plan."""
        string = ""
        if self.args.filtered_devices:
            string += templates.filtered_devices(self.args.filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds,
        )
        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario is
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(
            self.devices, osds_per_device=self.osds_per_device
        )

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.hdds)

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        osds = self.computed['osds']
        # HDDs: each device is split evenly into osds_per_device data portions
        for device in self.hdds:
            for hdd in range(self.osds_per_device):
                osd = {'data': {}, 'block.db': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = device.sys_api['size'] / self.osds_per_device
                osd['data']['parts'] = self.osds_per_device
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(
                    disk.Size(b=device.sys_api['size']) / self.osds_per_device
                )
                osds.append(osd)

        # SSDs: sizing is delegated to lvm.sizing so extents line up exactly
        for device in self.ssds:
            extents = lvm.sizing(device.sys_api['size'], parts=self.osds_per_device)
            for ssd in range(self.osds_per_device):
                osd = {'data': {}, 'block.db': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = extents['sizes']
                osd['data']['parts'] = extents['parts']
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(disk.Size(b=extents['sizes']))
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        ``lvm create``
        """
        osd_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create the vgs first, mapping them to the device path
        for osd in self.computed['osds']:
            vg = osd_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'])
            osd_vgs[osd['data']['path']] = {'vg': vg, 'parts': osd['data']['parts']}

        # create the lvs from the vgs captured in the beginning
        for create in osd_vgs.values():
            lvs = lvm.create_lvs(create['vg'], parts=create['parts'], name_prefix='osd-data')
            vg_name = create['vg'].name
            for lv in lvs:
                command = ['--bluestore', '--data']
                command.append('%s/%s' % (vg_name, lv.name))
                if self.args.dmcrypt:
                    command.append('--dmcrypt')
                if self.args.no_systemd:
                    command.append('--no-systemd')
                if self.args.crush_device_class:
                    command.extend(['--crush-device-class', self.args.crush_device_class])

                if self.args.prepare:
                    Prepare(command).main()
                else:
                    Create(command).main()
class MixedType(object):
    """
    Support for mixed rotational/solid-state devices: HDDs hold the OSD data,
    while the SSDs host block.db LVs carved out of a single (common) volume
    group.

    NOTE(review): this block was reconstructed from a line-mangled listing;
    lines the capture dropped (``try:``, ``else:`` branches, ``raise`` lines)
    were restored to match the surrounding visible statements — confirm
    against the upstream repository.
    """

    def __init__(self, devices, args):
        self.args = args
        self.devices = devices
        self.osds_per_device = args.osds_per_device
        # TODO: add --fast-devices and --slow-devices so these can be customized
        self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
        self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
        self.computed = {'osds': [], 'filtered_devices': args.filtered_devices}
        self.block_db_size = self.get_block_size()
        self.system_vgs = lvm.VolumeGroups()
        # one block.db LV is needed per data (HDD) OSD
        self.dbs_needed = len(self.hdds) * self.osds_per_device
        if self.devices:
            self.validate()
            self.compute()
        else:
            self.computed["changed"] = False

    @staticmethod
    def type():
        """Identifier used when reporting which strategy was chosen."""
        return "bluestore.MixedType"

    def report_json(self):
        """Print the computed plan as pretty, stable (sorted keys) JSON."""
        print(json.dumps(self.computed, indent=4, sort_keys=True))

    def get_block_size(self):
        """
        Return the configured block.db size as a disk.Size, falling back to
        ceph.conf (via prepare.get_block_db_size) or a 0 Size meaning
        "use as much as possible".
        """
        if self.args.block_db_size:
            return disk.Size(b=self.args.block_db_size)
        else:
            return prepare.get_block_db_size(lv_format=False) or disk.Size(b=0)

    def report_pretty(self):
        """Print a human-readable report of the computed OSD plan."""
        vg_extents = lvm.sizing(self.total_available_db_space.b, parts=self.dbs_needed)
        db_size = str(disk.Size(b=(vg_extents['sizes'])))

        string = ""
        if self.args.filtered_devices:
            string += templates.filtered_devices(self.args.filtered_devices)
        string += templates.total_osds.format(
            total_osds=len(self.hdds) * self.osds_per_device
        )

        string += templates.ssd_volume_group.format(
            target='block.db',
            total_lv_size=str(self.total_available_db_space),
            total_lvs=vg_extents['parts'] * self.osds_per_device,
            block_lv_size=db_size,
            block_db_devices=', '.join([ssd.abspath for ssd in self.ssds]),
            lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))),
            total_osds=len(self.hdds)
        )

        string += templates.osd_component_titles
        for osd in self.computed['osds']:
            string += templates.osd_header
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'])

            string += templates.osd_component.format(
                _type='[block.db]',
                path=osd['block.db']['path'],
                size=osd['block.db']['human_readable_size'],
                percent=osd['block.db']['percentage'])

        print(string)

    def compute(self):
        """Size every data device and its block.db LV, filling self.computed."""
        osds = self.computed['osds']

        # unconfigured block db size will be 0, so set it back to using as much
        # as possible from looking at extents
        if self.block_db_size.b == 0:
            self.block_db_size = disk.Size(b=self.vg_extents['sizes'])

        if not self.common_vg:
            # there isn't a common vg, so a new one must be created with all
            # the blank SSDs
            self.computed['vg'] = {
                'devices': ", ".join([ssd.abspath for ssd in self.blank_ssds]),
                'parts': self.dbs_needed,
                'percentages': self.vg_extents['percentages'],
                'sizes': self.block_db_size.b.as_int(),
                'size': self.total_blank_ssd_size.b.as_int(),
                'human_readable_sizes': str(self.block_db_size),
                'human_readable_size': str(self.total_available_db_space),
            }
            # NOTE(review): placeholder name restored from upstream — confirm
            vg_name = 'lv/vg'
        else:
            vg_name = self.common_vg.name

        for device in self.hdds:
            for hdd in range(self.osds_per_device):
                osd = {'data': {}, 'block.db': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = device.sys_api['size'] / self.osds_per_device
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(
                    disk.Size(b=(device.sys_api['size'])) / self.osds_per_device
                )
                osd['block.db']['path'] = 'vg: %s' % vg_name
                osd['block.db']['size'] = int(self.block_db_size.b)
                osd['block.db']['human_readable_size'] = str(self.block_db_size)
                osd['block.db']['percentage'] = self.vg_extents['percentages']
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')

        # if a common vg exists then extend it with any blank ssds
        elif self.common_vg and blank_ssd_paths:
            db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)

        # one common vg with nothing else to extend can be used directly,
        # either this is one device with one vg, or multiple devices with the
        # same vg
        else:
            db_vg = self.common_vg

        # since we are falling back to a block_db_size that might be "as large
        # as possible" we can't fully rely on LV format coming from the helper
        # function that looks up this value
        block_db_size = "%sG" % self.block_db_size.gb.as_int()

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # rely on the vg it is mapped to)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
            data_lv = lvm.create_lv(
                'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            db_lv = lvm.create_lv(
                'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True
            )
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
                '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
            ]
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()

    def get_common_vg(self):
        """Return the single VG shared by the SSDs, or None if none exists."""
        # find all the vgs associated with the current device
        for ssd in self.ssds:
            for pv in ssd.pvs_api:
                vg = self.system_vgs.get(vg_name=pv.vg_name)
                if not vg:
                    continue
                # this should give us just one VG, it would've been caught by
                # the validator otherwise
                return vg

    def validate(self):
        """
        HDDs represent data devices, and solid state devices are for block.db,
        make sure that the number of data devices would have enough LVs and
        those LVs would be large enough to accommodate a block.db
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.hdds)

        # do not allow non-common VG to continue
        validators.has_common_vg(self.ssds)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg()

        # find how many block.db LVs are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(gb=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # blank SSDs (not LVM members) add their full size to the db space
        self.vg_ssds = set([d for d in self.ssds if d.is_lvm_member])
        self.blank_ssds = set(self.ssds).difference(self.vg_ssds)
        self.total_blank_ssd_size = disk.Size(b=0)
        for blank_ssd in self.blank_ssds:
            self.total_blank_ssd_size += disk.Size(b=blank_ssd.sys_api['size'])

        self.total_available_db_space = self.total_blank_ssd_size + common_vg_size

        # If not configured, we default to 0, which is really "use as much as
        # possible" captured by the `else` condition
        if self.block_db_size.gb > 0:
            try:
                self.vg_extents = lvm.sizing(
                    self.total_available_db_space.b, size=self.block_db_size.b * self.osds_per_device
                )
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.db LV"
                raise RuntimeError(
                    msg % (self.total_available_db_space, self.osds_per_device, self.block_db_size)
                )
        else:
            self.vg_extents = lvm.sizing(
                self.total_available_db_space.b, parts=self.dbs_needed
            )

        # validate that number of block.db LVs possible are enough for number of
        # OSDs proposed
        if self.total_available_db_space.b == 0:
            msg = "No space left in fast devices to create block.db LVs"
            raise RuntimeError(msg)

        # bluestore_block_db_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.db LVs
        # into the number of block.db LVs needed (i.e. "as large as possible")
        if self.block_db_size.b == 0:
            self.block_db_size = self.total_available_db_space / self.dbs_needed

        total_dbs_possible = self.total_available_db_space / self.block_db_size

        if self.dbs_needed > total_dbs_possible:
            msg = "Not enough space (%s) to create %s x %s block.db LVs" % (
                self.total_available_db_space, self.dbs_needed, self.block_db_size,
            )
            raise RuntimeError(msg)