from __future__ import print_function
import json
from ceph_volume.util import disk, prepare
from ceph_volume.api import lvm
from . import validators
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
from ceph_volume.util import templates
from ceph_volume.exceptions import SizeAllocationError


class SingleType(object):
    """
    Support for all SSDs, or all HDDs
    """

    def __init__(self, devices, args):
        self.args = args
        self.osds_per_device = args.osds_per_device
        self.devices = devices
        # TODO: add --fast-devices and --slow-devices so these can be customized
        self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
        self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
        self.computed = {'osds': [], 'vgs': [], 'filtered_devices': args.filtered_devices}
        if self.devices:
            self.validate()
            self.compute()
        else:
            self.computed["changed"] = False

    @staticmethod
    def type():
        return "bluestore.SingleType"

    @property
    def total_osds(self):
        if self.hdds:
            return len(self.hdds) * self.osds_per_device
        else:
            return len(self.ssds) * self.osds_per_device

    def report_json(self):
        print(json.dumps(self.computed, indent=4, sort_keys=True))

    def report_pretty(self):
        string = ""
        if self.args.filtered_devices:
            string += templates.filtered_devices(self.args.filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds,
        )
        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario are
        met; raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(
            self.devices, osds_per_device=self.osds_per_device
        )

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.hdds)

    def compute(self):
        """
        Go through the rules needed to properly size the LVs, and store the
        result in ``self.computed``
        """
        osds = self.computed['osds']
        for device in self.hdds:
            for hdd in range(self.osds_per_device):
                osd = {'data': {}, 'block.db': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = device.sys_api['size'] / self.osds_per_device
                osd['data']['parts'] = self.osds_per_device
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(
                    disk.Size(b=device.sys_api['size']) / self.osds_per_device
                )
                osds.append(osd)

        for device in self.ssds:
            extents = lvm.sizing(device.sys_api['size'], parts=self.osds_per_device)
            for ssd in range(self.osds_per_device):
                osd = {'data': {}, 'block.db': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = extents['sizes']
                osd['data']['parts'] = extents['parts']
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(disk.Size(b=extents['sizes']))
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

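    # For illustration only (hypothetical device and numbers): one 1 TB HDD
    # with --osds-per-device=2 would yield two entries in
    # self.computed['osds'], each along the lines of:
    #
    #     {'block.db': {},
    #      'data': {'path': '/dev/sdb',
    #               'size': 500102443008,
    #               'parts': 2,
    #               'percentage': 50,
    #               'human_readable_size': '465.76 GB'}}
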
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        ``lvm create``
        """
        osd_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create the vgs first, mapping them to the device path
        for osd in self.computed['osds']:
            vg = osd_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'])
            osd_vgs[osd['data']['path']] = {'vg': vg, 'parts': osd['data']['parts']}

        # create the lvs from the vgs captured in the beginning
        for create in osd_vgs.values():
            lvs = lvm.create_lvs(create['vg'], parts=create['parts'], name_prefix='osd-data')
            vg_name = create['vg'].name
            for lv in lvs:
                command = ['--bluestore', '--data']
                command.append('%s/%s' % (vg_name, lv.name))
                if self.args.dmcrypt:
                    command.append('--dmcrypt')
                if self.args.no_systemd:
                    command.append('--no-systemd')
                if self.args.crush_device_class:
                    command.extend(['--crush-device-class', self.args.crush_device_class])

                if self.args.prepare:
                    Prepare(command).main()
                else:
                    Create(command).main()
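
    # A sketch of the argv handed to Create/Prepare above; vg/lv names are
    # purely illustrative, and the optional flags only appear when the
    # corresponding CLI arguments were passed:
    #
    #     ['--bluestore', '--data', 'ceph-<uuid>/osd-data-0', '--dmcrypt']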


class MixedType(object):
    """
    Support for mixed devices: HDDs as data devices, solid state devices
    for block.db
    """

    def __init__(self, devices, args):
        self.args = args
        self.devices = devices
        self.osds_per_device = args.osds_per_device
        # TODO: add --fast-devices and --slow-devices so these can be customized
        self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
        self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
        self.computed = {'osds': [], 'filtered_devices': args.filtered_devices}
        self.block_db_size = self.get_block_size()
        self.system_vgs = lvm.VolumeGroups()
        self.dbs_needed = len(self.hdds) * self.osds_per_device
        if self.devices:
            self.validate()
            self.compute()
        else:
            self.computed["changed"] = False

    @staticmethod
    def type():
        return "bluestore.MixedType"

    def report_json(self):
        print(json.dumps(self.computed, indent=4, sort_keys=True))

    def get_block_size(self):
        if self.args.block_db_size:
            return disk.Size(b=self.args.block_db_size)
        else:
            return prepare.get_block_db_size(lv_format=False) or disk.Size(b=0)

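    # Resolution order implemented above: an explicit --block-db-size from the
    # CLI wins; otherwise the bluestore_block_db_size configured in ceph.conf
    # is used; a 0-byte Size is the sentinel for "unset", which compute() and
    # validate() later treat as "as large as possible".
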
    def report_pretty(self):
        vg_extents = lvm.sizing(self.total_available_db_space.b, parts=self.dbs_needed)
        db_size = str(disk.Size(b=(vg_extents['sizes'])))

        string = ""
        if self.args.filtered_devices:
            string += templates.filtered_devices(self.args.filtered_devices)
        string += templates.total_osds.format(
            total_osds=len(self.hdds) * self.osds_per_device
        )

        string += templates.ssd_volume_group.format(
            target='block.db',
            total_lv_size=str(self.total_available_db_space),
            total_lvs=vg_extents['parts'] * self.osds_per_device,
            block_lv_size=db_size,
            block_db_devices=', '.join([ssd.abspath for ssd in self.ssds]),
            lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))),
            total_osds=len(self.hdds)
        )

        string += templates.osd_component_titles
        for osd in self.computed['osds']:
            string += templates.osd_header
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'])

            string += templates.osd_component.format(
                _type='[block.db]',
                path=osd['block.db']['path'],
                size=osd['block.db']['human_readable_size'],
                percent=osd['block.db']['percentage'])

        print(string)

    def compute(self):
        osds = self.computed['osds']

        # unconfigured block db size will be 0, so set it back to using as much
        # as possible from looking at extents
        if self.block_db_size.b == 0:
            self.block_db_size = disk.Size(b=self.vg_extents['sizes'])

        if not self.common_vg:
            # there isn't a common vg, so a new one must be created with all
            # the blank SSDs
            self.computed['vg'] = {
                'devices': ", ".join([ssd.abspath for ssd in self.blank_ssds]),
                'parts': self.dbs_needed,
                'percentages': self.vg_extents['percentages'],
                'sizes': self.block_db_size.b.as_int(),
                'size': self.total_blank_ssd_size.b.as_int(),
                'human_readable_sizes': str(self.block_db_size),
                'human_readable_size': str(self.total_available_db_space),
            }
            vg_name = 'vg/lv'
        else:
            vg_name = self.common_vg.name

        for device in self.hdds:
            for hdd in range(self.osds_per_device):
                osd = {'data': {}, 'block.db': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = device.sys_api['size'] / self.osds_per_device
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(
                    disk.Size(b=(device.sys_api['size'])) / self.osds_per_device
                )
                osd['block.db']['path'] = 'vg: %s' % vg_name
                osd['block.db']['size'] = int(self.block_db_size.b)
                osd['block.db']['human_readable_size'] = str(self.block_db_size)
                osd['block.db']['percentage'] = self.vg_extents['percentages']
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0
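
    # Illustrative 'block.db' entry appended above (hypothetical numbers,
    # assuming a common vg and a computed 50 GB block_db_size split 4 ways):
    #
    #     {'path': 'vg: ceph-block-dbs-<uuid>',
    #      'size': 53687091200,
    #      'human_readable_size': '50.00 GB',
    #      'percentage': 25}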
    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (block, block.db, block.wal, etc..) and offload the OSD creation to
        ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')

        # if a common vg exists then extend it with any blank ssds
        elif self.common_vg and blank_ssd_paths:
            db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)

        # one common vg with nothing else to extend can be used directly,
        # either this is one device with one vg, or multiple devices with the
        # same vg
        else:
            db_vg = self.common_vg

        # since we are falling back to a block_db_size that might be "as large
        # as possible" we can't fully rely on LV format coming from the helper
        # function that looks up this value
        block_db_size = "%sG" % self.block_db_size.gb.as_int()

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
                data_vgs[osd['data']['path']] = vg

        # create the data lvs, and create the OSD with an lv from the common
        # block.db vg from before
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
            data_lv = lvm.create_lv(
                'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            db_lv = lvm.create_lv(
                'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True
            )
            command = [
                '--bluestore',
                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
                '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
            ]
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
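
    # Sketch of the resulting argv (vg/lv names illustrative; real names are
    # suffixed with UUIDs because of uuid_name=True):
    #
    #     ['--bluestore',
    #      '--data', 'ceph-block-<uuid>/osd-block-<uuid>',
    #      '--block.db', 'ceph-block-dbs-<uuid>/osd-block-db-<uuid>']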
    def get_common_vg(self):
        # find all the vgs associated with the current device
        for ssd in self.ssds:
            for pv in ssd.pvs_api:
                vg = self.system_vgs.get(vg_name=pv.vg_name)
                if not vg:
                    continue
                # this should give us just one VG, it would've been caught by
                # the validator otherwise
                return vg
    def validate(self):
        """
        HDDs represent data devices, and solid state devices are for block.db;
        make sure that the number of data devices would have enough LVs and
        those LVs would be large enough to accommodate a block.db
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.hdds)

        # do not allow non-common VG to continue
        validators.has_common_vg(self.ssds)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg()

        # find how many block.db LVs are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(gb=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # split the SSDs into LVM members (vg_ssds) and blank ones
        self.vg_ssds = set([d for d in self.ssds if d.is_lvm_member])
        self.blank_ssds = set(self.ssds).difference(self.vg_ssds)
        self.total_blank_ssd_size = disk.Size(b=0)
        for blank_ssd in self.blank_ssds:
            self.total_blank_ssd_size += disk.Size(b=blank_ssd.sys_api['size'])

        self.total_available_db_space = self.total_blank_ssd_size + common_vg_size

        # If not configured, we default to 0, which is really "use as much as
        # possible", captured by the `else` condition
        if self.block_db_size.gb > 0:
            try:
                self.vg_extents = lvm.sizing(
                    self.total_available_db_space.b, size=self.block_db_size.b * self.osds_per_device
                )
            except SizeAllocationError:
                msg = "Not enough space in fast devices (%s) to create %s x %s block.db LV"
                raise RuntimeError(
                    msg % (self.total_available_db_space, self.osds_per_device, self.block_db_size)
                )
        else:
            self.vg_extents = lvm.sizing(
                self.total_available_db_space.b, parts=self.dbs_needed
            )

        # validate that the number of block.db LVs possible is enough for the
        # number of OSDs proposed
        if self.total_available_db_space.b == 0:
            msg = "No space left in fast devices to create block.db LVs"
            raise RuntimeError(msg)

        # bluestore_block_db_size was unset, so we must set this to whatever
        # size we get by dividing the total available space for block.db LVs
        # by the number of block.db LVs needed (i.e. "as large as possible")
        if self.block_db_size.b == 0:
            self.block_db_size = self.total_available_db_space / self.dbs_needed

        total_dbs_possible = self.total_available_db_space / self.block_db_size

        if self.dbs_needed > total_dbs_possible:
            msg = "Not enough space (%s) to create %s x %s block.db LVs" % (
                self.total_available_db_space, self.dbs_needed, self.block_db_size,
            )
            raise RuntimeError(msg)
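
    # Worked example of the sizing math above (hypothetical devices): two
    # HDDs, one blank 200 GB SSD, and --osds-per-device=2 give dbs_needed = 4.
    # With bluestore_block_db_size unset, block_db_size falls back to
    # 200 GB / 4 = 50 GB per block.db LV. Had block_db_size been pinned to
    # 60 GB instead, total_dbs_possible would be 200 / 60 ≈ 3.3 < 4, and a
    # RuntimeError would be raised.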