from __future__ import print_function
import json
from ceph_volume.util import disk, prepare
from ceph_volume.api import lvm
from . import validators
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
from ceph_volume.util import templates
from ceph_volume.exceptions import SizeAllocationError


def get_journal_size(args):
    """
    Helper for Filestore strategies: prefer the --journal-size value from
    the CLI over anything that might be set in a ceph configuration file
    (if any).
    """
    if args.journal_size:
        return disk.Size(mb=args.journal_size)
    else:
        return prepare.get_journal_size(lv_format=False)
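# Illustrative example (not part of the original module): invoking the batch
# subcommand with ``--journal-size 5120`` makes this helper return
# disk.Size(mb=5120); without the flag, the size falls back to whatever the
# ceph configuration resolves for the journal.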


class SingleType(object):
    """
    Support for all-SSD or all-HDD layouts; data and journal LVs are
    colocated on the same device.
    """

    def __init__(self, devices, args):
        self.args = args
        self.osds_per_device = args.osds_per_device
        self.devices = devices
        self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
        self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
        self.computed = {'osds': [], 'vgs': [], 'filtered_devices': args.filtered_devices}
        self.journal_size = get_journal_size(args)
        if self.devices:
            self.validate()
            self.compute()
        else:
            self.computed["changed"] = False

    @staticmethod
    def type():
        return "filestore.SingleType"

    @property
    def total_osds(self):
        if self.hdds:
            return len(self.hdds) * self.osds_per_device
        else:
            return len(self.ssds) * self.osds_per_device

    def report_json(self):
        print(json.dumps(self.computed, indent=4, sort_keys=True))

    def report_pretty(self):
        string = ""
        if self.args.filtered_devices:
            string += templates.filtered_devices(self.args.filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds
        )
        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )
            string += templates.osd_component.format(
                _type='[journal]',
                path=osd['journal']['path'],
                size=osd['journal']['human_readable_size'],
                percent=osd['journal']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario are
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)

        # validate collocation
        if self.hdds:
            validators.minimum_device_collocated_size(
                self.hdds, self.journal_size, osds_per_device=self.osds_per_device
            )
        else:
            validators.minimum_device_collocated_size(
                self.ssds, self.journal_size, osds_per_device=self.osds_per_device
            )

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.hdds)

    def compute(self):
        """
        Go through the rules needed to properly size the LVs, return
        a dictionary with the result
        """
        # choose whichever is the one group we have to compute against
        devices = self.hdds or self.ssds
        osds = self.computed['osds']
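        # Worked example (illustrative numbers): a 100 GB device with one OSD
        # per device and a 5 GB journal yields data_size = 95 GB, so the data
        # LV reports 95% of the device and the journal the remaining 5%.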
        for device in devices:
            for _ in range(self.osds_per_device):
                device_size = disk.Size(b=device.sys_api['size'])
                osd_size = device_size / self.osds_per_device
                journal_size = self.journal_size
                data_size = osd_size - journal_size
                data_percentage = data_size * 100 / device_size
                osd = {'data': {}, 'journal': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = data_size.b.as_int()
                osd['data']['parts'] = self.osds_per_device
                osd['data']['percentage'] = int(data_percentage)
                osd['data']['human_readable_size'] = str(data_size)
                osd['journal']['path'] = device.abspath
                osd['journal']['size'] = journal_size.b.as_int()
                osd['journal']['percentage'] = int(100 - data_percentage)
                osd['journal']['human_readable_size'] = str(journal_size)
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create 1 vg per data device first, mapping it to the device path;
        # the LVs created later (including the journals, since everything is
        # colocated) are then carved out of that per-device VG as needed
        for osd in self.computed['osds']:
            vg = device_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
                device_vgs[osd['data']['path']] = vg

        # create the lvs from the per-device vg created in the beginning
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            device_vg = device_vgs[data_path]
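            # sizing() maps the requested size in GB onto the VG's physical
            # extents, so create_lv can be driven with an exact extent count
            # (a reading of the api.lvm helpers, not documented here)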
            data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
            journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
            data_lv = lvm.create_lv(
                'osd-data', device_vg.name, extents=data_lv_extents, uuid_name=True
            )
            journal_lv = lvm.create_lv(
                'osd-journal', device_vg.name, extents=journal_lv_extents, uuid_name=True
            )

            command = ['--filestore', '--data']
            command.append('%s/%s' % (device_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])
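            # ``command`` now resembles (illustrative values; actual LV names
            # are uuid-suffixed by create_lv):
            #   ['--filestore', '--data', 'ceph-filestore-.../osd-data-...',
            #    '--journal', 'ceph-filestore-.../osd-journal-...', ...flags]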

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()


class MixedType(object):
    """
    Supports a mix of HDDs and SSDs: journals are placed on the SSDs, while
    the HDDs are used entirely for data.

    If an existing common VG is detected on the SSDs, it is extended with any
    blank SSDs, otherwise it is used directly.
    """

    def __init__(self, devices, args):
        self.args = args
        self.osds_per_device = args.osds_per_device
        self.devices = devices
        self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
        self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
        self.computed = {'osds': [], 'vg': None, 'filtered_devices': args.filtered_devices}
        self.blank_ssds = []
        self.journals_needed = len(self.hdds) * self.osds_per_device
        self.journal_size = get_journal_size(args)
        self.system_vgs = lvm.VolumeGroups()
        if self.devices:
            self.validate()
            self.compute()
        else:
            self.computed["changed"] = False

    @staticmethod
    def type():
        return "filestore.MixedType"

    def report_json(self):
        print(json.dumps(self.computed, indent=4, sort_keys=True))

    @property
    def total_osds(self):
        if self.hdds:
            return len(self.hdds) * self.osds_per_device
        else:
            return len(self.ssds) * self.osds_per_device

    def report_pretty(self):
        string = ""
        if self.args.filtered_devices:
            string += templates.filtered_devices(self.args.filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds
        )

        string += templates.ssd_volume_group.format(
            target='journal',
            total_lv_size=str(self.total_available_journal_space),
            total_lvs=self.journals_needed,
            block_db_devices=', '.join([d.path for d in self.ssds]),
            lv_size=str(self.journal_size),
            total_osds=self.journals_needed
        )

        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )
            string += templates.osd_component.format(
                _type='[journal]',
                path=osd['journal']['path'],
                size=osd['journal']['human_readable_size'],
                percent=osd['journal']['percentage'],
            )

        print(string)

    def get_common_vg(self):
        # find all the VGs associated with the current SSDs
        for ssd in self.ssds:
            for pv in ssd.pvs_api:
                vg = self.system_vgs.get(vg_name=pv.vg_name)
                if not vg:
                    continue
                # this should give us just one VG; anything else would have
                # been caught by the validator
                return vg

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario are
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.hdds)

        # do not allow the SSDs to belong to more than one common VG
        validators.has_common_vg(self.ssds)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg()

        # find how many journals are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(gb=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # split the SSDs into those already in a VG and the blank ones, and
        # add up the size of the blank ones
        self.vg_ssds = set([d for d in self.ssds if d.is_lvm_member])
        self.blank_ssds = set(self.ssds).difference(self.vg_ssds)
        self.total_blank_ssd_size = disk.Size(b=0)
        for blank_ssd in self.blank_ssds:
            self.total_blank_ssd_size += disk.Size(b=blank_ssd.sys_api['size'])

        self.total_available_journal_space = self.total_blank_ssd_size + common_vg_size
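        # Worked example (illustrative numbers): two blank 100 GB SSDs plus a
        # common VG with 50 GB free give 250 GB of journal space; with 5 GB
        # journals that is room for up to 50 journal LVs.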

        try:
            self.vg_extents = lvm.sizing(
                self.total_available_journal_space.b, size=self.journal_size.b * self.osds_per_device
            )
        except SizeAllocationError:
            msg = "Not enough space in fast devices (%s) to create %s x %s journal LV"
            raise RuntimeError(
                msg % (self.total_available_journal_space, self.osds_per_device, self.journal_size)
            )

        # validate that the number of journals possible is enough for the
        # number of OSDs proposed
        total_journals_possible = self.total_available_journal_space / self.journal_size
        if self.journals_needed > total_journals_possible:
            msg = "Not enough space (%s) to create %s x %s journal LVs" % (
                self.total_available_journal_space, self.journals_needed, self.journal_size
            )
            raise RuntimeError(msg)

    def compute(self):
        """
        Go through the rules needed to properly size the LVs, return
        a dictionary with the result
        """
        osds = self.computed['osds']

        vg_free = int(self.total_available_journal_space.gb)
        if not self.common_vg:
            # there isn't a common vg, so a new one must be created with all
            # the blank SSDs
            self.computed['vg'] = {
                'devices': ", ".join([ssd.abspath for ssd in self.blank_ssds]),
                'parts': self.journals_needed,
                'percentages': self.vg_extents['percentages'],
                'sizes': self.journal_size.b.as_int(),
                'size': self.total_blank_ssd_size.b.as_int(),
                'human_readable_sizes': str(self.journal_size),
                'human_readable_size': str(self.total_available_journal_space),
            }
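            # the real VG does not exist yet (execute() creates it), so the
            # report uses a placeholder name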
            vg_name = 'lv/vg'
        else:
            vg_name = self.common_vg.name

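        # the journal percentage is reported against the total journal space,
        # e.g. (illustrative) a 5 GB journal out of 250 GB available shows as
        # int(5 * 100 / 250) = 2 percent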
        for device in self.hdds:
            for _ in range(self.osds_per_device):
                device_size = disk.Size(b=device.sys_api['size'])
                data_size = device_size / self.osds_per_device
                osd = {'data': {}, 'journal': {}}
                osd['data']['path'] = device.path
                osd['data']['size'] = data_size.b.as_int()
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(data_size)
                osd['journal']['path'] = 'vg: %s' % vg_name
                osd['journal']['size'] = self.journal_size.b.as_int()
                osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free)
                osd['journal']['human_readable_size'] = str(self.journal_size)
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals')
        # a vg exists that can be extended
        elif self.common_vg and blank_ssd_paths:
            journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
        # one common vg with nothing else to extend can be used directly
        else:
            journal_vg = self.common_vg

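        # Note: lv_format=True is assumed to return an lvcreate-style size
        # string (e.g. '5G') read from the ceph configuration, which is what
        # create_lv's ``size`` argument expects below.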
        journal_size = prepare.get_journal_size(lv_format=True)

        # create 1 vg per data device first, mapping it to the device path;
        # the data LV (or several, one per OSD) is carved out of that
        # per-device VG later
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
                data_vgs[osd['data']['path']] = vg

        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
            data_lv = lvm.create_lv(
                'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            journal_lv = lvm.create_lv(
                'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
            )

            command = ['--filestore', '--data']
            command.append('%s/%s' % (data_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()