]> git.proxmox.com Git - ceph.git/blame - ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py
import 15.2.5
[ceph.git] / ceph / src / ceph-volume / ceph_volume / devices / lvm / strategies / filestore.py
CommitLineData
1adf2230 1from __future__ import print_function
1adf2230
AA
2from ceph_volume.util import disk, prepare
3from ceph_volume.api import lvm
4from . import validators
f64942e4
AA
5from .strategies import Strategy
6from .strategies import MixedStrategy
1adf2230 7from ceph_volume.devices.lvm.create import Create
91327a77 8from ceph_volume.devices.lvm.prepare import Prepare
92f5a8d4 9from ceph_volume.util import templates, system
91327a77
AA
10from ceph_volume.exceptions import SizeAllocationError
11
12
def get_journal_size(args):
    """
    Resolve the journal size for Filestore strategies.

    The ``--journal-size`` CLI flag always wins; only when it is unset does
    this fall back to whatever a ceph configuration file provides (if any).
    """
    if args.journal_size:
        return disk.Size(mb=args.journal_size)
    return prepare.get_journal_size(lv_format=False)
1adf2230
AA
22
23
class SingleType(Strategy):
    """
    Support for all SSDs, or all HDDs, data and journal LVs will be colocated
    in the same device
    """

    def __init__(self, args, data_devs):
        super(SingleType, self).__init__(args, data_devs)
        # prefer the CLI-provided --journal-size over the ceph config (if any)
        self.journal_size = get_journal_size(args)
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        """Alternate constructor: every detected device is a data device."""
        return cls(args, devices)

    @staticmethod
    def type():
        """Identifier used to label this strategy in reports."""
        return "filestore.SingleType"

    def report_pretty(self, filtered_devices):
        """
        Print a human-readable report of the computed layout: total OSDs and,
        per OSD, the data/journal paths, sizes and device percentages.
        """
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds
        )
        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )
            string += templates.osd_component.format(
                _type='[journal]',
                path=osd['journal']['path'],
                size=osd['journal']['human_readable_size'],
                percent=osd['journal']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario is
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.data_devs, osds_per_device=self.osds_per_device)

        # validate collocation: each device must fit data plus journal for
        # every OSD it hosts
        validators.minimum_device_collocated_size(
            self.data_devs, self.journal_size, osds_per_device=self.osds_per_device
        )

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        if self.osd_ids:
            self._validate_osd_ids()

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        # chose whichever is the one group we have to compute against
        osds = self.computed['osds']
        for device in self.data_devs:
            for _ in range(self.osds_per_device):
                device_size = disk.Size(b=device.lvm_size.b)
                osd_size = device_size / self.osds_per_device
                journal_size = self.journal_size
                data_size = osd_size - journal_size
                data_percentage = data_size * 100 / device_size
                osd = {'data': {}, 'journal': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = data_size.b.as_int()
                osd['data']['parts'] = self.osds_per_device
                osd['data']['percentage'] = int(data_percentage)
                osd['data']['human_readable_size'] = str(data_size)
                osd['journal']['path'] = device.abspath
                osd['journal']['size'] = journal_size.b.as_int()
                osd['journal']['percentage'] = int(100 - data_percentage)
                osd['journal']['human_readable_size'] = str(journal_size)

                if self.osd_ids:
                    # pop(0) preserves the order the user passed --osd-ids in;
                    # pop() would hand them out in reverse, and MixedType
                    # already consumes them front-first
                    osd['osd_id'] = self.osd_ids.pop(0)

                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create 1 vg per data device first, mapping them to the device path,
        # when the lvs get created later, it can create as many as needed,
        # including the journals since it is going to be collocated
        for osd in self.computed['osds']:
            vg = device_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
                device_vgs[osd['data']['path']] = vg

        # create the lvs from the per-device vg created in the beginning
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            device_vg = device_vgs[data_path]
            data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
            journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
            data_uuid = system.generate_uuid()
            data_lv = lvm.create_lv(
                'osd-data', data_uuid, vg=device_vg, extents=data_lv_extents)
            journal_uuid = system.generate_uuid()
            journal_lv = lvm.create_lv(
                'osd-journal', journal_uuid, vg=device_vg, extents=journal_lv_extents)

            command = ['--filestore', '--data']
            command.append('%s/%s' % (device_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])
            if 'osd_id' in osd:
                command.extend(['--osd-id', osd['osd_id']])

            # --prepare stops after `lvm prepare`; otherwise do a full create
            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
1adf2230
AA
170
171
class MixedType(MixedStrategy):
    """
    Supports HDDs with SSDs, journals will be placed on SSDs, while HDDs will
    be used fully for data.

    If an existing common VG is detected on SSDs, it will be extended if blank
    SSDs are used, otherwise it will be used directly.
    """

    def __init__(self, args, data_devs, journal_devs):
        super(MixedType, self).__init__(args, data_devs, journal_devs)
        self.blank_journal_devs = []
        # one journal LV is needed per OSD across all data devices
        self.journals_needed = len(self.data_devs) * self.osds_per_device
        # prefer the CLI-provided --journal-size over the ceph config (if any)
        self.journal_size = get_journal_size(args)
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        """Alternate constructor: rotational devices hold data, the
        non-rotational (SSD) ones hold the journals."""
        data_devs, journal_devs = cls.split_devices_rotational(devices)
        return cls(args, data_devs, journal_devs)

    @staticmethod
    def type():
        """Identifier used to label this strategy in reports."""
        return "filestore.MixedType"

    def report_pretty(self, filtered_devices):
        """
        Print a human-readable report of the computed layout: total OSDs, the
        journal volume group summary, and per-OSD data/journal details.
        """
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds
        )

        string += templates.ssd_volume_group.format(
            target='journal',
            total_lv_size=str(self.total_available_journal_space),
            total_lvs=self.journals_needed,
            block_db_devices=', '.join([d.path for d in self.db_or_journal_devs]),
            lv_size=str(self.journal_size),
            total_osds=self.journals_needed
        )

        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )
            string += templates.osd_component.format(
                _type='[journal]',
                path=osd['journal']['path'],
                size=osd['journal']['human_readable_size'],
                percent=osd['journal']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario is
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        # do not allow non-common VG to continue
        validators.has_common_vg(self.db_or_journal_devs)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg(self.db_or_journal_devs)

        # find how many journals are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(b=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # non-VG SSDs
        vg_ssds = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
        self.blank_journal_devs = set(self.db_or_journal_devs).difference(vg_ssds)
        self.total_blank_journal_dev_size = disk.Size(b=0)
        for blank_journal_dev in self.blank_journal_devs:
            self.total_blank_journal_dev_size += disk.Size(b=blank_journal_dev.lvm_size.b)

        self.total_available_journal_space = self.total_blank_journal_dev_size + common_vg_size

        try:
            # NOTE(review): this sizes for osds_per_device journals only;
            # presumably it should account for journals_needed (all data
            # devices) -- kept as-is since computed['vg'] percentages depend
            # on it. Confirm against callers before changing.
            self.vg_extents = lvm.sizing(
                self.total_available_journal_space.b, size=self.journal_size.b * self.osds_per_device
            )
        except SizeAllocationError:
            msg = "Not enough space in fast devices (%s) to create %s x %s journal LV"
            raise RuntimeError(
                msg % (self.total_available_journal_space, self.osds_per_device, self.journal_size)
            )

        # validate that number of journals possible are enough for number of
        # OSDs proposed
        total_journals_possible = self.total_available_journal_space / self.journal_size
        # compare against journals_needed (data devices x osds-per-device),
        # matching the error message below; comparing only osds_per_device
        # would under-validate multi-device setups and fail later at LV
        # creation time instead
        if self.journals_needed > total_journals_possible:
            msg = "Not enough space (%s) to create %s x %s journal LVs" % (
                self.total_available_journal_space, self.journals_needed, self.journal_size
            )
            raise RuntimeError(msg)

        if self.osd_ids:
            self._validate_osd_ids()

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        osds = self.computed['osds']

        vg_free = int(self.total_available_journal_space.gb)
        if not self.common_vg:
            # there isn't a common vg, so a new one must be created with all
            # the blank SSDs
            self.computed['vg'] = {
                'devices': ", ".join([ssd.abspath for ssd in self.blank_journal_devs]),
                'parts': self.journals_needed,
                'percentages': self.vg_extents['percentages'],
                'sizes': self.journal_size.b.as_int(),
                'size': self.total_blank_journal_dev_size.b.as_int(),
                'human_readable_sizes': str(self.journal_size),
                'human_readable_size': str(self.total_available_journal_space),
            }
            # placeholder name: the actual VG is only created in execute()
            vg_name = 'lv/vg'
        else:
            vg_name = self.common_vg.name

        for device in self.data_devs:
            for _ in range(self.osds_per_device):
                device_size = disk.Size(b=device.lvm_size.b)
                data_size = device_size / self.osds_per_device
                osd = {'data': {}, 'journal': {}}
                osd['data']['path'] = device.path
                osd['data']['size'] = data_size.b.as_int()
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(data_size)
                osd['journal']['path'] = 'vg: %s' % vg_name
                osd['journal']['size'] = self.journal_size.b.as_int()
                osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free)
                osd['journal']['human_readable_size'] = str(self.journal_size)

                if self.osd_ids:
                    # pop(0) preserves the order the user passed --osd-ids in
                    osd['osd_id'] = self.osd_ids.pop(0)

                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            journal_vg = lvm.create_vg(blank_journal_dev_paths, name_prefix='ceph-journals')
        # a vg exists that can be extended
        elif self.common_vg and blank_journal_dev_paths:
            journal_vg = lvm.extend_vg(self.common_vg, blank_journal_dev_paths)
        # one common vg with nothing else to extend can be used directly
        else:
            journal_vg = self.common_vg

        # use the journal size resolved in __init__, which honors the CLI
        # --journal-size flag; re-reading it from the ceph configuration here
        # would silently ignore the CLI value that validate() and compute()
        # already sized everything against
        journal_size = self.journal_size

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
                data_vgs[osd['data']['path']] = vg

        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(parts=1)['extents']
            data_uuid = system.generate_uuid()
            data_lv = lvm.create_lv(
                'osd-data', data_uuid, vg=data_vg, extents=data_lv_extents)
            journal_uuid = system.generate_uuid()
            journal_lv = lvm.create_lv(
                'osd-journal', journal_uuid, vg=journal_vg, size=journal_size)

            command = ['--filestore', '--data']
            command.append('%s/%s' % (data_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])
            if 'osd_id' in osd:
                command.extend(['--osd-id', osd['osd_id']])

            # --prepare stops after `lvm prepare`; otherwise do a full create
            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()