ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py
from __future__ import print_function
from ceph_volume.util import disk, prepare
from ceph_volume.api import lvm
from . import validators
from .strategies import Strategy
from .strategies import MixedStrategy
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
from ceph_volume.util import templates, system
from ceph_volume.exceptions import SizeAllocationError


def get_journal_size(args):
    """
    Helper for Filestore strategies, to prefer the --journal-size value from
    the CLI over anything that might be in a ceph configuration file (if any).
    """
    if args.journal_size:
        return disk.Size(mb=args.journal_size)
    else:
        return prepare.get_journal_size(lv_format=False)

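# Example (illustrative): invoked with `--journal-size 5120` this returns
# disk.Size(mb=5120), taking precedence over any `osd journal size` setting in
# ceph.conf; without the flag it falls back to prepare.get_journal_size(),
# i.e. the configuration value or the ceph-volume default.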

class SingleType(Strategy):
    """
    Support for all SSDs or all HDDs; data and journal LVs will be colocated
    on the same device.
    """
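    # Resulting layout (sketch): one 'ceph-filestore' VG is created per data
    # device, and each OSD gets an 'osd-data' LV plus an 'osd-journal' LV
    # carved from that same VG (see execute() below).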


    def __init__(self, args, data_devs):
        super(SingleType, self).__init__(args, data_devs)
        self.journal_size = get_journal_size(args)
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        return cls(args, devices)

    @staticmethod
    def type():
        return "filestore.SingleType"

    def report_pretty(self, filtered_devices):
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds
        )
        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )
            string += templates.osd_component.format(
                _type='[journal]',
                path=osd['journal']['path'],
                size=osd['journal']['human_readable_size'],
                percent=osd['journal']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario are
        met; raise an error if the provided devices would not work.
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.data_devs, osds_per_device=self.osds_per_device)

        # validate collocation
        validators.minimum_device_collocated_size(
            self.data_devs, self.journal_size, osds_per_device=self.osds_per_device
        )

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        if self.osd_ids:
            self._validate_osd_ids()

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        # choose whichever is the one group we have to compute against
        osds = self.computed['osds']
        for device in self.data_devs:
            for osd in range(self.osds_per_device):
                device_size = disk.Size(b=device.lvm_size.b)
                osd_size = device_size / self.osds_per_device
                journal_size = self.journal_size
                data_size = osd_size - journal_size
                data_percentage = data_size * 100 / device_size
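                # Worked example (illustrative): a 1 TB data device with
                # osds_per_device=2 and a 5 GB journal yields ~500 GB per OSD,
                # of which ~495 GB becomes the data LV (reported as ~49% of
                # the whole device) and 5 GB the journal LV.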
                osd = {'data': {}, 'journal': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = data_size.b.as_int()
                osd['data']['parts'] = self.osds_per_device
                osd['data']['percentage'] = int(data_percentage)
                osd['data']['human_readable_size'] = str(data_size)
                osd['journal']['path'] = device.abspath
                osd['journal']['size'] = journal_size.b.as_int()
                osd['journal']['percentage'] = int(100 - data_percentage)
                osd['journal']['human_readable_size'] = str(journal_size)

                if self.osd_ids:
                    osd['osd_id'] = self.osd_ids.pop()

                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create 1 vg per data device first, mapping them to the device path;
        # when the lvs get created later, it can create as many as needed,
        # including the journals since they are going to be collocated
        for osd in self.computed['osds']:
            vg = device_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
                device_vgs[osd['data']['path']] = vg

        # create the lvs from the per-device vg created in the beginning
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            device_vg = device_vgs[data_path]
            data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
            journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
            data_uuid = system.generate_uuid()
            data_lv = lvm.create_lv(
                'osd-data', data_uuid, vg=device_vg, extents=data_lv_extents)
            journal_uuid = system.generate_uuid()
            journal_lv = lvm.create_lv(
                'osd-journal', journal_uuid, vg=device_vg, extents=journal_lv_extents)

            command = ['--filestore', '--data']
            command.append('%s/%s' % (device_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])
            if 'osd_id' in osd:
                command.extend(['--osd-id', osd['osd_id']])

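            # The argv assembled above is handed to the `lvm prepare`/`lvm create`
            # subcommand, i.e. roughly equivalent to (illustrative):
            #   ceph-volume lvm create --filestore \
            #       --data {vg}/{data_lv} --journal {vg}/{journal_lv} [flags]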
            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()


class MixedType(MixedStrategy):
    """
    Supports HDDs mixed with SSDs: journals will be placed on SSDs, while
    HDDs will be used fully for data.

    If an existing common VG is detected on the SSDs, it will be extended with
    any blank SSDs; otherwise it will be used directly.
    """
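    # Resulting layout (sketch): data LVs live in one 'ceph-data' VG per HDD,
    # while the journal LVs are carved out of a shared 'ceph-journals' VG (or
    # an already existing common VG) on the SSDs (see execute() below).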


    def __init__(self, args, data_devs, journal_devs):
        super(MixedType, self).__init__(args, data_devs, journal_devs)
        self.blank_journal_devs = []
        self.journals_needed = len(self.data_devs) * self.osds_per_device
        self.journal_size = get_journal_size(args)
        self.system_vgs = lvm.VolumeGroups()
        self.validate_compute()

    @classmethod
    def with_auto_devices(cls, args, devices):
        data_devs, journal_devs = cls.split_devices_rotational(devices)
        return cls(args, data_devs, journal_devs)

    @staticmethod
    def type():
        return "filestore.MixedType"

    def report_pretty(self, filtered_devices):
        string = ""
        if filtered_devices:
            string += templates.filtered_devices(filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds
        )

        string += templates.ssd_volume_group.format(
            target='journal',
            total_lv_size=str(self.total_available_journal_space),
            total_lvs=self.journals_needed,
            block_db_devices=', '.join([d.path for d in self.db_or_journal_devs]),
            lv_size=str(self.journal_size),
            total_osds=self.journals_needed
        )

        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            if 'osd_id' in osd:
                string += templates.osd_reused_id.format(
                    id_=osd['osd_id'])
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )
            string += templates.osd_component.format(
                _type='[journal]',
                path=osd['journal']['path'],
                size=osd['journal']['human_readable_size'],
                percent=osd['journal']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario are
        met; raise an error if the provided devices would not work.
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.data_devs)

        # do not allow non-common VG to continue
        validators.has_common_vg(self.db_or_journal_devs)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg(self.db_or_journal_devs)

        # find how many journals are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(b=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # split the fast devices into SSDs that already belong to a VG and
        # blank (non-member) SSDs
        vg_ssds = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
        self.blank_journal_devs = set(self.db_or_journal_devs).difference(vg_ssds)
        self.total_blank_journal_dev_size = disk.Size(b=0)
        for blank_journal_dev in self.blank_journal_devs:
            self.total_blank_journal_dev_size += disk.Size(b=blank_journal_dev.lvm_size.b)

        self.total_available_journal_space = self.total_blank_journal_dev_size + common_vg_size
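        # Worked example (illustrative): one blank 200 GB SSD plus a common VG
        # with 50 GB free gives 250 GB of journal space; with 5 GB journals
        # that is enough for up to 50 journal LVs.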

        try:
            self.vg_extents = lvm.sizing(
                self.total_available_journal_space.b, size=self.journal_size.b * self.osds_per_device
            )
        except SizeAllocationError:
            msg = "Not enough space in fast devices (%s) to create %s x %s journal LV"
            raise RuntimeError(
                msg % (self.total_available_journal_space, self.osds_per_device, self.journal_size)
            )

        # validate that the number of journals possible is enough for the
        # number of OSDs proposed
        total_journals_possible = self.total_available_journal_space / self.journal_size
        if self.osds_per_device > total_journals_possible:
            msg = "Not enough space (%s) to create %s x %s journal LVs" % (
                self.total_available_journal_space, self.journals_needed, self.journal_size
            )
            raise RuntimeError(msg)

        if self.osd_ids:
            self._validate_osd_ids()

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        osds = self.computed['osds']

        vg_free = int(self.total_available_journal_space.gb)
        if not self.common_vg:
            # there isn't a common vg, so a new one must be created with all
            # the blank SSDs
            self.computed['vg'] = {
                'devices': ", ".join([ssd.abspath for ssd in self.blank_journal_devs]),
                'parts': self.journals_needed,
                'percentages': self.vg_extents['percentages'],
                'sizes': self.journal_size.b.as_int(),
                'size': self.total_blank_journal_dev_size.b.as_int(),
                'human_readable_sizes': str(self.journal_size),
                'human_readable_size': str(self.total_available_journal_space),
            }
            vg_name = 'lv/vg'
        else:
            vg_name = self.common_vg.name

        for device in self.data_devs:
            for osd in range(self.osds_per_device):
                device_size = disk.Size(b=device.lvm_size.b)
                data_size = device_size / self.osds_per_device
                osd = {'data': {}, 'journal': {}}
                osd['data']['path'] = device.path
                osd['data']['size'] = data_size.b.as_int()
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(data_size)
                osd['journal']['path'] = 'vg: %s' % vg_name
                osd['journal']['size'] = self.journal_size.b.as_int()
                osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free)
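                # the journal percentage is relative to the total journal space
                # on the fast devices, e.g. (illustrative) a 5 GB journal out of
                # 250 GB of available journal space reports as 2%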
                osd['journal']['human_readable_size'] = str(self.journal_size)

                if self.osd_ids:
                    osd['osd_id'] = self.osd_ids.pop(0)

                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            journal_vg = lvm.create_vg(blank_journal_dev_paths, name_prefix='ceph-journals')
        # a vg exists that can be extended
        elif self.common_vg and blank_journal_dev_paths:
            journal_vg = lvm.extend_vg(self.common_vg, blank_journal_dev_paths)
        # one common vg with nothing else to extend can be used directly
        else:
            journal_vg = self.common_vg

        journal_size = prepare.get_journal_size(lv_format=False)

        # create 1 vg per data device first, mapping them to the device path;
        # when the lv gets created later, it can create as many as needed (or
        # even just 1)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
                data_vgs[osd['data']['path']] = vg

        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(parts=1)['extents']
            data_uuid = system.generate_uuid()
            data_lv = lvm.create_lv(
                'osd-data', data_uuid, vg=data_vg, extents=data_lv_extents)
            journal_uuid = system.generate_uuid()
            journal_lv = lvm.create_lv(
                'osd-journal', journal_uuid, vg=journal_vg, size=journal_size)

            command = ['--filestore', '--data']
            command.append('%s/%s' % (data_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])
            if 'osd_id' in osd:
                command.extend(['--osd-id', osd['osd_id']])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
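

# Usage context (illustrative): these strategy classes are normally driven by
# the `ceph-volume lvm batch` subcommand, e.g.
#   ceph-volume lvm batch --filestore /dev/sdb /dev/sdc               # SingleType
#   ceph-volume lvm batch --filestore /dev/sdb /dev/sdc /dev/nvme0n1  # MixedType
# where rotational devices become data devices and non-rotational devices hold
# the journals (see with_auto_devices / split_devices_rotational above).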