from __future__ import print_function
import json
from ceph_volume.api import lvm
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
from ceph_volume.exceptions import SizeAllocationError
from ceph_volume.util import disk, prepare
from ceph_volume.util import templates
from . import validators
def get_journal_size(args):
    """
    Helper for Filestore strategies, to prefer the --journal-size value from
    the CLI over anything that might be in a ceph configuration file (if any).

    :param args: parsed CLI namespace; ``journal_size`` is expected in
                 megabytes (it is passed as ``mb=`` to ``disk.Size``)
    :return: a ``disk.Size`` object for the journal
    """
    if args.journal_size:
        # CLI value wins over any ceph.conf setting
        return disk.Size(mb=args.journal_size)
    else:
        # fall back to the configuration-derived size, unformatted
        return prepare.get_journal_size(lv_format=False)
class SingleType(object):
    """
    Support for all SSDs, or all HDDs, data and journal LVs will be colocated
    on the same device.
    """

    def __init__(self, devices, args):
        self.args = args
        self.osds_per_device = args.osds_per_device
        self.devices = devices
        # sysfs reports the rotational flag as a string, not an int
        self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
        self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
        self.computed = {'osds': [], 'vgs': [], 'filtered_devices': args.filtered_devices}
        self.journal_size = get_journal_size(args)
        if self.devices:
            self.validate()
            self.compute()
        else:
            # nothing to do: report that no changes would be made
            self.computed["changed"] = False

    @staticmethod
    def type():
        return "filestore.SingleType"

    @property
    def total_osds(self):
        # only one group (hdds or ssds) is ever populated for this strategy
        if self.hdds:
            return len(self.hdds) * self.osds_per_device
        else:
            return len(self.ssds) * self.osds_per_device

    def report_json(self):
        # machine-readable view of the computed plan
        print(json.dumps(self.computed, indent=4, sort_keys=True))

    def report_pretty(self):
        # human-readable view of the computed plan, built from templates
        string = ""
        if self.args.filtered_devices:
            string += templates.filtered_devices(self.args.filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds
        )
        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )
            string += templates.osd_component.format(
                _type='[journal]',
                path=osd['journal']['path'],
                size=osd['journal']['human_readable_size'],
                percent=osd['journal']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario is
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)

        # validate collocation: the device must fit data *and* journal
        if self.hdds:
            validators.minimum_device_collocated_size(
                self.hdds, self.journal_size, osds_per_device=self.osds_per_device
            )
        else:
            validators.minimum_device_collocated_size(
                self.ssds, self.journal_size, osds_per_device=self.osds_per_device
            )

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.hdds)

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        # chose whichever is the one group we have to compute against
        devices = self.hdds or self.ssds
        osds = self.computed['osds']
        for device in devices:
            for osd in range(self.osds_per_device):
                device_size = disk.Size(b=device.sys_api['size'])
                osd_size = device_size / self.osds_per_device
                journal_size = self.journal_size
                # data gets whatever is left after the journal is carved out
                data_size = osd_size - journal_size
                data_percentage = data_size * 100 / device_size
                osd = {'data': {}, 'journal': {}}
                osd['data']['path'] = device.abspath
                osd['data']['size'] = data_size.b.as_int()
                osd['data']['parts'] = self.osds_per_device
                osd['data']['percentage'] = int(data_percentage)
                osd['data']['human_readable_size'] = str(data_size)
                osd['journal']['path'] = device.abspath
                osd['journal']['size'] = journal_size.b.as_int()
                osd['journal']['percentage'] = int(100 - data_percentage)
                osd['journal']['human_readable_size'] = str(journal_size)
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # create 1 vg per data device first, mapping them to the device path,
        # when the lvs get created later, it can create as many as needed,
        # including the journals since it is going to be collocated
        for osd in self.computed['osds']:
            vg = device_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
                device_vgs[osd['data']['path']] = vg

        # create the lvs from the per-device vg created in the beginning
        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            device_vg = device_vgs[data_path]
            data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
            journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
            data_lv = lvm.create_lv(
                'osd-data', device_vg.name, extents=data_lv_extents, uuid_name=True
            )
            journal_lv = lvm.create_lv(
                'osd-journal', device_vg.name, extents=journal_lv_extents, uuid_name=True
            )

            command = ['--filestore', '--data']
            command.append('%s/%s' % (device_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
class MixedType(object):
    """
    Supports HDDs with SSDs, journals will be placed on SSDs, while HDDs will
    be used fully for data.

    If an existing common VG is detected on SSDs, it will be extended if blank
    SSDs are used, otherwise it will be used directly.
    """

    def __init__(self, devices, args):
        self.args = args
        self.osds_per_device = args.osds_per_device
        self.devices = devices
        # sysfs reports the rotational flag as a string, not an int
        self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
        self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
        self.computed = {'osds': [], 'vg': None, 'filtered_devices': args.filtered_devices}
        # populated by validate() once blank SSDs are discovered
        self.blank_ssds = []
        self.journals_needed = len(self.hdds) * self.osds_per_device
        self.journal_size = get_journal_size(args)
        self.system_vgs = lvm.VolumeGroups()
        if self.devices:
            self.validate()
            self.compute()
        else:
            # nothing to do: report that no changes would be made
            self.computed["changed"] = False

    @staticmethod
    def type():
        return "filestore.MixedType"

    def report_json(self):
        # machine-readable view of the computed plan
        print(json.dumps(self.computed, indent=4, sort_keys=True))

    @property
    def total_osds(self):
        if self.hdds:
            return len(self.hdds) * self.osds_per_device
        else:
            return len(self.ssds) * self.osds_per_device

    def report_pretty(self):
        # human-readable view of the computed plan, built from templates
        string = ""
        if self.args.filtered_devices:
            string += templates.filtered_devices(self.args.filtered_devices)
        string += templates.total_osds.format(
            total_osds=self.total_osds
        )

        string += templates.ssd_volume_group.format(
            target='journal',
            total_lv_size=str(self.total_available_journal_space),
            total_lvs=self.journals_needed,
            block_db_devices=', '.join([d.path for d in self.ssds]),
            lv_size=str(self.journal_size),
            total_osds=self.journals_needed
        )

        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )
            string += templates.osd_component.format(
                _type='[journal]',
                path=osd['journal']['path'],
                size=osd['journal']['human_readable_size'],
                percent=osd['journal']['percentage'],
            )

        print(string)

    def get_common_vg(self):
        # find all the vgs associated with the current device
        for ssd in self.ssds:
            for pv in ssd.pvs_api:
                vg = self.system_vgs.get(vg_name=pv.vg_name)
                if not vg:
                    continue
                # this should give us just one VG, it would've been caught by
                # the validator otherwise
                return vg

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario is
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)

        # make sure that data devices do not have any LVs
        validators.no_lvm_membership(self.hdds)

        # do not allow non-common VG to continue
        validators.has_common_vg(self.ssds)

        # find the common VG to calculate how much is available
        self.common_vg = self.get_common_vg()

        # find how many journals are possible from the common VG
        if self.common_vg:
            common_vg_size = disk.Size(gb=self.common_vg.free)
        else:
            common_vg_size = disk.Size(gb=0)

        # blank SSDs (not LVM members) can be added to the common VG
        self.vg_ssds = set([d for d in self.ssds if d.is_lvm_member])
        self.blank_ssds = set(self.ssds).difference(self.vg_ssds)
        self.total_blank_ssd_size = disk.Size(b=0)
        for blank_ssd in self.blank_ssds:
            self.total_blank_ssd_size += disk.Size(b=blank_ssd.sys_api['size'])

        self.total_available_journal_space = self.total_blank_ssd_size + common_vg_size

        try:
            self.vg_extents = lvm.sizing(
                self.total_available_journal_space.b, size=self.journal_size.b * self.osds_per_device
            )
        except SizeAllocationError:
            msg = "Not enough space in fast devices (%s) to create %s x %s journal LV"
            raise RuntimeError(
                msg % (self.total_available_journal_space, self.osds_per_device, self.journal_size)
            )

        # validate that number of journals possible are enough for number of
        # OSDs proposed
        total_journals_possible = self.total_available_journal_space / self.journal_size
        if self.osds_per_device > total_journals_possible:
            msg = "Not enough space (%s) to create %s x %s journal LVs" % (
                self.total_available_journal_space, self.journals_needed, self.journal_size
            )
            raise RuntimeError(msg)

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        osds = self.computed['osds']

        vg_free = int(self.total_available_journal_space.gb)
        if not self.common_vg:
            # there isn't a common vg, so a new one must be created with all
            # the blank SSDs
            self.computed['vg'] = {
                'devices': ", ".join([ssd.abspath for ssd in self.blank_ssds]),
                'parts': self.journals_needed,
                'percentages': self.vg_extents['percentages'],
                'sizes': self.journal_size.b.as_int(),
                'size': self.total_blank_ssd_size.b.as_int(),
                'human_readable_sizes': str(self.journal_size),
                'human_readable_size': str(self.total_available_journal_space),
            }
            # placeholder name, the real VG doesn't exist yet
            vg_name = 'lv/vg'
        else:
            vg_name = self.common_vg.name

        for device in self.hdds:
            for osd in range(self.osds_per_device):
                device_size = disk.Size(b=device.sys_api['size'])
                data_size = device_size / self.osds_per_device
                osd = {'data': {}, 'journal': {}}
                osd['data']['path'] = device.path
                osd['data']['size'] = data_size.b.as_int()
                osd['data']['percentage'] = 100 / self.osds_per_device
                osd['data']['human_readable_size'] = str(data_size)
                osd['journal']['path'] = 'vg: %s' % vg_name
                osd['journal']['size'] = self.journal_size.b.as_int()
                osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free)
                osd['journal']['human_readable_size'] = str(self.journal_size)
                osds.append(osd)

        self.computed['changed'] = len(osds) > 0

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals')
        # a vg exists that can be extended
        elif self.common_vg and blank_ssd_paths:
            journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
        # one common vg with nothing else to extend can be used directly
        else:
            journal_vg = self.common_vg

        journal_size = prepare.get_journal_size(lv_format=True)

        # create 1 vg per data device first, mapping them to the device path,
        # when the lv gets created later, it can create as many as needed (or
        # rely on the vg)
        for osd in self.computed['osds']:
            vg = data_vgs.get(osd['data']['path'])
            if not vg:
                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
                data_vgs[osd['data']['path']] = vg

        for osd in self.computed['osds']:
            data_path = osd['data']['path']
            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
            data_vg = data_vgs[data_path]
            data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
            data_lv = lvm.create_lv(
                'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
            )
            journal_lv = lvm.create_lv(
                'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
            )

            command = ['--filestore', '--data']
            command.append('%s/%s' % (data_vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()