from __future__ import print_function
import argparse
import logging
import os
from textwrap import dedent
from ceph_volume import process, conf, decorators, terminal, __release__, configuration
from ceph_volume.util import system, disk
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import encryption as encryption_utils
from ceph_volume.systemd import systemctl
from ceph_volume.api import lvm as api
from .listing import direct_report
# Module-level logger namespaced to this module, per stdlib convention.
logger = logging.getLogger(__name__)
def activate_filestore(lvs, no_systemd=False):
    """
    Mount a filestore OSD's data LV, wire up its journal symlink, and
    (optionally) enable and start its systemd units.

    :param lvs: a ``Volumes`` collection, pre-filtered to a single OSD
    :param no_systemd: when ``True``, skip enabling/starting systemd units
    :raises RuntimeError: if no data LV or no journal device can be found
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'data'})
    if not osd_lv:
        raise RuntimeError('Unable to find a data LV for filestore activation')
    is_encrypted = osd_lv.tags.get('ceph.encrypted', '0') == '1'
    is_vdo = osd_lv.tags.get('ceph.vdo', '0')

    osd_id = osd_lv.tags['ceph.osd_id']
    # point conf at the cluster this OSD was prepared for
    configuration.load_ceph_conf_path(osd_lv.tags['ceph.cluster_name'])

    # it may have a volume with a journal
    osd_journal_lv = lvs.get(lv_tags={'ceph.type': 'journal'})
    # TODO: add sensible error reporting if this is ever the case
    # blow up with a KeyError if this doesn't exist
    osd_fsid = osd_lv.tags['ceph.osd_fsid']
    if not osd_journal_lv:
        # must be a disk partition, by querying blkid by the uuid we are ensuring that the
        # device path is always correct
        journal_uuid = osd_lv.tags['ceph.journal_uuid']
        osd_journal = disk.get_device_from_partuuid(journal_uuid)
    else:
        journal_uuid = osd_journal_lv.lv_uuid
        osd_journal = osd_lv.tags['ceph.journal_device']

    if not osd_journal:
        raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id)

    # this is done here, so that previous checks that ensure path availability
    # and correctness can still be enforced, and report if any issues are found
    if is_encrypted:
        lockbox_secret = osd_lv.tags['ceph.cephx_lockbox_secret']
        # this keyring writing is idempotent
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
        encryption_utils.luks_open(dmcrypt_secret, osd_lv.lv_path, osd_lv.lv_uuid)
        encryption_utils.luks_open(dmcrypt_secret, osd_journal, journal_uuid)
        # use the decrypted mapper devices from here on, not the raw paths
        osd_journal = '/dev/mapper/%s' % journal_uuid
        source = '/dev/mapper/%s' % osd_lv.lv_uuid
    else:
        source = osd_lv.lv_path

    # mount the OSD data device onto its directory (skipped when already mounted)
    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.device_is_mounted(source, destination=destination):
        prepare_utils.mount_osd(source, osd_id, is_vdo=is_vdo)

    # ensure that the OSD destination is always chowned properly
    system.chown(destination)

    # always re-do the symlink regardless if it exists, so that the journal
    # device path that may have changed can be mapped correctly every time
    destination = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
    process.run(['ln', '-snf', osd_journal, destination])

    # make sure that the journal has proper permissions
    system.chown(osd_journal)

    if no_systemd is False:
        # enable the ceph-volume unit for this OSD
        systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

        # enable and start the OSD unit itself
        systemctl.enable_osd(osd_id)
        systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
def get_osd_device_path(osd_lv, lvs, device_type, dmcrypt_secret=None):
    """
    ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that
    we can query ``lvs`` (a ``Volumes`` object) and fallback to querying the uuid
    if that is not present.

    Return a path if possible, failing to do that a ``None``, since some of these devices
    are optional.

    :raises RuntimeError: when a uuid tag exists but neither an LV nor a
        partition can be resolved from it
    """
    # NOTE(review): ``osd_lv`` is immediately re-fetched here, shadowing the
    # parameter — presumably intentional; confirm against callers.
    osd_lv = lvs.get(lv_tags={'ceph.type': 'block'})
    is_encrypted = osd_lv.tags.get('ceph.encrypted', '0') == '1'
    logger.debug('Found block device (%s) with encryption: %s', osd_lv.name, is_encrypted)
    uuid_tag = 'ceph.%s_uuid' % device_type
    device_uuid = osd_lv.tags.get(uuid_tag)
    if not device_uuid:
        # this (optional) device was never configured for the OSD
        return None

    device_lv = lvs.get(lv_tags={'ceph.type': device_type})
    if device_lv:
        if is_encrypted:
            encryption_utils.luks_open(dmcrypt_secret, device_lv.lv_path, device_uuid)
            return '/dev/mapper/%s' % device_uuid
        return device_lv.lv_path
    else:
        # this could be a regular device, so query it with blkid
        physical_device = disk.get_device_from_partuuid(device_uuid)
        if physical_device:
            if is_encrypted:
                encryption_utils.luks_open(dmcrypt_secret, physical_device, device_uuid)
                return '/dev/mapper/%s' % device_uuid
            return physical_device

    raise RuntimeError('could not find %s with uuid %s' % (device_type, device_uuid))
def activate_bluestore(lvs, no_systemd=False):
    """
    Mount the tmpfs OSD directory for a bluestore OSD, prime it with
    ``ceph-bluestore-tool``, re-create the block/db/wal symlinks, and
    (optionally) enable and start its systemd units.

    :param lvs: a ``Volumes`` collection, pre-filtered to a single OSD
    :param no_systemd: when ``True``, skip enabling/starting systemd units
    :raises RuntimeError: if no ``block`` LV can be found
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'block'})
    if not osd_lv:
        raise RuntimeError('could not find a bluestore OSD to activate')
    is_encrypted = osd_lv.tags.get('ceph.encrypted', '0') == '1'
    dmcrypt_secret = None
    osd_id = osd_lv.tags['ceph.osd_id']
    conf.cluster = osd_lv.tags['ceph.cluster_name']
    osd_fsid = osd_lv.tags['ceph.osd_fsid']

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=True)
    # XXX This needs to be removed once ceph-bluestore-tool can deal with
    # symlinks that exist in the osd dir
    for link_name in ['block', 'block.db', 'block.wal']:
        link_path = os.path.join(osd_path, link_name)
        if os.path.exists(link_path):
            os.unlink(os.path.join(osd_path, link_name))
    # encryption is handled here, before priming the OSD dir
    if is_encrypted:
        osd_lv_path = '/dev/mapper/%s' % osd_lv.lv_uuid
        lockbox_secret = osd_lv.tags['ceph.cephx_lockbox_secret']
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
        encryption_utils.luks_open(dmcrypt_secret, osd_lv.lv_path, osd_lv.lv_uuid)
    else:
        osd_lv_path = osd_lv.lv_path

    # db/wal are optional; these come back as None when not configured
    db_device_path = get_osd_device_path(osd_lv, lvs, 'db', dmcrypt_secret=dmcrypt_secret)
    wal_device_path = get_osd_device_path(osd_lv, lvs, 'wal', dmcrypt_secret=dmcrypt_secret)

    # Once symlinks are removed, the osd dir can be 'primed again. chown first,
    # regardless of what currently exists so that ``prime-osd-dir`` can succeed
    # even if permissions are somehow messed up
    system.chown(osd_path)
    prime_command = [
        'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
        'prime-osd-dir', '--dev', osd_lv_path,
        '--path', osd_path]

    if __release__ != "luminous":
        # mon-config changes are not available in Luminous
        prime_command.append('--no-mon-config')

    process.run(prime_command)
    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')])
    system.chown(os.path.join(osd_path, 'block'))
    system.chown(osd_path)
    if db_device_path:
        destination = os.path.join(osd_path, 'block.db')
        process.run(['ln', '-snf', db_device_path, destination])
        system.chown(db_device_path)
        system.chown(destination)
    if wal_device_path:
        destination = os.path.join(osd_path, 'block.wal')
        process.run(['ln', '-snf', wal_device_path, destination])
        system.chown(wal_device_path)
        system.chown(destination)

    if no_systemd is False:
        # enable the ceph-volume unit for this OSD
        systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

        # enable and start the OSD unit itself
        systemctl.enable_osd(osd_id)
        systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
202 class Activate(object):
204 help = 'Discover and mount the LVM device associated with an OSD ID and start the Ceph OSD'
206 def __init__(self
, argv
):
209 @decorators.needs_root
210 def activate_all(self
, args
):
211 listed_osds
= direct_report()
213 for osd_id
, devices
in listed_osds
.items():
214 # the metadata for all devices in each OSD will contain
215 # the FSID which is required for activation
216 for device
in devices
:
217 fsid
= device
.get('tags', {}).get('ceph.osd_fsid')
222 terminal
.warning('Was unable to find any OSDs to activate')
223 terminal
.warning('Verify OSDs are present with "ceph-volume lvm list"')
225 for osd_fsid
, osd_id
in osds
.items():
226 if systemctl
.osd_is_active(osd_id
):
228 'OSD ID %s FSID %s process is active. Skipping activation' % (osd_id
, osd_fsid
)
231 terminal
.info('Activating OSD ID %s FSID %s' % (osd_id
, osd_fsid
))
232 self
.activate(args
, osd_id
=osd_id
, osd_fsid
=osd_fsid
)
234 @decorators.needs_root
235 def activate(self
, args
, osd_id
=None, osd_fsid
=None):
237 :param args: The parsed arguments coming from the CLI
238 :param osd_id: When activating all, this gets populated with an existing OSD ID
239 :param osd_fsid: When activating all, this gets populated with an existing OSD FSID
241 osd_id
= osd_id
if osd_id
is not None else args
.osd_id
242 osd_fsid
= osd_fsid
if osd_fsid
is not None else args
.osd_fsid
245 # filter them down for the OSD ID and FSID we need to activate
246 if osd_id
and osd_fsid
:
247 lvs
.filter(lv_tags
={'ceph.osd_id': osd_id
, 'ceph.osd_fsid': osd_fsid
})
248 elif osd_fsid
and not osd_id
:
249 lvs
.filter(lv_tags
={'ceph.osd_fsid': osd_fsid
})
251 raise RuntimeError('could not find osd.%s with osd_fsid %s' % (osd_id
, osd_fsid
))
252 # This argument is only available when passed in directly or via
253 # systemd, not when ``create`` is being used
254 if getattr(args
, 'auto_detect_objectstore', False):
255 logger
.info('auto detecting objectstore')
256 # may get multiple lvs, so can't do lvs.get() calls here
258 has_journal
= lv
.tags
.get('ceph.journal_uuid')
260 logger
.info('found a journal associated with the OSD, assuming filestore')
261 return activate_filestore(lvs
, no_systemd
=args
.no_systemd
)
262 logger
.info('unable to find a journal associated with the OSD, assuming bluestore')
263 return activate_bluestore(lvs
, no_systemd
=args
.no_systemd
)
265 activate_bluestore(lvs
, no_systemd
=args
.no_systemd
)
267 activate_filestore(lvs
, no_systemd
=args
.no_systemd
)
270 sub_command_help
= dedent("""
271 Activate OSDs by discovering them with LVM and mounting them in their
272 appropriate destination:
274 ceph-volume lvm activate {ID} {FSID}
276 The lvs associated with the OSD need to have been prepared previously,
277 so that all needed tags and metadata exist.
279 When migrating OSDs, or a multiple-osd activation is needed, the
280 ``--all`` flag can be used instead of the individual ID and FSID:
282 ceph-volume lvm activate --all
285 parser
= argparse
.ArgumentParser(
286 prog
='ceph-volume lvm activate',
287 formatter_class
=argparse
.RawDescriptionHelpFormatter
,
288 description
=sub_command_help
,
295 help='The ID of the OSD, usually an integer, like 0'
301 help='The FSID of the OSD, similar to a SHA1'
304 '--auto-detect-objectstore',
306 help='Autodetect the objectstore by inspecting the OSD',
311 help='bluestore objectstore (default)',
316 help='filestore objectstore',
322 help='Activate all OSDs found in the system',
328 help='Skip creating and enabling systemd units and starting OSD services',
330 if len(self
.argv
) == 0:
331 print(sub_command_help
)
333 args
= parser
.parse_args(self
.argv
)
334 # Default to bluestore here since defaulting it in add_argument may
335 # cause both to be True
336 if not args
.bluestore
and not args
.filestore
:
337 args
.bluestore
= True
338 if args
.activate_all
:
339 self
.activate_all(args
)