from __future__ import print_function
import argparse
import logging
import os
from textwrap import dedent
from ceph_volume import process, conf, decorators, terminal
from ceph_volume.util import system, disk
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import encryption as encryption_utils
from ceph_volume.systemd import systemctl
from ceph_volume.api import lvm as api
from .listing import direct_report
15 logger
= logging
.getLogger(__name__
)
18 def activate_filestore(lvs
, no_systemd
=False):
20 osd_lv
= lvs
.get(lv_tags
={'ceph.type': 'data'})
22 raise RuntimeError('Unable to find a data LV for filestore activation')
23 is_encrypted
= osd_lv
.tags
.get('ceph.encrypted', '0') == '1'
24 is_vdo
= osd_lv
.tags
.get('ceph.vdo', '0')
26 osd_id
= osd_lv
.tags
['ceph.osd_id']
27 conf
.cluster
= osd_lv
.tags
['ceph.cluster_name']
28 # it may have a volume with a journal
29 osd_journal_lv
= lvs
.get(lv_tags
={'ceph.type': 'journal'})
30 # TODO: add sensible error reporting if this is ever the case
31 # blow up with a KeyError if this doesn't exist
32 osd_fsid
= osd_lv
.tags
['ceph.osd_fsid']
33 if not osd_journal_lv
:
34 # must be a disk partition, by quering blkid by the uuid we are ensuring that the
35 # device path is always correct
36 journal_uuid
= osd_lv
.tags
['ceph.journal_uuid']
37 osd_journal
= disk
.get_device_from_partuuid(journal_uuid
)
39 journal_uuid
= osd_journal_lv
.lv_uuid
40 osd_journal
= osd_lv
.tags
['ceph.journal_device']
43 raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id
)
45 # this is done here, so that previous checks that ensure path availability
46 # and correctness can still be enforced, and report if any issues are found
48 lockbox_secret
= osd_lv
.tags
['ceph.cephx_lockbox_secret']
49 # this keyring writing is idempotent
50 encryption_utils
.write_lockbox_keyring(osd_id
, osd_fsid
, lockbox_secret
)
51 dmcrypt_secret
= encryption_utils
.get_dmcrypt_key(osd_id
, osd_fsid
)
52 encryption_utils
.luks_open(dmcrypt_secret
, osd_lv
.lv_path
, osd_lv
.lv_uuid
)
53 encryption_utils
.luks_open(dmcrypt_secret
, osd_journal
, journal_uuid
)
55 osd_journal
= '/dev/mapper/%s' % journal_uuid
56 source
= '/dev/mapper/%s' % osd_lv
.lv_uuid
58 source
= osd_lv
.lv_path
61 destination
= '/var/lib/ceph/osd/%s-%s' % (conf
.cluster
, osd_id
)
62 if not system
.device_is_mounted(source
, destination
=destination
):
63 prepare_utils
.mount_osd(source
, osd_id
, is_vdo
=is_vdo
)
65 # always re-do the symlink regardless if it exists, so that the journal
66 # device path that may have changed can be mapped correctly every time
67 destination
= '/var/lib/ceph/osd/%s-%s/journal' % (conf
.cluster
, osd_id
)
68 process
.run(['ln', '-snf', osd_journal
, destination
])
70 # make sure that the journal has proper permissions
71 system
.chown(osd_journal
)
73 if no_systemd
is False:
74 # enable the ceph-volume unit for this OSD
75 systemctl
.enable_volume(osd_id
, osd_fsid
, 'lvm')
78 systemctl
.start_osd(osd_id
)
79 terminal
.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id
)
82 def get_osd_device_path(osd_lv
, lvs
, device_type
, dmcrypt_secret
=None):
84 ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that
85 we can query ``lvs`` (a ``Volumes`` object) and fallback to querying the uuid
86 if that is not present.
88 Return a path if possible, failing to do that a ``None``, since some of these devices
91 osd_lv
= lvs
.get(lv_tags
={'ceph.type': 'block'})
92 is_encrypted
= osd_lv
.tags
.get('ceph.encrypted', '0') == '1'
93 logger
.debug('Found block device (%s) with encryption: %s', osd_lv
.name
, is_encrypted
)
94 uuid_tag
= 'ceph.%s_uuid' % device_type
95 device_uuid
= osd_lv
.tags
.get(uuid_tag
)
99 device_lv
= lvs
.get(lv_uuid
=device_uuid
)
102 encryption_utils
.luks_open(dmcrypt_secret
, device_lv
.lv_path
, device_uuid
)
103 return '/dev/mapper/%s' % device_uuid
104 return device_lv
.lv_path
106 # this could be a regular device, so query it with blkid
107 physical_device
= disk
.get_device_from_partuuid(device_uuid
)
108 if physical_device
and is_encrypted
:
109 encryption_utils
.luks_open(dmcrypt_secret
, physical_device
, device_uuid
)
110 return '/dev/mapper/%s' % device_uuid
111 return physical_device
or None
115 def activate_bluestore(lvs
, no_systemd
=False):
117 osd_lv
= lvs
.get(lv_tags
={'ceph.type': 'block'})
119 raise RuntimeError('could not find a bluestore OSD to activate')
120 is_encrypted
= osd_lv
.tags
.get('ceph.encrypted', '0') == '1'
121 dmcrypt_secret
= None
122 osd_id
= osd_lv
.tags
['ceph.osd_id']
123 conf
.cluster
= osd_lv
.tags
['ceph.cluster_name']
124 osd_fsid
= osd_lv
.tags
['ceph.osd_fsid']
126 # mount on tmpfs the osd directory
127 osd_path
= '/var/lib/ceph/osd/%s-%s' % (conf
.cluster
, osd_id
)
128 if not system
.path_is_mounted(osd_path
):
129 # mkdir -p and mount as tmpfs
130 prepare_utils
.create_osd_path(osd_id
, tmpfs
=True)
131 # XXX This needs to be removed once ceph-bluestore-tool can deal with
132 # symlinks that exist in the osd dir
133 for link_name
in ['block', 'block.db', 'block.wal']:
134 link_path
= os
.path
.join(osd_path
, link_name
)
135 if os
.path
.exists(link_path
):
136 os
.unlink(os
.path
.join(osd_path
, link_name
))
137 # encryption is handled here, before priming the OSD dir
139 osd_lv_path
= '/dev/mapper/%s' % osd_lv
.lv_uuid
140 lockbox_secret
= osd_lv
.tags
['ceph.cephx_lockbox_secret']
141 encryption_utils
.write_lockbox_keyring(osd_id
, osd_fsid
, lockbox_secret
)
142 dmcrypt_secret
= encryption_utils
.get_dmcrypt_key(osd_id
, osd_fsid
)
143 encryption_utils
.luks_open(dmcrypt_secret
, osd_lv
.lv_path
, osd_lv
.lv_uuid
)
145 osd_lv_path
= osd_lv
.lv_path
147 db_device_path
= get_osd_device_path(osd_lv
, lvs
, 'db', dmcrypt_secret
=dmcrypt_secret
)
148 wal_device_path
= get_osd_device_path(osd_lv
, lvs
, 'wal', dmcrypt_secret
=dmcrypt_secret
)
150 # Once symlinks are removed, the osd dir can be 'primed again.
152 'ceph-bluestore-tool', '--cluster=%s' % conf
.cluster
,
153 'prime-osd-dir', '--dev', osd_lv_path
,
155 # always re-do the symlink regardless if it exists, so that the block,
156 # block.wal, and block.db devices that may have changed can be mapped
157 # correctly every time
158 process
.run(['ln', '-snf', osd_lv_path
, os
.path
.join(osd_path
, 'block')])
159 system
.chown(os
.path
.join(osd_path
, 'block'))
160 system
.chown(osd_path
)
162 destination
= os
.path
.join(osd_path
, 'block.db')
163 process
.run(['ln', '-snf', db_device_path
, destination
])
164 system
.chown(db_device_path
)
165 system
.chown(destination
)
167 destination
= os
.path
.join(osd_path
, 'block.wal')
168 process
.run(['ln', '-snf', wal_device_path
, destination
])
169 system
.chown(wal_device_path
)
170 system
.chown(destination
)
172 if no_systemd
is False:
173 # enable the ceph-volume unit for this OSD
174 systemctl
.enable_volume(osd_id
, osd_fsid
, 'lvm')
177 systemctl
.start_osd(osd_id
)
178 terminal
.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id
)
181 class Activate(object):
183 help = 'Discover and mount the LVM device associated with an OSD ID and start the Ceph OSD'
185 def __init__(self
, argv
):
188 @decorators.needs_root
189 def activate_all(self
, args
):
190 listed_osds
= direct_report()
192 for osd_id
, devices
in listed_osds
.items():
193 # the metadata for all devices in each OSD will contain
194 # the FSID which is required for activation
195 for device
in devices
:
196 fsid
= device
.get('tags', {}).get('ceph.osd_fsid')
201 terminal
.warning('Was unable to find any OSDs to activate')
202 terminal
.warning('Verify OSDs are present with "ceph-volume lvm list"')
204 for osd_fsid
, osd_id
in osds
.items():
205 if systemctl
.osd_is_active(osd_id
):
207 'OSD ID %s FSID %s process is active. Skipping activation' % (osd_id
, osd_fsid
)
210 terminal
.info('Activating OSD ID %s FSID %s' % (osd_id
, osd_fsid
))
211 self
.activate(args
, osd_id
=osd_id
, osd_fsid
=osd_fsid
)
213 @decorators.needs_root
214 def activate(self
, args
, osd_id
=None, osd_fsid
=None):
216 :param args: The parsed arguments coming from the CLI
217 :param osd_id: When activating all, this gets populated with an existing OSD ID
218 :param osd_fsid: When activating all, this gets populated with an existing OSD FSID
220 osd_id
= osd_id
if osd_id
is not None else args
.osd_id
221 osd_fsid
= osd_fsid
if osd_fsid
is not None else args
.osd_fsid
224 # filter them down for the OSD ID and FSID we need to activate
225 if osd_id
and osd_fsid
:
226 lvs
.filter(lv_tags
={'ceph.osd_id': osd_id
, 'ceph.osd_fsid': osd_fsid
})
227 elif osd_fsid
and not osd_id
:
228 lvs
.filter(lv_tags
={'ceph.osd_fsid': osd_fsid
})
230 raise RuntimeError('could not find osd.%s with fsid %s' % (osd_id
, osd_fsid
))
231 # This argument is only available when passed in directly or via
232 # systemd, not when ``create`` is being used
233 if getattr(args
, 'auto_detect_objectstore', False):
234 logger
.info('auto detecting objectstore')
235 # may get multiple lvs, so can't do lvs.get() calls here
237 has_journal
= lv
.tags
.get('ceph.journal_uuid')
239 logger
.info('found a journal associated with the OSD, assuming filestore')
240 return activate_filestore(lvs
)
241 logger
.info('unable to find a journal associated with the OSD, assuming bluestore')
242 return activate_bluestore(lvs
)
244 activate_bluestore(lvs
, no_systemd
=args
.no_systemd
)
246 activate_filestore(lvs
, no_systemd
=args
.no_systemd
)
249 sub_command_help
= dedent("""
250 Activate OSDs by discovering them with LVM and mounting them in their
251 appropriate destination:
253 ceph-volume lvm activate {ID} {FSID}
255 The lvs associated with the OSD need to have been prepared previously,
256 so that all needed tags and metadata exist.
258 When migrating OSDs, or a multiple-osd activation is needed, the
259 ``--all`` flag can be used instead of the individual ID and FSID:
261 ceph-volume lvm activate --all
264 parser
= argparse
.ArgumentParser(
265 prog
='ceph-volume lvm activate',
266 formatter_class
=argparse
.RawDescriptionHelpFormatter
,
267 description
=sub_command_help
,
274 help='The ID of the OSD, usually an integer, like 0'
280 help='The FSID of the OSD, similar to a SHA1'
283 '--auto-detect-objectstore',
285 help='Autodetect the objectstore by inspecting the OSD',
290 help='bluestore objectstore (default)',
295 help='filestore objectstore',
301 help='Activate all OSDs found in the system',
307 help='Skip creating and enabling systemd units and starting OSD services',
309 if len(self
.argv
) == 0:
310 print(sub_command_help
)
312 args
= parser
.parse_args(self
.argv
)
313 # Default to bluestore here since defaulting it in add_argument may
314 # cause both to be True
315 if not args
.bluestore
and not args
.filestore
:
316 args
.bluestore
= True
317 if args
.activate_all
:
318 self
.activate_all(args
)