from __future__ import print_function
import argparse
import logging
import os
from textwrap import dedent
from ceph_volume import process, conf, decorators, terminal
from ceph_volume.util import system, disk
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import encryption as encryption_utils
from ceph_volume.systemd import systemctl
from ceph_volume.api import lvm as api


logger = logging.getLogger(__name__)
def activate_filestore(lvs):
    """
    Mount a filestore OSD's data LV and wire up its journal so the OSD
    service can start.

    :param lvs: a ``Volumes`` collection already filtered down to the LVs
                belonging to a single OSD (tagged with ``ceph.osd_id`` and
                ``ceph.osd_fsid``).
    :raises RuntimeError: if no data LV is found, or no journal device can
                          be detected for the OSD.
    :raises KeyError: if required ``ceph.*`` tags are missing from the LV.
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'data'})
    if not osd_lv:
        raise RuntimeError('Unable to find a data LV for filestore activation')
    is_encrypted = osd_lv.tags.get('ceph.encrypted', '0') == '1'

    osd_id = osd_lv.tags['ceph.osd_id']
    conf.cluster = osd_lv.tags['ceph.cluster_name']
    # it may have a volume with a journal
    osd_journal_lv = lvs.get(lv_tags={'ceph.type': 'journal'})
    # TODO: add sensible error reporting if this is ever the case
    # blow up with a KeyError if this doesn't exist
    osd_fsid = osd_lv.tags['ceph.osd_fsid']
    if not osd_journal_lv:
        # must be a disk partition, by querying blkid by the uuid we are ensuring that the
        # device path is always correct
        journal_uuid = osd_lv.tags['ceph.journal_uuid']
        osd_journal = disk.get_device_from_partuuid(journal_uuid)
    else:
        journal_uuid = osd_journal_lv.lv_uuid
        osd_journal = osd_lv.tags['ceph.journal_device']

    if not osd_journal:
        raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id)

    # this is done here, so that previous checks that ensure path availability
    # and correctness can still be enforced, and report if any issues are found
    if is_encrypted:
        lockbox_secret = osd_lv.tags['ceph.cephx_lockbox_secret']
        # this keyring writing is idempotent
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
        encryption_utils.luks_open(dmcrypt_secret, osd_lv.lv_path, osd_lv.lv_uuid)
        encryption_utils.luks_open(dmcrypt_secret, osd_journal, journal_uuid)

        # once opened, the device paths are the dm-crypt mapper names
        osd_journal = '/dev/mapper/%s' % journal_uuid
        source = '/dev/mapper/%s' % osd_lv.lv_uuid
    else:
        source = osd_lv.lv_path

    # mount the osd
    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.device_is_mounted(source, destination=destination):
        process.run(['mount', '-v', source, destination])

    # always re-do the symlink regardless if it exists, so that the journal
    # device path that may have changed can be mapped correctly every time
    destination = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
    process.run(['ln', '-snf', osd_journal, destination])

    # make sure that the journal has proper permissions
    system.chown(osd_journal)

    # enable the ceph-volume unit for this OSD
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

    # start the OSD
    systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
def get_osd_device_path(osd_lv, lvs, device_type, dmcrypt_secret=None):
    """
    ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that
    we can query ``lvs`` (a ``Volumes`` object) and fallback to querying the uuid
    if that is not present.

    Return a path if possible, failing to do that a ``None``, since some of these devices
    are optional.

    :param osd_lv: the OSD's block LV (unused directly; the block LV is
                   re-queried from ``lvs`` for its tags)
    :param lvs: ``Volumes`` collection for this OSD
    :param device_type: one of ``db``, ``wal``, or ``block``
    :param dmcrypt_secret: dm-crypt key, required only when the OSD is
                           encrypted so the device can be luks-opened
    """
    osd_lv = lvs.get(lv_tags={'ceph.type': 'block'})
    is_encrypted = osd_lv.tags.get('ceph.encrypted', '0') == '1'
    logger.debug('Found block device (%s) with encryption: %s', osd_lv.name, is_encrypted)
    uuid_tag = 'ceph.%s_uuid' % device_type
    device_uuid = osd_lv.tags.get(uuid_tag)
    if not device_uuid:
        # the device is optional and was never configured
        return None

    device_lv = lvs.get(lv_uuid=device_uuid)
    if device_lv:
        if is_encrypted:
            encryption_utils.luks_open(dmcrypt_secret, device_lv.lv_path, device_uuid)
            return '/dev/mapper/%s' % device_uuid
        return device_lv.lv_path
    else:
        # this could be a regular device, so query it with blkid
        physical_device = disk.get_device_from_partuuid(device_uuid)
        if physical_device and is_encrypted:
            encryption_utils.luks_open(dmcrypt_secret, physical_device, device_uuid)
            return '/dev/mapper/%s' % device_uuid
        return physical_device or None
def activate_bluestore(lvs):
    """
    Mount (on tmpfs) and prime a bluestore OSD directory, re-creating the
    block/block.db/block.wal symlinks, then enable and start the OSD unit.

    :param lvs: a ``Volumes`` collection already filtered down to the LVs
                belonging to a single OSD.
    :raises KeyError: if required ``ceph.*`` tags are missing from the
                      block LV.
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'block'})
    is_encrypted = osd_lv.tags.get('ceph.encrypted', '0') == '1'
    dmcrypt_secret = None
    osd_id = osd_lv.tags['ceph.osd_id']
    conf.cluster = osd_lv.tags['ceph.cluster_name']
    osd_fsid = osd_lv.tags['ceph.osd_fsid']

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=True)
    # XXX This needs to be removed once ceph-bluestore-tool can deal with
    # symlinks that exist in the osd dir
    for link_name in ['block', 'block.db', 'block.wal']:
        link_path = os.path.join(osd_path, link_name)
        if os.path.exists(link_path):
            os.unlink(os.path.join(osd_path, link_name))
    # encryption is handled here, before priming the OSD dir
    if is_encrypted:
        osd_lv_path = '/dev/mapper/%s' % osd_lv.lv_uuid
        lockbox_secret = osd_lv.tags['ceph.cephx_lockbox_secret']
        # this keyring writing is idempotent
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
        encryption_utils.luks_open(dmcrypt_secret, osd_lv.lv_path, osd_lv.lv_uuid)
    else:
        osd_lv_path = osd_lv.lv_path

    db_device_path = get_osd_device_path(osd_lv, lvs, 'db', dmcrypt_secret=dmcrypt_secret)
    wal_device_path = get_osd_device_path(osd_lv, lvs, 'wal', dmcrypt_secret=dmcrypt_secret)

    # Once symlinks are removed, the osd dir can be 'primed again.
    process.run([
        'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
        'prime-osd-dir', '--dev', osd_lv_path,
        '--path', osd_path])
    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')])
    system.chown(os.path.join(osd_path, 'block'))
    system.chown(osd_path)
    if db_device_path:
        destination = os.path.join(osd_path, 'block.db')
        process.run(['ln', '-snf', db_device_path, destination])
        system.chown(db_device_path)
    if wal_device_path:
        destination = os.path.join(osd_path, 'block.wal')
        process.run(['ln', '-snf', wal_device_path, destination])
        system.chown(wal_device_path)

    # enable the ceph-volume unit for this OSD
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

    # start the OSD
    systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
172 class Activate(object):
174 help = 'Discover and mount the LVM device associated with an OSD ID and start the Ceph OSD'
176 def __init__(self
, argv
):
179 @decorators.needs_root
180 def activate(self
, args
):
182 # filter them down for the OSD ID and FSID we need to activate
183 if args
.osd_id
and args
.osd_fsid
:
184 lvs
.filter(lv_tags
={'ceph.osd_id': args
.osd_id
, 'ceph.osd_fsid': args
.osd_fsid
})
185 elif args
.osd_fsid
and not args
.osd_id
:
186 lvs
.filter(lv_tags
={'ceph.osd_fsid': args
.osd_fsid
})
188 raise RuntimeError('could not find osd.%s with fsid %s' % (args
.osd_id
, args
.osd_fsid
))
189 # This argument is only available when passed in directly or via
190 # systemd, not when ``create`` is being used
191 if getattr(args
, 'auto_detect_objectstore', False):
192 logger
.info('auto detecting objectstore')
193 # may get multiple lvs, so can't do lvs.get() calls here
195 has_journal
= lv
.tags
.get('ceph.journal_uuid')
197 logger
.info('found a journal associated with the OSD, assuming filestore')
198 return activate_filestore(lvs
)
199 logger
.info('unable to find a journal associated with the OSD, assuming bluestore')
200 return activate_bluestore(lvs
)
202 activate_bluestore(lvs
)
204 activate_filestore(lvs
)
205 terminal
.success("ceph-volume lvm activate successful for osd ID: %s" % args
.osd_id
)
208 sub_command_help
= dedent("""
209 Activate OSDs by discovering them with LVM and mounting them in their
210 appropriate destination:
212 ceph-volume lvm activate {ID} {FSID}
214 The lvs associated with the OSD need to have been prepared previously,
215 so that all needed tags and metadata exist.
218 parser
= argparse
.ArgumentParser(
219 prog
='ceph-volume lvm activate',
220 formatter_class
=argparse
.RawDescriptionHelpFormatter
,
221 description
=sub_command_help
,
228 help='The ID of the OSD, usually an integer, like 0'
234 help='The FSID of the OSD, similar to a SHA1'
237 '--auto-detect-objectstore',
239 help='Autodetect the objectstore by inspecting the OSD',
244 help='filestore objectstore (not yet implemented)',
249 help='filestore objectstore (current default)',
251 if len(self
.argv
) == 0:
252 print(sub_command_help
)
254 args
= parser
.parse_args(self
.argv
)
255 # Default to bluestore here since defaulting it in add_argument may
256 # cause both to be True
257 if not args
.bluestore
and not args
.filestore
:
258 args
.bluestore
= True