]> git.proxmox.com Git - ceph.git/blob - ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py
update sources to 12.2.2
[ceph.git] / ceph / src / ceph-volume / ceph_volume / devices / lvm / activate.py
1 from __future__ import print_function
2 import argparse
3 import logging
4 import os
5 from textwrap import dedent
6 from ceph_volume import process, conf, decorators
7 from ceph_volume.util import system, disk
8 from ceph_volume.util import prepare as prepare_utils
9 from ceph_volume.systemd import systemctl
10 from ceph_volume.api import lvm as api
11
12
13 logger = logging.getLogger(__name__)
14
15
def activate_filestore(lvs):
    """
    Mount a filestore OSD's data LV, link its journal into place, and start
    the OSD via systemd.

    ``lvs`` is a ``Volumes`` object already filtered down to the LVs that
    belong to the OSD being activated.

    :raises RuntimeError: when no data LV is found, or when no journal (LV or
        partition) can be detected for the OSD.
    """
    # the data LV is the anchor for everything else
    data_lv = lvs.get(lv_tags={'ceph.type': 'data'})
    if not data_lv:
        raise RuntimeError('Unable to find a data LV for filestore activation')
    osd_id = data_lv.tags['ceph.osd_id']
    conf.cluster = data_lv.tags['ceph.cluster_name']
    # TODO: add sensible error reporting if this is ever the case
    # blow up with a KeyError if this doesn't exist
    osd_fsid = data_lv.tags['ceph.osd_fsid']

    # the journal may be a dedicated LV, or a plain disk partition
    journal_lv = lvs.get(lv_tags={'ceph.type': 'journal'})
    if journal_lv:
        osd_journal = data_lv.tags['ceph.journal_device']
    else:
        # must be a disk partition; resolving it through blkid by uuid ensures
        # the device path is always correct, even if device names changed
        osd_journal = disk.get_device_from_partuuid(data_lv.tags['ceph.journal_uuid'])

    if not osd_journal:
        raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id)

    # mount the osd data device onto its osd directory
    source = data_lv.lv_path
    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.device_is_mounted(source, destination=destination):
        process.run(['sudo', 'mount', '-v', source, destination])

    # always re-do the symlink regardless if it exists, so that the journal
    # device path that may have changed can be mapped correctly every time
    journal_link = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
    process.run(['sudo', 'ln', '-snf', osd_journal, journal_link])

    # make sure that the journal has proper permissions
    system.chown(osd_journal)

    # enable the ceph-volume unit for this OSD, then bring it up
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
    systemctl.start_osd(osd_id)
57
58
def get_osd_device_path(osd_lv, lvs, device_type):
    """
    Resolve the device path of the ``device_type`` (one of ``db``, ``wal`` or
    ``block``) that belongs to ``osd_lv``.

    The ``ceph.<device_type>_uuid`` tag on ``osd_lv`` is looked up first; if
    present, it is resolved against ``lvs`` (a ``Volumes`` object), falling
    back to querying blkid by uuid for a regular device (e.g. a partition).

    Return a path if possible, failing to do that a ``None``, since some of
    these devices are optional.
    """
    # use the osd_lv the caller already resolved; it was previously being
    # discarded and re-queried with lvs.get(), which made the parameter useless
    uuid_tag = 'ceph.%s_uuid' % device_type
    device_uuid = osd_lv.tags.get(uuid_tag)
    if not device_uuid:
        # the device is optional (db/wal may not be configured)
        return None

    device_lv = lvs.get(lv_uuid=device_uuid)
    if device_lv:
        return device_lv.lv_path
    # this could be a regular device, so query it with blkid
    physical_device = disk.get_device_from_partuuid(device_uuid)
    return physical_device or None
82
83
def activate_bluestore(lvs):
    """
    Mount a tmpfs directory for a bluestore OSD, prime it with
    ceph-bluestore-tool, link the block/db/wal devices, and start the OSD.

    ``lvs`` is a ``Volumes`` object already filtered down to the LVs that
    belong to the OSD being activated. Raises ``KeyError`` if the required
    ``ceph.*`` tags are missing from the block LV.
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'block'})
    osd_id = osd_lv.tags['ceph.osd_id']
    conf.cluster = osd_lv.tags['ceph.cluster_name']
    osd_fsid = osd_lv.tags['ceph.osd_fsid']
    # db and wal are optional; these resolve to None when not configured
    db_device_path = get_osd_device_path(osd_lv, lvs, 'db')
    wal_device_path = get_osd_device_path(osd_lv, lvs, 'wal')

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=True)
    # XXX This needs to be removed once ceph-bluestore-tool can deal with
    # symlinks that exist in the osd dir
    for link_name in ['block', 'block.db', 'block.wal']:
        link_path = os.path.join(osd_path, link_name)
        # lexists (not exists) so that dangling symlinks -- e.g. a device path
        # that no longer resolves -- are removed too; os.path.exists follows
        # the link and would skip them, leaving a stale symlink behind
        if os.path.lexists(link_path):
            os.unlink(link_path)
    # Once symlinks are removed, the osd dir can be 'primed again.
    process.run([
        'sudo', 'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
        'prime-osd-dir', '--dev', osd_lv.lv_path,
        '--path', osd_path])
    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    process.run(['sudo', 'ln', '-snf', osd_lv.lv_path, os.path.join(osd_path, 'block')])
    system.chown(os.path.join(osd_path, 'block'))
    system.chown(osd_path)
    if db_device_path:
        destination = os.path.join(osd_path, 'block.db')
        process.run(['sudo', 'ln', '-snf', db_device_path, destination])
        system.chown(db_device_path)
    if wal_device_path:
        destination = os.path.join(osd_path, 'block.wal')
        process.run(['sudo', 'ln', '-snf', wal_device_path, destination])
        system.chown(wal_device_path)

    # enable the ceph-volume unit for this OSD
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

    # start the OSD
    systemctl.start_osd(osd_id)
129
130
class Activate(object):
    """
    ``ceph-volume lvm activate`` subcommand: discover the LVs tagged for an
    OSD, mount them, and start the OSD through systemd.
    """

    help = 'Discover and mount the LVM device associated with an OSD ID and start the Ceph OSD'

    def __init__(self, argv):
        # argv: the subcommand arguments (everything after ``lvm activate``)
        self.argv = argv

    @decorators.needs_root
    def activate(self, args):
        """
        Filter all LVs down to the ones matching ``args.osd_id``/``args.osd_fsid``
        and dispatch to the filestore or bluestore activation routine.

        :param args: parsed namespace with ``osd_id``, ``osd_fsid``,
            ``bluestore``, ``filestore``, and optionally
            ``auto_detect_objectstore`` attributes
        :raises RuntimeError: when no LVs match the given ID/FSID
        """
        lvs = api.Volumes()
        # filter them down for the OSD ID and FSID we need to activate
        if args.osd_id and args.osd_fsid:
            lvs.filter(lv_tags={'ceph.osd_id': args.osd_id, 'ceph.osd_fsid': args.osd_fsid})
        elif args.osd_fsid and not args.osd_id:
            lvs.filter(lv_tags={'ceph.osd_fsid': args.osd_fsid})
        if not lvs:
            raise RuntimeError('could not find osd.%s with fsid %s' % (args.osd_id, args.osd_fsid))
        # This argument is only available when passed in directly or via
        # systemd, not when ``create`` is being used
        if getattr(args, 'auto_detect_objectstore', False):
            logger.info('auto detecting objectstore')
            # may get multiple lvs, so can't do lvs.get() calls here
            for lv in lvs:
                # a journal uuid tag implies filestore; bluestore has no journal
                has_journal = lv.tags.get('ceph.journal_uuid')
                if has_journal:
                    logger.info('found a journal associated with the OSD, assuming filestore')
                    return activate_filestore(lvs)
            logger.info('unable to find a journal associated with the OSD, assuming bluestore')
            return activate_bluestore(lvs)
        if args.bluestore:
            activate_bluestore(lvs)
        elif args.filestore:
            activate_filestore(lvs)

    def main(self):
        """
        Parse ``self.argv`` and run activation; with no arguments, print the
        detailed help and return without doing anything.
        """
        sub_command_help = dedent("""
        Activate OSDs by discovering them with LVM and mounting them in their
        appropriate destination:

            ceph-volume lvm activate {ID} {FSID}

        The lvs associated with the OSD need to have been prepared previously,
        so that all needed tags and metadata exist.

        """)
        parser = argparse.ArgumentParser(
            prog='ceph-volume lvm activate',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )

        parser.add_argument(
            'osd_id',
            metavar='ID',
            nargs='?',
            help='The ID of the OSD, usually an integer, like 0'
        )
        parser.add_argument(
            'osd_fsid',
            metavar='FSID',
            nargs='?',
            help='The FSID of the OSD, similar to a SHA1'
        )
        parser.add_argument(
            '--auto-detect-objectstore',
            action='store_true',
            help='Autodetect the objectstore by inspecting the OSD',
        )
        # help texts fixed: --bluestore previously said 'filestore objectstore
        # (not yet implemented)' (copy-paste error), and --filestore claimed to
        # be the default even though bluestore is defaulted below
        parser.add_argument(
            '--bluestore',
            action='store_true',
            help='bluestore objectstore (default)',
        )
        parser.add_argument(
            '--filestore',
            action='store_true',
            help='filestore objectstore',
        )
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        args = parser.parse_args(self.argv)
        # Default to bluestore here since defaulting it in add_argument may
        # cause both to be True
        if not args.bluestore and not args.filestore:
            args.bluestore = True
        self.activate(args)