#!/usr/bin/env python
import contextlib
import logging
from io import BytesIO
import textwrap
from configparser import ConfigParser

import six
import time

from teuthology.orchestra import run
from teuthology import misc
from teuthology.contextutil import nested

log = logging.getLogger(__name__)

DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git'
DS_STABLE_BRANCHES = ("havana", "grizzly")

is_devstack_node = lambda role: role.startswith('devstack')
is_osd_node = lambda role: role.startswith('osd')


@contextlib.contextmanager
def task(ctx, config):
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise TypeError("config must be a dict")
    with nested(lambda: install(ctx=ctx, config=config),
                lambda: smoke(ctx=ctx, config=config),
                ):
        yield


@contextlib.contextmanager
def install(ctx, config):
    """
    Install OpenStack DevStack and configure it to use a Ceph cluster for
    Glance and Cinder.

    Requires one node with a 'devstack' role.

    Since devstack runs rampant on the system it's used on, you will typically
    want to reprovision that machine after using devstack on it.

    Also, the default 2GB of RAM given to vps nodes is insufficient; 4GB is
    recommended. Downburst can be instructed to give 4GB to a vps node by
    adding this to the yaml:

    downburst:
        ram: 4G

    This task was written using the documentation found here:
    https://github.com/openstack-dev/devstack/blob/master/README.md
    http://docs.ceph.com/docs/master/rbd/rbd-openstack/
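    A tasks snippet using this task might look roughly like the sketch below.
    It is illustrative only: the role names and the surrounding install and
    ceph tasks are assumptions about the rest of the job, not requirements
    imposed here (beyond the 'devstack' and 'osd' roles):

        roles:
        - [mon.0, osd.0, osd.1, osd.2]
        - [devstack.0]
        tasks:
        - install:
        - ceph:
        - devstack:
            branch: havana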
    """
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise TypeError("config must be a dict")

    devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
    an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))

    devstack_branch = config.get("branch", "master")
    install_devstack(devstack_node, devstack_branch)
    try:
        configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node)
        yield
    finally:
        pass


def install_devstack(devstack_node, branch="master"):
    log.info("Cloning DevStack repo...")

    args = ['git', 'clone', DEVSTACK_GIT_REPO]
    devstack_node.run(args=args)

    if branch != "master":
        if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"):
            branch = "stable/" + branch
        log.info("Checking out {branch} branch...".format(branch=branch))
        cmd = "cd devstack && git checkout " + branch
        devstack_node.run(args=cmd)

    log.info("Installing DevStack...")
    args = ['cd', 'devstack', run.Raw('&&'), './stack.sh']
    devstack_node.run(args=args)


def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node):
    pool_size = config.get('pool_size', '128')
    create_pools(ceph_node, pool_size)
    distribute_ceph_conf(devstack_node, ceph_node)
    # This is where we would install python-ceph and ceph-common but it appears
    # the ceph task does that for us.
    generate_ceph_keys(ceph_node)
    distribute_ceph_keys(devstack_node, ceph_node)
    secret_uuid = set_libvirt_secret(devstack_node, ceph_node)
    update_devstack_config_files(devstack_node, secret_uuid)
    set_apache_servername(devstack_node)
    # Rebooting is the most-often-used method of restarting devstack services
    misc.reboot(devstack_node)
    start_devstack(devstack_node)
    restart_apache(devstack_node)


def create_pools(ceph_node, pool_size):
    log.info("Creating pools on Ceph cluster...")

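    # Note: despite its name, pool_size is passed as the pg_num positional
    # argument of 'ceph osd pool create <pool> <pg_num>', not as the pool's
    # replica count.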
    for pool_name in ['volumes', 'images', 'backups']:
        args = ['sudo', 'ceph', 'osd', 'pool', 'create', pool_name, pool_size]
        ceph_node.run(args=args)


def distribute_ceph_conf(devstack_node, ceph_node):
    log.info("Copying ceph.conf to DevStack node...")

    ceph_conf_path = '/etc/ceph/ceph.conf'
    ceph_conf = misc.get_file(ceph_node, ceph_conf_path, sudo=True)
    misc.sudo_write_file(devstack_node, ceph_conf_path, ceph_conf)


def generate_ceph_keys(ceph_node):
    log.info("Generating Ceph keys...")

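    # The caps below follow the rbd-openstack guide cited in install():
    # each client can read from the monitors, cinder gets rwx on 'volumes'
    # and rx on 'images', glance gets rwx on 'images', and cinder-backup
    # gets rwx on 'backups'.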
    ceph_auth_cmds = [
        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
            'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'],  # noqa
        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
            'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'],  # noqa
        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
            'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'],  # noqa
    ]
    for cmd in ceph_auth_cmds:
        ceph_node.run(args=cmd)


def distribute_ceph_keys(devstack_node, ceph_node):
    log.info("Copying Ceph keys to DevStack node...")

    def copy_key(from_remote, key_name, to_remote, dest_path, owner):
        key_stringio = BytesIO()
        from_remote.run(
            args=['sudo', 'ceph', 'auth', 'get-or-create', key_name],
            stdout=key_stringio)
        key_stringio.seek(0)
        misc.sudo_write_file(to_remote, dest_path,
                             key_stringio, owner=owner)
    keys = [
        dict(name='client.glance',
             path='/etc/ceph/ceph.client.glance.keyring',
             # devstack appears to just want root:root
             #owner='glance:glance',
             ),
        dict(name='client.cinder',
             path='/etc/ceph/ceph.client.cinder.keyring',
             # devstack appears to just want root:root
             #owner='cinder:cinder',
             ),
        dict(name='client.cinder-backup',
             path='/etc/ceph/ceph.client.cinder-backup.keyring',
             # devstack appears to just want root:root
             #owner='cinder:cinder',
             ),
    ]
    for key_dict in keys:
        copy_key(ceph_node, key_dict['name'], devstack_node,
                 key_dict['path'], key_dict.get('owner'))


def set_libvirt_secret(devstack_node, ceph_node):
    log.info("Setting libvirt secret...")

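    # libvirt needs the client.cinder key registered as a secret so that
    # nova and cinder can attach RBD volumes; the UUID generated here is
    # what update_devstack_config_files() later writes into cinder.conf
    # and nova.conf as rbd_secret_uuid.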
    cinder_key = ceph_node.sh('sudo ceph auth get-key client.cinder').strip()
    uuid = devstack_node.sh('uuidgen').strip()

    secret_path = '/tmp/secret.xml'
    secret_template = textwrap.dedent("""
    <secret ephemeral='no' private='no'>
        <uuid>{uuid}</uuid>
        <usage type='ceph'>
            <name>client.cinder secret</name>
        </usage>
    </secret>""")
    misc.sudo_write_file(devstack_node, secret_path,
                         secret_template.format(uuid=uuid))
    devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file',
                            secret_path])
    devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret',
                            uuid, '--base64', cinder_key])
    return uuid


def update_devstack_config_files(devstack_node, secret_uuid):
    log.info("Updating DevStack config files to use Ceph...")

    def backup_config(node, file_name, backup_ext='.orig.teuth'):
        node.run(args=['cp', '-f', file_name, file_name + backup_ext])

    def update_config(config_name, config_stream, update_dict,
                      section='DEFAULT'):
        parser = ConfigParser()
        parser.read_file(config_stream)
        for (key, value) in update_dict.items():
            parser.set(section, key, value)
        out_stream = six.StringIO()
        parser.write(out_stream)
        out_stream.seek(0)
        return out_stream

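    # For illustration: after update_config() runs, the [DEFAULT] section of
    # each file should contain "key = value" lines for the options set below,
    # e.g. for /etc/cinder/cinder.conf:
    #
    #   [DEFAULT]
    #   volume_driver = cinder.volume.drivers.rbd.RBDDriver
    #   rbd_pool = volumes
    #   rbd_user = cinder
    #   rbd_secret_uuid = <uuid from set_libvirt_secret>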
    updates = [
        dict(name='/etc/glance/glance-api.conf', options=dict(
            default_store='rbd',
            rbd_store_user='glance',
            rbd_store_pool='images',
            show_image_direct_url='True',)),
        dict(name='/etc/cinder/cinder.conf', options=dict(
            volume_driver='cinder.volume.drivers.rbd.RBDDriver',
            rbd_pool='volumes',
            rbd_ceph_conf='/etc/ceph/ceph.conf',
            rbd_flatten_volume_from_snapshot='false',
            rbd_max_clone_depth='5',
            glance_api_version='2',
            rbd_user='cinder',
            rbd_secret_uuid=secret_uuid,
            backup_driver='cinder.backup.drivers.ceph',
            backup_ceph_conf='/etc/ceph/ceph.conf',
            backup_ceph_user='cinder-backup',
            backup_ceph_chunk_size='134217728',
            backup_ceph_pool='backups',
            backup_ceph_stripe_unit='0',
            backup_ceph_stripe_count='0',
            restore_discard_excess_bytes='true',
            )),
        dict(name='/etc/nova/nova.conf', options=dict(
            libvirt_images_type='rbd',
            libvirt_images_rbd_pool='volumes',
            libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf',
            rbd_user='cinder',
            rbd_secret_uuid=secret_uuid,
            libvirt_inject_password='false',
            libvirt_inject_key='false',
            libvirt_inject_partition='-2',
            )),
    ]

    for update in updates:
        file_name = update['name']
        options = update['options']
        config_data = misc.get_file(devstack_node, file_name, sudo=True)
        config_stream = six.StringIO(config_data)
        backup_config(devstack_node, file_name)
        new_config_stream = update_config(file_name, config_stream, options)
        misc.sudo_write_file(devstack_node, file_name, new_config_stream)


def set_apache_servername(node):
    # Apache complains: "Could not reliably determine the server's fully
    # qualified domain name, using 127.0.0.1 for ServerName"
    # So, let's make sure it knows its name.
    log.info("Setting Apache ServerName...")

    hostname = node.hostname
    config_file = '/etc/apache2/conf.d/servername'
    misc.sudo_write_file(node, config_file,
                         "ServerName {name}".format(name=hostname))


def start_devstack(devstack_node):
    log.info("Patching devstack start script...")
    # This causes screen to start headless - otherwise rejoin-stack.sh fails
    # because there is no terminal attached.
    cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh"
    devstack_node.run(args=cmd)

    log.info("Starting devstack...")
    cmd = "cd devstack && ./rejoin-stack.sh"
    devstack_node.run(args=cmd)

    # This was added because I was getting timeouts on Cinder requests - which
    # were trying to access Keystone on port 5000. A more robust way to handle
    # this would be to introduce a wait-loop on devstack_node that checks to
    # see if a service is listening on port 5000.
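    # A sketch of such a wait-loop (an assumption, not part of the original
    # flow; it presumes curl is available on the devstack node):
    #
    #   for _ in range(30):
    #       result = devstack_node.run(
    #           args="curl -s -o /dev/null http://localhost:5000/",
    #           check_status=False)
    #       if result.exitstatus == 0:
    #           break
    #       time.sleep(10)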
    log.info("Waiting 30s for devstack to start...")
    time.sleep(30)


def restart_apache(node):
    node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True)


@contextlib.contextmanager
def exercise(ctx, config):
    log.info("Running devstack exercises...")

    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise TypeError("config must be a dict")

    devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))

    # TODO: save the log *and* preserve failures
    #devstack_archive_dir = create_devstack_archive(ctx, devstack_node)

    try:
        #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format( # noqa
        #    dir=devstack_archive_dir)
        cmd = "cd devstack && ./exercise.sh"
        devstack_node.run(args=cmd, wait=True)
        yield
    finally:
        pass


def create_devstack_archive(ctx, devstack_node):
    test_dir = misc.get_testdir(ctx)
    devstack_archive_dir = "{test_dir}/archive/devstack".format(
        test_dir=test_dir)
    devstack_node.run(args="mkdir -p " + devstack_archive_dir)
    return devstack_archive_dir


@contextlib.contextmanager
def smoke(ctx, config):
    log.info("Running a basic smoketest...")

    devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
    an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))

    try:
        create_volume(devstack_node, an_osd_node, 'smoke0', 1)
        yield
    finally:
        pass


def create_volume(devstack_node, ceph_node, vol_name, size):
    """
    :param size: The size of the volume, in GB
    """
    size = str(size)
    log.info("Creating a {size}GB volume named {name}...".format(
        name=vol_name,
        size=size))
    args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
            '--display-name', vol_name, size]
    cinder_create = devstack_node.sh(args, wait=True)
    vol_info = parse_os_table(cinder_create)
    log.debug("Volume info: %s", str(vol_info))

    try:
        rbd_output = ceph_node.sh("rbd --id cinder ls -l volumes", wait=True)
    except run.CommandFailedError:
        log.debug("Original rbd call failed; retrying without '--id cinder'")
        rbd_output = ceph_node.sh("rbd ls -l volumes", wait=True)

    assert vol_info['id'] in rbd_output, \
        "Volume not found on Ceph cluster"
    assert vol_info['size'] == size, \
        "Volume size on Ceph cluster is different than specified"
    return vol_info['id']


def parse_os_table(table_str):
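    """
    Parse the ASCII table emitted by older OpenStack CLI clients into a dict.

    Illustrative input (the exact rows come from the client, not from this
    code; cells containing whitespace will not be captured cleanly, since
    only one token is taken for the key and one for the value):

        +-----------+---------------+
        |  Property |     Value     |
        +-----------+---------------+
        |     id    |    <uuid>     |
        |    size   |       1       |
        +-----------+---------------+
    """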
    out_dict = dict()
    for line in table_str.split('\n'):
        if line.startswith('|'):
            items = line.split()
            out_dict[items[1]] = items[3]
    return out_dict