#!/usr/bin/env python
import contextlib
import logging
from io import StringIO
import textwrap
from configparser import ConfigParser
import time

from teuthology.orchestra import run
from teuthology import misc
from teuthology.contextutil import nested

log = logging.getLogger(__name__)

DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git'
DS_STABLE_BRANCHES = ("havana", "grizzly")

is_devstack_node = lambda role: role.startswith('devstack')
is_osd_node = lambda role: role.startswith('osd')


@contextlib.contextmanager
def task(ctx, config):
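    """
    Set up DevStack on a node with the 'devstack' role, point it at the
    Ceph cluster, and run a basic smoke test. A minimal job snippet (the
    surrounding tasks shown here are an assumption, not part of this
    file) might look like::

        tasks:
        - install:
        - ceph:
        - devstack:
            branch: havana
    """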
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise TypeError("config must be a dict")
    with nested(lambda: install(ctx=ctx, config=config),
                lambda: smoke(ctx=ctx, config=config),
                ):
        yield


@contextlib.contextmanager
def install(ctx, config):
    """
    Install OpenStack DevStack and configure it to use a Ceph cluster for
    Glance and Cinder.

    Requires one node with a 'devstack' role.

    Since DevStack makes sweeping changes to the system it runs on, you will
    typically want to reprovision that machine after using DevStack on it.

    Also, the default 2GB of RAM given to vps nodes is insufficient; I
    recommend 4GB. Downburst can be instructed to give 4GB to a vps node by
    adding this to the yaml:

        downburst:
            ram: 4G

    This was created using documentation found here:
        https://github.com/openstack-dev/devstack/blob/master/README.md
        http://docs.ceph.com/docs/master/rbd/rbd-openstack/
    """
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise TypeError("config must be a dict")

    # remotes maps each Remote to its roles; keys() is not subscriptable on
    # Python 3, so take the first matching remote with next(iter(...)).
    devstack_node = next(iter(
        ctx.cluster.only(is_devstack_node).remotes.keys()))
    an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))

    devstack_branch = config.get("branch", "master")
    install_devstack(devstack_node, devstack_branch)
    try:
        configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node)
        yield
    finally:
        pass


def install_devstack(devstack_node, branch="master"):
    log.info("Cloning DevStack repo...")

    args = ['git', 'clone', DEVSTACK_GIT_REPO]
    devstack_node.run(args=args)

    if branch != "master":
        if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"):
            branch = "stable/" + branch
        log.info("Checking out {branch} branch...".format(branch=branch))
        cmd = "cd devstack && git checkout " + branch
        devstack_node.run(args=cmd)

    log.info("Installing DevStack...")
    args = ['cd', 'devstack', run.Raw('&&'), './stack.sh']
    devstack_node.run(args=args)


def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node):
    pool_size = config.get('pool_size', '128')
    create_pools(ceph_node, pool_size)
    distribute_ceph_conf(devstack_node, ceph_node)
    # This is where we would install python-ceph and ceph-common, but it
    # appears the ceph task does that for us.
    generate_ceph_keys(ceph_node)
    distribute_ceph_keys(devstack_node, ceph_node)
    secret_uuid = set_libvirt_secret(devstack_node, ceph_node)
    update_devstack_config_files(devstack_node, secret_uuid)
    set_apache_servername(devstack_node)
    # Rebooting is the most-often-used method of restarting devstack services
    misc.reboot(devstack_node)
    start_devstack(devstack_node)
    restart_apache(devstack_node)


def create_pools(ceph_node, pool_size):
    log.info("Creating pools on Ceph cluster...")

    for pool_name in ['volumes', 'images', 'backups']:
        # Note: 'pool_size' is passed as the pg_num argument to
        # 'ceph osd pool create'; it sets the placement-group count,
        # not the replica count.
        args = ['sudo', 'ceph', 'osd', 'pool', 'create', pool_name, pool_size]
        ceph_node.run(args=args)


def distribute_ceph_conf(devstack_node, ceph_node):
    log.info("Copying ceph.conf to DevStack node...")

    ceph_conf_path = '/etc/ceph/ceph.conf'
    ceph_conf = misc.get_file(ceph_node, ceph_conf_path, sudo=True)
    misc.sudo_write_file(devstack_node, ceph_conf_path, ceph_conf)


def generate_ceph_keys(ceph_node):
    log.info("Generating Ceph keys...")

    ceph_auth_cmds = [
        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
         'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'],  # noqa
        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
         'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'],  # noqa
        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder-backup',
         'mon', 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'],  # noqa
    ]
    for cmd in ceph_auth_cmds:
        ceph_node.run(args=cmd)


def distribute_ceph_keys(devstack_node, ceph_node):
    log.info("Copying Ceph keys to DevStack node...")

    def copy_key(from_remote, key_name, to_remote, dest_path, owner):
        key_stringio = StringIO()
        from_remote.run(
            args=['sudo', 'ceph', 'auth', 'get-or-create', key_name],
            stdout=key_stringio)
        key_stringio.seek(0)
        misc.sudo_write_file(to_remote, dest_path,
                             key_stringio, owner=owner)
    keys = [
        dict(name='client.glance',
             path='/etc/ceph/ceph.client.glance.keyring',
             # devstack appears to just want root:root
             #owner='glance:glance',
             ),
        dict(name='client.cinder',
             path='/etc/ceph/ceph.client.cinder.keyring',
             # devstack appears to just want root:root
             #owner='cinder:cinder',
             ),
        dict(name='client.cinder-backup',
             path='/etc/ceph/ceph.client.cinder-backup.keyring',
             # devstack appears to just want root:root
             #owner='cinder:cinder',
             ),
    ]
    for key_dict in keys:
        copy_key(ceph_node, key_dict['name'], devstack_node,
                 key_dict['path'], key_dict.get('owner'))


def set_libvirt_secret(devstack_node, ceph_node):
    log.info("Setting libvirt secret...")

    cinder_key_stringio = StringIO()
    ceph_node.run(args=['sudo', 'ceph', 'auth', 'get-key', 'client.cinder'],
                  stdout=cinder_key_stringio)
    cinder_key = cinder_key_stringio.getvalue().strip()

    uuid_stringio = StringIO()
    devstack_node.run(args=['uuidgen'], stdout=uuid_stringio)
    uuid = uuid_stringio.getvalue().strip()

    secret_path = '/tmp/secret.xml'
    secret_template = textwrap.dedent("""
        <secret ephemeral='no' private='no'>
            <uuid>{uuid}</uuid>
            <usage type='ceph'>
                <name>client.cinder secret</name>
            </usage>
        </secret>""")
    misc.sudo_write_file(devstack_node, secret_path,
                         secret_template.format(uuid=uuid))
    devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file',
                            secret_path])
    devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret',
                            uuid, '--base64', cinder_key])
    return uuid


def update_devstack_config_files(devstack_node, secret_uuid):
    log.info("Updating DevStack config files to use Ceph...")

    def backup_config(node, file_name, backup_ext='.orig.teuth'):
        node.run(args=['cp', '-f', file_name, file_name + backup_ext])

    def update_config(config_name, config_stream, update_dict,
                      section='DEFAULT'):
        parser = ConfigParser()
        parser.read_file(config_stream)
        for (key, value) in update_dict.items():
            parser.set(section, key, value)
        out_stream = StringIO()
        parser.write(out_stream)
        out_stream.seek(0)
        return out_stream

    updates = [
        dict(name='/etc/glance/glance-api.conf', options=dict(
            default_store='rbd',
            rbd_store_user='glance',
            rbd_store_pool='images',
            show_image_direct_url='True',)),
        dict(name='/etc/cinder/cinder.conf', options=dict(
            volume_driver='cinder.volume.drivers.rbd.RBDDriver',
            rbd_pool='volumes',
            rbd_ceph_conf='/etc/ceph/ceph.conf',
            rbd_flatten_volume_from_snapshot='false',
            rbd_max_clone_depth='5',
            glance_api_version='2',
            rbd_user='cinder',
            rbd_secret_uuid=secret_uuid,
            backup_driver='cinder.backup.drivers.ceph',
            backup_ceph_conf='/etc/ceph/ceph.conf',
            backup_ceph_user='cinder-backup',
            backup_ceph_chunk_size='134217728',
            backup_ceph_pool='backups',
            backup_ceph_stripe_unit='0',
            backup_ceph_stripe_count='0',
            restore_discard_excess_bytes='true',
        )),
        dict(name='/etc/nova/nova.conf', options=dict(
            libvirt_images_type='rbd',
            libvirt_images_rbd_pool='volumes',
            libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf',
            rbd_user='cinder',
            rbd_secret_uuid=secret_uuid,
            libvirt_inject_password='false',
            libvirt_inject_key='false',
            libvirt_inject_partition='-2',
        )),
    ]

    for update in updates:
        file_name = update['name']
        options = update['options']
        config_data = misc.get_file(devstack_node, file_name, sudo=True)
        # get_file may return bytes on Python 3; ConfigParser needs text.
        if isinstance(config_data, bytes):
            config_data = config_data.decode()
        config_stream = StringIO(config_data)
        backup_config(devstack_node, file_name)
        new_config_stream = update_config(file_name, config_stream, options)
        misc.sudo_write_file(devstack_node, file_name, new_config_stream)


def set_apache_servername(node):
    # Apache complains: "Could not reliably determine the server's fully
    # qualified domain name, using 127.0.0.1 for ServerName"
    # So, let's make sure it knows its name.
    log.info("Setting Apache ServerName...")

    hostname = node.hostname
    config_file = '/etc/apache2/conf.d/servername'
    misc.sudo_write_file(node, config_file,
                         "ServerName {name}".format(name=hostname))


def start_devstack(devstack_node):
    log.info("Patching devstack start script...")
    # This causes screen to start headless - otherwise rejoin-stack.sh fails
    # because there is no terminal attached.
    cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh"
    devstack_node.run(args=cmd)

    log.info("Starting devstack...")
    cmd = "cd devstack && ./rejoin-stack.sh"
    devstack_node.run(args=cmd)

    # This was added because I was getting timeouts on Cinder requests, which
    # were trying to access Keystone on port 5000. A more robust way to
    # handle this would be a wait-loop that checks whether a service is
    # listening on port 5000; see the wait_for_port() sketch below.
    log.info("Waiting 30s for devstack to start...")
    time.sleep(30)
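

# A sketch of the more robust wait-loop suggested above. It assumes 'nc' is
# available on the remote node and that Remote.run() accepts check_status
# and exposes exitstatus, as it does elsewhere in teuthology. This helper is
# illustrative and is not wired into the task.
def wait_for_port(node, port, timeout=300, interval=5):
    """Poll until something is listening on the given TCP port on node."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        # 'nc -z' exits 0 as soon as it can connect to the port.
        proc = node.run(args=['nc', '-z', 'localhost', str(port)],
                        check_status=False)
        if proc.exitstatus == 0:
            return
        time.sleep(interval)
    raise RuntimeError("Port {port} not open after {timeout}s".format(
        port=port, timeout=timeout))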


def restart_apache(node):
    node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True)


@contextlib.contextmanager
def exercise(ctx, config):
    log.info("Running devstack exercises...")

    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise TypeError("config must be a dict")

    devstack_node = next(iter(
        ctx.cluster.only(is_devstack_node).remotes.keys()))

    # TODO: save the log *and* preserve failures
    #devstack_archive_dir = create_devstack_archive(ctx, devstack_node)

    try:
        #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format(  # noqa
        #    dir=devstack_archive_dir)
        cmd = "cd devstack && ./exercise.sh"
        devstack_node.run(args=cmd, wait=True)
        yield
    finally:
        pass


def create_devstack_archive(ctx, devstack_node):
    test_dir = misc.get_testdir(ctx)
    devstack_archive_dir = "{test_dir}/archive/devstack".format(
        test_dir=test_dir)
    devstack_node.run(args="mkdir -p " + devstack_archive_dir)
    return devstack_archive_dir


@contextlib.contextmanager
def smoke(ctx, config):
    log.info("Running a basic smoketest...")

    devstack_node = next(iter(
        ctx.cluster.only(is_devstack_node).remotes.keys()))
    an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))

    try:
        create_volume(devstack_node, an_osd_node, 'smoke0', 1)
        yield
    finally:
        pass


def create_volume(devstack_node, ceph_node, vol_name, size):
    """
    :param size: The size of the volume, in GB
    """
    size = str(size)
    log.info("Creating a {size}GB volume named {name}...".format(
        name=vol_name,
        size=size))
    args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
            '--display-name', vol_name, size]
    out_stream = StringIO()
    devstack_node.run(args=args, stdout=out_stream, wait=True)
    vol_info = parse_os_table(out_stream.getvalue())
    log.debug("Volume info: %s", str(vol_info))

    out_stream = StringIO()
    try:
        ceph_node.run(args="rbd --id cinder ls -l volumes", stdout=out_stream,
                      wait=True)
    except run.CommandFailedError:
        log.debug("Original rbd call failed; retrying without '--id cinder'")
        ceph_node.run(args="rbd ls -l volumes", stdout=out_stream,
                      wait=True)

    assert vol_info['id'] in out_stream.getvalue(), \
        "Volume not found on Ceph cluster"
    assert vol_info['size'] == size, \
        "Volume size on Ceph cluster is different than specified"
    return vol_info['id']


def parse_os_table(table_str):
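    """
    Parse the ASCII table printed by OpenStack CLI tools into a dict of
    property -> value. An abridged, illustrative example of the input::

        +----------+---------------+
        | Property | Value         |
        +----------+---------------+
        | id       | <volume-uuid> |
        | size     | 1             |
        +----------+---------------+

    Note: the whitespace split below keeps only the first token of each
    value, so values containing spaces would be truncated.
    """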
    out_dict = dict()
    for line in table_str.split('\n'):
        if line.startswith('|'):
            items = line.split()
            out_dict[items[1]] = items[3]
    return out_dict