import time
from configparser import ConfigParser

from teuthology import misc
from teuthology.contextutil import nested
from teuthology.orchestra import run
15 log
= logging
.getLogger(__name__
)
17 DEVSTACK_GIT_REPO
= 'https://github.com/openstack-dev/devstack.git'
18 DS_STABLE_BRANCHES
= ("havana", "grizzly")
20 is_devstack_node
= lambda role
: role
.startswith('devstack')
21 is_osd_node
= lambda role
: role
.startswith('osd')
24 @contextlib.contextmanager
25 def task(ctx
, config
):
28 if not isinstance(config
, dict):
29 raise TypeError("config must be a dict")
30 with
nested(lambda: install(ctx
=ctx
, config
=config
),
31 lambda: smoke(ctx
=ctx
, config
=config
),
36 @contextlib.contextmanager
37 def install(ctx
, config
):
39 Install OpenStack DevStack and configure it to use a Ceph cluster for
42 Requires one node with a role 'devstack'
44 Since devstack runs rampant on the system it's used on, typically you will
45 want to reprovision that machine after using devstack on it.
47 Also, the default 2GB of RAM that is given to vps nodes is insufficient. I
48 recommend 4GB. Downburst can be instructed to give 4GB to a vps node by
49 adding this to the yaml:
54 This was created using documentation found here:
55 https://github.com/openstack-dev/devstack/blob/master/README.md
56 http://docs.ceph.com/docs/master/rbd/rbd-openstack/
60 if not isinstance(config
, dict):
61 raise TypeError("config must be a dict")
63 devstack_node
= next(iter(ctx
.cluster
.only(is_devstack_node
).remotes
.keys()))
64 an_osd_node
= next(iter(ctx
.cluster
.only(is_osd_node
).remotes
.keys()))
66 devstack_branch
= config
.get("branch", "master")
67 install_devstack(devstack_node
, devstack_branch
)
69 configure_devstack_and_ceph(ctx
, config
, devstack_node
, an_osd_node
)
75 def install_devstack(devstack_node
, branch
="master"):
76 log
.info("Cloning DevStack repo...")
78 args
= ['git', 'clone', DEVSTACK_GIT_REPO
]
79 devstack_node
.run(args
=args
)
81 if branch
!= "master":
82 if branch
in DS_STABLE_BRANCHES
and not branch
.startswith("stable"):
83 branch
= "stable/" + branch
84 log
.info("Checking out {branch} branch...".format(branch
=branch
))
85 cmd
= "cd devstack && git checkout " + branch
86 devstack_node
.run(args
=cmd
)
88 log
.info("Installing DevStack...")
89 args
= ['cd', 'devstack', run
.Raw('&&'), './stack.sh']
90 devstack_node
.run(args
=args
)
93 def configure_devstack_and_ceph(ctx
, config
, devstack_node
, ceph_node
):
94 pool_size
= config
.get('pool_size', '128')
95 create_pools(ceph_node
, pool_size
)
96 distribute_ceph_conf(devstack_node
, ceph_node
)
97 # This is where we would install python-ceph and ceph-common but it appears
98 # the ceph task does that for us.
99 generate_ceph_keys(ceph_node
)
100 distribute_ceph_keys(devstack_node
, ceph_node
)
101 secret_uuid
= set_libvirt_secret(devstack_node
, ceph_node
)
102 update_devstack_config_files(devstack_node
, secret_uuid
)
103 set_apache_servername(devstack_node
)
104 # Rebooting is the most-often-used method of restarting devstack services
105 misc
.reboot(devstack_node
)
106 start_devstack(devstack_node
)
107 restart_apache(devstack_node
)
110 def create_pools(ceph_node
, pool_size
):
111 log
.info("Creating pools on Ceph cluster...")
113 for pool_name
in ['volumes', 'images', 'backups']:
114 args
= ['sudo', 'ceph', 'osd', 'pool', 'create', pool_name
, pool_size
]
115 ceph_node
.run(args
=args
)
118 def distribute_ceph_conf(devstack_node
, ceph_node
):
119 log
.info("Copying ceph.conf to DevStack node...")
121 ceph_conf_path
= '/etc/ceph/ceph.conf'
122 ceph_conf
= misc
.get_file(ceph_node
, ceph_conf_path
, sudo
=True)
123 misc
.sudo_write_file(devstack_node
, ceph_conf_path
, ceph_conf
)
126 def generate_ceph_keys(ceph_node
):
127 log
.info("Generating Ceph keys...")
130 ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
131 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'], # noqa
132 ['sudo', 'ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
133 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'], # noqa
134 ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
135 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'], # noqa
137 for cmd
in ceph_auth_cmds
:
138 ceph_node
.run(args
=cmd
)
141 def distribute_ceph_keys(devstack_node
, ceph_node
):
142 log
.info("Copying Ceph keys to DevStack node...")
144 def copy_key(from_remote
, key_name
, to_remote
, dest_path
, owner
):
145 key_stringio
= BytesIO()
147 args
=['sudo', 'ceph', 'auth', 'get-or-create', key_name
],
150 misc
.sudo_write_file(to_remote
, dest_path
,
151 key_stringio
, owner
=owner
)
153 dict(name
='client.glance',
154 path
='/etc/ceph/ceph.client.glance.keyring',
155 # devstack appears to just want root:root
156 #owner='glance:glance',
158 dict(name
='client.cinder',
159 path
='/etc/ceph/ceph.client.cinder.keyring',
160 # devstack appears to just want root:root
161 #owner='cinder:cinder',
163 dict(name
='client.cinder-backup',
164 path
='/etc/ceph/ceph.client.cinder-backup.keyring',
165 # devstack appears to just want root:root
166 #owner='cinder:cinder',
169 for key_dict
in keys
:
170 copy_key(ceph_node
, key_dict
['name'], devstack_node
,
171 key_dict
['path'], key_dict
.get('owner'))
174 def set_libvirt_secret(devstack_node
, ceph_node
):
175 log
.info("Setting libvirt secret...")
177 cinder_key
= ceph_node
.sh('sudo ceph auth get-key client.cinder').strip()
178 uuid
= devstack_node
.sh('uuidgen').strip()
180 secret_path
= '/tmp/secret.xml'
181 secret_template
= textwrap
.dedent("""
182 <secret ephemeral='no' private='no'>
185 <name>client.cinder secret</name>
188 misc
.sudo_write_file(devstack_node
, secret_path
,
189 secret_template
.format(uuid
=uuid
))
190 devstack_node
.run(args
=['sudo', 'virsh', 'secret-define', '--file',
192 devstack_node
.run(args
=['sudo', 'virsh', 'secret-set-value', '--secret',
193 uuid
, '--base64', cinder_key
])
197 def update_devstack_config_files(devstack_node
, secret_uuid
):
198 log
.info("Updating DevStack config files to use Ceph...")
200 def backup_config(node
, file_name
, backup_ext
='.orig.teuth'):
201 node
.run(args
=['cp', '-f', file_name
, file_name
+ backup_ext
])
203 def update_config(config_name
, config_stream
, update_dict
,
205 parser
= ConfigParser()
206 parser
.read_file(config_stream
)
207 for (key
, value
) in update_dict
.items():
208 parser
.set(section
, key
, value
)
209 out_stream
= six
.StringIO()
210 parser
.write(out_stream
)
215 dict(name
='/etc/glance/glance-api.conf', options
=dict(
217 rbd_store_user
='glance',
218 rbd_store_pool
='images',
219 show_image_direct_url
='True',)),
220 dict(name
='/etc/cinder/cinder.conf', options
=dict(
221 volume_driver
='cinder.volume.drivers.rbd.RBDDriver',
223 rbd_ceph_conf
='/etc/ceph/ceph.conf',
224 rbd_flatten_volume_from_snapshot
='false',
225 rbd_max_clone_depth
='5',
226 glance_api_version
='2',
228 rbd_secret_uuid
=secret_uuid
,
229 backup_driver
='cinder.backup.drivers.ceph',
230 backup_ceph_conf
='/etc/ceph/ceph.conf',
231 backup_ceph_user
='cinder-backup',
232 backup_ceph_chunk_size
='134217728',
233 backup_ceph_pool
='backups',
234 backup_ceph_stripe_unit
='0',
235 backup_ceph_stripe_count
='0',
236 restore_discard_excess_bytes
='true',
238 dict(name
='/etc/nova/nova.conf', options
=dict(
239 libvirt_images_type
='rbd',
240 libvirt_images_rbd_pool
='volumes',
241 libvirt_images_rbd_ceph_conf
='/etc/ceph/ceph.conf',
243 rbd_secret_uuid
=secret_uuid
,
244 libvirt_inject_password
='false',
245 libvirt_inject_key
='false',
246 libvirt_inject_partition
='-2',
250 for update
in updates
:
251 file_name
= update
['name']
252 options
= update
['options']
253 config_data
= misc
.get_file(devstack_node
, file_name
, sudo
=True)
254 config_stream
= six
.StringIO(config_data
)
255 backup_config(devstack_node
, file_name
)
256 new_config_stream
= update_config(file_name
, config_stream
, options
)
257 misc
.sudo_write_file(devstack_node
, file_name
, new_config_stream
)
260 def set_apache_servername(node
):
261 # Apache complains: "Could not reliably determine the server's fully
262 # qualified domain name, using 127.0.0.1 for ServerName"
263 # So, let's make sure it knows its name.
264 log
.info("Setting Apache ServerName...")
266 hostname
= node
.hostname
267 config_file
= '/etc/apache2/conf.d/servername'
268 misc
.sudo_write_file(node
, config_file
,
269 "ServerName {name}".format(name
=hostname
))
272 def start_devstack(devstack_node
):
273 log
.info("Patching devstack start script...")
274 # This causes screen to start headless - otherwise rejoin-stack.sh fails
275 # because there is no terminal attached.
276 cmd
= "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh"
277 devstack_node
.run(args
=cmd
)
279 log
.info("Starting devstack...")
280 cmd
= "cd devstack && ./rejoin-stack.sh"
281 devstack_node
.run(args
=cmd
)
283 # This was added because I was getting timeouts on Cinder requests - which
284 # were trying to access Keystone on port 5000. A more robust way to handle
285 # this would be to introduce a wait-loop on devstack_node that checks to
286 # see if a service is listening on port 5000.
287 log
.info("Waiting 30s for devstack to start...")
291 def restart_apache(node
):
292 node
.run(args
=['sudo', '/etc/init.d/apache2', 'restart'], wait
=True)
295 @contextlib.contextmanager
296 def exercise(ctx
, config
):
297 log
.info("Running devstack exercises...")
301 if not isinstance(config
, dict):
302 raise TypeError("config must be a dict")
304 devstack_node
= next(iter(ctx
.cluster
.only(is_devstack_node
).remotes
.keys()))
306 # TODO: save the log *and* preserve failures
307 #devstack_archive_dir = create_devstack_archive(ctx, devstack_node)
310 #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format( # noqa
311 # dir=devstack_archive_dir)
312 cmd
= "cd devstack && ./exercise.sh"
313 devstack_node
.run(args
=cmd
, wait
=True)
319 def create_devstack_archive(ctx
, devstack_node
):
320 test_dir
= misc
.get_testdir(ctx
)
321 devstack_archive_dir
= "{test_dir}/archive/devstack".format(
323 devstack_node
.run(args
="mkdir -p " + devstack_archive_dir
)
324 return devstack_archive_dir
327 @contextlib.contextmanager
328 def smoke(ctx
, config
):
329 log
.info("Running a basic smoketest...")
331 devstack_node
= next(iter(ctx
.cluster
.only(is_devstack_node
).remotes
.keys()))
332 an_osd_node
= next(iter(ctx
.cluster
.only(is_osd_node
).remotes
.keys()))
335 create_volume(devstack_node
, an_osd_node
, 'smoke0', 1)
341 def create_volume(devstack_node
, ceph_node
, vol_name
, size
):
343 :param size: The size of the volume, in GB
346 log
.info("Creating a {size}GB volume named {name}...".format(
349 args
= ['source', 'devstack/openrc', run
.Raw('&&'), 'cinder', 'create',
350 '--display-name', vol_name
, size
]
351 cinder_create
= devstack_node
.sh(args
, wait
=True)
352 vol_info
= parse_os_table(cinder_create
)
353 log
.debug("Volume info: %s", str(vol_info
))
356 rbd_output
= ceph_node
.sh("rbd --id cinder ls -l volumes", wait
=True)
357 except run
.CommandFailedError
:
358 log
.debug("Original rbd call failed; retrying without '--id cinder'")
359 rbd_output
= ceph_node
.sh("rbd ls -l volumes", wait
=True)
361 assert vol_info
['id'] in rbd_output
, \
362 "Volume not found on Ceph cluster"
363 assert vol_info
['size'] == size
, \
364 "Volume size on Ceph cluster is different than specified"
365 return vol_info
['id']
368 def parse_os_table(table_str
):
370 for line
in table_str
.split('\n'):
371 if line
.startswith('|'):
373 out_dict
[items
[1]] = items
[3]