"""
Deploy and configure Keystone for Teuthology
"""
import argparse
import contextlib
import logging

from teuthology import contextutil
from teuthology import misc as teuthology
from teuthology.exceptions import ConfigError
from teuthology.orchestra import run
from teuthology.orchestra.connection import split_user
from teuthology.packaging import install_package
from teuthology.packaging import remove_package
14 | ||
15 | log = logging.getLogger(__name__) | |
16 | ||
17 | ||
@contextlib.contextmanager
def install_packages(ctx, config):
    """
    Download the packaged dependencies of Keystone.
    Remove installed packages upon exit.

    The context passed in should be identical to the context
    passed in to the main task.

    :param ctx: teuthology run context (provides the cluster)
    :param config: dict mapping client roles to their per-role config
    """
    assert isinstance(config, dict)
    log.info('Installing packages for Keystone...')

    # Build-time dependencies differ between the two package families.
    deps = {
        'deb': ['libffi-dev', 'libssl-dev', 'libldap2-dev', 'libsasl2-dev'],
        'rpm': ['libffi-devel', 'openssl-devel'],
    }
    for (client, _) in config.items():
        # .keys() instead of the Python-2-only .iterkeys(); the one-tuple
        # unpacking still asserts exactly one remote matches the role.
        (remote,) = ctx.cluster.only(client).remotes.keys()
        for dep in deps[remote.os.package_type]:
            install_package(dep, remote)
    try:
        yield
    finally:
        log.info('Removing packaged dependencies of Keystone...')

        for (client, _) in config.items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            for dep in deps[remote.os.package_type]:
                remove_package(dep, remote)
47 | ||
def get_keystone_dir(ctx):
    """Return the path of the Keystone checkout under the test dir."""
    tdir = teuthology.get_testdir(ctx)
    return '{tdir}/keystone'.format(tdir=tdir)
50 | ||
def run_in_keystone_dir(ctx, client, args):
    """Run *args* on *client* from inside the Keystone checkout."""
    prefix = ['cd', get_keystone_dir(ctx), run.Raw('&&')]
    ctx.cluster.only(client).run(args=prefix + args)
55 | ||
def run_in_keystone_venv(ctx, client, args):
    """Run *args* on *client* inside Keystone's tox-built virtualenv."""
    activate = ['source', '.tox/venv/bin/activate', run.Raw('&&')]
    run_in_keystone_dir(ctx, client, activate + args)
62 | ||
def get_keystone_venved_cmd(ctx, cmd, args):
    """Build an argv that runs *cmd* with the venv's python interpreter."""
    kbindir = get_keystone_dir(ctx) + '/.tox/venv/bin/'
    interpreter = kbindir + 'python'
    script = kbindir + cmd
    return [interpreter, script] + args
66 | ||
def get_toxvenv_dir(ctx):
    """Return the virtualenv path recorded on ctx by the earlier tox task."""
    return ctx.tox.venv_path
69 | ||
@contextlib.contextmanager
def download(ctx, config):
    """
    Download Keystone from github.
    Remove the downloaded checkout upon exit.

    The context passed in should be identical to the context
    passed in to the main task.

    :param ctx: teuthology run context
    :param config: dict mapping client roles to their per-role config
                   (values may be None when task() was given a role list)
    """
    assert isinstance(config, dict)
    log.info('Downloading keystone...')
    keystonedir = get_keystone_dir(ctx)

    for (client, cconf) in config.items():
        # task() turns a bare role list into {role: None}; treat a
        # missing per-client config as empty so .get() below works.
        cconf = cconf or {}
        ctx.cluster.only(client).run(
            args=[
                'git', 'clone',
                '-b', cconf.get('force-branch', 'master'),
                'https://github.com/openstack/keystone.git',
                keystonedir,
            ],
        )

        # Optionally pin the checkout to an exact commit.
        sha1 = cconf.get('sha1')
        if sha1 is not None:
            run_in_keystone_dir(ctx, client, [
                'git', 'reset', '--hard', sha1,
            ],
            )

        # hax for http://tracker.ceph.com/issues/23659
        run_in_keystone_dir(ctx, client, [
            'sed', '-i',
            's/pysaml2<4.0.3,>=2.4.0/pysaml2>=4.5.0/',
            'requirements.txt'
        ],
        )
    try:
        yield
    finally:
        log.info('Removing keystone...')
        for client in config:
            ctx.cluster.only(client).run(
                args=['rm', '-rf', keystonedir],
            )
115 | ||
@contextlib.contextmanager
def setup_venv(ctx, config):
    """
    Setup the virtualenv for Keystone using tox.
    """
    assert isinstance(config, dict)
    log.info('Setting up virtualenv for keystone...')
    for (client, _) in config.items():
        # Build tox's 'venv' environment without running any test suite.
        run_in_keystone_dir(ctx, client, [
            'source',
            '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)),
            run.Raw('&&'),
            'tox', '-e', 'venv', '--notest',
        ])

        # The OpenStack CLI is used later to populate the deployment.
        run_in_keystone_venv(ctx, client,
                             ['pip', 'install', 'python-openstackclient'])
    try:
        yield
    finally:
        pass
137 | ||
@contextlib.contextmanager
def configure_instance(ctx, config):
    """Write keystone.conf, set up the Fernet key repo and sync the DB."""
    assert isinstance(config, dict)
    log.info('Configuring keystone...')

    keyrepo_dir = '{kdir}/etc/fernet-keys'.format(kdir=get_keystone_dir(ctx))
    for (client, _) in config.items():
        # prepare the config file from the shipped sample
        run_in_keystone_dir(ctx, client, [
            'cp', '-f',
            'etc/keystone.conf.sample',
            'etc/keystone.conf',
        ])
        # enable the static admin token used for bootstrapping
        run_in_keystone_dir(ctx, client, [
            'sed',
            '-e', 's/#admin_token =.*/admin_token = ADMIN/',
            '-i', 'etc/keystone.conf',
        ])
        # point the Fernet key repository at our private directory
        run_in_keystone_dir(ctx, client, [
            'sed',
            '-e', 's^#key_repository =.*^key_repository = {kr}^'.format(kr=keyrepo_dir),
            '-i', 'etc/keystone.conf',
        ])

        # prepare key repository for the Fernet token authenticator
        run_in_keystone_dir(ctx, client, ['mkdir', '-p', keyrepo_dir])
        run_in_keystone_venv(ctx, client, ['keystone-manage', 'fernet_setup'])

        # sync database
        run_in_keystone_venv(ctx, client, ['keystone-manage', 'db_sync'])
    yield
172 | ||
@contextlib.contextmanager
def run_keystone(ctx, config):
    """
    Start one public and one admin Keystone WSGI instance per configured
    client as background daemons; stop them all again on exit.

    :param ctx: teuthology run context (needs ctx.keystone endpoints and
                ctx.daemons, both set up before this runs)
    :param config: dict mapping client roles to their per-role config
    """
    assert isinstance(config, dict)
    # Was 'Configuring keystone...' — copy-pasted from configure_instance().
    log.info('Running keystone...')

    for (client, _) in config.items():
        # .keys() instead of the Python-2-only .iterkeys().
        (remote,) = ctx.cluster.only(client).remotes.keys()
        cluster_name, _, client_id = teuthology.split_role(client)

        # start the public endpoint
        client_public_with_id = 'keystone.public' + '.' + client_id

        public_host, public_port = ctx.keystone.public_endpoints[client]
        run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-public',
            ['--host', public_host, '--port', str(public_port),
             # Let's put the Keystone in background, wait for EOF
             # and after receiving it, send SIGTERM to the daemon.
             # This crazy hack is because Keystone, in contrast to
             # our other daemons, doesn't quit on stdin.close().
             # Teuthology relies on this behaviour.
             run.Raw('& { read; kill %1; }')
            ]
        )
        ctx.daemons.add_daemon(
            remote, 'keystone', client_public_with_id,
            cluster=cluster_name,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            cwd=get_keystone_dir(ctx),
            wait=False,
            check_status=False,
        )

        # start the admin endpoint
        client_admin_with_id = 'keystone.admin' + '.' + client_id

        admin_host, admin_port = ctx.keystone.admin_endpoints[client]
        run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-admin',
            ['--host', admin_host, '--port', str(admin_port),
             run.Raw('& { read; kill %1; }')
            ]
        )
        ctx.daemons.add_daemon(
            remote, 'keystone', client_admin_with_id,
            cluster=cluster_name,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            cwd=get_keystone_dir(ctx),
            wait=False,
            check_status=False,
        )

        # sleep driven synchronization
        run_in_keystone_venv(ctx, client, ['sleep', '15'])
    try:
        yield
    finally:
        # The original relied on the loop variables leaking out of the
        # for-loop above, so it only stopped the LAST client's daemons
        # (and raised NameError on an empty config). Stop every pair.
        for client in config:
            cluster_name, _, client_id = teuthology.split_role(client)

            log.info('Stopping Keystone admin instance')
            ctx.daemons.get_daemon('keystone', 'keystone.admin.' + client_id,
                                   cluster_name).stop()

            log.info('Stopping Keystone public instance')
            ctx.daemons.get_daemon('keystone', 'keystone.public.' + client_id,
                                   cluster_name).stop()
240 | ||
241 | ||
def dict_to_args(special, items):
    """
    Transform
        [(key1, val1), (special, val_special), (key3, val3) ]
    into:
        [ '--key1', 'val1', '--key3', 'val3', 'val_special' ]

    :param special: key whose value is appended positionally at the end
                    instead of as a '--key value' pair
    :param items: iterable of (key, value) pairs
    :returns: list of CLI arguments
    """
    args = []
    # Initialize so a missing special key no longer raises NameError
    # at the `if special_val:` check below.
    special_val = None
    for (k, v) in items:
        if k == special:
            special_val = v
        else:
            args.append('--{k}'.format(k=k))
            args.append(v)
    # Falsy special values (None, '') are deliberately not appended.
    if special_val:
        args.append(special_val)
    return args
259 | ||
def run_section_cmds(ctx, cclient, section_cmd, special,
                     section_config_list):
    """
    Run 'openstack <section_cmd> ...' once per entry of
    section_config_list, authenticating against the admin endpoint
    with the bootstrap ADMIN token.

    :param ctx: teuthology run context (needs ctx.keystone.admin_endpoints)
    :param cclient: client role whose venv runs the CLI
    :param section_cmd: e.g. 'user create'
    :param special: key passed positionally (see dict_to_args)
    :param section_config_list: list of dicts, one CLI invocation each
    """
    admin_host, admin_port = ctx.keystone.admin_endpoints[cclient]

    auth_section = [
        ('os-token', 'ADMIN'),
        ('os-url', 'http://{host}:{port}/v2.0'.format(host=admin_host,
                                                      port=admin_port)),
    ]

    for section_item in section_config_list:
        run_in_keystone_venv(ctx, cclient,
            ['openstack'] + section_cmd.split() +
            # list(...) because on Python 3 dict.items() is a view and
            # cannot be concatenated to a list directly (TypeError).
            dict_to_args(special, auth_section + list(section_item.items())))
274 | ||
def create_endpoint(ctx, cclient, service, url):
    """Register *url* as the public endpoint of *service* in Keystone."""
    section = {
        'service': service,
        'publicurl': url,
    }
    return run_section_cmds(ctx, cclient, 'endpoint create', 'service',
                            [section])
282 | ||
@contextlib.contextmanager
def fill_keystone(ctx, config):
    """Populate Keystone with the projects, users, roles, role mappings
    and services described in each client's configuration, then register
    Keystone's own identity endpoint."""
    assert isinstance(config, dict)

    for (cclient, cconfig) in config.items():
        # configure tenants/projects
        sections = [
            ('project create', cconfig['tenants']),
            ('user create', cconfig['users']),
            ('role create', cconfig['roles']),
            ('role add', cconfig['role-mappings']),
            ('service create', cconfig['services']),
        ]
        for cmd, entries in sections:
            run_section_cmds(ctx, cclient, cmd, 'name', entries)

        public_host, public_port = ctx.keystone.public_endpoints[cclient]
        url = 'http://{host}:{port}/v2.0'.format(host=public_host,
                                                 port=public_port)
        create_endpoint(ctx, cclient, 'keystone', url)
        # for the deferred endpoint creation; currently it's used in rgw.py
        ctx.keystone.create_endpoint = create_endpoint

        # sleep driven synchronization -- just in case
        run_in_keystone_venv(ctx, cclient, ['sleep', '3'])
    try:
        yield
    finally:
        pass
313 | ||
314 | def assign_ports(ctx, config, initial_port): | |
315 | """ | |
316 | Assign port numbers starting from @initial_port | |
317 | """ | |
318 | port = initial_port | |
319 | role_endpoints = {} | |
320 | for remote, roles_for_host in ctx.cluster.remotes.iteritems(): | |
321 | for role in roles_for_host: | |
322 | if role in config: | |
323 | role_endpoints[role] = (remote.name.split('@')[1], port) | |
324 | port += 1 | |
325 | ||
326 | return role_endpoints | |
327 | ||
@contextlib.contextmanager
def task(ctx, config):
    """
    Deploy and configure Keystone

    Example of configuration:

      - install:
      - ceph:
      - tox: [ client.0 ]
      - keystone:
          client.0:
            force-branch: master
            tenants:
              - name: admin
                description: Admin Tenant
            users:
              - name: admin
                password: ADMIN
                project: admin
            roles: [ name: admin, name: Member ]
            role-mappings:
              - name: admin
                user: admin
                project: admin
            services:
              - name: keystone
                type: identity
                description: Keystone Identity Service
              - name: swift
                type: object-store
                description: Swift Service
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task keystone only supports a list or dictionary for configuration"

    # ctx.tox is only set when the tox task ran earlier; probing the
    # attribute directly would raise AttributeError instead of the
    # actionable ConfigError, and ConfigError itself must be imported.
    if not hasattr(ctx, 'tox'):
        raise ConfigError('keystone must run after the tox task')

    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        # a bare role list means "defaults for each role"
        config = dict.fromkeys(config)

    log.debug('Keystone config is %s', config)

    ctx.keystone = argparse.Namespace()
    # Conventional Keystone ports: 5000 public, 35357 admin.
    ctx.keystone.public_endpoints = assign_ports(ctx, config, 5000)
    ctx.keystone.admin_endpoints = assign_ports(ctx, config, 35357)

    with contextutil.nested(
        lambda: install_packages(ctx=ctx, config=config),
        lambda: download(ctx=ctx, config=config),
        lambda: setup_venv(ctx=ctx, config=config),
        lambda: configure_instance(ctx=ctx, config=config),
        lambda: run_keystone(ctx=ctx, config=config),
        lambda: fill_keystone(ctx=ctx, config=config),
    ):
        yield