Commit | Line | Data |
---|---|---|
1 | """ |
2 | rgw routines | |
3 | """ | |
4 | import argparse | |
5 | import contextlib | |
6 | import json | |
7 | import logging | |
8 | import os | |
9 | import errno | |
10 | import util.rgw as rgw_utils | |
11 | ||
12 | from requests.packages.urllib3 import PoolManager | |
13 | from requests.packages.urllib3.util import Retry | |
14 | ||
15 | from cStringIO import StringIO | |
16 | ||
17 | from teuthology.orchestra import run | |
18 | from teuthology import misc as teuthology | |
19 | from teuthology import contextutil | |
20 | from teuthology.orchestra.run import CommandFailedError | |
21 | from util.rgw import rgwadmin, get_config_master_client, extract_zone_info, extract_region_info | |
22 | from util.rados import (rados, create_ec_pool, | |
23 | create_replicated_pool, | |
24 | create_cache_pool) | |
25 | ||
26 | log = logging.getLogger(__name__) | |
27 | ||
28 | @contextlib.contextmanager | |
29 | def create_apache_dirs(ctx, config, on_client = None, except_client = None): | |
30 | """ | |
31 | Remotely create apache directories. Delete when finished. | |
32 | """ | |
33 | log.info('Creating apache directories...') | |
34 | log.debug('client is %r', on_client) | |
35 | testdir = teuthology.get_testdir(ctx) | |
36 | clients_to_create_as = [on_client] | |
37 | if on_client is None: | |
38 | clients_to_create_as = config.keys() | |
39 | for client in clients_to_create_as: | |
40 | if client == except_client: | |
41 | continue | |
42 | cluster_name, daemon_type, client_id = teuthology.split_role(client) | |
43 | client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id | |
44 | ctx.cluster.only(client).run( | |
45 | args=[ | |
46 | 'mkdir', | |
47 | '-p', | |
48 | '{tdir}/apache/htdocs.{client_with_cluster}'.format(tdir=testdir, | |
49 | client_with_cluster=client_with_cluster), | |
50 | '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock'.format( | |
51 | tdir=testdir, | |
52 | client_with_cluster=client_with_cluster), | |
53 | run.Raw('&&'), | |
54 | 'mkdir', | |
55 | '{tdir}/archive/apache.{client_with_cluster}'.format(tdir=testdir, | |
56 | client_with_cluster=client_with_cluster), | |
57 | ], | |
58 | ) | |
59 | try: | |
60 | yield | |
61 | finally: | |
62 | log.info('Cleaning up apache directories...') | |
63 | for client in clients_to_create_as: | |
64 | ctx.cluster.only(client).run( | |
65 | args=[ | |
66 | 'rm', | |
67 | '-rf', | |
68 | '{tdir}/apache/tmp.{client_with_cluster}'.format(tdir=testdir, | |
69 | client_with_cluster=client_with_cluster), | |
70 | run.Raw('&&'), | |
71 | 'rmdir', | |
72 | '{tdir}/apache/htdocs.{client_with_cluster}'.format(tdir=testdir, | |
73 | client_with_cluster=client_with_cluster), | |
74 | ], | |
75 | ) | |
76 | for client in clients_to_create_as: | |
77 | ctx.cluster.only(client).run( | |
78 | args=[ | |
79 | 'rmdir', | |
80 | '{tdir}/apache'.format(tdir=testdir), | |
81 | ], | |
82 | check_status=False, # only need to remove once per host | |
83 | ) | |
84 | ||
85 | ||
86 | def _use_uds_with_fcgi(remote): | |
87 | """ | |
88 | Returns true if this node supports the usage of | |
89 | unix domain sockets with mod_proxy_fcgi. | |
90 | ||
91 | FIXME: returns False always for now until we know for | |
92 | sure what distros will support UDS. RHEL 7.0 is the only one | |
93 | currently I know of, but we can't install that version of apache | |
94 | yet in the labs. | |
95 | """ | |
96 | return False | |
97 | ||
98 | ||
99 | @contextlib.contextmanager | |
100 | def ship_apache_configs(ctx, config, role_endpoints, on_client = None, | |
101 | except_client = None): | |
102 | """ | |
103 | Ship apache config and rgw.fcgi to all clients. Clean up on termination. | |
104 | """ | |
105 | assert isinstance(config, dict) | |
106 | assert isinstance(role_endpoints, dict) | |
107 | testdir = teuthology.get_testdir(ctx) | |
108 | log.info('Shipping apache config and rgw.fcgi...') | |
109 | src = os.path.join(os.path.dirname(__file__), 'apache.conf.template') | |
110 | clients_to_create_as = [on_client] | |
111 | if on_client is None: | |
112 | clients_to_create_as = config.keys() | |
113 | for client in clients_to_create_as: | |
114 | if client == except_client: | |
115 | continue | |
116 | cluster_name, daemon_type, client_id = teuthology.split_role(client) | |
117 | client_with_id = daemon_type + '.' + client_id | |
118 | client_with_cluster = cluster_name + '.' + client_with_id | |
119 | (remote,) = ctx.cluster.only(client).remotes.keys() | |
120 | system_type = teuthology.get_system_type(remote) | |
121 | conf = config.get(client) | |
122 | if not conf: | |
123 | conf = {} | |
124 | idle_timeout = conf.get('idle_timeout', ctx.rgw.default_idle_timeout) | |
125 | if system_type == 'deb': | |
126 | mod_path = '/usr/lib/apache2/modules' | |
127 | print_continue = 'on' | |
128 | user = 'www-data' | |
129 | group = 'www-data' | |
130 | apache24_modconfig = ''' | |
131 | IncludeOptional /etc/apache2/mods-available/mpm_event.conf | |
132 | IncludeOptional /etc/apache2/mods-available/mpm_event.load | |
133 | ''' | |
134 | else: | |
135 | mod_path = '/usr/lib64/httpd/modules' | |
136 | print_continue = 'off' | |
137 | user = 'apache' | |
138 | group = 'apache' | |
139 | apache24_modconfig = \ | |
140 | 'IncludeOptional /etc/httpd/conf.modules.d/00-mpm.conf' | |
141 | host, port = role_endpoints[client] | |
142 | ||
143 | # decide if we want to use mod_fastcgi or mod_proxy_fcgi | |
144 | template_dir = os.path.dirname(__file__) | |
145 | fcgi_config = os.path.join(template_dir, | |
146 | 'mod_proxy_fcgi.tcp.conf.template') | |
147 | if ctx.rgw.use_fastcgi: | |
148 | log.info("Apache is configured to use mod_fastcgi") | |
149 | fcgi_config = os.path.join(template_dir, | |
150 | 'mod_fastcgi.conf.template') | |
151 | elif _use_uds_with_fcgi(remote): | |
152 | log.info("Apache is configured to use mod_proxy_fcgi with UDS") | |
153 | fcgi_config = os.path.join(template_dir, | |
154 | 'mod_proxy_fcgi.uds.conf.template') | |
155 | else: | |
156 | log.info("Apache is configured to use mod_proxy_fcgi with TCP") | |
157 | ||
158 | with open(fcgi_config, 'rb') as f: | |
159 | fcgi_config = f.read() | |
160 | with open(src, 'rb') as f: | |
161 | conf = f.read() + fcgi_config | |
162 | conf = conf.format( | |
163 | testdir=testdir, | |
164 | mod_path=mod_path, | |
165 | print_continue=print_continue, | |
166 | host=host, | |
167 | port=port, | |
168 | client=client_with_cluster, | |
169 | idle_timeout=idle_timeout, | |
170 | user=user, | |
171 | group=group, | |
172 | apache24_modconfig=apache24_modconfig, | |
173 | ) | |
174 | teuthology.write_file( | |
175 | remote=remote, | |
176 | path='{tdir}/apache/apache.{client_with_cluster}.conf'.format( | |
177 | tdir=testdir, | |
178 | client_with_cluster=client_with_cluster), | |
179 | data=conf, | |
180 | ) | |
181 | rgw_options = [] | |
182 | if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote): | |
183 | rgw_options = [ | |
184 | '--rgw-socket-path', | |
185 | '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock/rgw_sock'.format( | |
186 | tdir=testdir, | |
187 | client_with_cluster=client_with_cluster | |
188 | ), | |
189 | '--rgw-frontends', | |
190 | 'fastcgi', | |
191 | ] | |
192 | else: | |
193 | rgw_options = [ | |
194 | '--rgw-socket-path', '""', | |
195 | '--rgw-print-continue', 'false', | |
196 | '--rgw-frontends', | |
197 | 'fastcgi socket_port=9000 socket_host=0.0.0.0', | |
198 | ] | |
199 | ||
200 | teuthology.write_file( | |
201 | remote=remote, | |
202 | path='{tdir}/apache/htdocs.{client_with_cluster}/rgw.fcgi'.format( | |
203 | tdir=testdir, | |
204 | client_with_cluster=client_with_cluster), | |
205 | data="""#!/bin/sh | |
206 | ulimit -c unlimited | |
207 | exec radosgw -f -n {client_with_id} --cluster {cluster_name} -k /etc/ceph/{client_with_cluster}.keyring {rgw_options} | |
208 | ||
209 | """.format(tdir=testdir, client_with_id=client_with_id, client_with_cluster=client_with_cluster, cluster_name=cluster_name, rgw_options=" ".join(rgw_options)) | |
210 | ) | |
211 | remote.run( | |
212 | args=[ | |
213 | 'chmod', | |
214 | 'a=rx', | |
215 | '{tdir}/apache/htdocs.{client_with_cluster}/rgw.fcgi'.format(tdir=testdir, | |
216 | client_with_cluster=client_with_cluster), | |
217 | ], | |
218 | ) | |
219 | try: | |
220 | yield | |
221 | finally: | |
222 | log.info('Removing apache config...') | |
223 | for client in clients_to_create_as: | |
224 | ctx.cluster.only(client).run( | |
225 | args=[ | |
226 | 'rm', | |
227 | '-f', | |
228 | '{tdir}/apache/apache.{client_with_cluster}.conf'.format(tdir=testdir, | |
229 | client_with_cluster=client_with_cluster), | |
230 | run.Raw('&&'), | |
231 | 'rm', | |
232 | '-f', | |
233 | '{tdir}/apache/htdocs.{client_with_cluster}/rgw.fcgi'.format( | |
234 | tdir=testdir, | |
235 | client_with_cluster=client_with_cluster), | |
236 | ], | |
237 | ) | |
238 | ||
239 | ||
240 | @contextlib.contextmanager | |
241 | def start_rgw(ctx, config, on_client = None, except_client = None): | |
242 | """ | |
243 | Start rgw on remote sites. | |
244 | """ | |
245 | log.info('Starting rgw...') | |
246 | log.debug('client %r', on_client) | |
247 | clients_to_run = [on_client] | |
248 | if on_client is None: | |
249 | clients_to_run = config.keys() | |
250 | log.debug('client %r', clients_to_run) | |
251 | testdir = teuthology.get_testdir(ctx) | |
252 | for client in clients_to_run: | |
253 | if client == except_client: | |
254 | continue | |
255 | (remote,) = ctx.cluster.only(client).remotes.iterkeys() | |
256 | cluster_name, daemon_type, client_id = teuthology.split_role(client) | |
257 | client_with_id = daemon_type + '.' + client_id | |
258 | client_with_cluster = cluster_name + '.' + client_with_id | |
259 | zone = rgw_utils.zone_for_client(ctx, client) | |
260 | log.debug('zone %s', zone) | |
261 | ||
262 | client_config = config.get(client) | |
263 | if client_config is None: | |
264 | client_config = {} | |
265 | log.info("rgw %s config is %s", client, client_config) | |
266 | id_ = client.split('.', 1)[1] | |
267 | log.info('client {client} is id {id}'.format(client=client, id=id_)) | |
268 | cmd_prefix = [ | |
269 | 'sudo', | |
270 | 'adjust-ulimits', | |
271 | 'ceph-coverage', | |
272 | '{tdir}/archive/coverage'.format(tdir=testdir), | |
273 | 'daemon-helper', | |
274 | 'term', | |
275 | ] | |
276 | ||
277 | rgw_cmd = ['radosgw'] | |
278 | ||
279 | if ctx.rgw.frontend == 'apache': | |
280 | if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote): | |
281 | rgw_cmd.extend([ | |
282 | '--rgw-socket-path', | |
283 | '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock/rgw_sock'.format( | |
284 | tdir=testdir, | |
285 | client_with_cluster=client_with_cluster, | |
286 | ), | |
287 | '--rgw-frontends', | |
288 | 'fastcgi', | |
289 | ]) | |
290 | else: | |
291 | # for mod_proxy_fcgi, using tcp | |
292 | rgw_cmd.extend([ | |
293 | '--rgw-socket-path', '', | |
294 | '--rgw-print-continue', 'false', | |
295 | '--rgw-frontends', | |
296 | 'fastcgi socket_port=9000 socket_host=0.0.0.0', | |
297 | ]) | |
298 | ||
299 | elif ctx.rgw.frontend == 'civetweb': | |
300 | host, port = ctx.rgw.role_endpoints[client] | |
301 | rgw_cmd.extend([ | |
302 | '--rgw-frontends', | |
303 | 'civetweb port={port}'.format(port=port), | |
304 | ]) | |
305 | ||
306 | if zone is not None: | |
307 | rgw_cmd.extend(['--rgw-zone', zone]) | |
308 | ||
309 | rgw_cmd.extend([ | |
310 | '-n', client_with_id, | |
311 | '--cluster', cluster_name, | |
312 | '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster), | |
313 | '--log-file', | |
314 | '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster), | |
315 | '--rgw_ops_log_socket_path', | |
316 | '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir, | |
317 | client_with_cluster=client_with_cluster), | |
318 | '--foreground', | |
319 | run.Raw('|'), | |
320 | 'sudo', | |
321 | 'tee', | |
322 | '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(tdir=testdir, | |
323 | client_with_cluster=client_with_cluster), | |
324 | run.Raw('2>&1'), | |
325 | ]) | |
326 | ||
327 | if client_config.get('valgrind'): | |
328 | cmd_prefix = teuthology.get_valgrind_args( | |
329 | testdir, | |
330 | client, | |
331 | cmd_prefix, | |
332 | client_config.get('valgrind') | |
333 | ) | |
334 | ||
335 | run_cmd = list(cmd_prefix) | |
336 | run_cmd.extend(rgw_cmd) | |
337 | ||
338 | ctx.daemons.add_daemon( | |
339 | remote, 'rgw', client, | |
340 | cluster=cluster_name, | |
341 | args=run_cmd, | |
342 | logger=log.getChild(client), | |
343 | stdin=run.PIPE, | |
344 | wait=False, | |
345 | ) | |
346 | ||
347 | # XXX: add_daemon() doesn't let us wait until radosgw finishes startup | |
348 | # use a connection pool with retry/backoff to poll each gateway until it starts listening | |
349 | http = PoolManager(retries=Retry(connect=8, backoff_factor=1)) | |
350 | for client in clients_to_run: | |
351 | if client == except_client: | |
352 | continue | |
353 | host, port = ctx.rgw.role_endpoints[client] | |
354 | endpoint = 'http://{host}:{port}/'.format(host=host, port=port) | |
355 | log.info('Polling {client} until it starts accepting connections on {endpoint}'.format(client=client, endpoint=endpoint)) | |
356 | http.request('GET', endpoint) | |
357 | ||
358 | try: | |
359 | yield | |
360 | finally: | |
361 | teuthology.stop_daemons_of_type(ctx, 'rgw') | |
362 | for client in config.iterkeys(): | |
363 | ctx.cluster.only(client).run( | |
364 | args=[ | |
365 | 'rm', | |
366 | '-f', | |
367 | '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir, | |
368 | client_with_cluster=client_with_cluster), | |
369 | ], | |
370 | ) | |
371 | ||
372 | ||
373 | @contextlib.contextmanager | |
374 | def start_apache(ctx, config, on_client = None, except_client = None): | |
375 | """ | |
376 | Start apache on remote sites. | |
377 | """ | |
378 | log.info('Starting apache...') | |
379 | testdir = teuthology.get_testdir(ctx) | |
380 | apaches = {} | |
381 | clients_to_run = [on_client] | |
382 | if on_client is None: | |
383 | clients_to_run = config.keys() | |
384 | for client in clients_to_run: | |
385 | cluster_name, daemon_type, client_id = teuthology.split_role(client) | |
386 | client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id | |
387 | if client == except_client: | |
388 | continue | |
389 | (remote,) = ctx.cluster.only(client).remotes.keys() | |
390 | system_type = teuthology.get_system_type(remote) | |
391 | if system_type == 'deb': | |
392 | apache_name = 'apache2' | |
393 | else: | |
394 | try: | |
395 | remote.run( | |
396 | args=[ | |
397 | 'stat', | |
398 | '/usr/sbin/httpd.worker', | |
399 | ], | |
400 | ) | |
401 | apache_name = '/usr/sbin/httpd.worker' | |
402 | except CommandFailedError: | |
403 | apache_name = '/usr/sbin/httpd' | |
404 | ||
405 | proc = remote.run( | |
406 | args=[ | |
407 | 'adjust-ulimits', | |
408 | 'daemon-helper', | |
409 | 'kill', | |
410 | apache_name, | |
411 | '-X', | |
412 | '-f', | |
413 | '{tdir}/apache/apache.{client_with_cluster}.conf'.format(tdir=testdir, | |
414 | client_with_cluster=client_with_cluster), | |
415 | ], | |
416 | logger=log.getChild(client), | |
417 | stdin=run.PIPE, | |
418 | wait=False, | |
419 | ) | |
420 | apaches[client_with_cluster] = proc | |
421 | ||
422 | try: | |
423 | yield | |
424 | finally: | |
425 | log.info('Stopping apache...') | |
426 | for client, proc in apaches.iteritems(): | |
427 | proc.stdin.close() | |
428 | ||
429 | run.wait(apaches.itervalues()) | |
430 | ||
431 | def extract_user_info(client_config): | |
432 | """ | |
433 | Extract user info from the client config specified. Returns a dict | |
434 | that includes system key information. | |
435 | """ | |
436 | # test if there isn't a system user or if there isn't a name for that | |
437 | # user, return None | |
438 | if ('system user' not in client_config or | |
439 | 'name' not in client_config['system user']): | |
440 | return None | |
441 | ||
442 | user_info = dict() | |
443 | user_info['system_key'] = dict( | |
444 | user=client_config['system user']['name'], | |
445 | access_key=client_config['system user']['access key'], | |
446 | secret_key=client_config['system user']['secret key'], | |
447 | ) | |
448 | return user_info | |
449 | ||
450 | ||
451 | def assign_ports(ctx, config): | |
452 | """ | |
453 | Assign port numbers, starting with port 7280. | |
454 | """ | |
455 | port = 7280 | |
456 | role_endpoints = {} | |
457 | for remote, roles_for_host in ctx.cluster.remotes.iteritems(): | |
458 | for role in roles_for_host: | |
459 | if role in config: | |
460 | role_endpoints[role] = (remote.name.split('@')[1], port) | |
461 | port += 1 | |
462 | ||
463 | return role_endpoints | |
464 | ||
465 | ||
466 | def fill_in_endpoints(region_info, role_zones, role_endpoints): | |
467 | """ | |
468 | Iterate through the list of role_endpoints, filling in zone information | |
469 | ||
470 | :param region_info: region data | |
471 | :param role_zones: region and zone information. | |
472 | :param role_endpoints: endpoints being used | |
473 | """ | |
474 | for role, (host, port) in role_endpoints.iteritems(): | |
475 | region, zone, zone_info, _ = role_zones[role] | |
476 | host, port = role_endpoints[role] | |
477 | endpoint = 'http://{host}:{port}/'.format(host=host, port=port) | |
478 | # check if the region specified under client actually exists | |
479 | # in region_info (it should, if properly configured). | |
480 | # If not, throw a reasonable error | |
481 | if region not in region_info: | |
482 | raise Exception( | |
483 | 'Region: {region} was specified but no corresponding' | |
484 | ' entry was found under \'regions\''.format(region=region)) | |
485 | ||
486 | region_conf = region_info[region] | |
487 | region_conf.setdefault('endpoints', []) | |
488 | region_conf['endpoints'].append(endpoint) | |
489 | ||
490 | # this is the payload for the 'zones' field in the region field | |
491 | zone_payload = dict() | |
492 | zone_payload['endpoints'] = [endpoint] | |
493 | zone_payload['name'] = zone | |
494 | ||
495 | # Pull the log meta and log data settings out of zone_info, if they | |
496 | # exist, then pop them as they don't actually belong in the zone info | |
497 | for key in ['rgw log meta', 'rgw log data']: | |
498 | new_key = key.split(' ', 1)[1] | |
499 | new_key = new_key.replace(' ', '_') | |
500 | ||
501 | if key in zone_info: | |
502 | value = zone_info.pop(key) | |
503 | else: | |
504 | value = 'false' | |
505 | ||
506 | zone_payload[new_key] = value | |
507 | ||
508 | region_conf.setdefault('zones', []) | |
509 | region_conf['zones'].append(zone_payload) | |
510 | ||
511 | ||
512 | @contextlib.contextmanager | |
513 | def configure_users_for_client(ctx, config, client, everywhere=False): | |
514 | """ | |
515 | Create users by remotely running rgwadmin commands using extracted | |
516 | user information. | |
517 | """ | |
518 | log.info('Configuring users...') | |
519 | log.info('for client %s', client) | |
520 | log.info('everywhere %s', everywhere) | |
521 | ||
522 | # For data sync the master zones and regions must have the | |
523 | # system users of the secondary zones. To keep this simple, | |
524 | # just create the system users on every client if regions are | |
525 | # configured. | |
526 | clients_to_create_as = [client] | |
527 | if everywhere: | |
528 | clients_to_create_as = config.keys() | |
529 | ||
530 | # extract the user info and append it to the payload tuple for the given | |
531 | # client | |
532 | for client, c_config in config.iteritems(): | |
533 | if not c_config: | |
534 | continue | |
535 | user_info = extract_user_info(c_config) | |
536 | if not user_info: | |
537 | continue | |
538 | ||
539 | for client_name in clients_to_create_as: | |
540 | log.debug('Creating user {user} on {client}'.format( | |
541 | user=user_info['system_key']['user'], client=client_name)) | |
542 | rgwadmin(ctx, client_name, | |
543 | cmd=[ | |
544 | 'user', 'create', | |
545 | '--uid', user_info['system_key']['user'], | |
546 | '--access-key', user_info['system_key']['access_key'], | |
547 | '--secret', user_info['system_key']['secret_key'], | |
548 | '--display-name', user_info['system_key']['user'], | |
549 | '--system', | |
550 | ], | |
551 | check_status=True, | |
552 | ) | |
553 | yield | |
554 | ||
555 | @contextlib.contextmanager | |
556 | def configure_users(ctx, config, everywhere=False): | |
557 | """ | |
558 | Create users by remotely running rgwadmin commands using extracted | |
559 | user information. | |
560 | """ | |
561 | log.info('Configuring users...') | |
562 | ||
563 | # extract the user info and append it to the payload tuple for the given | |
564 | # client | |
565 | for client, c_config in config.iteritems(): | |
566 | if not c_config: | |
567 | continue | |
568 | user_info = extract_user_info(c_config) | |
569 | if not user_info: | |
570 | continue | |
571 | ||
572 | # For data sync the master zones and regions must have the | |
573 | # system users of the secondary zones. To keep this simple, | |
574 | # just create the system users on every client if regions are | |
575 | # configured. | |
576 | clients_to_create_as = [client] | |
577 | if everywhere: | |
578 | clients_to_create_as = config.keys() | |
579 | for client_name in clients_to_create_as: | |
580 | log.debug('Creating user {user} on {client}'.format( | |
581 | user=user_info['system_key']['user'], client=client)) | |
582 | rgwadmin(ctx, client_name, | |
583 | cmd=[ | |
584 | 'user', 'create', | |
585 | '--uid', user_info['system_key']['user'], | |
586 | '--access-key', user_info['system_key']['access_key'], | |
587 | '--secret', user_info['system_key']['secret_key'], | |
588 | '--display-name', user_info['system_key']['user'], | |
589 | '--system', | |
590 | ], | |
591 | check_status=True, | |
592 | ) | |
593 | ||
594 | yield | |
595 | ||
596 | @contextlib.contextmanager | |
597 | def create_nonregion_pools(ctx, config, regions): | |
598 | """Create replicated or erasure coded data pools for rgw.""" | |
599 | if regions: | |
600 | yield | |
601 | return | |
602 | ||
603 | log.info('creating data pools') | |
604 | for client in config.keys(): | |
605 | (remote,) = ctx.cluster.only(client).remotes.iterkeys() | |
606 | data_pool = '.rgw.buckets' | |
607 | cluster_name, daemon_type, client_id = teuthology.split_role(client) | |
608 | ||
609 | if ctx.rgw.ec_data_pool: | |
610 | create_ec_pool(remote, data_pool, client, 64, | |
611 | ctx.rgw.erasure_code_profile, cluster_name) | |
612 | else: | |
613 | create_replicated_pool(remote, data_pool, 64, cluster_name) | |
614 | if ctx.rgw.cache_pools: | |
615 | create_cache_pool(remote, data_pool, data_pool + '.cache', 64, | |
616 | 64*1024*1024, cluster_name) | |
617 | yield | |
618 | ||
619 | @contextlib.contextmanager | |
620 | def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints, realm, master_client): | |
621 | """ | |
622 | Configure multisite regions and zones from rados and rgw. | |
623 | """ | |
624 | if not regions: | |
625 | log.debug( | |
626 | 'In rgw.configure_multisite_regions_and_zones() and regions is None. ' | |
627 | 'Bailing') | |
628 | yield | |
629 | return | |
630 | ||
631 | if not realm: | |
632 | log.debug( | |
633 | 'In rgw.configure_multisite_regions_and_zones() and realm is None. ' | |
634 | 'Bailing') | |
635 | yield | |
636 | return | |
637 | ||
638 | log.info('Configuring multisite regions and zones...') | |
639 | ||
640 | log.debug('config is %r', config) | |
641 | log.debug('regions are %r', regions) | |
642 | log.debug('role_endpoints = %r', role_endpoints) | |
643 | log.debug('realm is %r', realm) | |
644 | ||
645 | # extract the zone info | |
646 | role_zones = dict([(client, extract_zone_info(ctx, client, c_config)) | |
647 | for client, c_config in config.iteritems()]) | |
648 | log.debug('role_zones = %r', role_zones) | |
649 | ||
650 | # extract the user info and append it to the payload tuple for the given | |
651 | # client | |
652 | for client, c_config in config.iteritems(): | |
653 | if not c_config: | |
654 | user_info = None | |
655 | else: | |
656 | user_info = extract_user_info(c_config) | |
657 | ||
658 | (region, zone, zone_info) = role_zones[client] | |
659 | role_zones[client] = (region, zone, zone_info, user_info) | |
660 | ||
661 | region_info = dict([ | |
662 | (region_name, extract_region_info(region_name, r_config)) | |
663 | for region_name, r_config in regions.iteritems()]) | |
664 | ||
665 | fill_in_endpoints(region_info, role_zones, role_endpoints) | |
666 | ||
667 | # clear out the old defaults | |
668 | cluster_name, daemon_type, client_id = teuthology.split_role(master_client) | |
669 | first_mon = teuthology.get_first_mon(ctx, config, cluster_name) | |
670 | (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() | |
671 | ||
672 | # read master zonegroup and master_zone | |
673 | for zonegroup, zg_info in region_info.iteritems(): | |
674 | if zg_info['is_master']: | |
675 | master_zonegroup = zonegroup | |
676 | master_zone = zg_info['master_zone'] | |
677 | break | |
678 | ||
679 | log.debug('master zonegroup =%r', master_zonegroup) | |
680 | log.debug('master zone = %r', master_zone) | |
681 | log.debug('master client = %r', master_client) | |
682 | ||
683 | rgwadmin(ctx, master_client, | |
684 | cmd=['realm', 'create', '--rgw-realm', realm, '--default'], | |
685 | check_status=True) | |
686 | ||
687 | for region, info in region_info.iteritems(): | |
688 | region_json = json.dumps(info) | |
689 | log.debug('region info is: %s', region_json) | |
690 | rgwadmin(ctx, master_client, | |
691 | cmd=['zonegroup', 'set'], | |
692 | stdin=StringIO(region_json), | |
693 | check_status=True) | |
694 | ||
695 | rgwadmin(ctx, master_client, | |
696 | cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup], | |
697 | check_status=True) | |
698 | ||
699 | for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems(): | |
700 | (remote,) = ctx.cluster.only(role).remotes.keys() | |
701 | for pool_info in zone_info['placement_pools']: | |
702 | remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', | |
703 | pool_info['val']['index_pool'], '64', '64', '--cluster', cluster_name]) | |
704 | if ctx.rgw.ec_data_pool: | |
705 | create_ec_pool(remote, pool_info['val']['data_pool'], | |
706 | zone, 64, ctx.rgw.erasure_code_profile, cluster_name) | |
707 | else: | |
708 | create_replicated_pool(remote, pool_info['val']['data_pool'], 64, cluster_name) | |
709 | ||
710 | (zonegroup, zone, zone_info, user_info) = role_zones[master_client] | |
711 | zone_json = json.dumps(dict(zone_info.items() + user_info.items())) | |
712 | log.debug("zone info is: %r", zone_json) | |
713 | rgwadmin(ctx, master_client, | |
714 | cmd=['zone', 'set', '--rgw-zonegroup', zonegroup, | |
715 | '--rgw-zone', zone], | |
716 | stdin=StringIO(zone_json), | |
717 | check_status=True) | |
718 | ||
719 | rgwadmin(ctx, master_client, | |
720 | cmd=['zone', 'default', '--rgw-zone', zone], | |
721 | check_status=True) | |
722 | ||
723 | rgwadmin(ctx, master_client, | |
724 | cmd=['period', 'update', '--commit'], | |
725 | check_status=True) | |
726 | ||
727 | yield | |
728 | ||
729 | def configure_compression_in_default_zone(ctx, config): | |
730 | ceph_config = ctx.ceph['ceph'].conf.get('global', {}) | |
731 | ceph_config.update(ctx.ceph['ceph'].conf.get('client', {})) | |
732 | for client, c_config in config.iteritems(): | |
733 | ceph_config.update(ctx.ceph['ceph'].conf.get(client, {})) | |
734 | key = 'rgw compression type' | |
735 | if key not in ceph_config: | |
736 | log.debug('No compression setting to enable') | |
737 | break | |
738 | compression = ceph_config[key] | |
739 | log.debug('Configuring compression type = %s', compression) | |
740 | ||
741 | # XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete(). | |
742 | # issue a 'radosgw-admin user list' command to trigger this | |
743 | rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True) | |
744 | ||
745 | rgwadmin(ctx, client, | |
746 | cmd=['zone', 'placement', 'modify', '--rgw-zone', 'default', | |
747 | '--placement-id', 'default-placement', '--compression', compression], | |
748 | check_status=True) | |
749 | break # only the first client | |
750 | ||
751 | @contextlib.contextmanager | |
752 | def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm): | |
753 | """ | |
754 | Configure regions and zones from rados and rgw. | |
755 | """ | |
756 | if not regions: | |
757 | log.debug( | |
758 | 'In rgw.configure_regions_and_zones() and regions is None. ' | |
759 | 'Bailing') | |
760 | configure_compression_in_default_zone(ctx, config) | |
761 | yield | |
762 | return | |
763 | ||
764 | if not realm: | |
765 | log.debug( | |
766 | 'In rgw.configure_regions_and_zones() and realm is None. ' | |
767 | 'Bailing') | |
768 | configure_compression_in_default_zone(ctx, config) | |
769 | yield | |
770 | return | |
771 | ||
772 | log.info('Configuring regions and zones...') | |
773 | ||
774 | log.debug('config is %r', config) | |
775 | log.debug('regions are %r', regions) | |
776 | log.debug('role_endpoints = %r', role_endpoints) | |
777 | log.debug('realm is %r', realm) | |
778 | ||
779 | # extract the zone info | |
780 | role_zones = dict([(client, extract_zone_info(ctx, client, c_config)) | |
781 | for client, c_config in config.iteritems()]) | |
782 | log.debug('role_zones = %r', role_zones) | |
783 | ||
784 | # extract the user info and append it to the payload tuple for the given | |
785 | # client | |
786 | for client, c_config in config.iteritems(): | |
787 | if not c_config: | |
788 | user_info = None | |
789 | else: | |
790 | user_info = extract_user_info(c_config) | |
791 | ||
792 | (region, zone, zone_info) = role_zones[client] | |
793 | role_zones[client] = (region, zone, zone_info, user_info) | |
794 | ||
795 | region_info = dict([ | |
796 | (region_name, extract_region_info(region_name, r_config)) | |
797 | for region_name, r_config in regions.iteritems()]) | |
798 | ||
799 | fill_in_endpoints(region_info, role_zones, role_endpoints) | |
800 | ||
801 | # clear out the old defaults | |
802 | cluster_name, daemon_type, client_id = teuthology.split_role(client) | |
803 | first_mon = teuthology.get_first_mon(ctx, config, cluster_name) | |
804 | (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() | |
805 | # removing these objects from .rgw.root and the per-zone root pools | |
806 | # may or may not matter | |
807 | rados(ctx, mon, | |
808 | cmd=['-p', '.rgw.root', 'rm', 'region_info.default', '--cluster', cluster_name]) | |
809 | rados(ctx, mon, | |
810 | cmd=['-p', '.rgw.root', 'rm', 'zone_info.default', '--cluster', cluster_name]) | |
811 | ||
812 | # read master zonegroup and master_zone | |
813 | for zonegroup, zg_info in region_info.iteritems(): | |
814 | if zg_info['is_master']: | |
815 | master_zonegroup = zonegroup | |
816 | master_zone = zg_info['master_zone'] | |
817 | break | |
818 | ||
819 | for client in config.iterkeys(): | |
820 | (zonegroup, zone, zone_info, user_info) = role_zones[client] | |
821 | if zonegroup == master_zonegroup and zone == master_zone: | |
822 | master_client = client | |
823 | break | |
824 | ||
825 | log.debug('master zonegroup =%r', master_zonegroup) | |
826 | log.debug('master zone = %r', master_zone) | |
827 | log.debug('master client = %r', master_client) | |
828 | log.debug('config %r ', config) | |
829 | ||
830 | (ret, out) = rgwadmin(ctx, client, | |
831 | cmd=['realm', 'create', '--rgw-realm', realm, '--default']) | |
832 | log.debug('realm create ret %r exists %r', -ret, errno.EEXIST) | |
833 | assert ret == 0 or ret == -errno.EEXIST | |
834 | if ret == -errno.EEXIST: | |
835 | log.debug('realm %r exists', realm) | |
836 | ||
837 | for client in config.iterkeys(): | |
838 | for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems(): | |
839 | rados(ctx, mon, | |
840 | cmd=['-p', zone_info['domain_root'], | |
841 | 'rm', 'region_info.default', '--cluster', cluster_name]) | |
842 | rados(ctx, mon, | |
843 | cmd=['-p', zone_info['domain_root'], | |
844 | 'rm', 'zone_info.default', '--cluster', cluster_name]) | |
845 | ||
846 | (remote,) = ctx.cluster.only(role).remotes.keys() | |
847 | for pool_info in zone_info['placement_pools']: | |
848 | remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', | |
849 | pool_info['val']['index_pool'], '64', '64', '--cluster', cluster_name]) | |
850 | if ctx.rgw.ec_data_pool: | |
851 | create_ec_pool(remote, pool_info['val']['data_pool'], | |
852 | zone, 64, ctx.rgw.erasure_code_profile, cluster_name) | |
853 | else: | |
854 | create_replicated_pool( | |
855 | remote, pool_info['val']['data_pool'], | |
856 | 64, cluster_name) | |
857 | zone_json = json.dumps(dict(zone_info.items() + user_info.items())) | |
858 | log.debug('zone info is: %r', zone_json) | |
859 | rgwadmin(ctx, client, | |
860 | cmd=['zone', 'set', '--rgw-zonegroup', zonegroup, | |
861 | '--rgw-zone', zone], | |
862 | stdin=StringIO(zone_json), | |
863 | check_status=True) | |
864 | ||
865 | for region, info in region_info.iteritems(): | |
866 | region_json = json.dumps(info) | |
867 | log.debug('region info is: %s', region_json) | |
868 | rgwadmin(ctx, client, | |
869 | cmd=['zonegroup', 'set'], | |
870 | stdin=StringIO(region_json), | |
871 | check_status=True) | |
872 | if info['is_master']: | |
873 | rgwadmin(ctx, client, | |
874 | cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup], | |
875 | check_status=True) | |
876 | ||
877 | (zonegroup, zone, zone_info, user_info) = role_zones[client] | |
878 | rgwadmin(ctx, client, | |
879 | cmd=['zone', 'default', '--rgw-zone', zone], | |
880 | check_status=True) | |
881 | ||
882 | #this used to take master_client, need to edit that accordingly | |
883 | rgwadmin(ctx, client, | |
884 | cmd=['period', 'update', '--commit'], | |
885 | check_status=True) | |
886 | ||
887 | yield | |
888 | ||
889 | @contextlib.contextmanager | |
890 | def pull_configuration(ctx, config, regions, role_endpoints, realm, master_client): | |
891 | """ | |
892 | Configure regions and zones from rados and rgw. | |
893 | """ | |
894 | if not regions: | |
895 | log.debug( | |
896 | 'In rgw.pull_configuration() and regions is None. ' | |
897 | 'Bailing') | |
898 | yield | |
899 | return | |
900 | ||
901 | if not realm: | |
902 | log.debug( | |
903 | 'In rgw.pull_configuration() and realm is None. ' | |
904 | 'Bailing') | |
905 | yield | |
906 | return | |
907 | ||
908 | log.info('Pulling configuration...') | |
909 | ||
910 | log.debug('config is %r', config) | |
911 | log.debug('regions are %r', regions) | |
912 | log.debug('role_endpoints = %r', role_endpoints) | |
913 | log.debug('realm is %r', realm) | |
914 | log.debug('master client = %r', master_client) | |
915 | ||
916 | # extract the zone info | |
917 | role_zones = dict([(client, extract_zone_info(ctx, client, c_config)) | |
918 | for client, c_config in config.iteritems()]) | |
919 | log.debug('role_zones = %r', role_zones) | |
920 | ||
921 | # extract the user info and append it to the payload tuple for the given | |
922 | # client | |
923 | for client, c_config in config.iteritems(): | |
924 | if not c_config: | |
925 | user_info = None | |
926 | else: | |
927 | user_info = extract_user_info(c_config) | |
928 | ||
929 | (region, zone, zone_info) = role_zones[client] | |
930 | role_zones[client] = (region, zone, zone_info, user_info) | |
931 | ||
932 | region_info = dict([ | |
933 | (region_name, extract_region_info(region_name, r_config)) | |
934 | for region_name, r_config in regions.iteritems()]) | |
935 | ||
936 | fill_in_endpoints(region_info, role_zones, role_endpoints) | |
937 | ||
938 | for client in config.iterkeys(): | |
939 | if client != master_client: | |
940 | cluster_name, daemon_type, client_id = teuthology.split_role(client) | |
941 | host, port = role_endpoints[master_client] | |
942 | endpoint = 'http://{host}:{port}/'.format(host=host, port=port) | |
943 | log.debug("endpoint: %s", endpoint) | |
944 | rgwadmin(ctx, client, | |
945 | cmd=['realm', 'pull', '--rgw-realm', realm, '--default', '--url', | |
946 | endpoint, '--access_key', | |
947 | user_info['system_key']['access_key'], '--secret', | |
948 | user_info['system_key']['secret_key']], | |
949 | check_status=True) | |
950 | ||
951 | (zonegroup, zone, zone_info, zone_user_info) = role_zones[client] | |
952 | zone_json = json.dumps(dict(zone_info.items() + zone_user_info.items())) | |
953 | log.debug("zone info is: %r", zone_json) | |
954 | rgwadmin(ctx, client, | |
955 | cmd=['zone', 'set', '--default', | |
956 | '--rgw-zone', zone], | |
957 | stdin=StringIO(zone_json), | |
958 | check_status=True) | |
959 | ||
960 | rgwadmin(ctx, client, | |
961 | cmd=['zonegroup', 'add', '--rgw-zonegroup', zonegroup, '--rgw-zone', zone], | |
962 | check_status=True) | |
963 | ||
964 | rgwadmin(ctx, client, | |
965 | cmd=['zonegroup', 'default', '--rgw-zonegroup', zonegroup], | |
966 | check_status=True) | |
967 | ||
968 | rgwadmin(ctx, client, | |
969 | cmd=['period', 'update', '--commit', '--url', | |
970 | endpoint, '--access_key', | |
971 | user_info['system_key']['access_key'], '--secret', | |
972 | user_info['system_key']['secret_key']], | |
973 | check_status=True) | |
974 | ||
975 | yield | |
976 | ||
977 | @contextlib.contextmanager | |
978 | def task(ctx, config): | |
979 | """ | |
980 | Either configure apache to run a rados gateway, or use the built-in | |
981 | civetweb server. | |
982 | Only one should be run per machine, since it uses a hard-coded port for | |
983 | now. | |
984 | ||
985 | For example, to run rgw on all clients:: | |
986 | ||
987 | tasks: | |
988 | - ceph: | |
989 | - rgw: | |
990 | ||
991 | To only run on certain clients:: | |
992 | ||
993 | tasks: | |
994 | - ceph: | |
995 | - rgw: [client.0, client.3] | |
996 | ||
997 | or | |
998 | ||
999 | tasks: | |
1000 | - ceph: | |
1001 | - rgw: | |
1002 | client.0: | |
1003 | client.3: | |
1004 | ||
1005 | You can adjust the idle timeout for fastcgi (default is 30 seconds): | |
1006 | ||
1007 | tasks: | |
1008 | - ceph: | |
1009 | - rgw: | |
1010 | client.0: | |
1011 | idle_timeout: 90 | |
1012 | ||
1013 | To run radosgw through valgrind: | |
1014 | ||
1015 | tasks: | |
1016 | - ceph: | |
1017 | - rgw: | |
1018 | client.0: | |
1019 | valgrind: [--tool=memcheck] | |
1020 | client.3: | |
1021 | valgrind: [--tool=memcheck] | |
1022 | ||
1023 | To use civetweb instead of apache: | |
1024 | ||
1025 | tasks: | |
1026 | - ceph: | |
1027 | - rgw: | |
1028 | - client.0 | |
1029 | overrides: | |
1030 | rgw: | |
1031 | frontend: civetweb | |
1032 | ||
1033 | Note that without a modified fastcgi module (e.g. with the default | |
1034 | one on CentOS), you must have rgw print continue = false in ceph.conf:: | |
1035 | ||
1036 | tasks: | |
1037 | - ceph: | |
1038 | conf: | |
1039 | global: | |
1040 | rgw print continue: false | |
1041 | - rgw: [client.0] | |
1042 | ||
1043 | To use mod_proxy_fcgi instead of mod_fastcgi: | |
1044 | ||
1045 | overrides: | |
1046 | rgw: | |
1047 | use_fcgi: true | |
1048 | ||
1049 | To run rgws for multiple regions or zones, describe the regions | |
1050 | and their zones in a regions section. The endpoints will be | |
1051 | generated by this task. Each client must have a region, zone, | |
1052 | and pools assigned in ceph.conf:: | |
1053 | ||
1054 | tasks: | |
1055 | - install: | |
1056 | - ceph: | |
1057 | conf: | |
1058 | client.0: | |
1059 | rgw region: foo | |
1060 | rgw zone: foo-1 | |
1061 | rgw region root pool: .rgw.rroot.foo | |
1062 | rgw zone root pool: .rgw.zroot.foo | |
1063 | rgw log meta: true | |
1064 | rgw log data: true | |
1065 | client.1: | |
1066 | rgw region: bar | |
1067 | rgw zone: bar-master | |
1068 | rgw region root pool: .rgw.rroot.bar | |
1069 | rgw zone root pool: .rgw.zroot.bar | |
1070 | rgw log meta: true | |
1071 | rgw log data: true | |
1072 | client.2: | |
1073 | rgw region: bar | |
1074 | rgw zone: bar-secondary | |
1075 | rgw region root pool: .rgw.rroot.bar | |
1076 | rgw zone root pool: .rgw.zroot.bar-secondary | |
1077 | - rgw: | |
1078 | default_idle_timeout: 30 | |
1079 | ec-data-pool: true | |
1080 | erasure_code_profile: | |
1081 | k: 2 | |
1082 | m: 1 | |
1083 | ruleset-failure-domain: osd | |
1084 | realm: foo | |
1085 | regions: | |
1086 | foo: | |
1087 | api name: api_name # default: region name | |
1088 | is master: true # default: false | |
1089 | master zone: foo-1 # default: first zone | |
1090 | zones: [foo-1] | |
1091 | log meta: true | |
1092 | log data: true | |
1093 | placement targets: [target1, target2] # default: [] | |
1094 | default placement: target2 # default: '' | |
1095 | bar: | |
1096 | api name: bar-api | |
1097 | zones: [bar-master, bar-secondary] | |
1098 | client.0: | |
1099 | system user: | |
1100 | name: foo-system | |
1101 | access key: X2IYPSTY1072DDY1SJMC | |
1102 | secret key: YIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm | |
1103 | client.1: | |
1104 | system user: | |
1105 | name: bar1 | |
1106 | access key: Y2IYPSTY1072DDY1SJMC | |
1107 | secret key: XIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm | |
1108 | client.2: | |
1109 | system user: | |
1110 | name: bar2 | |
1111 | access key: Z2IYPSTY1072DDY1SJMC | |
1112 | secret key: ZIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm | |
1113 | """ | |
1114 | if config is None: | |
1115 | config = dict(('client.{id}'.format(id=id_), None) | |
1116 | for id_ in teuthology.all_roles_of_type( | |
1117 | ctx.cluster, 'client')) | |
1118 | elif isinstance(config, list): | |
1119 | config = dict((name, None) for name in config) | |
1120 | ||
1121 | overrides = ctx.config.get('overrides', {}) | |
1122 | teuthology.deep_merge(config, overrides.get('rgw', {})) | |
1123 | ||
1124 | regions = {} | |
1125 | if 'regions' in config: | |
1126 | # separate region info so only clients are keys in config | |
1127 | regions = config['regions'] | |
1128 | del config['regions'] | |
1129 | ||
1130 | role_endpoints = assign_ports(ctx, config) | |
1131 | ctx.rgw = argparse.Namespace() | |
1132 | ctx.rgw.role_endpoints = role_endpoints | |
1133 | # stash the region info for later, since it was deleted from the config | |
1134 | # structure | |
1135 | ctx.rgw.regions = regions | |
1136 | ||
1137 | realm = None | |
1138 | if 'realm' in config: | |
1139 | # separate realm info so only clients are keys in config | |
1140 | realm = config['realm'] | |
1141 | del config['realm'] | |
1142 | ctx.rgw.realm = realm | |
1143 | ||
1144 | ctx.rgw.ec_data_pool = False | |
1145 | if 'ec-data-pool' in config: | |
1146 | ctx.rgw.ec_data_pool = bool(config['ec-data-pool']) | |
1147 | del config['ec-data-pool'] | |
1148 | ctx.rgw.erasure_code_profile = {} | |
1149 | if 'erasure_code_profile' in config: | |
1150 | ctx.rgw.erasure_code_profile = config['erasure_code_profile'] | |
1151 | del config['erasure_code_profile'] | |
1152 | ctx.rgw.default_idle_timeout = 30 | |
1153 | if 'default_idle_timeout' in config: | |
1154 | ctx.rgw.default_idle_timeout = int(config['default_idle_timeout']) | |
1155 | del config['default_idle_timeout'] | |
1156 | ctx.rgw.cache_pools = False | |
1157 | if 'cache-pools' in config: | |
1158 | ctx.rgw.cache_pools = bool(config['cache-pools']) | |
1159 | del config['cache-pools'] | |
1160 | ||
1161 | ctx.rgw.frontend = 'civetweb' | |
1162 | if 'frontend' in config: | |
1163 | ctx.rgw.frontend = config['frontend'] | |
1164 | del config['frontend'] | |
1165 | ||
1166 | ctx.rgw.use_fastcgi = True | |
1167 | if "use_fcgi" in config: | |
1168 | ctx.rgw.use_fastcgi = False | |
1169 | log.info("Using mod_proxy_fcgi instead of mod_fastcgi...") | |
1170 | del config['use_fcgi'] | |
1171 | ||
1172 | subtasks = [ | |
1173 | lambda: create_nonregion_pools( | |
1174 | ctx=ctx, config=config, regions=regions), | |
1175 | ] | |
1176 | log.debug('Nonregion pools created') | |
1177 | ||
1178 | multisite = len(regions) > 1 | |
1179 | ||
1180 | if not multisite: | |
1181 | for zonegroup, zonegroup_info in regions.iteritems(): | |
1182 | log.debug("zonegroup_info =%r", zonegroup_info) | |
1183 | if len(zonegroup_info['zones']) > 1: | |
1184 | multisite = True | |
1185 | break | |
1186 | ||
1187 | log.debug('multisite %s', multisite) | |
1188 | ||
1189 | multi_cluster = False | |
1190 | if multisite: | |
1191 | prev_cluster_name = None | |
1192 | roles = ctx.config['roles'] | |
1193 | # check if any roles have a different cluster_name from each other | |
1194 | for lst in roles: | |
1195 | for role in lst: | |
1196 | cluster_name, daemon_type, client_id = teuthology.split_role(role) | |
1197 | if cluster_name != prev_cluster_name and prev_cluster_name is not None: | |
1198 | multi_cluster = True | |
1199 | break | |
1200 | prev_cluster_name = cluster_name | |
1201 | if multi_cluster: | |
1202 | break | |
1203 | ||
1204 | log.debug('multi_cluster %s', multi_cluster) | |
1205 | ctx.rgw.config = config | |
1206 | master_client = None | |
1207 | ||
1208 | if multi_cluster: | |
1209 | log.debug('multi cluster run') | |
1210 | ||
1211 | master_client = get_config_master_client(ctx=ctx, | |
1212 | config=config, | |
1213 | regions=regions) | |
1214 | log.debug('master_client %r', master_client) | |
1215 | subtasks.extend([ | |
1216 | lambda: configure_multisite_regions_and_zones( | |
1217 | ctx=ctx, | |
1218 | config=config, | |
1219 | regions=regions, | |
1220 | role_endpoints=role_endpoints, | |
1221 | realm=realm, | |
1222 | master_client = master_client, | |
1223 | ) | |
1224 | ]) | |
1225 | ||
1226 | subtasks.extend([ | |
1227 | lambda: configure_users_for_client( | |
1228 | ctx=ctx, | |
1229 | config=config, | |
1230 | client=master_client, | |
1231 | everywhere=False, | |
1232 | ), | |
1233 | ]) | |
1234 | ||
1235 | if ctx.rgw.frontend == 'apache': | |
1236 | subtasks.insert(0, | |
1237 | lambda: create_apache_dirs(ctx=ctx, config=config, | |
1238 | on_client=master_client)) | |
1239 | subtasks.extend([ | |
1240 | lambda: ship_apache_configs(ctx=ctx, config=config, | |
1241 | role_endpoints=role_endpoints, on_client=master_client), | |
1242 | lambda: start_apache(ctx=ctx, config=config, on_client=master_client), | |
1243 | lambda: start_rgw(ctx=ctx, config=config, on_client=master_client), | |
1244 | ]) | |
1245 | elif ctx.rgw.frontend == 'civetweb': | |
1246 | subtasks.extend([ | |
1247 | lambda: start_rgw(ctx=ctx, config=config, on_client=master_client), | |
1248 | ]) | |
1249 | else: | |
1250 | raise ValueError("frontend must be 'apache' or 'civetweb'") | |
1251 | ||
1252 | subtasks.extend([ | |
1253 | lambda: pull_configuration(ctx=ctx, | |
1254 | config=config, | |
1255 | regions=regions, | |
1256 | role_endpoints=role_endpoints, | |
1257 | realm=realm, | |
1258 | master_client=master_client | |
1259 | ), | |
1260 | ]) | |
1261 | ||
1262 | subtasks.extend([ | |
1263 | lambda: configure_users_for_client( | |
1264 | ctx=ctx, | |
1265 | config=config, | |
1266 | client=master_client, | |
1267 | everywhere=True | |
1268 | ), | |
1269 | ]) | |
1270 | ||
1271 | if ctx.rgw.frontend == 'apache': | |
1272 | subtasks.insert(0, | |
1273 | lambda: create_apache_dirs(ctx=ctx, config=config, | |
1274 | on_client=None, | |
1275 | except_client = master_client)) | |
1276 | subtasks.extend([ | |
1277 | lambda: ship_apache_configs(ctx=ctx, config=config, | |
1278 | role_endpoints=role_endpoints, | |
1279 | on_client=None, | |
1280 | except_client = master_client, | |
1281 | ), | |
1282 | lambda: start_apache(ctx=ctx, | |
1283 | config = config, | |
1284 | on_client=None, | |
1285 | except_client = master_client, | |
1286 | ), | |
1287 | lambda: start_rgw(ctx=ctx, | |
1288 | config=config, | |
1289 | on_client=None, | |
1290 | except_client = master_client), | |
1291 | ]) | |
1292 | elif ctx.rgw.frontend == 'civetweb': | |
1293 | subtasks.extend([ | |
1294 | lambda: start_rgw(ctx=ctx, | |
1295 | config=config, | |
1296 | on_client=None, | |
1297 | except_client = master_client), | |
1298 | ]) | |
1299 | else: | |
1300 | raise ValueError("frontend must be 'apache' or 'civetweb'") | |
1301 | ||
1302 | else: | |
1303 | log.debug('single cluster run') | |
1304 | subtasks.extend([ | |
1305 | lambda: configure_regions_and_zones( | |
1306 | ctx=ctx, | |
1307 | config=config, | |
1308 | regions=regions, | |
1309 | role_endpoints=role_endpoints, | |
1310 | realm=realm, | |
1311 | ), | |
1312 | lambda: configure_users( | |
1313 | ctx=ctx, | |
1314 | config=config, | |
1315 | everywhere=True, | |
1316 | ), | |
1317 | ]) | |
1318 | if ctx.rgw.frontend == 'apache': | |
1319 | subtasks.insert(0, lambda: create_apache_dirs(ctx=ctx, config=config)) | |
1320 | subtasks.extend([ | |
1321 | lambda: ship_apache_configs(ctx=ctx, config=config, | |
1322 | role_endpoints=role_endpoints), | |
1323 | lambda: start_apache(ctx=ctx, config=config), | |
1324 | lambda: start_rgw(ctx=ctx, | |
1325 | config=config), | |
1326 | ]) | |
1327 | elif ctx.rgw.frontend == 'civetweb': | |
1328 | subtasks.extend([ | |
1329 | lambda: start_rgw(ctx=ctx, | |
1330 | config=config), | |
1331 | ]) | |
1332 | else: | |
1333 | raise ValueError("frontend must be 'apache' or 'civetweb'") | |
1334 | ||
1335 | log.info("Using %s as radosgw frontend", ctx.rgw.frontend) | |
1336 | with contextutil.nested(*subtasks): | |
1337 | yield |
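
Finally, note that `subtasks` is a list of zero-argument lambdas rather than context managers: teuthology's `contextutil.nested()` calls each one in order, enters the returned context manager, and unwinds them in reverse when the task exits. A toy illustration of that ordering, assuming `contextutil.nested()` behaves as it is used above (step names invented):

```python
import contextlib
from teuthology import contextutil

order = []

@contextlib.contextmanager
def step(name):
    order.append('start ' + name)
    try:
        yield
    finally:
        order.append('stop ' + name)

subtasks = [
    lambda: step('pools'),
    lambda: step('rgw'),
]

with contextutil.nested(*subtasks):
    order.append('run tests')

assert order == ['start pools', 'start rgw',
                 'run tests',
                 'stop rgw', 'stop pools']
```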