"""
rgw routines
"""
import argparse
import contextlib
import logging

from teuthology.orchestra import run
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.exceptions import ConfigError
from tasks.util import get_remote_for_role
from tasks.util.rgw import rgwadmin, wait_for_radosgw
from tasks.util.rados import (create_ec_pool,
                              create_replicated_pool,
                              create_cache_pool)

log = logging.getLogger(__name__)

class RGWEndpoint:
    def __init__(self, hostname=None, port=None, cert=None, dns_name=None, website_dns_name=None):
        self.hostname = hostname
        self.port = port
        self.cert = cert
        self.dns_name = dns_name
        self.website_dns_name = website_dns_name

    def url(self):
        proto = 'https' if self.cert else 'http'
        return '{proto}://{hostname}:{port}/'.format(proto=proto, hostname=self.hostname, port=self.port)

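# A minimal usage sketch of RGWEndpoint (hypothetical values): with no
# certificate, RGWEndpoint('rgw-host', 8000).url() evaluates to
# 'http://rgw-host:8000/'; passing a cert switches the scheme to https.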
@contextlib.contextmanager
def start_rgw(ctx, config, clients):
    """
    Start rgw on remote sites.
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in clients:
        (remote,) = ctx.cluster.only(client).remotes.keys()
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_id = daemon_type + '.' + client_id
        client_with_cluster = cluster_name + '.' + client_with_id

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
            ]

        rgw_cmd = ['radosgw']

        log.info("Using %s as radosgw frontend", ctx.rgw.frontend)

        endpoint = ctx.rgw.role_endpoints[client]
        frontends = ctx.rgw.frontend
        frontend_prefix = client_config.get('frontend_prefix', None)
        if frontend_prefix:
            frontends += ' prefix={pfx}'.format(pfx=frontend_prefix)

        if endpoint.cert:
            # add the ssl certificate path
            frontends += ' ssl_certificate={}'.format(endpoint.cert.certificate)
            if ctx.rgw.frontend == 'civetweb':
                frontends += ' port={}s'.format(endpoint.port)
            else:
                frontends += ' ssl_port={}'.format(endpoint.port)
        else:
            frontends += ' port={}'.format(endpoint.port)

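        # At this point frontends is a single config string, e.g. (illustrative)
        # 'civetweb port=443s ssl_certificate=/path/to/cert.pem' or 'beast port=80',
        # which is handed to radosgw via --rgw-frontends below.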
        rgw_cmd.extend([
            '--rgw-frontends', frontends,
            '-n', client_with_id,
            '--cluster', cluster_name,
            '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),
            '--log-file',
            '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
                                                                  client_with_cluster=client_with_cluster),
            ])

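        # Keystone integration is opt-in per client; a sketch of the client
        # config that enables it (the role name is illustrative):
        #     rgw:
        #       client.0:
        #         use-keystone-role: client.0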
        keystone_role = client_config.get('use-keystone-role', None)
        if keystone_role is not None:
            if not ctx.keystone:
                raise ConfigError('rgw must run after the keystone task')
            url = 'http://{host}:{port}/v1/KEY_$(tenant_id)s'.format(host=endpoint.hostname,
                                                                     port=endpoint.port)
            ctx.keystone.create_endpoint(ctx, keystone_role, 'swift', url)

            keystone_host, keystone_port = \
                ctx.keystone.public_endpoints[keystone_role]
            rgw_cmd.extend([
                '--rgw_keystone_url',
                'http://{khost}:{kport}'.format(khost=keystone_host,
                                                kport=keystone_port),
                ])

        if client_config.get('dns-name') is not None:
            rgw_cmd.extend(['--rgw-dns-name', endpoint.dns_name])
        if client_config.get('dns-s3website-name') is not None:
            rgw_cmd.extend(['--rgw-dns-s3website-name', endpoint.website_dns_name])

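        # SSE key-management backends are likewise opt-in per client, via
        # 'use-barbican-role' or 'use-vault-role' (role name illustrative):
        #     rgw:
        #       client.0:
        #         use-vault-role: client.0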
        vault_role = client_config.get('use-vault-role', None)
        barbican_role = client_config.get('use-barbican-role', None)

        token_path = teuthology.get_testdir(ctx) + '/vault-token'
        if barbican_role is not None:
            if not hasattr(ctx, 'barbican'):
                raise ConfigError('rgw must run after the barbican task')

            barbican_host, barbican_port = \
                ctx.barbican.endpoints[barbican_role]
            log.info("Use barbican url=%s:%s", barbican_host, barbican_port)

            rgw_cmd.extend([
                '--rgw_barbican_url',
                'http://{bhost}:{bport}'.format(bhost=barbican_host,
                                                bport=barbican_port),
                ])
        elif vault_role is not None:
            if not ctx.vault.root_token:
                raise ConfigError('vault: no "root_token" specified')
            # write the vault root token to a file readable only by the ceph user
            ctx.cluster.only(client).run(args=['echo', '-n', ctx.vault.root_token, run.Raw('>'), token_path])
            log.info("Token file content")
            ctx.cluster.only(client).run(args=['cat', token_path])
            log.info("Restrict access to token file")
            ctx.cluster.only(client).run(args=['chmod', '600', token_path])
            ctx.cluster.only(client).run(args=['sudo', 'chown', 'ceph', token_path])

            rgw_cmd.extend([
                '--rgw_crypt_vault_addr', "{}:{}".format(*ctx.vault.endpoints[vault_role]),
                '--rgw_crypt_vault_token_file', token_path
            ])

        rgw_cmd.extend([
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(client_with_cluster=client_with_cluster),
            run.Raw('2>&1'),
            ])

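        # Optionally wrap the command in valgrind, then hand the assembled
        # pipeline to add_daemon(); the final invocation looks roughly like
        # (paths and client name illustrative):
        #   sudo adjust-ulimits ceph-coverage <testdir>/archive/coverage \
        #       daemon-helper term radosgw --rgw-frontends ... --foreground \
        #       | sudo tee /var/log/ceph/rgw.ceph.client.0.stdout 2>&1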
        if client_config.get('valgrind'):
            cmd_prefix = teuthology.get_valgrind_args(
                testdir,
                client_with_cluster,
                cmd_prefix,
                client_config.get('valgrind')
                )

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client_with_id,
            cluster=cluster_name,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
            )

    # XXX: add_daemon() doesn't let us wait until radosgw finishes startup
    for client in clients:
        endpoint = ctx.rgw.role_endpoints[client]
        url = endpoint.url()
        log.info('Polling {client} until it starts accepting connections on {url}'.format(client=client, url=url))
        (remote,) = ctx.cluster.only(client).remotes.keys()
        wait_for_radosgw(url, remote)

    try:
        yield
    finally:
        for client in clients:
            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            client_with_id = daemon_type + '.' + client_id
            client_with_cluster = cluster_name + '.' + client_with_id
            ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).stop()
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                             client=client_with_cluster),
                    ],
                )
            ctx.cluster.only(client).run(args=['rm', '-f', token_path])

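# Per-role endpoint options are read from the task config; a sketch of the
# keys recognized below (values illustrative):
#     rgw:
#       client.0:
#         port: 8000
#         dns-name: 'client0.'          # a trailing dot gets the remote hostname appended
#         dns-s3website-name: 'web0.'
#         ssl certificate: rgw.client.0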
def assign_endpoints(ctx, config, default_cert):
    role_endpoints = {}
    for role, client_config in config.items():
        client_config = client_config or {}
        remote = get_remote_for_role(ctx, role)

        cert = client_config.get('ssl certificate', default_cert)
        if cert:
            # find the certificate created by the ssl task
            if not hasattr(ctx, 'ssl_certificates'):
                raise ConfigError('rgw: no ssl task found for option "ssl certificate"')
            ssl_certificate = ctx.ssl_certificates.get(cert, None)
            if not ssl_certificate:
                raise ConfigError('rgw: missing ssl certificate "{}"'.format(cert))
        else:
            ssl_certificate = None

        port = client_config.get('port', 443 if ssl_certificate else 80)

        # if dns-name is given, use it as the hostname (or as a prefix)
        dns_name = client_config.get('dns-name', '')
        if len(dns_name) == 0 or dns_name.endswith('.'):
            dns_name += remote.hostname

        website_dns_name = client_config.get('dns-s3website-name')
        if website_dns_name is not None and (len(website_dns_name) == 0 or website_dns_name.endswith('.')):
            website_dns_name += remote.hostname

        role_endpoints[role] = RGWEndpoint(remote.hostname, port, ssl_certificate, dns_name, website_dns_name)

    return role_endpoints

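# create_pools() pre-creates the pools rgw would otherwise create on demand:
# 'default.rgw.buckets.data' (replicated or EC, per 'ec-data-pool'),
# 'default.rgw.buckets.index', and optionally a 'default.rgw.buckets.data.cache'
# tier when 'cache-pools' is set.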
@contextlib.contextmanager
def create_pools(ctx, clients):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    for client in clients:
        log.debug("Obtaining remote for client {}".format(client))
        (remote,) = ctx.cluster.only(client).remotes.keys()
        data_pool = 'default.rgw.buckets.data'
        cluster_name, daemon_type, client_id = teuthology.split_role(client)

        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, ctx.rgw.data_pool_pg_size,
                           ctx.rgw.erasure_code_profile, cluster_name, 'rgw')
        else:
            create_replicated_pool(remote, data_pool, ctx.rgw.data_pool_pg_size, cluster_name, 'rgw')

        index_pool = 'default.rgw.buckets.index'
        create_replicated_pool(remote, index_pool, ctx.rgw.index_pool_pg_size, cluster_name, 'rgw')

        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024, cluster_name)
    log.debug('Pools created')
    yield

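# configure_compression() drives radosgw-admin; per client it is roughly
# equivalent to (compression type illustrative):
#   radosgw-admin user list
#   radosgw-admin zone placement modify --rgw-zone default \
#       --placement-id default-placement --compression zlib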
@contextlib.contextmanager
def configure_compression(ctx, clients, compression):
    """ set a compression type in the default zone placement """
    log.info('Configuring compression type = %s', compression)
    for client in clients:
        # XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete().
        # issue a 'radosgw-admin user list' command to trigger this
        rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)

        rgwadmin(ctx, client,
                 cmd=['zone', 'placement', 'modify', '--rgw-zone', 'default',
                      '--placement-id', 'default-placement',
                      '--compression', compression],
                 check_status=True)
    yield

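# configure_storage_classes() adds each storage class to the default placement
# target and points it at a matching data pool; e.g. a 'COLD' class (name
# illustrative) ends up backed by 'default.rgw.buckets.data.cold':
#   radosgw-admin zonegroup placement add --rgw-zone default \
#       --placement-id default-placement --storage-class COLD
#   radosgw-admin zone placement add --rgw-zone default \
#       --placement-id default-placement --storage-class COLD \
#       --data-pool default.rgw.buckets.data.cold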
@contextlib.contextmanager
def configure_storage_classes(ctx, clients, storage_classes):
    """ set storage classes in the default zone placement """

    sc = [s.strip() for s in storage_classes.split(',')]

    for client in clients:
        # XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete().
        # issue a 'radosgw-admin user list' command to trigger this
        rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)

        for storage_class in sc:
            log.info('Configuring storage class type = %s', storage_class)
            rgwadmin(ctx, client,
                     cmd=['zonegroup', 'placement', 'add',
                          '--rgw-zone', 'default',
                          '--placement-id', 'default-placement',
                          '--storage-class', storage_class],
                     check_status=True)
            rgwadmin(ctx, client,
                     cmd=['zone', 'placement', 'add',
                          '--rgw-zone', 'default',
                          '--placement-id', 'default-placement',
                          '--storage-class', storage_class,
                          '--data-pool', 'default.rgw.buckets.data.' + storage_class.lower()],
                     check_status=True)
    yield

@contextlib.contextmanager
def task(ctx, config):
    """
    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    To run radosgw through valgrind::

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]

    To configure data or index pool pg_size::

        overrides:
          rgw:
            data_pool_pg_size: 256
            index_pool_pg_size: 128
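
    Other recognized top-level options (these key names come from the config
    handling below; the values shown are illustrative)::

        overrides:
          rgw:
            ec-data-pool: true
            cache-pools: false
            frontend: beast
            compression type: zlib
            storage classes: LUKEWARM, FROZEN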
    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(
                          ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = config.keys() # http://tracker.ceph.com/issues/20417

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rgw', {}))

    ctx.rgw = argparse.Namespace()

    ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False))
    ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {})
    ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
    ctx.rgw.frontend = config.pop('frontend', 'civetweb')
    ctx.rgw.compression_type = config.pop('compression type', None)
    ctx.rgw.storage_classes = config.pop('storage classes', None)
    default_cert = config.pop('ssl certificate', None)
    ctx.rgw.data_pool_pg_size = config.pop('data_pool_pg_size', 64)
    ctx.rgw.index_pool_pg_size = config.pop('index_pool_pg_size', 64)
    ctx.rgw.config = config

    log.debug("config is {}".format(config))
    log.debug("client list is {}".format(clients))

    ctx.rgw.role_endpoints = assign_endpoints(ctx, config, default_cert)

    subtasks = [
        lambda: create_pools(ctx=ctx, clients=clients),
    ]
    if ctx.rgw.compression_type:
        subtasks.extend([
            lambda: configure_compression(ctx=ctx, clients=clients,
                                          compression=ctx.rgw.compression_type),
        ])
    if ctx.rgw.storage_classes:
        subtasks.extend([
            lambda: configure_storage_classes(ctx=ctx, clients=clients,
                                              storage_classes=ctx.rgw.storage_classes),
        ])
    subtasks.extend([
        lambda: start_rgw(ctx=ctx, config=config, clients=clients),
    ])

    with contextutil.nested(*subtasks):
        yield