# ceph/qa/tasks/rgw.py -- imported from ceph.git, v15.2.2 "octopus" source
1 """
2 rgw routines
3 """
4 import argparse
5 import contextlib
6 import logging
7
8 from teuthology.orchestra import run
9 from teuthology import misc as teuthology
10 from teuthology import contextutil
11 from teuthology.exceptions import ConfigError
12 from util import get_remote_for_role
13 from util.rgw import rgwadmin, wait_for_radosgw
14 from util.rados import (create_ec_pool,
15 create_replicated_pool,
16 create_cache_pool)
17
18 log = logging.getLogger(__name__)
19
class RGWEndpoint:
    """Connection details for a single radosgw frontend."""

    def __init__(self, hostname=None, port=None, cert=None, dns_name=None, website_dns_name=None):
        # hostname/port locate the frontend; a non-None cert implies https
        self.hostname = hostname
        self.port = port
        self.cert = cert
        # optional dns names handed to radosgw via --rgw-dns-name and
        # --rgw-dns-s3website-name
        self.dns_name = dns_name
        self.website_dns_name = website_dns_name

    def url(self):
        """Return the base URL ('http(s)://host:port/') for this endpoint."""
        if self.cert:
            scheme = 'https'
        else:
            scheme = 'http'
        return '{proto}://{hostname}:{port}/'.format(proto=scheme, hostname=self.hostname, port=self.port)
31
@contextlib.contextmanager
def start_rgw(ctx, config, clients):
    """
    Start rgw on remote sites.

    For each client role in ``clients`` this builds a ``radosgw`` command
    line (frontend options, keyring, log file, ops-log socket, plus optional
    keystone/barbican/vault wiring taken from the per-client config) and
    launches it through ``daemon-helper`` as a teuthology daemon.  Once all
    daemons are launched, it polls each endpoint until it accepts
    connections, then yields; on exit it stops every daemon and removes the
    ops-log socket and the vault token file.

    :param ctx: teuthology run context; reads ``ctx.rgw.*`` set up by task()
    :param config: role -> per-client config dict (values may be None)
    :param clients: iterable of client role names to start rgw on
    :raises ConfigError: when keystone/barbican/vault options are requested
                         but the corresponding task did not run first
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in clients:
        # exactly one remote is expected to carry this role
        (remote,) = ctx.cluster.only(client).remotes.keys()
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_id = daemon_type + '.' + client_id              # e.g. 'client.0'
        client_with_cluster = cluster_name + '.' + client_with_id   # e.g. 'ceph.client.0'

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        # wrapper commands that run in front of the radosgw binary itself
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
        ]

        rgw_cmd = ['radosgw']

        log.info("Using %s as radosgw frontend", ctx.rgw.frontend)

        endpoint = ctx.rgw.role_endpoints[client]
        frontends = ctx.rgw.frontend
        frontend_prefix = client_config.get('frontend_prefix', None)
        if frontend_prefix:
            frontends += ' prefix={pfx}'.format(pfx=frontend_prefix)

        if endpoint.cert:
            # add the ssl certificate path
            frontends += ' ssl_certificate={}'.format(endpoint.cert.certificate)
            if ctx.rgw.frontend == 'civetweb':
                # civetweb marks an ssl port with a trailing 's'
                frontends += ' port={}s'.format(endpoint.port)
            else:
                frontends += ' ssl_port={}'.format(endpoint.port)
        else:
            frontends += ' port={}'.format(endpoint.port)

        rgw_cmd.extend([
            '--rgw-frontends', frontends,
            '-n', client_with_id,
            '--cluster', cluster_name,
            '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),
            '--log-file',
            '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
                                                                  client_with_cluster=client_with_cluster),
        ])

        # optional keystone integration: register a swift endpoint for this
        # rgw and point rgw at the keystone public endpoint
        keystone_role = client_config.get('use-keystone-role', None)
        if keystone_role is not None:
            if not ctx.keystone:
                raise ConfigError('rgw must run after the keystone task')
            url = 'http://{host}:{port}/v1/KEY_$(tenant_id)s'.format(host=endpoint.hostname,
                                                                     port=endpoint.port)
            ctx.keystone.create_endpoint(ctx, keystone_role, 'swift', url)

            keystone_host, keystone_port = \
                ctx.keystone.public_endpoints[keystone_role]
            rgw_cmd.extend([
                '--rgw_keystone_url',
                'http://{khost}:{kport}'.format(khost=keystone_host,
                                                kport=keystone_port),
            ])


        if client_config.get('dns-name'):
            rgw_cmd.extend(['--rgw-dns-name', endpoint.dns_name])
        if client_config.get('dns-s3website-name'):
            rgw_cmd.extend(['--rgw-dns-s3website-name', endpoint.website_dns_name])


        # optional encryption key management: barbican or vault (mutually
        # exclusive; barbican wins if both are configured)
        vault_role = client_config.get('use-vault-role', None)
        barbican_role = client_config.get('use-barbican-role', None)

        # the token file is only created for vault, but the path is computed
        # unconditionally so the cleanup below can always remove it
        token_path = teuthology.get_testdir(ctx) + '/vault-token'
        if barbican_role is not None:
            if not hasattr(ctx, 'barbican'):
                raise ConfigError('rgw must run after the barbican task')

            barbican_host, barbican_port = \
                ctx.barbican.endpoints[barbican_role]
            log.info("Use barbican url=%s:%s", barbican_host, barbican_port)

            rgw_cmd.extend([
                '--rgw_barbican_url',
                'http://{bhost}:{bport}'.format(bhost=barbican_host,
                                                bport=barbican_port),
            ])
        elif vault_role is not None:
            if not ctx.vault.root_token:
                raise ConfigError('vault: no "root_token" specified')
            # create token on file
            ctx.cluster.only(client).run(args=['echo', '-n', ctx.vault.root_token, run.Raw('>'), token_path])
            log.info("Token file content")
            ctx.cluster.only(client).run(args=['cat', token_path])
            log.info("Restrict access to token file")
            ctx.cluster.only(client).run(args=['chmod', '600', token_path])
            # rgw runs as the 'ceph' user and must be able to read the token
            ctx.cluster.only(client).run(args=['sudo', 'chown', 'ceph', token_path])

            rgw_cmd.extend([
                '--rgw_crypt_vault_addr', "{}:{}".format(*ctx.vault.endpoints[vault_role]),
                '--rgw_crypt_vault_token_file', token_path
            ])

        # run radosgw in the foreground and tee its output into a log file
        rgw_cmd.extend([
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(client_with_cluster=client_with_cluster),
            run.Raw('2>&1'),
        ])

        if client_config.get('valgrind'):
            cmd_prefix = teuthology.get_valgrind_args(
                testdir,
                client_with_cluster,
                cmd_prefix,
                client_config.get('valgrind')
            )

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client_with_id,
            cluster=cluster_name,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )

    # XXX: add_daemon() doesn't let us wait until radosgw finishes startup
    for client in clients:
        endpoint = ctx.rgw.role_endpoints[client]
        url = endpoint.url()
        log.info('Polling {client} until it starts accepting connections on {url}'.format(client=client, url=url))
        (remote,) = ctx.cluster.only(client).remotes.keys()
        wait_for_radosgw(url, remote)

    try:
        yield
    finally:
        # stop every daemon and remove its ops-log socket and the vault
        # token file; token_path holds the value from the last loop
        # iteration above, which is the same path for every client
        for client in clients:
            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            client_with_id = daemon_type + '.' + client_id
            client_with_cluster = cluster_name + '.' + client_with_id
            ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).stop()
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                             client=client_with_cluster),
                ],
            )
            ctx.cluster.only(client).run(args=['rm', '-f', token_path])
200
def assign_endpoints(ctx, config, default_cert):
    """
    Build an RGWEndpoint for each rgw role in ``config``.

    :param ctx: teuthology run context; must carry ``ssl_certificates``
                (set by the ssl task) when any role requests a certificate
    :param config: role -> per-client config dict (values may be None)
    :param default_cert: certificate name used when a client does not set
                         'ssl certificate' itself (may be None)
    :returns: dict mapping role name -> RGWEndpoint
    :raises ConfigError: when a requested ssl certificate is unavailable
    """
    role_endpoints = {}
    for role, client_config in config.items():
        client_config = client_config or {}
        remote = get_remote_for_role(ctx, role)

        cert = client_config.get('ssl certificate', default_cert)
        if cert:
            # find the certificate created by the ssl task
            if not hasattr(ctx, 'ssl_certificates'):
                raise ConfigError('rgw: no ssl task found for option "ssl certificate"')
            ssl_certificate = ctx.ssl_certificates.get(cert, None)
            if not ssl_certificate:
                raise ConfigError('rgw: missing ssl certificate "{}"'.format(cert))
        else:
            ssl_certificate = None

        # https defaults to 443, plain http to 80
        port = client_config.get('port', 443 if ssl_certificate else 80)

        # if dns-name is given, use it as the hostname (or as a prefix)
        dns_name = client_config.get('dns-name', '')
        if len(dns_name) == 0 or dns_name.endswith('.'):
            dns_name += remote.hostname

        # same rules for dns-s3website-name; None means "not configured".
        # BUG FIX: the previous guard `if website_dns_name:` made the inner
        # `len(website_dns_name) == 0` test unreachable, so an empty value
        # never fell back to the remote's hostname the way dns-name does.
        website_dns_name = client_config.get('dns-s3website-name')
        if website_dns_name is not None:
            if len(website_dns_name) == 0 or website_dns_name.endswith('.'):
                website_dns_name += remote.hostname

        role_endpoints[role] = RGWEndpoint(remote.hostname, port, ssl_certificate, dns_name, website_dns_name)

    return role_endpoints
233
@contextlib.contextmanager
def create_pools(ctx, clients):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    data_pool = 'default.rgw.buckets.data'
    index_pool = 'default.rgw.buckets.index'
    for client in clients:
        log.debug("Obtaining remote for client {}".format(client))
        (remote,) = ctx.cluster.only(client).remotes.keys()
        cluster_name, _, _ = teuthology.split_role(client)

        # data pool: erasure coded when requested, otherwise replicated
        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, ctx.rgw.data_pool_pg_size,
                           ctx.rgw.erasure_code_profile, cluster_name, 'rgw')
        else:
            create_replicated_pool(remote, data_pool, ctx.rgw.data_pool_pg_size,
                                   cluster_name, 'rgw')

        # the bucket index pool is always replicated
        create_replicated_pool(remote, index_pool, ctx.rgw.index_pool_pg_size,
                               cluster_name, 'rgw')

        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024, cluster_name)
    log.debug('Pools created')
    yield
259
@contextlib.contextmanager
def configure_compression(ctx, clients, compression):
    """Enable the given compression type on the default zone placement."""
    log.info('Configuring compression type = %s', compression)
    # the modify command is identical for every client, build it once
    modify_cmd = ['zone', 'placement', 'modify', '--rgw-zone', 'default',
                  '--placement-id', 'default-placement',
                  '--compression', compression]
    for client in clients:
        # XXX: the 'default' zone and zonegroup aren't created until we run
        # RGWRados::init_complete(); issue a 'radosgw-admin user list'
        # command to trigger this
        rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)
        rgwadmin(ctx, client, cmd=modify_cmd, check_status=True)
    yield
275
@contextlib.contextmanager
def configure_storage_classes(ctx, clients, storage_classes):
    """Add each comma-separated storage class to the default zone placement."""

    class_names = [entry.strip() for entry in storage_classes.split(',')]

    for client in clients:
        # XXX: the 'default' zone and zonegroup aren't created until we run
        # RGWRados::init_complete(); issue a 'radosgw-admin user list'
        # command to trigger this
        rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)

        for storage_class in class_names:
            log.info('Configuring storage class type = %s', storage_class)
            # both placement commands share the same target arguments
            placement_args = ['--rgw-zone', 'default',
                              '--placement-id', 'default-placement',
                              '--storage-class', storage_class]
            rgwadmin(ctx, client,
                     cmd=['zonegroup', 'placement', 'add'] + placement_args,
                     check_status=True)
            rgwadmin(ctx, client,
                     cmd=['zone', 'placement', 'add'] + placement_args +
                         ['--data-pool',
                          'default.rgw.buckets.data.' + storage_class.lower()],
                     check_status=True)
    yield
303
@contextlib.contextmanager
def task(ctx, config):
    """
    Set up and run radosgw for the configured clients.

    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    To run radosgw through valgrind:

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]

    To configure data or index pool pg_size:

        overrides:
          rgw:
            data_pool_pg_size: 256
            index_pool_pg_size: 128
    """
    # normalize config: None -> every client role; list -> dict of role:None
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(
                          ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    # NOTE: in python 3 this is a dict *view*, not a list, so it still
    # reflects the deep_merge/pop mutations below; once all task-level
    # options have been popped, only client role names remain in it
    clients = config.keys() # http://tracker.ceph.com/issues/20417

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rgw', {}))

    # stash run-wide rgw settings on the context for the subtasks below
    ctx.rgw = argparse.Namespace()

    # pop task-level options out of config so only per-client entries remain
    ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False))
    ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {})
    ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
    ctx.rgw.frontend = config.pop('frontend', 'civetweb')
    ctx.rgw.compression_type = config.pop('compression type', None)
    ctx.rgw.storage_classes = config.pop('storage classes', None)
    default_cert = config.pop('ssl certificate', None)
    ctx.rgw.data_pool_pg_size = config.pop('data_pool_pg_size', 64)
    ctx.rgw.index_pool_pg_size = config.pop('index_pool_pg_size', 64)
    ctx.rgw.config = config

    log.debug("config is {}".format(config))
    log.debug("client list is {}".format(clients))

    ctx.rgw.role_endpoints = assign_endpoints(ctx, config, default_cert)

    # nested subtasks: pools first, then optional zone configuration,
    # then the radosgw daemons themselves
    subtasks = [
        lambda: create_pools(ctx=ctx, clients=clients),
    ]
    if ctx.rgw.compression_type:
        subtasks.extend([
            lambda: configure_compression(ctx=ctx, clients=clients,
                                          compression=ctx.rgw.compression_type),
        ])
    if ctx.rgw.storage_classes:
        subtasks.extend([
            lambda: configure_storage_classes(ctx=ctx, clients=clients,
                                              storage_classes=ctx.rgw.storage_classes),
        ])
    subtasks.extend([
        lambda: start_rgw(ctx=ctx, config=config, clients=clients),
    ])

    with contextutil.nested(*subtasks):
        yield