1"""
2rgw routines
3"""
4import argparse
5import contextlib
6import json
7import logging
8import os
9import errno
10import util.rgw as rgw_utils
11
7c673cae
FG
12from teuthology.orchestra import run
13from teuthology import misc as teuthology
14from teuthology import contextutil
15from teuthology.orchestra.run import CommandFailedError
31f18b77 16from util.rgw import rgwadmin, wait_for_radosgw
7c673cae
FG
17from util.rados import (rados, create_ec_pool,
18 create_replicated_pool,
19 create_cache_pool)
20
21log = logging.getLogger(__name__)
22
@contextlib.contextmanager
def start_rgw(ctx, config, clients):
    """
    Start rgw on remote sites.
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in clients:
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_id = daemon_type + '.' + client_id
        client_with_cluster = cluster_name + '.' + client_with_id

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
            ]

        rgw_cmd = ['radosgw']

        log.info("Using %s as radosgw frontend", ctx.rgw.frontend)

        host, port = ctx.rgw.role_endpoints[client]
        rgw_cmd.extend([
            '--rgw-frontends',
            '{frontend} port={port}'.format(frontend=ctx.rgw.frontend, port=port),
            '-n', client_with_id,
            '--cluster', cluster_name,
            '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),
            '--log-file',
            '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
                                                                  client_with_cluster=client_with_cluster),
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(client_with_cluster=client_with_cluster),
            run.Raw('2>&1'),
            ])

        if client_config.get('valgrind'):
            cmd_prefix = teuthology.get_valgrind_args(
                testdir,
                client_with_cluster,
                cmd_prefix,
                client_config.get('valgrind')
                )

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client_with_id,
            cluster=cluster_name,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
            )

    # XXX: add_daemon() doesn't let us wait until radosgw finishes startup
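    # wait_for_radosgw() (from util.rgw) polls each endpoint until the
    # gateway starts accepting connections, so tasks that follow can rely
    # on the daemons being up.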
    for client in config.keys():
        host, port = ctx.rgw.role_endpoints[client]
        endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
        log.info('Polling {client} until it starts accepting connections on {endpoint}'.format(client=client, endpoint=endpoint))
        wait_for_radosgw(endpoint)

    try:
        yield
    finally:
        for client in config.iterkeys():
            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            client_with_id = daemon_type + '.' + client_id
            client_with_cluster = cluster_name + '.' + client_with_id
            ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).stop()
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                             client=client_with_cluster),
                    ],
                )

def assign_ports(ctx, config):
    """
    Assign port numbers starting with port 7280.
    """
    port = 7280
    role_endpoints = {}
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for role in roles_for_host:
            if role in config:
                role_endpoints[role] = (remote.name.split('@')[1], port)
                port += 1

    return role_endpoints

@contextlib.contextmanager
def create_pools(ctx, clients):
    """Create replicated or erasure coded data pools for rgw."""
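    # Both branches create the '.rgw.buckets' data pool with 64 placement
    # groups; the erasure-coded variant also applies
    # ctx.rgw.erasure_code_profile, and 'cache-pools' additionally layers a
    # cache pool over the data pool.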

    log.info('Creating data pools')
    for client in clients:
        log.debug("Obtaining remote for client {}".format(client))
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = '.rgw.buckets'
        cluster_name, daemon_type, client_id = teuthology.split_role(client)

        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, 64,
                           ctx.rgw.erasure_code_profile, cluster_name)
        else:
            create_replicated_pool(remote, data_pool, 64, cluster_name)
        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024, cluster_name)
    log.debug('Pools created')
    yield

@contextlib.contextmanager
def configure_compression(ctx, clients, compression):
    """Set a compression type in the default zone placement."""
    log.info('Configuring compression type = %s', compression)
    for client in clients:
        # XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete().
        # issue a 'radosgw-admin user list' command to trigger this
        rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)

        rgwadmin(ctx, client,
                 cmd=['zone', 'placement', 'modify', '--rgw-zone', 'default',
                      '--placement-id', 'default-placement',
                      '--compression', compression],
                 check_status=True)
    yield

@contextlib.contextmanager
def task(ctx, config):
    """
    Start rgw daemons (and create their data pools) on the given clients.

    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    To run radosgw through valgrind::

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]
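
    The task also accepts these top-level options, consumed via config.pop()
    below (the values shown are illustrative)::

        tasks:
        - ceph:
        - rgw:
            client.0:
            ec-data-pool: true
            erasure_code_profile:
              k: 2
              m: 1
            cache-pools: false
            frontend: civetweb
            compression type: zlib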
    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(
                          ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = config.keys()  # http://tracker.ceph.com/issues/20417

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rgw', {}))

    role_endpoints = assign_ports(ctx, config)
    ctx.rgw = argparse.Namespace()
    ctx.rgw.role_endpoints = role_endpoints

    ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False))
    ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {})
    ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
    ctx.rgw.frontend = config.pop('frontend', 'civetweb')
    ctx.rgw.compression_type = config.pop('compression type', None)
    ctx.rgw.config = config

    log.debug("config is {}".format(config))
    log.debug("client list is {}".format(clients))
    subtasks = [
        lambda: create_pools(ctx=ctx, clients=clients),
    ]
    if ctx.rgw.compression_type:
        subtasks.extend([
            lambda: configure_compression(ctx=ctx, clients=clients,
                                          compression=ctx.rgw.compression_type),
        ])
    subtasks.extend([
        lambda: start_rgw(ctx=ctx, config=config, clients=clients),
    ])

    with contextutil.nested(*subtasks):
        yield