1 """
2 Run a set of s3 tests on rgw.
3 """
4 from io import BytesIO
5 from configobj import ConfigObj
6 import base64
7 import contextlib
8 import logging
9 import os
10 import random
11 import string
12
13 from teuthology import misc as teuthology
14 from teuthology import contextutil
15 from teuthology.config import config as teuth_config
16 from teuthology.orchestra import run
17 from teuthology.exceptions import ConfigError
18
19 log = logging.getLogger(__name__)
20
@contextlib.contextmanager
def download(ctx, config):
    """
    Download the s3-tests suite from the configured git remote and check out
    the requested branch. Remove the downloaded s3-tests directory upon exit.

    The context passed in should be identical to the context
    passed in to the main task.
    """
    assert isinstance(config, dict)
    log.info('Downloading s3-tests...')
    testdir = teuthology.get_testdir(ctx)
    for (client, client_config) in config.items():
        s3tests_branch = client_config.get('force-branch', None)
        if not s3tests_branch:
            raise ValueError(
                "Could not determine what branch to use for s3-tests. Please add 'force-branch: {s3-tests branch name}' to the .yaml config for this s3tests task.")

        log.info("Using branch '%s' for s3tests", s3tests_branch)
        sha1 = client_config.get('sha1')
        git_remote = client_config.get('git_remote', teuth_config.ceph_git_base_url)
        ctx.cluster.only(client).run(
            args=[
                'git', 'clone',
                '-b', s3tests_branch,
                git_remote + 's3-tests.git',
                '{tdir}/s3-tests-{client}'.format(tdir=testdir, client=client),
            ],
        )
        if sha1 is not None:
            ctx.cluster.only(client).run(
                args=[
                    'cd', '{tdir}/s3-tests-{client}'.format(tdir=testdir, client=client),
                    run.Raw('&&'),
                    'git', 'reset', '--hard', sha1,
                ],
            )
    try:
        yield
    finally:
        log.info('Removing s3-tests...')
        testdir = teuthology.get_testdir(ctx)
        for client in config:
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-rf',
                    '{tdir}/s3-tests-{client}'.format(tdir=testdir, client=client),
                ],
            )


def _config_user(s3tests_conf, section, user):
    """
    Configure users for this section by stashing away keys, ids, and
    email addresses.
    """
    s3tests_conf[section].setdefault('user_id', user)
    s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
    s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
    s3tests_conf[section].setdefault('access_key',
        ''.join(random.choice(string.ascii_uppercase) for i in range(20)))
    s3tests_conf[section].setdefault('secret_key',
        base64.b64encode(os.urandom(40)).decode())
    s3tests_conf[section].setdefault('totp_serial',
        ''.join(random.choice(string.digits) for i in range(10)))
    s3tests_conf[section].setdefault('totp_seed',
        base64.b32encode(os.urandom(40)).decode())
    s3tests_conf[section].setdefault('totp_seconds', '5')


@contextlib.contextmanager
def create_users(ctx, config):
    """
    Create the rgw users needed by s3-tests: a main and an alternate user,
    a tenanted user, and (for sts runs) an iam user.
    """
    assert isinstance(config, dict)
    log.info('Creating rgw users...')
    testdir = teuthology.get_testdir(ctx)

    if ctx.sts_variable:
        users = {'s3 main': 'foo', 's3 alt': 'bar', 's3 tenant': 'testx$tenanteduser', 'iam': 'foobar'}
        for client in config['clients']:
            s3tests_conf = config['s3tests_conf'][client]
            s3tests_conf.setdefault('fixtures', {})
            s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
            for section, user in users.items():
                _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
                log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
                cluster_name, daemon_type, client_id = teuthology.split_role(client)
                client_with_id = daemon_type + '.' + client_id
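                # The 'iam' user drives the sts/iam test cases: it is created
                # without an MFA seed but gets user-policy, roles and
                # oidc-provider admin caps below.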
                if section == 'iam':
                    ctx.cluster.only(client).run(
                        args=[
                            'adjust-ulimits',
                            'ceph-coverage',
                            '{tdir}/archive/coverage'.format(tdir=testdir),
                            'radosgw-admin',
                            '-n', client_with_id,
                            'user', 'create',
                            '--uid', s3tests_conf[section]['user_id'],
                            '--display-name', s3tests_conf[section]['display_name'],
                            '--access-key', s3tests_conf[section]['access_key'],
                            '--secret', s3tests_conf[section]['secret_key'],
                            '--cluster', cluster_name,
                        ],
                    )
                    ctx.cluster.only(client).run(
                        args=[
                            'adjust-ulimits',
                            'ceph-coverage',
                            '{tdir}/archive/coverage'.format(tdir=testdir),
                            'radosgw-admin',
                            '-n', client_with_id,
                            'caps', 'add',
                            '--uid', s3tests_conf[section]['user_id'],
                            '--caps', 'user-policy=*',
                            '--cluster', cluster_name,
                        ],
                    )
                    ctx.cluster.only(client).run(
                        args=[
                            'adjust-ulimits',
                            'ceph-coverage',
                            '{tdir}/archive/coverage'.format(tdir=testdir),
                            'radosgw-admin',
                            '-n', client_with_id,
                            'caps', 'add',
                            '--uid', s3tests_conf[section]['user_id'],
                            '--caps', 'roles=*',
                            '--cluster', cluster_name,
                        ],
                    )
                    ctx.cluster.only(client).run(
                        args=[
                            'adjust-ulimits',
                            'ceph-coverage',
                            '{tdir}/archive/coverage'.format(tdir=testdir),
                            'radosgw-admin',
                            '-n', client_with_id,
                            'caps', 'add',
                            '--uid', s3tests_conf[section]['user_id'],
                            '--caps', 'oidc-provider=*',
                            '--cluster', cluster_name,
                        ],
                    )

                else:
                    ctx.cluster.only(client).run(
                        args=[
                            'adjust-ulimits',
                            'ceph-coverage',
                            '{tdir}/archive/coverage'.format(tdir=testdir),
                            'radosgw-admin',
                            '-n', client_with_id,
                            'user', 'create',
                            '--uid', s3tests_conf[section]['user_id'],
                            '--display-name', s3tests_conf[section]['display_name'],
                            '--access-key', s3tests_conf[section]['access_key'],
                            '--secret', s3tests_conf[section]['secret_key'],
                            '--email', s3tests_conf[section]['email'],
                            '--caps', 'user-policy=*',
                            '--cluster', cluster_name,
                        ],
                    )
                    ctx.cluster.only(client).run(
                        args=[
                            'adjust-ulimits',
                            'ceph-coverage',
                            '{tdir}/archive/coverage'.format(tdir=testdir),
                            'radosgw-admin',
                            '-n', client_with_id,
                            'mfa', 'create',
                            '--uid', s3tests_conf[section]['user_id'],
                            '--totp-serial', s3tests_conf[section]['totp_serial'],
                            '--totp-seed', s3tests_conf[section]['totp_seed'],
                            '--totp-seconds', s3tests_conf[section]['totp_seconds'],
                            '--totp-window', '8',
                            '--totp-seed-type', 'base32',
                            '--cluster', cluster_name,
                        ],
                    )

    else:
        users = {'s3 main': 'foo', 's3 alt': 'bar', 's3 tenant': 'testx$tenanteduser'}
        for client in config['clients']:
            s3tests_conf = config['s3tests_conf'][client]
            s3tests_conf.setdefault('fixtures', {})
            s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
            for section, user in users.items():
                _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
                log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
                cluster_name, daemon_type, client_id = teuthology.split_role(client)
                client_with_id = daemon_type + '.' + client_id
                ctx.cluster.only(client).run(
                    args=[
                        'adjust-ulimits',
                        'ceph-coverage',
                        '{tdir}/archive/coverage'.format(tdir=testdir),
                        'radosgw-admin',
                        '-n', client_with_id,
                        'user', 'create',
                        '--uid', s3tests_conf[section]['user_id'],
                        '--display-name', s3tests_conf[section]['display_name'],
                        '--access-key', s3tests_conf[section]['access_key'],
                        '--secret', s3tests_conf[section]['secret_key'],
                        '--email', s3tests_conf[section]['email'],
                        '--caps', 'user-policy=*',
                        '--cluster', cluster_name,
                    ],
                )
                ctx.cluster.only(client).run(
                    args=[
                        'adjust-ulimits',
                        'ceph-coverage',
                        '{tdir}/archive/coverage'.format(tdir=testdir),
                        'radosgw-admin',
                        '-n', client_with_id,
                        'mfa', 'create',
                        '--uid', s3tests_conf[section]['user_id'],
                        '--totp-serial', s3tests_conf[section]['totp_serial'],
                        '--totp-seed', s3tests_conf[section]['totp_seed'],
                        '--totp-seconds', s3tests_conf[section]['totp_seconds'],
                        '--totp-window', '8',
                        '--totp-seed-type', 'base32',
                        '--cluster', cluster_name,
                    ],
                )

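    # When a Keycloak token has been exported into the environment (used by the
    # webidentity sts tests), forward the related settings into the
    # [webidentity] section of the generated s3-tests conf.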
250 if "TOKEN" in os.environ:
251 s3tests_conf.setdefault('webidentity', {})
252 s3tests_conf['webidentity'].setdefault('token',os.environ['TOKEN'])
253 s3tests_conf['webidentity'].setdefault('aud',os.environ['AUD'])
254 s3tests_conf['webidentity'].setdefault('sub',os.environ['SUB'])
255 s3tests_conf['webidentity'].setdefault('azp',os.environ['AZP'])
256 s3tests_conf['webidentity'].setdefault('user_token',os.environ['USER_TOKEN'])
257 s3tests_conf['webidentity'].setdefault('thumbprint',os.environ['THUMBPRINT'])
258 s3tests_conf['webidentity'].setdefault('KC_REALM',os.environ['KC_REALM'])
259
    try:
        yield
    finally:
        for client in config['clients']:
            for user in users.values():
                uid = '{user}.{client}'.format(user=user, client=client)
                cluster_name, daemon_type, client_id = teuthology.split_role(client)
                client_with_id = daemon_type + '.' + client_id
                ctx.cluster.only(client).run(
                    args=[
                        'adjust-ulimits',
                        'ceph-coverage',
                        '{tdir}/archive/coverage'.format(tdir=testdir),
                        'radosgw-admin',
                        '-n', client_with_id,
                        'user', 'rm',
                        '--uid', uid,
                        '--purge-data',
                        '--cluster', cluster_name,
                    ],
                )


@contextlib.contextmanager
def configure(ctx, config):
    """
    Configure the s3-tests: run the bootstrap script in each checkout and
    write out the generated s3-tests and boto conf files.
    """
    assert isinstance(config, dict)
    log.info('Configuring s3-tests...')
    testdir = teuthology.get_testdir(ctx)
    for client, properties in config['clients'].items():
        properties = properties or {}
        s3tests_conf = config['s3tests_conf'][client]
        s3tests_conf['DEFAULT']['calling_format'] = properties.get('calling-format', 'ordinary')

        # use rgw_server if given, or default to local client
        role = properties.get('rgw_server', client)

        endpoint = ctx.rgw.role_endpoints.get(role)
        assert endpoint, 's3tests: no rgw endpoint for {}'.format(role)

        s3tests_conf['DEFAULT']['host'] = endpoint.dns_name

        website_role = properties.get('rgw_website_server')
        if website_role:
            website_endpoint = ctx.rgw.role_endpoints.get(website_role)
            assert website_endpoint, \
                's3tests: no rgw endpoint for rgw_website_server {}'.format(website_role)
            assert website_endpoint.website_dns_name, \
                's3tests: no dns-s3website-name for rgw_website_server {}'.format(website_role)
            s3tests_conf['DEFAULT']['s3website_domain'] = website_endpoint.website_dns_name

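        # Choose kms_keyid/kms_keyid2 from whichever KMS helper task is active
        # (barbican, vault or pykmip); otherwise fall back to the static
        # testkey-1/testkey-2 names expected from the local (ceph.conf) kms.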
        if hasattr(ctx, 'barbican'):
            properties = properties['barbican']
            if properties is not None and 'kms_key' in properties:
                if not (properties['kms_key'] in ctx.barbican.keys):
                    raise ConfigError('Key ' + properties['kms_key'] + ' not defined')

                if not (properties['kms_key2'] in ctx.barbican.keys):
                    raise ConfigError('Key ' + properties['kms_key2'] + ' not defined')

                key = ctx.barbican.keys[properties['kms_key']]
                s3tests_conf['DEFAULT']['kms_keyid'] = key['id']

                key = ctx.barbican.keys[properties['kms_key2']]
                s3tests_conf['DEFAULT']['kms_keyid2'] = key['id']

        elif hasattr(ctx, 'vault'):
            engine_or_flavor = vars(ctx.vault).get('flavor', ctx.vault.engine)
            keys = []
            for name in (x['Path'] for x in vars(ctx.vault).get('keys', {}).get(ctx.rgw.vault_role)):
                keys.append(name)

            keys.extend(['testkey-1', 'testkey-2'])
            if engine_or_flavor == "old":
                keys = [keys[i] + "/1" for i in range(len(keys))]

            properties = properties.get('vault_%s' % engine_or_flavor, {})
            s3tests_conf['DEFAULT']['kms_keyid'] = properties.get('key_path', keys[0])
            s3tests_conf['DEFAULT']['kms_keyid2'] = properties.get('key_path2', keys[1])
        elif hasattr(ctx.rgw, 'pykmip_role'):
            keys = []
            for name in (x['Name'] for x in ctx.pykmip.keys[ctx.rgw.pykmip_role]):
                p = name.partition('-')
                keys.append(p[2] if p[2] else p[0])
            keys.extend(['testkey-1', 'testkey-2'])
            s3tests_conf['DEFAULT']['kms_keyid'] = properties.get('kms_key', keys[0])
            s3tests_conf['DEFAULT']['kms_keyid2'] = properties.get('kms_key2', keys[1])
        else:
            # Fallback scenario where it's the local (ceph.conf) kms being tested
            s3tests_conf['DEFAULT']['kms_keyid'] = 'testkey-1'
            s3tests_conf['DEFAULT']['kms_keyid2'] = 'testkey-2'

        slow_backend = properties.get('slow_backend')
        if slow_backend:
            s3tests_conf['fixtures']['slow backend'] = slow_backend

        storage_classes = properties.get('storage classes')
        if storage_classes:
            s3tests_conf['s3 main']['storage_classes'] = storage_classes

        lc_debug_interval = properties.get('lc_debug_interval')
        if lc_debug_interval:
            s3tests_conf['s3 main']['lc_debug_interval'] = lc_debug_interval

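        # When the rgw cloud-transition setup is present, point the [s3 cloud]
        # section at the remote cloud client and its tiering credentials.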
        if ctx.rgw_cloudtier is not None:
            log.info(' ctx.rgw_cloudtier config is %s ...', ctx.rgw_cloudtier.config)
            client_rgw_config = ctx.rgw_cloudtier.config.get(client)
            if client_rgw_config:
                log.info(' ctx.rgw_cloudtier config is %s ...', client_rgw_config)
                cloudtier_user = client_rgw_config.get('cloudtier_user')
                cloud_client = client_rgw_config.get('cloud_client')
                endpoint = ctx.rgw.role_endpoints.get(cloud_client)
                s3tests_conf['s3 cloud']['host'] = endpoint.dns_name
                s3tests_conf['s3 cloud']['port'] = endpoint.port
                s3tests_conf['s3 cloud']['access_key'] = cloudtier_user.get('cloud_access_key')
                s3tests_conf['s3 cloud']['secret_key'] = cloudtier_user.get('cloud_secret')
                s3tests_conf['s3 cloud']['cloud_storage_class'] = client_rgw_config.get('cloud_storage_class')
                s3tests_conf['s3 cloud']['storage_class'] = client_rgw_config.get('cloud_regular_storage_class')
                s3tests_conf['s3 cloud']['retain_head_object'] = client_rgw_config.get('cloud_retain_head_object')
                cloud_target_path = client_rgw_config.get('cloud_target_path')
                cloud_target_storage_class = client_rgw_config.get('cloud_target_storage_class')
                if cloud_target_path is not None:
                    s3tests_conf['s3 cloud']['target_path'] = cloud_target_path
                if cloud_target_storage_class is not None:
                    s3tests_conf['s3 cloud']['target_storage_class'] = cloud_target_storage_class

        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'cd',
                '{tdir}/s3-tests-{client}'.format(tdir=testdir, client=client),
                run.Raw('&&'),
                './bootstrap',
            ],
        )
        conf_fp = BytesIO()
        s3tests_conf.write(conf_fp)
        remote.write_file(
            path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
            data=conf_fp.getvalue(),
        )

    log.info('Configuring boto...')
    boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
    for client, properties in config['clients'].items():
        with open(boto_src) as f:
            (remote,) = ctx.cluster.only(client).remotes.keys()
            conf = f.read().format(
                idle_timeout=config.get('idle_timeout', 30)
            )
            remote.write_file('{tdir}/boto-{client}.cfg'.format(tdir=testdir, client=client), conf)

    try:
        yield

    finally:
        log.info('Cleaning up boto...')
        for client, properties in config['clients'].items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm',
                    '{tdir}/boto-{client}.cfg'.format(tdir=testdir, client=client),
                ],
            )

@contextlib.contextmanager
def run_tests(ctx, config):
    """
    Run the s3tests after everything is set up.

    :param ctx: Context passed to task
    :param config: specific configuration information
    """
    assert isinstance(config, dict)
    testdir = teuthology.get_testdir(ctx)
    for client, client_config in config.items():
        client_config = client_config or {}
        (cluster_name, _, _) = teuthology.split_role(client)
        (remote,) = ctx.cluster.only(client).remotes.keys()
        args = [
            'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
            'BOTO_CONFIG={tdir}/boto-{client}.cfg'.format(tdir=testdir, client=client)
        ]
        # the 'requests' library comes with its own ca bundle to verify ssl
        # certificates - override that to use the system's ca bundle, which
        # is where the ssl task installed this certificate
        if remote.os.package_type == 'deb':
            args += ['REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt']
        else:
            args += ['REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt']
        # civetweb > 1.8 && beast parsers are strict on rfc2616
        attrs = ["!fails_on_rgw", "!lifecycle_expiration", "!fails_strict_rfc2616", "!test_of_sts", "!webidentity_test"]
        if client_config.get('calling-format') != 'ordinary':
            attrs += ['!fails_with_subdomain']
        if not client_config.get('with-sse-s3'):
            attrs += ['!sse-s3']
        elif client_config.get('with-sse-s3'):
            pass
        elif ctx.ceph[cluster_name].rgw_crypt_sse_s3_backend is None:
            attrs += ['!sse-s3']

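        # Note that 'extra_attrs' replaces the default attribute filter built
        # above rather than extending it.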
        if 'extra_attrs' in client_config:
            attrs = client_config.get('extra_attrs')
        args += [
            '{tdir}/s3-tests-{client}/virtualenv/bin/python'.format(tdir=testdir, client=client),
            '-m', 'nose',
            '-w',
            '{tdir}/s3-tests-{client}'.format(tdir=testdir, client=client),
            '-v',
            '-a', ','.join(attrs),
        ]
        if 'extra_args' in client_config:
            args.append(client_config['extra_args'])

        remote.run(
            args=args,
            label="s3 tests against rgw"
        )
    yield

@contextlib.contextmanager
def scan_for_leaked_encryption_keys(ctx, config):
    """
    Scan radosgw logs for the encryption keys used by s3tests to
    verify that we're not leaking secrets.

    :param ctx: Context passed to task
    :param config: specific configuration information
    """
    assert isinstance(config, dict)

    try:
        yield
    finally:
        # x-amz-server-side-encryption-customer-key
        s3test_customer_key = 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs='

        log.debug('Scanning radosgw logs for leaked encryption keys...')
        procs = list()
        for client, client_config in config.items():
            if not client_config.get('scan_for_encryption_keys', True):
                continue
            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            client_with_cluster = '.'.join((cluster_name, daemon_type, client_id))
            (remote,) = ctx.cluster.only(client).remotes.keys()
            proc = remote.run(
                args=[
                    'grep',
                    '--binary-files=text',
                    s3test_customer_key,
                    '/var/log/ceph/rgw.{client}.log'.format(client=client_with_cluster),
                ],
                wait=False,
                check_status=False,
            )
            procs.append(proc)

        for proc in procs:
            proc.wait()
            if proc.returncode == 1:  # 1 means no matches
                continue
            log.error('radosgw log is leaking encryption keys!')
            raise Exception('radosgw log is leaking encryption keys')

@contextlib.contextmanager
def task(ctx, config):
    """
    Run the s3-tests suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3tests:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests: [client.0]

    To run against a server on client.1 and increase the boto timeout to 10m::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3tests:
            client.0:
              rgw_server: client.1
              idle_timeout: 600

    To pass extra arguments to nose (e.g. to run a certain test)::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              extra_args: ['test_s3:test_object_acl_grand_public_read']
            client.1:
              extra_args: ['--exclude', 'test_100_continue']

    To run the sts tests, set the config variable 'sts_tests' to 'True'::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              sts_tests: True
              rgw_server: client.0

    To run the cloud-transition tests, set the config variable 'cloudtier_tests' to 'True'::

        tasks:
        - ceph:
        - rgw: [client.0, client.1]
        - s3tests:
            client.0:
              cloudtier_tests: True
              rgw_server: client.0

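    To select which s3-tests branch is cloned ('force-branch' is required by the
    download step; the branch name below is only an example), or to skip the
    post-run scan of the rgw log for leaked encryption keys::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3tests:
            client.0:
              force-branch: ceph-quincy
              scan_for_encryption_keys: false
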
588 """
589 assert hasattr(ctx, 'rgw'), 's3tests must run after the rgw task'
590 assert config is None or isinstance(config, list) \
591 or isinstance(config, dict), \
592 "task s3tests only supports a list or dictionary for configuration"
593 all_clients = ['client.{id}'.format(id=id_)
594 for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
595 if config is None:
596 config = all_clients
597 if isinstance(config, list):
598 config = dict.fromkeys(config)
599 clients = config.keys()
600
601 overrides = ctx.config.get('overrides', {})
602 # merge each client section, not the top level.
603 for client in config.keys():
604 if not config[client]:
605 config[client] = {}
606 teuthology.deep_merge(config[client], overrides.get('s3tests', {}))
607
608 log.debug('s3tests config is %s', config)
609
610 s3tests_conf = {}
611
612 for client, client_config in config.items():
613 if 'sts_tests' in client_config:
614 ctx.sts_variable = True
615 else:
616 ctx.sts_variable = False
617 if 'cloudtier_tests' in client_config:
618 ctx.cloudtier_variable = True
619 else:
620 ctx.cloudtier_variable = False
621 #This will be the structure of config file when you want to run webidentity_test (sts-test)
622 if ctx.sts_variable and "TOKEN" in os.environ:
623 for client in clients:
624 endpoint = ctx.rgw.role_endpoints.get(client)
625 assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)
626
627 s3tests_conf[client] = ConfigObj(
628 indent_type='',
629 infile={
630 'DEFAULT':
631 {
632 'port' : endpoint.port,
633 'is_secure' : endpoint.cert is not None,
634 'api_name' : 'default',
635 },
636 'fixtures' : {},
637 's3 main' : {},
638 's3 alt' : {},
639 's3 tenant' : {},
640 'iam' : {},
641 'webidentity': {},
642 }
643 )
644
645 elif ctx.sts_variable:
646 #This will be the structure of config file when you want to run assume_role_test and get_session_token_test (sts-test)
647 for client in clients:
648 endpoint = ctx.rgw.role_endpoints.get(client)
649 assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)
650
651 s3tests_conf[client] = ConfigObj(
652 indent_type='',
653 infile={
654 'DEFAULT':
655 {
656 'port' : endpoint.port,
657 'is_secure' : endpoint.cert is not None,
658 'api_name' : 'default',
659 },
660 'fixtures' : {},
661 's3 main' : {},
662 's3 alt' : {},
663 's3 tenant' : {},
664 'iam' : {},
665 }
666 )
667
668 elif ctx.cloudtier_variable:
669 #This will be the structure of config file when you want to run normal s3-tests
670 for client in clients:
671 endpoint = ctx.rgw.role_endpoints.get(client)
672 assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)
673
674 s3tests_conf[client] = ConfigObj(
675 indent_type='',
676 infile={
677 'DEFAULT':
678 {
679 'port' : endpoint.port,
680 'is_secure' : endpoint.cert is not None,
681 'api_name' : 'default',
682 },
683 'fixtures' : {},
684 's3 main' : {},
685 's3 alt' : {},
686 's3 tenant' : {},
687 's3 cloud' : {},
688 }
689 )
690 else:
691 #This will be the structure of config file when you want to run normal s3-tests
692 for client in clients:
693 endpoint = ctx.rgw.role_endpoints.get(client)
694 assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)
695
696 s3tests_conf[client] = ConfigObj(
697 indent_type='',
698 infile={
699 'DEFAULT':
700 {
701 'port' : endpoint.port,
702 'is_secure' : endpoint.cert is not None,
703 'api_name' : 'default',
704 },
705 'fixtures' : {},
706 's3 main' : {},
707 's3 alt' : {},
708 's3 tenant' : {},
709 }
710 )
711
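    # Run the whole pipeline: clone s3-tests, create the rgw users, write the
    # conf files, run nose, and finally scan the rgw logs for leaked keys;
    # cleanup happens in reverse order as the nested context managers exit.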
    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=dict(
            clients=clients,
            s3tests_conf=s3tests_conf,
        )),
        lambda: configure(ctx=ctx, config=dict(
            clients=config,
            s3tests_conf=s3tests_conf,
        )),
        lambda: run_tests(ctx=ctx, config=config),
        lambda: scan_for_leaked_encryption_keys(ctx=ctx, config=config),
    ):
        pass
    yield