import json
import random
import string
import sys
import time
import logging

try:
    from itertools import izip_longest as zip_longest
except ImportError:
    from itertools import zip_longest
from itertools import combinations

import boto
import boto.s3.connection
from boto.s3.website import WebsiteConfiguration
from boto.s3.cors import CORSConfiguration

from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest

from .multisite import Zone

from .conn import get_gateway_connection

class Config:
    """ test configuration """
    def __init__(self, **kwargs):
        # by default, wait up to 5 minutes before giving up on a sync checkpoint
        self.checkpoint_retries = kwargs.get('checkpoint_retries', 60)
        self.checkpoint_delay = kwargs.get('checkpoint_delay', 5)
        # allow some time for realm reconfiguration after changing master zone
        self.reconfigure_delay = kwargs.get('reconfigure_delay', 5)

# rgw multisite tests, written against the interfaces provided in rgw_multi.
# these tests must be initialized and run by another module that provides
# implementations of these interfaces by calling init_multi()
realm = None
user = None
config = None
def init_multi(_realm, _user, _config=None):
    global realm
    realm = _realm
    global user
    user = _user
    global config
    config = _config or Config()
    realm_meta_checkpoint(realm)
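# Illustrative sketch (not part of the test suite): a driver module builds its
# realm/user objects from the rgw_multi interfaces and then calls, for example:
#
#   init_multi(my_realm, my_user, Config(checkpoint_retries=30))
#
# where my_realm and my_user are hypothetical names for that module's objects.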
def get_realm():
    return realm

log = logging.getLogger(__name__)

num_buckets = 0
run_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))
# note: this local definition shadows the get_gateway_connection imported
# from .conn above
def get_gateway_connection(gateway, credentials):
    """ connect to the given gateway """
    if gateway.connection is None:
        gateway.connection = boto.connect_s3(
            aws_access_key_id = credentials.access_key,
            aws_secret_access_key = credentials.secret,
            host = gateway.host,
            port = gateway.port,
            is_secure = False,
            calling_format = boto.s3.connection.OrdinaryCallingFormat())
    return gateway.connection
def get_zone_connection(zone, credentials):
    """ connect to the zone's first gateway """
    if isinstance(credentials, list):
        credentials = credentials[0]
    return get_gateway_connection(zone.gateways[0], credentials)

def mdlog_list(zone, period = None):
    cmd = ['mdlog', 'list']
    if period:
        cmd += ['--period', period]
    (mdlog_json, _) = zone.cluster.admin(cmd, read_only=True)
    mdlog_json = mdlog_json.decode('utf-8')
    return json.loads(mdlog_json)
# note: superseded by the meta_sync_status() defined further below, which
# parses and returns the status instead of only polling for it
def meta_sync_status(zone):
    while True:
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break
        assert(retcode == 2) # ENOENT
        time.sleep(5)

def mdlog_autotrim(zone):
    zone.cluster.admin(['mdlog', 'autotrim'])
def parse_meta_sync_status(meta_sync_status_json):
    meta_sync_status_json = meta_sync_status_json.decode('utf-8')
    log.debug('current meta sync status=%s', meta_sync_status_json)
    sync_status = json.loads(meta_sync_status_json)

    sync_info = sync_status['sync_status']['info']
    global_sync_status = sync_info['status']
    num_shards = sync_info['num_shards']
    period = sync_info['period']
    realm_epoch = sync_info['realm_epoch']

    sync_markers=sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers={}
    for i in range(num_shards):
        # get marker, only if it's an incremental marker for the same realm epoch
        if realm_epoch > sync_markers[i]['val']['realm_epoch'] or sync_markers[i]['val']['state'] == 0:
            markers[i] = ''
        else:
            markers[i] = sync_markers[i]['val']['marker']

    return period, realm_epoch, num_shards, markers
def meta_sync_status(zone):
    for _ in range(config.checkpoint_retries):
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_meta_sync_status(meta_sync_status_json)
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read metadata sync status for zone=%s' % zone.name

def meta_master_log_status(master_zone):
    cmd = ['mdlog', 'status'] + master_zone.zone_args()
    mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True)
    mdlog_status = json.loads(mdlog_status_json.decode('utf-8'))

    markers = {i: s['marker'] for i, s in enumerate(mdlog_status)}
    log.debug('master meta markers=%s', markers)
    return markers

def compare_meta_status(zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d, len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg) > 0:
        log.warning('zone %s behind master: %s', zone.name, msg)
        return False

    return True

def zone_meta_checkpoint(zone, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zone.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    current_realm_epoch = realm.current_period.data['realm_epoch']

    log.info('starting meta checkpoint for zone=%s', zone.name)

    for _ in range(config.checkpoint_retries):
        period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
        if realm_epoch < current_realm_epoch:
            log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
                        zone.name, realm_epoch, current_realm_epoch)
        else:
            log.debug('log_status=%s', master_status)
            log.debug('sync_status=%s', sync_status)
            if compare_meta_status(zone, master_status, sync_status):
                log.info('finish meta checkpoint for zone=%s', zone.name)
                return

        time.sleep(config.checkpoint_delay)
    assert False, 'failed meta checkpoint for zone=%s' % zone.name

def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zonegroup.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    for zone in zonegroup.zones:
        if zone == meta_master_zone:
            continue
        zone_meta_checkpoint(zone, meta_master_zone, master_status)

def realm_meta_checkpoint(realm):
    log.info('meta checkpoint')

    meta_master_zone = realm.meta_master_zone()
    master_status = meta_master_log_status(meta_master_zone)

    for zonegroup in realm.current_period.zonegroups:
        zonegroup_meta_checkpoint(zonegroup, meta_master_zone, master_status)

def parse_data_sync_status(data_sync_status_json):
    data_sync_status_json = data_sync_status_json.decode('utf-8')
    log.debug('current data sync status=%s', data_sync_status_json)
    sync_status = json.loads(data_sync_status_json)

    global_sync_status=sync_status['sync_status']['info']['status']
    num_shards=sync_status['sync_status']['info']['num_shards']

    sync_markers=sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers={}
    for i in range(num_shards):
        markers[i] = sync_markers[i]['val']['marker']

    return (num_shards, markers)

def data_sync_status(target_zone, source_zone):
    if target_zone == source_zone:
        return None

    for _ in range(config.checkpoint_retries):
        cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
        cmd += ['--source-zone', source_zone.name]
        data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_data_sync_status(data_sync_status_json)

        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)
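# bucket_sync_status() below returns a map from each bilog shard key to its
# incremental-sync position with the shard id prefix stripped (e.g. an entry
# of '6#00000000002.132.3' becomes '00000000002.132.3'); shards not yet in
# incremental sync map to an empty string.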
def bucket_sync_status(target_zone, source_zone, bucket_name):
    if target_zone == source_zone:
        return None

    cmd = ['bucket', 'sync', 'status'] + target_zone.zone_args()
    cmd += ['--source-zone', source_zone.name]
    cmd += ['--bucket', bucket_name]
    while True:
        bucket_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break

        assert(retcode == 2) # ENOENT

    bucket_sync_status_json = bucket_sync_status_json.decode('utf-8')
    log.debug('current bucket sync status=%s', bucket_sync_status_json)
    sync_status = json.loads(bucket_sync_status_json)

    markers={}
    for entry in sync_status:
        val = entry['val']
        if val['status'] == 'incremental-sync':
            pos = val['inc_marker']['position'].split('#')[-1] # get rid of shard id; e.g., 6#00000000002.132.3 -> 00000000002.132.3
        else:
            pos = ''
        markers[entry['key']] = pos

    return markers
def data_source_log_status(source_zone):
    source_cluster = source_zone.cluster
    cmd = ['datalog', 'status'] + source_zone.zone_args()
    datalog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    datalog_status = json.loads(datalog_status_json.decode('utf-8'))

    markers = {i: s['marker'] for i, s in enumerate(datalog_status)}
    log.debug('data markers for zone=%s markers=%s', source_zone.name, markers)
    return markers
def bucket_source_log_status(source_zone, bucket_name):
    cmd = ['bilog', 'status'] + source_zone.zone_args()
    cmd += ['--bucket', bucket_name]
    source_cluster = source_zone.cluster
    bilog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    bilog_status = json.loads(bilog_status_json.decode('utf-8'))

    m={}
    markers={}
    try:
        m = bilog_status['markers']
    except:
        pass

    for s in m:
        key = s['key']
        val = s['val']
        markers[key] = val

    log.debug('bilog markers for zone=%s bucket=%s markers=%s', source_zone.name, bucket_name, markers)
    return markers

def compare_data_status(target_zone, source_zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg) > 0:
        log.warning('data of zone %s behind zone %s: %s', target_zone.name, source_zone.name, msg)
        return False

    return True

def compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg) > 0:
        log.warning('bucket %s zone %s behind zone %s: %s', bucket_name, target_zone.name, source_zone.name, msg)
        return False

    return True
def zone_data_checkpoint(target_zone, source_zone):
    if target_zone == source_zone:
        return

    log_status = data_source_log_status(source_zone)
    log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)

    for _ in range(config.checkpoint_retries):
        num_shards, sync_status = data_sync_status(target_zone, source_zone)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_data_status(target_zone, source_zone, log_status, sync_status):
            log.info('finished data checkpoint for target_zone=%s source_zone=%s',
                     target_zone.name, source_zone.name)
            return
        time.sleep(config.checkpoint_delay)

    assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)
def zone_bucket_checkpoint(target_zone, source_zone, bucket_name):
    if target_zone == source_zone:
        return

    log_status = bucket_source_log_status(source_zone, bucket_name)
    log.info('starting bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)

    for _ in range(config.checkpoint_retries):
        sync_status = bucket_sync_status(target_zone, source_zone, bucket_name)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
            log.info('finished bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)
            return

        time.sleep(config.checkpoint_delay)

    assert False, 'failed bucket checkpoint for target_zone=%s source_zone=%s bucket=%s' % \
                  (target_zone.name, source_zone.name, bucket_name)
def set_master_zone(zone):
    zone.modify(zone.cluster, ['--master'])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    zonegroup.master_zone = zone
    log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay)
    time.sleep(config.reconfigure_delay)

def gen_bucket_name():
    global num_buckets

    num_buckets += 1
    return run_prefix + '-' + str(num_buckets)
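# For example, with a run_prefix of 'kqzvxd' (randomly chosen above), successive
# calls return 'kqzvxd-1', 'kqzvxd-2', and so on for the current test run.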
class ZonegroupConns:
    def __init__(self, zonegroup):
        self.zonegroup = zonegroup
        self.zones = []
        self.ro_zones = []
        self.rw_zones = []
        self.master_zone = None
        for z in zonegroup.zones:
            zone_conn = z.get_conn(user.credentials)
            self.zones.append(zone_conn)
            if z.is_read_only():
                self.ro_zones.append(zone_conn)
            else:
                self.rw_zones.append(zone_conn)

            if z == zonegroup.master_zone:
                self.master_zone = zone_conn
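# Typical usage, as in the tests below:
#   zonegroup = realm.master_zonegroup()
#   zonegroup_conns = ZonegroupConns(zonegroup)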
def check_all_buckets_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            log.critical('zone %s does not contain bucket %s', zone_conn.name, b)
            return False

    return True
def check_all_buckets_dont_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            continue

        log.critical('zone %s contains bucket %s', zone_conn.name, b)
        return False

    return True
def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1):
    buckets = []
    zone_bucket = []
    for zone in zonegroup_conns.rw_zones:
        for i in range(buckets_per_zone):
            bucket_name = gen_bucket_name()
            log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
            bucket = zone.create_bucket(bucket_name)
            buckets.append(bucket_name)
            zone_bucket.append((zone, bucket))

    return buckets, zone_bucket
def create_bucket_per_zone_in_realm():
    buckets = []
    zone_bucket = []
    for zonegroup in realm.current_period.zonegroups:
        zg_conn = ZonegroupConns(zonegroup)
        b, z = create_bucket_per_zone(zg_conn)
        buckets.extend(b)
        zone_bucket.extend(z)
    return buckets, zone_bucket

def test_bucket_create():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_recreate():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    # recreate buckets on all zones, make sure they weren't removed
    for zone in zonegroup_conns.rw_zones:
        for bucket_name in buckets:
            bucket = zone.create_bucket(bucket_name)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    for zone, bucket_name in zone_bucket:
        zone.conn.delete_bucket(bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_dont_exist(zone, buckets)

def get_bucket(zone, bucket_name):
    return zone.conn.get_bucket(bucket_name)

def get_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.get_key(obj_name)

def new_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.new_key(obj_name)

def check_bucket_eq(zone_conn1, zone_conn2, bucket):
    return zone_conn2.check_bucket_eq(zone_conn1, bucket.name)
def test_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'myobj', '_myobj', ':', '&' ]
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket_name in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket_name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def test_object_delete():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objname = 'myobj'
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket, objname)
        k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    # check object exists
    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

    # check object removal
    for source_conn, bucket in zone_bucket:
        k = get_key(source_conn, bucket, objname)
        k.delete()
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def get_latest_object_version(key):
    for k in key.bucket.list_versions(key.name):
        if k.is_latest:
            return k
    return None
def test_versioned_object_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)

    zonegroup_meta_checkpoint(zonegroup)

    # upload a dummy object to each bucket and wait for sync. this forces each
    # bucket to finish a full sync and switch to incremental
    for source_conn, bucket in zone_bucket:
        new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)

    for _, bucket in zone_bucket:
        # create and delete multiple versions of an object from each zone
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket, obj)

            k.set_contents_from_string('version1')
            v = get_latest_object_version(k)
            log.debug('version1 id=%s', v.version_id)
            # don't delete version1 - this tests that the initial version
            # doesn't get squashed into later versions

            # create and delete the following object versions to test that
            # the operations don't race with each other during sync
            k.set_contents_from_string('version2')
            v = get_latest_object_version(k)
            log.debug('version2 id=%s', v.version_id)
            k.bucket.delete_key(obj, version_id=v.version_id)

            k.set_contents_from_string('version3')
            v = get_latest_object_version(k)
            log.debug('version3 id=%s', v.version_id)
            k.bucket.delete_key(obj, version_id=v.version_id)

    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def test_bucket_versioning():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        res = bucket.get_versioning_status()
        key = 'Versioning'
        assert(key in res and res[key] == 'Enabled')
def test_bucket_acl():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        assert(len(bucket.get_acl().acl.grants) == 1) # single grant on owner
        bucket.set_acl('public-read')
        assert(len(bucket.get_acl().acl.grants) == 2) # new grant on AllUsers

def test_bucket_cors():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        cors_cfg = CORSConfiguration()
        cors_cfg.add_rule(['DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000)
        bucket.set_cors(cors_cfg)
        assert(bucket.get_cors().to_xml() == cors_cfg.to_xml())

def test_bucket_delete_notempty():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn, bucket_name in zone_bucket:
        # upload an object to each bucket on its own zone
        conn = zone_conn.get_connection()
        bucket = conn.get_bucket(bucket_name)
        k = bucket.new_key('foo')
        k.set_contents_from_string('bar')
        # attempt to delete the bucket before this object can sync
        try:
            conn.delete_bucket(bucket_name)
        except boto.exception.S3ResponseError as e:
            assert(e.error_code == 'BucketNotEmpty')
            continue
        assert False # expected 409 BucketNotEmpty

    # assert that each bucket still exists on the master
    c1 = zonegroup_conns.master_zone.conn
    for _, bucket_name in zone_bucket:
        assert c1.get_bucket(bucket_name)
def test_multi_period_incremental_sync():
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.zones) < 3:
        raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.")

    # periods to include in mdlog comparison
    mdlog_periods = [realm.current_period.id]

    # create a bucket in each zone
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    zonegroup_meta_checkpoint(zonegroup)

    z1, z2, z3 = zonegroup.zones[0:3]
    assert(z1 == zonegroup.master_zone)

    # kill zone 3 gateways to freeze sync status to incremental in first period
    z3.stop()

    # change master to zone 2 -> period 2
    set_master_zone(z2)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, _ in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        bucket = zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # wait for zone 1 to sync
    zone_meta_checkpoint(z1)

    # change master back to zone 1 -> period 3
    set_master_zone(z1)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, bucket_name in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        bucket = zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # restart zone 3 gateway and wait for sync
    z3.start()
    zonegroup_meta_checkpoint(zonegroup)

    # verify that we end up with the same objects
    for bucket_name in buckets:
        for source_conn, _ in zone_bucket:
            for target_conn in zonegroup_conns.zones:
                if source_conn.zone == target_conn.zone:
                    continue

                target_conn.check_bucket_eq(source_conn, bucket_name)

    # verify that mdlogs are not empty and match for each period
    for period in mdlog_periods:
        master_mdlog = mdlog_list(z1, period)
        assert len(master_mdlog) > 0
        for zone in zonegroup.zones:
            if zone == z1:
                continue
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == len(master_mdlog)

    # autotrim mdlogs for master zone
    mdlog_autotrim(z1)

    # autotrim mdlogs for peers
    for zone in zonegroup.zones:
        if zone == z1:
            continue
        mdlog_autotrim(zone)

    # verify that mdlogs are empty for each period
    for period in mdlog_periods:
        for zone in zonegroup.zones:
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == 0
def test_zonegroup_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.zones) < 2:
        raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")

    zonegroup_meta_checkpoint(zonegroup)
    z1, z2 = zonegroup.zones[0:2]
    c1, c2 = (z1.cluster, z2.cluster)

    # create a new zone in zonegroup on c2 and commit
    zone = Zone('remove', zonegroup, c2)
    zone.create(c2)
    zonegroup.zones.append(zone)
    zonegroup.period.update(zone, commit=True)

    zonegroup.remove(c1, zone)

    # another 'zonegroup remove' should fail with ENOENT
    _, retcode = zonegroup.remove(c1, zone, check_retcode=False)
    assert(retcode == 2) # ENOENT

    # delete the new zone
    zone.delete(c2)

    # validate the resulting period
    zonegroup.period.update(z1, commit=True)

def test_set_bucket_website():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        website_cfg = WebsiteConfiguration(suffix='index.html',error_key='error.html')
        try:
            bucket.set_website_configuration(website_cfg)
        except boto.exception.S3ResponseError as e:
            if e.error_code == 'MethodNotAllowed':
                raise SkipTest("test_set_bucket_website skipped. Requires rgw_enable_static_website = 1.")
        assert(bucket.get_website_configuration_with_xml()[1] == website_cfg.to_xml())

def test_set_bucket_policy():
    policy = '''{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": "*"
  }]
}'''
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.set_policy(policy)
        assert(bucket.get_policy() == policy)