# ceph/src/test/rgw/rgw_multi/tests.py
# (recovered from a git web-viewer extraction, commit d23fba3097f4025cb1500e576b2d08f89739f7ad)
import json
import logging
import random
import string
import time

try:
    # python 2 compatibility
    from itertools import izip_longest as zip_longest
except ImportError:
    from itertools import zip_longest
from itertools import combinations

import boto
import boto.s3.connection
from boto.s3.website import WebsiteConfiguration
from boto.s3.cors import CORSConfiguration

from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest

from .multisite import Zone

from .conn import get_gateway_connection
28 """ test configuration """
29 def __init__(self
, **kwargs
):
30 # by default, wait up to 5 minutes before giving up on a sync checkpoint
31 self
.checkpoint_retries
= kwargs
.get('checkpoint_retries', 60)
32 self
.checkpoint_delay
= kwargs
.get('checkpoint_delay', 5)
33 # allow some time for realm reconfiguration after changing master zone
34 self
.reconfigure_delay
= kwargs
.get('reconfigure_delay', 5)
36 # rgw multisite tests, written against the interfaces provided in rgw_multi.
37 # these tests must be initialized and run by another module that provides
38 # implementations of these interfaces by calling init_multi()
def init_multi(_realm, _user, _config=None):
    """Install the realm/user/config globals used by every test.

    Must be called by the harness module (which owns the cluster setup)
    before any test in this module runs.
    NOTE(review): the extraction lost the original global-assignment lines;
    reconstructed from the visible uses of `realm`, `user` and `config`.
    """
    global realm
    realm = _realm
    global user
    user = _user
    global config
    config = _config or Config()
    # make sure all zones are up to date before the first test runs
    realm_meta_checkpoint(realm)
# module-level logger shared by all checkpoint/test helpers below
log = logging.getLogger(__name__)

# random lowercase suffix so bucket names from concurrent runs never collide
run_prefix = ''.join(random.choice(string.ascii_lowercase) for _ in range(6))
def get_gateway_connection(gateway, credentials):
    """ connect to the given gateway, caching the connection on it """
    if gateway.connection is None:
        gateway.connection = boto.connect_s3(
            aws_access_key_id=credentials.access_key,
            aws_secret_access_key=credentials.secret,
            # NOTE(review): the extraction lost the endpoint kwargs here;
            # reconstructed as host/port of this gateway — confirm upstream
            host=gateway.host,
            port=gateway.port,
            is_secure=False,
            calling_format=boto.s3.connection.OrdinaryCallingFormat())
    return gateway.connection
def get_zone_connection(zone, credentials):
    """ connect to the zone's first gateway """
    # a list of credentials means "use the first one"
    creds = credentials[0] if isinstance(credentials, list) else credentials
    return get_gateway_connection(zone.gateways[0], creds)
def mdlog_list(zone, period=None):
    """Return the parsed JSON output of 'mdlog list' on the zone's cluster.

    If a period id is given, list only that period's metadata log.
    """
    cmd = ['mdlog', 'list']
    if period:
        cmd += ['--period', period]
    (mdlog_json, _) = zone.cluster.admin(cmd, read_only=True)
    mdlog_json = mdlog_json.decode('utf-8')
    return json.loads(mdlog_json)
def meta_sync_status(zone):
    # NOTE(review): this definition is shadowed by the later
    # meta_sync_status() in this file and is therefore dead code; it is
    # kept (reconstructed) only to preserve the file's structure.
    while True:
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break
        assert(retcode == 2) # ENOENT
        time.sleep(5)
def mdlog_autotrim(zone):
    """Trigger metadata-log autotrim on the zone's cluster."""
    zone.cluster.admin(['mdlog', 'autotrim'])
def parse_meta_sync_status(meta_sync_status_json):
    """Parse 'metadata sync status' JSON.

    Returns (period, realm_epoch, num_shards, markers) where markers maps
    shard id -> sync marker ('' for shards not yet in incremental sync
    for the current realm epoch).
    """
    meta_sync_status_json = meta_sync_status_json.decode('utf-8')
    log.debug('current meta sync status=%s', meta_sync_status_json)
    sync_status = json.loads(meta_sync_status_json)

    sync_info = sync_status['sync_status']['info']
    global_sync_status = sync_info['status']
    num_shards = sync_info['num_shards']
    period = sync_info['period']
    realm_epoch = sync_info['realm_epoch']

    sync_markers = sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers = {}
    for i in range(num_shards):
        # get marker, only if it's an incremental marker for the same realm epoch
        if realm_epoch > sync_markers[i]['val']['realm_epoch'] or sync_markers[i]['val']['state'] == 0:
            markers[i] = ''
        else:
            markers[i] = sync_markers[i]['val']['marker']

    return period, realm_epoch, num_shards, markers
def meta_sync_status(zone):
    """Poll 'metadata sync status' on the zone until it is available.

    Retries up to config.checkpoint_retries times (ENOENT means sync has
    not been initialized yet); asserts on timeout.
    """
    for _ in range(config.checkpoint_retries):
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_meta_sync_status(meta_sync_status_json)
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read metadata sync status for zone=%s' % zone.name
def meta_master_log_status(master_zone):
    """Return {shard id: marker} for the master zone's metadata log."""
    cmd = ['mdlog', 'status'] + master_zone.zone_args()
    mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True)
    mdlog_status = json.loads(mdlog_status_json.decode('utf-8'))

    markers = {i: s['marker'] for i, s in enumerate(mdlog_status)}
    log.debug('master meta markers=%s', markers)
    return markers
def compare_meta_status(zone, log_status, sync_status):
    """Return True if the zone's sync markers have caught up to the master
    metadata log, False (with a warning listing the lagging shards) otherwise.
    """
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d, len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        # lexical comparison: the sync marker must have reached the log marker
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg):
        log.warning('zone %s behind master: %s', zone.name, msg)
        return False

    return True
def zone_meta_checkpoint(zone, meta_master_zone=None, master_status=None):
    """Wait until the zone's metadata sync catches up to the master zone.

    Asserts on timeout (config.checkpoint_retries * config.checkpoint_delay).
    """
    if not meta_master_zone:
        meta_master_zone = zone.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    current_realm_epoch = realm.current_period.data['realm_epoch']

    log.info('starting meta checkpoint for zone=%s', zone.name)

    for _ in range(config.checkpoint_retries):
        period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
        if realm_epoch < current_realm_epoch:
            # markers from an older realm epoch are not comparable; keep waiting
            log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
                        zone.name, realm_epoch, current_realm_epoch)
        else:
            log.debug('log_status=%s', master_status)
            log.debug('sync_status=%s', sync_status)
            if compare_meta_status(zone, master_status, sync_status):
                log.info('finish meta checkpoint for zone=%s', zone.name)
                return

        time.sleep(config.checkpoint_delay)

    assert False, 'failed meta checkpoint for zone=%s' % zone.name
def zonegroup_meta_checkpoint(zonegroup, meta_master_zone=None, master_status=None):
    """Run a metadata checkpoint against every non-master zone in the zonegroup."""
    if not meta_master_zone:
        meta_master_zone = zonegroup.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    for zone in zonegroup.zones:
        if zone == meta_master_zone:
            continue  # the master has nothing to sync from itself
        zone_meta_checkpoint(zone, meta_master_zone, master_status)
def realm_meta_checkpoint(realm):
    """Wait for metadata sync to complete in every zonegroup of the realm."""
    log.info('meta checkpoint')

    meta_master_zone = realm.meta_master_zone()
    master_status = meta_master_log_status(meta_master_zone)

    for zonegroup in realm.current_period.zonegroups:
        zonegroup_meta_checkpoint(zonegroup, meta_master_zone, master_status)
def parse_data_sync_status(data_sync_status_json):
    """Parse 'data sync status' JSON into (num_shards, {shard id: marker})."""
    data_sync_status_json = data_sync_status_json.decode('utf-8')
    log.debug('current data sync status=%s', data_sync_status_json)
    sync_status = json.loads(data_sync_status_json)

    global_sync_status = sync_status['sync_status']['info']['status']
    num_shards = sync_status['sync_status']['info']['num_shards']

    sync_markers = sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers = {}
    for i in range(num_shards):
        markers[i] = sync_markers[i]['val']['marker']

    return (num_shards, markers)
def data_sync_status(target_zone, source_zone):
    """Poll 'data sync status' from source_zone's perspective on target_zone.

    Returns None when target and source are the same zone; asserts on timeout.
    """
    if target_zone == source_zone:
        return None

    for _ in range(config.checkpoint_retries):
        cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
        cmd += ['--source-zone', source_zone.name]
        data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_data_sync_status(data_sync_status_json)
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)
def bucket_sync_status(target_zone, source_zone, bucket_name):
    """Return {shard key: incremental position} for a bucket's sync status.

    Returns None when target and source are the same zone.
    NOTE(review): the extraction lost the retry scaffolding and the marker
    loop internals; reconstructed from the visible statements.
    """
    if target_zone == source_zone:
        return None

    cmd = ['bucket', 'sync', 'status'] + target_zone.zone_args()
    cmd += ['--source-zone', source_zone.name]
    cmd += ['--bucket', bucket_name]
    while True:
        bucket_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break
        assert(retcode == 2) # ENOENT

    bucket_sync_status_json = bucket_sync_status_json.decode('utf-8')
    log.debug('current bucket sync status=%s', bucket_sync_status_json)
    sync_status = json.loads(bucket_sync_status_json)

    markers = {}
    for entry in sync_status:
        val = entry['val']
        if val['status'] == 'incremental-sync':
            pos = val['inc_marker']['position'].split('#')[-1] # get rid of shard id; e.g., 6#00000000002.132.3 -> 00000000002.132.3
        else:
            pos = ''
        markers[entry['key']] = pos

    return markers
def data_source_log_status(source_zone):
    """Return {shard id: marker} for the source zone's data log."""
    source_cluster = source_zone.cluster
    cmd = ['datalog', 'status'] + source_zone.zone_args()
    datalog_status_json, retcode = source_cluster.rgw_admin(cmd, read_only=True)
    datalog_status = json.loads(datalog_status_json.decode('utf-8'))

    markers = {i: s['marker'] for i, s in enumerate(datalog_status)}
    log.debug('data markers for zone=%s markers=%s', source_zone.name, markers)
    return markers
def bucket_source_log_status(source_zone, bucket_name):
    """Return {shard key: marker} for a bucket's index log on the source zone."""
    cmd = ['bilog', 'status'] + source_zone.zone_args()
    cmd += ['--bucket', bucket_name]
    source_cluster = source_zone.cluster
    bilog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    bilog_status = json.loads(bilog_status_json.decode('utf-8'))

    m = bilog_status['markers']
    # NOTE(review): the extraction lost the marker-building lines; this
    # assumes one {key, val} marker per entry — confirm against upstream
    markers = {entry['key']: entry['val'] for entry in m}

    log.debug('bilog markers for zone=%s bucket=%s markers=%s', source_zone.name, bucket_name, markers)
    return markers
def compare_data_status(target_zone, source_zone, log_status, sync_status):
    """Return True if target_zone's data sync has caught up to source_zone's
    data log, False (with a warning listing lagging shards) otherwise.
    """
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg):
        log.warning('data of zone %s behind zone %s: %s', target_zone.name, source_zone.name, msg)
        return False

    return True
def compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
    """Return True if target_zone's bucket sync has caught up to source_zone's
    bucket index log, False (with a warning listing lagging shards) otherwise.
    """
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg):
        log.warning('bucket %s zone %s behind zone %s: %s', bucket_name, target_zone.name, source_zone.name, msg)
        return False

    return True
def zone_data_checkpoint(target_zone, source_zone):
    """Wait until target_zone's data sync from source_zone catches up.

    BUG FIX: the parameter was declared 'source_zone_conn' while the body
    referred to 'source_zone' throughout, which would raise NameError on
    first use; the parameter is renamed to match the body.
    """
    if target_zone == source_zone:
        return

    log_status = data_source_log_status(source_zone)
    log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)

    for _ in range(config.checkpoint_retries):
        num_shards, sync_status = data_sync_status(target_zone, source_zone)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_data_status(target_zone, source_zone, log_status, sync_status):
            log.info('finished data checkpoint for target_zone=%s source_zone=%s',
                     target_zone.name, source_zone.name)
            return

        time.sleep(config.checkpoint_delay)

    assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)
def zone_bucket_checkpoint(target_zone, source_zone, bucket_name):
    """Wait until target_zone's sync of the bucket from source_zone catches up.

    Asserts on timeout.
    """
    if target_zone == source_zone:
        return

    log_status = bucket_source_log_status(source_zone, bucket_name)
    log.info('starting bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)

    for _ in range(config.checkpoint_retries):
        sync_status = bucket_sync_status(target_zone, source_zone, bucket_name)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
            log.info('finished bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)
            return

        time.sleep(config.checkpoint_delay)

    # BUG FIX: the failure message previously said 'finished', which made
    # a timed-out checkpoint read like a success in the test output
    assert False, 'failed bucket checkpoint for target_zone=%s source_zone=%s bucket=%s' % \
                  (target_zone.name, source_zone.name, bucket_name)
def set_master_zone(zone):
    """Promote the given zone to master and commit a new period.

    Sleeps config.reconfigure_delay seconds afterwards so gateways can
    pick up the new period.
    """
    zone.modify(zone.cluster, ['--master'])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    zonegroup.master_zone = zone
    log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay)
    time.sleep(config.reconfigure_delay)
def gen_bucket_name():
    """Return a bucket name unique within this test run.

    NOTE(review): the extraction lost the counter lines; reconstructed
    assuming a module-level `num_buckets` counter — confirm it is
    initialized to 0 near `run_prefix`.
    """
    global num_buckets
    num_buckets += 1
    return run_prefix + '-' + str(num_buckets)
class ZonegroupConns:
    """Connections to every zone in a zonegroup, split by capability.

    Attributes: zones (all connections), ro_zones / rw_zones (read-only vs
    writable zones), master_zone (connection to the zonegroup master, or None).
    """
    def __init__(self, zonegroup):
        self.zonegroup = zonegroup
        # NOTE(review): the extraction lost the list initializers and the
        # read-only test; reconstructed from the visible appends — confirm
        # the ro/rw split condition against upstream
        self.zones = []
        self.ro_zones = []
        self.rw_zones = []
        self.master_zone = None
        for z in zonegroup.zones:
            zone_conn = z.get_conn(user.credentials)
            self.zones.append(zone_conn)
            if z.is_read_only():
                self.ro_zones.append(zone_conn)
            else:
                self.rw_zones.append(zone_conn)

            if z == zonegroup.master_zone:
                self.master_zone = zone_conn
def check_all_buckets_exist(zone_conn, buckets):
    """Return True if every named bucket is visible on zone_conn.

    Zones that don't sync bucket metadata (has_buckets() False) trivially pass.
    BUG FIX: the failure log referenced an undefined name `zone`; it now
    uses the `zone_conn` parameter.
    """
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            log.critical('zone %s does not contain bucket %s', zone_conn.name, b)
            return False

    return True
def check_all_buckets_dont_exist(zone_conn, buckets):
    """Return True if none of the named buckets is visible on zone_conn.

    Zones that don't sync bucket metadata (has_buckets() False) trivially pass.
    BUG FIX: the failure log referenced an undefined name `zone`; it now
    uses the `zone_conn` parameter.
    """
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            continue
        log.critical('zone %s contains bucket %s', zone_conn.name, b)
        return False

    return True
def create_bucket_per_zone(zonegroup_conns, buckets_per_zone=1):
    """Create buckets_per_zone buckets on each writable zone.

    Returns (bucket names, [(zone_conn, bucket), ...]).
    BUG FIX: used py2-only xrange(); replaced with range() to match the
    file's py2/3-compatible import handling.
    """
    buckets = []
    zone_bucket = []
    for zone in zonegroup_conns.rw_zones:
        for i in range(buckets_per_zone):
            bucket_name = gen_bucket_name()
            log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
            bucket = zone.create_bucket(bucket_name)
            buckets.append(bucket_name)
            zone_bucket.append((zone, bucket))

    return buckets, zone_bucket
def create_bucket_per_zone_in_realm():
    """Create a bucket on every writable zone of every zonegroup in the realm.

    Returns (bucket names, [(zone_conn, bucket), ...]) aggregated across
    all zonegroups.
    """
    buckets = []
    zone_bucket = []
    for zonegroup in realm.current_period.zonegroups:
        zg_conn = ZonegroupConns(zonegroup)
        b, z = create_bucket_per_zone(zg_conn)
        buckets.extend(b)
        zone_bucket.extend(z)
    return buckets, zone_bucket
def test_bucket_create():
    """Buckets created on each zone appear on all zones after meta sync."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)
def test_bucket_recreate():
    """Re-creating existing buckets must not remove them on any zone."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    # recreate buckets on all zones, make sure they weren't removed
    for zone in zonegroup_conns.rw_zones:
        for bucket_name in buckets:
            bucket = zone.create_bucket(bucket_name)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)
def test_bucket_remove():
    """Deleted buckets disappear from every zone after meta sync."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    # delete each bucket on the zone that created it
    for zone, bucket_name in zone_bucket:
        zone.conn.delete_bucket(bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_dont_exist(zone, buckets)
def get_bucket(zone, bucket_name):
    """Look up a bucket by name through the zone's S3 connection."""
    return zone.conn.get_bucket(bucket_name)
def get_key(zone, bucket_name, obj_name):
    """Fetch an existing key (object) from the named bucket on the zone."""
    return get_bucket(zone, bucket_name).get_key(obj_name)
def new_key(zone, bucket_name, obj_name):
    """Create a fresh key (object handle) in the named bucket on the zone."""
    return get_bucket(zone, bucket_name).new_key(obj_name)
def check_bucket_eq(zone_conn1, zone_conn2, bucket):
    """Compare the bucket's contents between the two zone connections."""
    return zone_conn2.check_bucket_eq(zone_conn1, bucket.name)
def test_object_sync():
    """Objects written on each zone replicate to every other zone."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'myobj', '_myobj', ':', '&' ]
    # NOTE(review): the extraction lost the content definition; any
    # non-empty payload works for the equality check
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket_name in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket_name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)
def test_object_delete():
    """Object deletions replicate to every other zone."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # NOTE(review): the extraction lost the objname/content definitions;
    # reconstructed with representative values
    objname = 'myobj'
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket, objname)
        k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    # check object exists
    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

    # check object removal
    for source_conn, bucket in zone_bucket:
        k = get_key(source_conn, bucket, objname)
        k.delete()
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)
def get_latest_object_version(key):
    """Return the listed version of key flagged is_latest, or None."""
    for k in key.bucket.list_versions(key.name):
        if k.is_latest:
            return k
    return None
def test_versioned_object_incremental_sync():
    """Interleaved version creates/deletes sync consistently across zones."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)

    zonegroup_meta_checkpoint(zonegroup)

    # upload a dummy object to each bucket and wait for sync. this forces each
    # bucket to finish a full sync and switch to incremental
    for source_conn, bucket in zone_bucket:
        new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)

    for _, bucket in zone_bucket:
        # create and delete multiple versions of an object from each zone
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket, obj)

            k.set_contents_from_string('version1')
            v = get_latest_object_version(k)
            log.debug('version1 id=%s', v.version_id)
            # don't delete version1 - this tests that the initial version
            # doesn't get squashed into later versions

            # create and delete the following object versions to test that
            # the operations don't race with each other during sync
            k.set_contents_from_string('version2')
            v = get_latest_object_version(k)
            log.debug('version2 id=%s', v.version_id)
            k.bucket.delete_key(obj, version_id=v.version_id)

            k.set_contents_from_string('version3')
            v = get_latest_object_version(k)
            log.debug('version3 id=%s', v.version_id)
            k.bucket.delete_key(obj, version_id=v.version_id)

    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)
def test_bucket_versioning():
    """Enabling versioning is reflected in the bucket's versioning status."""
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        res = bucket.get_versioning_status()
        # NOTE(review): the key name was lost in extraction; 'Versioning'
        # is the boto v2 status key — confirm upstream
        key = 'Versioning'
        assert(key in res and res[key] == 'Enabled')
def test_bucket_acl():
    """Setting a canned ACL adds the expected grant."""
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        assert(len(bucket.get_acl().acl.grants) == 1) # single grant on owner
        bucket.set_acl('public-read')
        assert(len(bucket.get_acl().acl.grants) == 2) # new grant on AllUsers
def test_bucket_cors():
    """A stored CORS configuration round-trips unchanged."""
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        cors_cfg = CORSConfiguration()
        cors_cfg.add_rule(['DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000)
        bucket.set_cors(cors_cfg)
        assert(bucket.get_cors().to_xml() == cors_cfg.to_xml())
def test_bucket_delete_notempty():
    """Deleting a non-empty bucket must fail with 409 BucketNotEmpty."""
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn, bucket_name in zone_bucket:
        # upload an object to each bucket on its own zone
        conn = zone_conn.get_connection()
        bucket = conn.get_bucket(bucket_name)
        k = bucket.new_key('foo')
        k.set_contents_from_string('bar')
        # attempt to delete the bucket before this object can sync
        try:
            conn.delete_bucket(bucket_name)
        except boto.exception.S3ResponseError as e:
            assert(e.error_code == 'BucketNotEmpty')
            continue
        assert False # expected 409 BucketNotEmpty

    # assert that each bucket still exists on the master
    c1 = zonegroup_conns.master_zone.conn
    for _, bucket_name in zone_bucket:
        assert c1.get_bucket(bucket_name)
def test_multi_period_incremental_sync():
    """A zone that misses period changes catches up over multiple periods.

    NOTE(review): the extraction lost several statements (zone 3 stop/start,
    the set_master_zone calls, autotrim calls and loop `continue`s); they are
    reconstructed from the surviving comments — confirm against upstream.
    """
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.zones) < 3:
        raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.")

    # periods to include in mdlog comparison
    mdlog_periods = [realm.current_period.id]

    # create a bucket in each zone
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    zonegroup_meta_checkpoint(zonegroup)

    z1, z2, z3 = zonegroup.zones[0:3]
    assert(z1 == zonegroup.master_zone)

    # kill zone 3 gateways to freeze sync status to incremental in first period
    z3.stop()

    # change master to zone 2 -> period 2
    set_master_zone(z2)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, _ in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        bucket = zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # wait for zone 1 to sync
    zone_meta_checkpoint(z1)

    # change master back to zone 1 -> period 3
    set_master_zone(z1)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, bucket_name in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        bucket = zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # restart zone 3 gateway and wait for sync
    z3.start()
    zonegroup_meta_checkpoint(zonegroup)

    # verify that we end up with the same objects
    for bucket_name in buckets:
        for source_conn, _ in zone_bucket:
            for target_conn in zonegroup_conns.zones:
                if source_conn.zone == target_conn.zone:
                    continue
                target_conn.check_bucket_eq(source_conn, bucket_name)

    # verify that mdlogs are not empty and match for each period
    for period in mdlog_periods:
        master_mdlog = mdlog_list(z1, period)
        assert len(master_mdlog) > 0
        for zone in zonegroup.zones:
            if zone == z1:
                continue
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == len(master_mdlog)

    # autotrim mdlogs for master zone
    mdlog_autotrim(z1)

    # autotrim mdlogs for peers
    for zone in zonegroup.zones:
        if zone == z1:
            continue
        mdlog_autotrim(zone)

    # verify that mdlogs are empty for each period
    for period in mdlog_periods:
        for zone in zonegroup.zones:
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == 0
def test_zonegroup_remove():
    """'zonegroup remove' deletes a zone from the group; repeating it fails.

    NOTE(review): the extraction lost the zone create/delete calls; they are
    reconstructed from the surviving comments — confirm against upstream.
    """
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.zones) < 2:
        raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")

    zonegroup_meta_checkpoint(zonegroup)
    z1, z2 = zonegroup.zones[0:2]
    c1, c2 = (z1.cluster, z2.cluster)

    # create a new zone in zonegroup on c2 and commit
    zone = Zone('remove', zonegroup, c2)
    zone.create(c2)
    zonegroup.zones.append(zone)
    zonegroup.period.update(zone, commit=True)

    zonegroup.remove(c1, zone)

    # another 'zonegroup remove' should fail with ENOENT
    _, retcode = zonegroup.remove(c1, zone, check_retcode=False)
    assert(retcode == 2) # ENOENT

    # delete the new zone
    zone.delete(c2)

    # validate the resulting period
    zonegroup.period.update(z1, commit=True)
def test_set_bucket_website():
    """A stored website configuration round-trips unchanged.

    Skips when the gateway has static websites disabled.
    """
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        website_cfg = WebsiteConfiguration(suffix='index.html', error_key='error.html')
        try:
            bucket.set_website_configuration(website_cfg)
        except boto.exception.S3ResponseError as e:
            if e.error_code == 'MethodNotAllowed':
                raise SkipTest("test_set_bucket_website skipped. Requires rgw_enable_static_website = 1.")
        assert(bucket.get_website_configuration_with_xml()[1] == website_cfg.to_xml())
def test_set_bucket_policy():
    """A stored bucket policy round-trips unchanged.

    NOTE(review): most of the policy JSON literal was lost in extraction;
    reconstructed as a minimal valid allow-all policy — confirm upstream.
    """
    policy = '''{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": "*"
  }]
}'''
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.set_policy(policy)
        assert(bucket.get_policy() == policy)