import sys
import time
import logging
+
try:
from itertools import izip_longest as zip_longest
except ImportError:
import boto
import boto.s3.connection
+from boto.s3.website import WebsiteConfiguration
+from boto.s3.cors import CORSConfiguration
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from .multisite import Zone
+from .conn import get_gateway_connection
+
class Config:
    """Tunable test configuration, overridable via keyword arguments."""

    def __init__(self, **kwargs):
        def opt(name, default):
            # fall back to the built-in default when not overridden
            return kwargs.get(name, default)

        # by default, wait up to 5 minutes (60 tries, 5s apart) before
        # giving up on a sync checkpoint
        self.checkpoint_retries = opt('checkpoint_retries', 60)
        self.checkpoint_delay = opt('checkpoint_delay', 5)
        # allow some time for realm reconfiguration after changing master zone
        self.reconfigure_delay = opt('reconfigure_delay', 5)
+
# rgw multisite tests, written against the interfaces provided in rgw_multi.
# these tests must be initialized and run by another module that provides
# implementations of these interfaces by calling init_multi()
# rgw multisite tests, written against the interfaces provided in rgw_multi.
# these tests must be initialized and run by another module that provides
# implementations of these interfaces by calling init_multi()
realm = None
user = None
config = None

def init_multi(_realm, _user, _config=None):
    """Install the realm/user/config globals, then wait for metadata sync."""
    global realm, user, config
    realm = _realm
    user = _user
    # fall back to default tunables when the caller passes no config
    config = _config or Config()
    realm_meta_checkpoint(realm)

def get_realm():
    """Return the realm registered by init_multi() (None before init)."""
    return realm
log = logging.getLogger(__name__)
mdlog_json = mdlog_json.decode('utf-8')
return json.loads(mdlog_json)
-def mdlog_autotrim(zone):
- zone.cluster.admin(['mdlog', 'autotrim'])
-
def meta_sync_status(zone):
while True:
cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
assert(retcode == 2) # ENOENT
time.sleep(5)
def mdlog_autotrim(zone):
    """Trigger mdlog autotrim via radosgw-admin on the zone's cluster."""
    cmd = ['mdlog', 'autotrim']
    zone.cluster.admin(cmd)
+
+def parse_meta_sync_status(meta_sync_status_json):
meta_sync_status_json = meta_sync_status_json.decode('utf-8')
log.debug('current meta sync status=%s', meta_sync_status_json)
sync_status = json.loads(meta_sync_status_json)
return period, realm_epoch, num_shards, markers
def meta_sync_status(zone):
    """Poll 'metadata sync status' on `zone` until readable, then parse it.

    Retries up to config.checkpoint_retries times with config.checkpoint_delay
    between attempts; fails the test if the status never becomes readable.
    """
    cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
    attempts = config.checkpoint_retries
    while attempts > 0:
        attempts -= 1
        status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_meta_sync_status(status_json)
        # the sync status objects may not exist yet; any other failure is fatal
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read metadata sync status for zone=%s' % zone.name
+
def meta_master_log_status(master_zone):
cmd = ['mdlog', 'status'] + master_zone.zone_args()
mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True)
log.info('starting meta checkpoint for zone=%s', zone.name)
- while True:
+ for _ in range(config.checkpoint_retries):
period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
if realm_epoch < current_realm_epoch:
log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
log.debug('log_status=%s', master_status)
log.debug('sync_status=%s', sync_status)
if compare_meta_status(zone, master_status, sync_status):
- break
+ log.info('finish meta checkpoint for zone=%s', zone.name)
+ return
- time.sleep(5)
-
- log.info('finish meta checkpoint for zone=%s', zone.name)
+ time.sleep(config.checkpoint_delay)
+ assert False, 'failed meta checkpoint for zone=%s' % zone.name
def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None):
if not meta_master_zone:
for zonegroup in realm.current_period.zonegroups:
zonegroup_meta_checkpoint(zonegroup, meta_master_zone, master_status)
-def data_sync_status(target_zone, source_zone):
- if target_zone == source_zone:
- return None
-
- while True:
- cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
- cmd += ['--source-zone', source_zone.name]
- data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
- if retcode == 0:
- break
-
- assert(retcode == 2) # ENOENT
-
+def parse_data_sync_status(data_sync_status_json):
data_sync_status_json = data_sync_status_json.decode('utf-8')
log.debug('current data sync status=%s', data_sync_status_json)
sync_status = json.loads(data_sync_status_json)
return (num_shards, markers)
def data_sync_status(target_zone, source_zone):
    """Poll 'data sync status' for target_zone syncing from source_zone.

    Returns None when the zones are the same; otherwise parses and returns
    the sync status once readable. Fails the test after
    config.checkpoint_retries unsuccessful attempts.
    """
    if target_zone == source_zone:
        return None

    cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
    cmd += ['--source-zone', source_zone.name]
    attempts = config.checkpoint_retries
    while attempts > 0:
        attempts -= 1
        status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_data_sync_status(status_json)

        # the sync status objects may not exist yet; any other failure is fatal
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
            (target_zone.name, source_zone.name)
+
def bucket_sync_status(target_zone, source_zone, bucket_name):
if target_zone == source_zone:
return None
return True
def zone_data_checkpoint(target_zone, source_zone_conn):
    """Wait until target_zone's data sync from the source zone catches up
    with the source's current data log position.

    Fails the test if the checkpoint is not reached within
    config.checkpoint_retries attempts.
    """
    # BUG FIX: the parameter was renamed to source_zone_conn but the body
    # still referenced the old name `source_zone` (NameError). Alias it here;
    # NOTE(review): the body treats the argument as a zone, not a connection
    # -- confirm callers pass a zone.
    source_zone = source_zone_conn
    if target_zone == source_zone:
        return

    # capture the source log position once, up front; we wait for the target
    # to reach at least this point
    log_status = data_source_log_status(source_zone)
    log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)
    for _ in range(config.checkpoint_retries):
        num_shards, sync_status = data_sync_status(target_zone, source_zone)
        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)
        if compare_data_status(target_zone, source_zone, log_status, sync_status):
            log.info('finished data checkpoint for target_zone=%s source_zone=%s',
                     target_zone.name, source_zone.name)
            return
        time.sleep(config.checkpoint_delay)
    assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
            (target_zone.name, source_zone.name)
def zone_bucket_checkpoint(target_zone, source_zone, bucket_name):
    """Wait until target_zone's copy of bucket_name catches up with
    source_zone's bucket index log position.

    Fails the test if the checkpoint is not reached within
    config.checkpoint_retries attempts.
    """
    if target_zone == source_zone:
        return

    # capture the source log position once, up front; we wait for the target
    # to reach at least this point
    log_status = bucket_source_log_status(source_zone, bucket_name)
    log.info('starting bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)
    for _ in range(config.checkpoint_retries):
        sync_status = bucket_sync_status(target_zone, source_zone, bucket_name)
        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)
        if compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
            log.info('finished bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)
            return
        time.sleep(config.checkpoint_delay)
    # BUG FIX: this is the timeout path -- the message previously claimed the
    # checkpoint had "finished"
    assert False, 'failed bucket checkpoint for target_zone=%s source_zone=%s bucket=%s' % \
            (target_zone.name, source_zone.name, bucket_name)
def set_master_zone(zone):
    """Promote `zone` to metadata master and commit the new period."""
    zone.modify(zone.cluster, ['--master'])
    zg = zone.zonegroup
    zg.period.update(zone, commit=True)
    zg.master_zone = zone
    # give the gateways time to notice the new period so that later metadata
    # requests are routed to the new master
    log.info('Set master zone=%s, waiting %ds for reconfiguration..',
             zone.name, config.reconfigure_delay)
    time.sleep(config.reconfigure_delay)
def gen_bucket_name():
    """Return a unique bucket name of the form '<run_prefix>-<counter>'."""
    global num_buckets
    num_buckets += 1
    return '%s-%d' % (run_prefix, num_buckets)
-def check_all_buckets_exist(zone, buckets):
- conn = get_zone_connection(zone, user.credentials)
class ZonegroupConns:
    """Per-zonegroup connection cache, partitioned by zone capability."""
    def __init__(self, zonegroup):
        self.zonegroup = zonegroup
        self.zones = []          # connections to every zone
        self.ro_zones = []       # connections to read-only zones
        self.rw_zones = []       # connections to writable zones
        self.master_zone = None  # connection to the metadata master zone
        for z in zonegroup.zones:
            conn = z.get_conn(user.credentials)
            self.zones.append(conn)
            # route the connection into the read-only or writable bucket list
            (self.ro_zones if z.is_read_only() else self.rw_zones).append(conn)
            if z == zonegroup.master_zone:
                self.master_zone = conn
+
def check_all_buckets_exist(zone_conn, buckets):
    """Return True if every name in `buckets` is visible on zone_conn.

    Zones that do not store bucket metadata trivially pass the check.
    """
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            # BUG FIX: this branch referenced the undefined name `zone`
            # (the parameter is zone_conn), raising NameError on failure
            log.critical('zone %s does not contain bucket %s', zone_conn.name, b)
            return False
    return True
def check_all_buckets_dont_exist(zone_conn, buckets):
    """Return True if none of the names in `buckets` are visible on zone_conn.

    Zones that do not store bucket metadata trivially pass the check.
    """
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            # lookup failed, so this bucket is gone -- keep checking
            continue
        # BUG FIX: a successful lookup previously fell through to the final
        # `return True`, so lingering buckets were never reported
        log.critical('zone %s contains bucket %s', zone_conn.name, b)
        return False
    return True
def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1):
    """Create buckets_per_zone buckets on each writable zone connection.

    Returns (bucket_names, zone_bucket) where zone_bucket is a list of
    (zone_conn, bucket) pairs, one entry per bucket created.
    """
    buckets = []
    zone_bucket = []
    for zone in zonegroup_conns.rw_zones:
        # BUG FIX: use range() instead of the Python-2-only xrange(); the
        # module already carries a py2/py3 import fallback at the top
        for i in range(buckets_per_zone):
            bucket_name = gen_bucket_name()
            log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
            bucket = zone.create_bucket(bucket_name)
            buckets.append(bucket_name)
            zone_bucket.append((zone, bucket))
    return buckets, zone_bucket
def create_bucket_per_zone_in_realm():
    """Create one bucket per writable zone in every zonegroup of the realm.

    Returns the combined (bucket_names, zone_bucket) across all zonegroups.
    """
    buckets = []
    zone_bucket = []
    for zonegroup in realm.current_period.zonegroups:
        names, pairs = create_bucket_per_zone(ZonegroupConns(zonegroup))
        buckets += names
        zone_bucket += pairs
    return buckets, zone_bucket
def test_bucket_create():
    """Buckets created on any zone appear on every zone after metadata sync."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn in conns.zones:
        assert check_all_buckets_exist(zone_conn, buckets)
def test_bucket_recreate():
    """Re-creating an existing bucket must not remove it from any zone."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn in conns.zones:
        assert check_all_buckets_exist(zone_conn, buckets)

    # recreate buckets on all writable zones, make sure they weren't removed
    for zone_conn in conns.rw_zones:
        for name in buckets:
            zone_conn.create_bucket(name)

    for zone_conn in conns.zones:
        assert check_all_buckets_exist(zone_conn, buckets)

    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn in conns.zones:
        assert check_all_buckets_exist(zone_conn, buckets)
def test_bucket_remove():
    """Bucket deletion on the owning zone propagates to every zone."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn in conns.zones:
        assert check_all_buckets_exist(zone_conn, buckets)

    for zone_conn, bucket_name in zone_bucket:
        zone_conn.conn.delete_bucket(bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn in conns.zones:
        assert check_all_buckets_dont_exist(zone_conn, buckets)
def get_bucket(zone, bucket_name):
    """Look up bucket_name through the zone's S3 connection."""
    conn = zone.conn
    return conn.get_bucket(bucket_name)
def get_key(zone, bucket_name, obj_name):
b = get_bucket(zone, bucket_name)
b = get_bucket(zone, bucket_name)
return b.new_key(obj_name)
-def check_object_eq(k1, k2, check_extra = True):
- assert k1
- assert k2
- log.debug('comparing key name=%s', k1.name)
- eq(k1.name, k2.name)
- eq(k1.get_contents_as_string(), k2.get_contents_as_string())
- eq(k1.metadata, k2.metadata)
- eq(k1.cache_control, k2.cache_control)
- eq(k1.content_type, k2.content_type)
- eq(k1.content_encoding, k2.content_encoding)
- eq(k1.content_disposition, k2.content_disposition)
- eq(k1.content_language, k2.content_language)
- eq(k1.etag, k2.etag)
- eq(k1.last_modified, k2.last_modified)
- if check_extra:
- eq(k1.owner.id, k2.owner.id)
- eq(k1.owner.display_name, k2.owner.display_name)
- eq(k1.storage_class, k2.storage_class)
- eq(k1.size, k2.size)
- eq(k1.version_id, k2.version_id)
- eq(k1.encrypted, k2.encrypted)
-
-def check_bucket_eq(zone1, zone2, bucket_name):
- log.info('comparing bucket=%s zones={%s, %s}', bucket_name, zone1.name, zone2.name)
- b1 = get_bucket(zone1, bucket_name)
- b2 = get_bucket(zone2, bucket_name)
-
- log.debug('bucket1 objects:')
- for o in b1.get_all_versions():
- log.debug('o=%s', o.name)
- log.debug('bucket2 objects:')
- for o in b2.get_all_versions():
- log.debug('o=%s', o.name)
-
- for k1, k2 in zip_longest(b1.get_all_versions(), b2.get_all_versions()):
- if k1 is None:
- log.critical('key=%s is missing from zone=%s', k2.name, zone1.name)
- assert False
- if k2 is None:
- log.critical('key=%s is missing from zone=%s', k1.name, zone2.name)
- assert False
-
- check_object_eq(k1, k2)
-
- # now get the keys through a HEAD operation, verify that the available data is the same
- k1_head = b1.get_key(k1.name)
- k2_head = b2.get_key(k2.name)
-
- check_object_eq(k1_head, k2_head, False)
-
- log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, zone1.name, zone2.name)
-
def check_bucket_eq(zone_conn1, zone_conn2, bucket):
    """Compare the bucket's contents between two zone connections.

    Delegates to zone_conn2's comparison against zone_conn1.
    """
    name = bucket.name
    return zone_conn2.check_bucket_eq(zone_conn1, name)
def test_object_sync():
    """Objects written to one zone sync to every other zone in the group."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(conns)

    objnames = [ 'myobj', '_myobj', ':', '&' ]
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone_conn, bucket_name in zone_bucket:
        for objname in objnames:
            key = new_key(zone_conn, bucket_name, objname)
            key.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for source_conn, bucket in zone_bucket:
        for target_conn in conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)
def test_object_delete():
    """An object deleted on its source zone disappears from every peer."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(conns)
    objname = 'myobj'
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone_conn, bucket in zone_bucket:
        key = new_key(zone_conn, bucket, objname)
        key.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    # check object exists
    for source_conn, bucket in zone_bucket:
        for target_conn in conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

    # check object removal
    for source_conn, bucket in zone_bucket:
        key = get_key(source_conn, bucket, objname)
        key.delete()
        for target_conn in conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)
def get_latest_object_version(key):
for k in key.bucket.list_versions(key.name):
def test_versioned_object_incremental_sync():
zonegroup = realm.master_zonegroup()
- buckets, zone_bucket = create_bucket_per_zone(zonegroup)
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
# enable versioning
- for zone, bucket in zone_bucket.items():
+ for _, bucket in zone_bucket:
bucket.configure_versioning(True)
zonegroup_meta_checkpoint(zonegroup)
# upload a dummy object to each bucket and wait for sync. this forces each
# bucket to finish a full sync and switch to incremental
- for source_zone, bucket in zone_bucket.items():
- new_key(source_zone, bucket, 'dummy').set_contents_from_string('')
- for target_zone in zonegroup.zones:
- if source_zone == target_zone:
+ for source_conn, bucket in zone_bucket:
+ new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
continue
- zone_bucket_checkpoint(target_zone, source_zone, bucket.name)
+ zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
- for _, bucket in zone_bucket.items():
+ for _, bucket in zone_bucket:
# create and delete multiple versions of an object from each zone
- for zone in zonegroup.zones:
- obj = 'obj-' + zone.name
- k = new_key(zone, bucket, obj)
+ for zone_conn in zonegroup_conns.rw_zones:
+ obj = 'obj-' + zone_conn.name
+ k = new_key(zone_conn, bucket, obj)
k.set_contents_from_string('version1')
v = get_latest_object_version(k)
log.debug('version3 id=%s', v.version_id)
k.bucket.delete_key(obj, version_id=v.version_id)
- for source_zone, bucket in zone_bucket.items():
- for target_zone in zonegroup.zones:
- if source_zone == target_zone:
+ for source_conn, bucket in zone_bucket:
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
continue
- zone_bucket_checkpoint(target_zone, source_zone, bucket.name)
- check_bucket_eq(source_zone, target_zone, bucket)
+ zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
+ check_bucket_eq(source_conn, target_conn, bucket)
def test_bucket_versioning():
    """Enabling versioning on a bucket must read back as 'Enabled'."""
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        res = bucket.get_versioning_status()
        key = 'Versioning'
        # BUG FIX: `key` was computed but the status was never verified,
        # leaving the test body without an assertion
        assert(key in res and res[key] == 'Enabled')
def test_bucket_acl():
    """Applying the 'public-read' canned ACL adds a grant for AllUsers."""
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        grants = bucket.get_acl().acl.grants
        assert(len(grants) == 1) # single grant on owner
        bucket.set_acl('public-read')
        grants = bucket.get_acl().acl.grants
        assert(len(grants) == 2) # new grant on AllUsers
def test_bucket_cors():
    """A CORS configuration set on a bucket reads back identically."""
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        cfg = CORSConfiguration()
        cfg.add_rule(['DELETE'], 'https://www.example.com',
                     allowed_header='*', max_age_seconds=3000)
        bucket.set_cors(cfg)
        assert(bucket.get_cors().to_xml() == cfg.to_xml())
+
def test_bucket_delete_notempty():
zonegroup = realm.master_zonegroup()
- buckets, zone_bucket = create_bucket_per_zone(zonegroup)
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
zonegroup_meta_checkpoint(zonegroup)
- for zone, bucket_name in zone_bucket.items():
+ for zone_conn, bucket_name in zone_bucket:
# upload an object to each bucket on its own zone
- conn = get_zone_connection(zone, user.credentials)
+ conn = zone_conn.get_connection()
bucket = conn.get_bucket(bucket_name)
k = bucket.new_key('foo')
k.set_contents_from_string('bar')
assert False # expected 409 BucketNotEmpty
# assert that each bucket still exists on the master
- c1 = get_zone_connection(zonegroup.master_zone, user.credentials)
- for _, bucket_name in zone_bucket.items():
+ c1 = zonegroup_conns.master_zone.conn
+ for _, bucket_name in zone_bucket:
assert c1.get_bucket(bucket_name)
def test_multi_period_incremental_sync():
mdlog_periods = [realm.current_period.id]
# create a bucket in each zone
- buckets = []
- for zone in zonegroup.zones:
- conn = get_zone_connection(zone, user.credentials)
- bucket_name = gen_bucket_name()
- log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
- bucket = conn.create_bucket(bucket_name)
- buckets.append(bucket_name)
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
zonegroup_meta_checkpoint(zonegroup)
set_master_zone(z2)
mdlog_periods += [realm.current_period.id]
- # create another bucket in each zone, except for z3
- for zone in zonegroup.zones:
- if zone == z3:
+ for zone_conn, _ in zone_bucket:
+ if zone_conn.zone == z3:
continue
- conn = get_zone_connection(zone, user.credentials)
bucket_name = gen_bucket_name()
- log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
- bucket = conn.create_bucket(bucket_name)
+ log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
+ bucket = zone_conn.conn.create_bucket(bucket_name)
buckets.append(bucket_name)
# wait for zone 1 to sync
set_master_zone(z1)
mdlog_periods += [realm.current_period.id]
- # create another bucket in each zone, except for z3
- for zone in zonegroup.zones:
- if zone == z3:
+ for zone_conn, bucket_name in zone_bucket:
+ if zone_conn.zone == z3:
continue
- conn = get_zone_connection(zone, user.credentials)
bucket_name = gen_bucket_name()
- log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
- bucket = conn.create_bucket(bucket_name)
+ log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
+ bucket = zone_conn.conn.create_bucket(bucket_name)
buckets.append(bucket_name)
# restart zone 3 gateway and wait for sync
z3.start()
zonegroup_meta_checkpoint(zonegroup)
- # verify that we end up with the same buckets
+ # verify that we end up with the same objects
for bucket_name in buckets:
- for source_zone, target_zone in combinations(zonegroup.zones, 2):
- check_bucket_eq(source_zone, target_zone, bucket_name)
+ for source_conn, _ in zone_bucket:
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
+ continue
+
+ target_conn.check_bucket_eq(source_conn, bucket_name)
# verify that mdlogs are not empty and match for each period
for period in mdlog_periods:
def test_zonegroup_remove():
zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
if len(zonegroup.zones) < 2:
raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")
zonegroup.zones.append(zone)
zonegroup.period.update(zone, commit=True)
- # try to 'zone delete' the new zone from cluster 1
- # must fail with ENOENT because the zone is local to cluster 2
- retcode = zone.delete(c1, check_retcode=False)
- assert(retcode == 2) # ENOENT
-
- # use 'zonegroup remove', expecting success
zonegroup.remove(c1, zone)
# another 'zonegroup remove' should fail with ENOENT
# validate the resulting period
zonegroup.period.update(z1, commit=True)
+
def test_set_bucket_website():
    """A website configuration set on a bucket reads back identically.

    Skipped when the gateway rejects website configuration requests.
    """
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        cfg = WebsiteConfiguration(suffix='index.html', error_key='error.html')
        try:
            bucket.set_website_configuration(cfg)
        except boto.exception.S3ResponseError as e:
            if e.error_code == 'MethodNotAllowed':
                raise SkipTest("test_set_bucket_website skipped. Requires rgw_enable_static_website = 1.")
        assert(bucket.get_website_configuration_with_xml()[1] == cfg.to_xml())
+
def test_set_bucket_policy():
    """A bucket policy set on a bucket reads back identically."""
    policy = '''{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": "*"
}]
}'''
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.set_policy(policy)
        assert(bucket.get_policy() == policy)