import json
import logging
import random
import string
import time
import errno

try:
    from itertools import izip_longest as zip_longest  # Python 2
except ImportError:
    from itertools import zip_longest  # Python 3
from itertools import combinations
from six import StringIO

import boto
import boto.s3.connection
from boto.s3.website import WebsiteConfiguration
from boto.s3.cors import CORSConfiguration

from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest

from .multisite import Zone, ZoneGroup, Credentials

from .conn import get_gateway_connection
from .tools import assert_raises

31 """ test configuration """
32 def __init__(self
, **kwargs
):
33 # by default, wait up to 5 minutes before giving up on a sync checkpoint
34 self
.checkpoint_retries
= kwargs
.get('checkpoint_retries', 60)
35 self
.checkpoint_delay
= kwargs
.get('checkpoint_delay', 5)
36 # allow some time for realm reconfiguration after changing master zone
37 self
.reconfigure_delay
= kwargs
.get('reconfigure_delay', 5)
38 self
.tenant
= kwargs
.get('tenant', '')
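# Illustrative only (not part of the original suite): a driver could loosen
# the sync timeouts for a slow cluster with e.g.
#
#   config = Config(checkpoint_retries=120, checkpoint_delay=10)
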
# rgw multisite tests, written against the interfaces provided in rgw_multi.
# these tests must be initialized and run by another module that provides
# implementations of these interfaces by calling init_multi()
realm = None
user = None
config = None

def init_multi(_realm, _user, _config=None):
    global realm
    realm = _realm
    global user
    user = _user
    global config
    config = _config or Config()
    realm_meta_checkpoint(realm)

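# Illustrative wiring, assuming a driver module that owns the realm/user
# objects from rgw_multi.multisite runs something like:
#
#   init_multi(realm, user, Config(checkpoint_retries=60, checkpoint_delay=5))
#
# before nose collects the test_* functions below.
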
def get_user():
    return user.id if user is not None else ''

def get_tenant():
    return config.tenant if config is not None and config.tenant is not None else ''

log = logging.getLogger('rgw_multi.tests')

num_buckets = 0
run_prefix = ''.join(random.choice(string.ascii_lowercase) for _ in range(6))

def get_gateway_connection(gateway, credentials):
    """ connect to the given gateway """
    if gateway.connection is None:
        gateway.connection = boto.connect_s3(
                aws_access_key_id = credentials.access_key,
                aws_secret_access_key = credentials.secret,
                host = gateway.host,
                port = gateway.port,
                is_secure = False,
                calling_format = boto.s3.connection.OrdinaryCallingFormat())
    return gateway.connection

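# Note: this local definition shadows the get_gateway_connection imported from
# .conn above. OrdinaryCallingFormat selects path-style addressing
# (http://host:port/bucket/key), so test gateways need no wildcard DNS for
# virtual-hosted bucket names.
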
def get_zone_connection(zone, credentials):
    """ connect to the zone's first gateway """
    if isinstance(credentials, list):
        credentials = credentials[0]
    return get_gateway_connection(zone.gateways[0], credentials)

def mdlog_list(zone, period = None):
    cmd = ['mdlog', 'list']
    if period:
        cmd += ['--period', period]
    (mdlog_json, _) = zone.cluster.admin(cmd, read_only=True)
    mdlog_json = mdlog_json.decode('utf-8')
    return json.loads(mdlog_json)

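# zone.cluster.admin() shells out to radosgw-admin, so the helper above is
# roughly equivalent to running, e.g.:
#
#   radosgw-admin mdlog list --period <period-id>
#
# and decoding the JSON it prints.
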
def meta_sync_status(zone):
    while True:
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break
        assert(retcode == 2) # ENOENT

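# NOTE: this meta_sync_status() is shadowed by the definition of the same name
# further down, which also parses the output and honors the configured retry
# limits; this earlier variant is effectively dead code.
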
def mdlog_autotrim(zone):
    zone.cluster.admin(['mdlog', 'autotrim'])

def datalog_list(zone, period = None):
    cmd = ['datalog', 'list']
    (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
    datalog_json = datalog_json.decode('utf-8')
    return json.loads(datalog_json)

def datalog_autotrim(zone):
    zone.cluster.admin(['datalog', 'autotrim'])

def bilog_list(zone, bucket, args = None):
    cmd = ['bilog', 'list', '--bucket', bucket] + (args or [])
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    bilog, _ = zone.cluster.admin(cmd, read_only=True)
    bilog = bilog.decode('utf-8')
    return json.loads(bilog)

def bilog_autotrim(zone, args = None):
    zone.cluster.admin(['bilog', 'autotrim'] + (args or []))

def parse_meta_sync_status(meta_sync_status_json):
    meta_sync_status_json = meta_sync_status_json.decode('utf-8')
    log.debug('current meta sync status=%s', meta_sync_status_json)
    sync_status = json.loads(meta_sync_status_json)

    sync_info = sync_status['sync_status']['info']
    global_sync_status = sync_info['status']
    num_shards = sync_info['num_shards']
    period = sync_info['period']
    realm_epoch = sync_info['realm_epoch']

    sync_markers = sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers = {}
    for i in range(num_shards):
        # get marker, only if it's an incremental marker for the same realm epoch
        if realm_epoch > sync_markers[i]['val']['realm_epoch'] or sync_markers[i]['val']['state'] == 0:
            markers[i] = ''
        else:
            markers[i] = sync_markers[i]['val']['marker']

    return period, realm_epoch, num_shards, markers

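# For reference, the parser above expects 'metadata sync status' JSON shaped
# roughly like this (abridged, values illustrative):
#
#   {
#     "sync_status": {
#       "info": { "status": "sync", "num_shards": 64,
#                 "period": "...", "realm_epoch": 2 },
#       "markers": [
#         { "key": 0,
#           "val": { "state": 1, "marker": "1_1557...", "realm_epoch": 2 } },
#         ...
#       ]
#     }
#   }
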
def meta_sync_status(zone):
    for _ in range(config.checkpoint_retries):
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_meta_sync_status(meta_sync_status_json)
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read metadata sync status for zone=%s' % zone.name

def meta_master_log_status(master_zone):
    cmd = ['mdlog', 'status'] + master_zone.zone_args()
    mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True)
    mdlog_status = json.loads(mdlog_status_json.decode('utf-8'))

    markers = {i: s['marker'] for i, s in enumerate(mdlog_status)}
    log.debug('master meta markers=%s', markers)
    return markers

def compare_meta_status(zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d, len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg):
        log.warning('zone %s behind master: %s', zone.name, msg)
        return False

    return True

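# Shard markers are opaque strings whose lexicographic order matches log
# order, so the 'l > s' comparison above reports a shard as behind only while
# the master's log marker sorts after the target's sync marker.
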
def zone_meta_checkpoint(zone, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zone.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    current_realm_epoch = realm.current_period.data['realm_epoch']

    log.info('starting meta checkpoint for zone=%s', zone.name)

    for _ in range(config.checkpoint_retries):
        period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
        if realm_epoch < current_realm_epoch:
            log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
                        zone.name, realm_epoch, current_realm_epoch)
        else:
            log.debug('log_status=%s', master_status)
            log.debug('sync_status=%s', sync_status)
            if compare_meta_status(zone, master_status, sync_status):
                log.info('finish meta checkpoint for zone=%s', zone.name)
                return

        time.sleep(config.checkpoint_delay)

    assert False, 'failed meta checkpoint for zone=%s' % zone.name

def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zonegroup.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    for zone in zonegroup.zones:
        if zone == meta_master_zone:
            continue
        zone_meta_checkpoint(zone, meta_master_zone, master_status)

def realm_meta_checkpoint(realm):
    log.info('meta checkpoint')

    meta_master_zone = realm.meta_master_zone()
    master_status = meta_master_log_status(meta_master_zone)

    for zonegroup in realm.current_period.zonegroups:
        zonegroup_meta_checkpoint(zonegroup, meta_master_zone, master_status)

def parse_data_sync_status(data_sync_status_json):
    data_sync_status_json = data_sync_status_json.decode('utf-8')
    log.debug('current data sync status=%s', data_sync_status_json)
    sync_status = json.loads(data_sync_status_json)

    global_sync_status = sync_status['sync_status']['info']['status']
    num_shards = sync_status['sync_status']['info']['num_shards']

    sync_markers = sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers = {}
    for i in range(num_shards):
        markers[i] = sync_markers[i]['val']['marker']

    return (num_shards, markers)

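# The 'data sync status' JSON mirrors the metadata form parsed above; only the
# shard count and per-shard markers are extracted here, since this parser has
# no realm-epoch bookkeeping to do.
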
def data_sync_status(target_zone, source_zone):
    if target_zone == source_zone:
        return None

    for _ in range(config.checkpoint_retries):
        cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
        cmd += ['--source-zone', source_zone.name]
        data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_data_sync_status(data_sync_status_json)

        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)

def bucket_sync_status(target_zone, source_zone, bucket_name):
    if target_zone == source_zone:
        return None

    cmd = ['bucket', 'sync', 'markers'] + target_zone.zone_args()
    cmd += ['--source-zone', source_zone.name]
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    while True:
        bucket_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break
        assert(retcode == 2) # ENOENT

    bucket_sync_status_json = bucket_sync_status_json.decode('utf-8')
    log.debug('current bucket sync markers=%s', bucket_sync_status_json)
    sync_status = json.loads(bucket_sync_status_json)

    markers = {}
    for entry in sync_status:
        val = entry['val']
        if val['status'] == 'incremental-sync':
            pos = val['inc_marker']['position'].split('#')[-1] # get rid of shard id; e.g., 6#00000000002.132.3 -> 00000000002.132.3
        else:
            pos = ''
        markers[entry['key']] = pos

    return markers

def data_source_log_status(source_zone):
    source_cluster = source_zone.cluster
    cmd = ['datalog', 'status'] + source_zone.zone_args()
    datalog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    datalog_status = json.loads(datalog_status_json.decode('utf-8'))

    markers = {i: s['marker'] for i, s in enumerate(datalog_status)}
    log.debug('data markers for zone=%s markers=%s', source_zone.name, markers)
    return markers

def bucket_source_log_status(source_zone, bucket_name):
    cmd = ['bilog', 'status'] + source_zone.zone_args()
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    source_cluster = source_zone.cluster
    bilog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    bilog_status = json.loads(bilog_status_json.decode('utf-8'))

    m = bilog_status['markers']

    # flatten the list of {key, val} entries into a dict
    markers = {}
    for s in m:
        markers[s['key']] = s['val']

    log.debug('bilog markers for zone=%s bucket=%s markers=%s', source_zone.name, bucket_name, markers)
    return markers

def compare_data_status(target_zone, source_zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg):
        log.warning('data of zone %s behind zone %s: %s', target_zone.name, source_zone.name, msg)
        return False

    return True

def compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg):
        log.warning('bucket %s zone %s behind zone %s: %s', bucket_name, target_zone.name, source_zone.name, msg)
        return False

    return True

def zone_data_checkpoint(target_zone, source_zone):
    if target_zone == source_zone:
        return

    log_status = data_source_log_status(source_zone)
    log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)

    for _ in range(config.checkpoint_retries):
        num_shards, sync_status = data_sync_status(target_zone, source_zone)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_data_status(target_zone, source_zone, log_status, sync_status):
            log.info('finished data checkpoint for target_zone=%s source_zone=%s',
                     target_zone.name, source_zone.name)
            return

        time.sleep(config.checkpoint_delay)

    assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)

def zonegroup_data_checkpoint(zonegroup_conns):
    for source_conn in zonegroup_conns.rw_zones:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            log.debug('data checkpoint: source=%s target=%s', source_conn.zone.name, target_conn.zone.name)
            zone_data_checkpoint(target_conn.zone, source_conn.zone)

def zone_bucket_checkpoint(target_zone, source_zone, bucket_name):
    if target_zone == source_zone:
        return

    log_status = bucket_source_log_status(source_zone, bucket_name)
    log.info('starting bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)

    for _ in range(config.checkpoint_retries):
        sync_status = bucket_sync_status(target_zone, source_zone, bucket_name)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
            log.info('finished bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)
            return

        time.sleep(config.checkpoint_delay)

    assert False, 'failed bucket checkpoint for target_zone=%s source_zone=%s bucket=%s' % \
                  (target_zone.name, source_zone.name, bucket_name)

def zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name):
    for source_conn in zonegroup_conns.rw_zones:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            log.debug('bucket checkpoint: source=%s target=%s bucket=%s', source_conn.zone.name, target_conn.zone.name, bucket_name)
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket_name)
    for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
        if target_conn.zone.has_buckets():
            target_conn.check_bucket_eq(source_conn, bucket_name)

def set_master_zone(zone):
    zone.modify(zone.cluster, ['--master'])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    zonegroup.master_zone = zone
    log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay)
    time.sleep(config.reconfigure_delay)

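# Promoting a different master commits a new period ('period update --commit'
# under the hood), and every gateway must reload the realm configuration,
# which is why callers then wait config.reconfigure_delay seconds.
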
def set_sync_from_all(zone, flag):
    s = 'true' if flag else 'false'
    zone.modify(zone.cluster, ['--sync-from-all={}'.format(s)])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    log.info('Set sync_from_all flag on zone %s to %s', zone.name, s)
    time.sleep(config.reconfigure_delay)

def set_redirect_zone(zone, redirect_zone):
    id_str = redirect_zone.id if redirect_zone else ''
    zone.modify(zone.cluster, ['--redirect-zone={}'.format(id_str)])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    log.info('Set redirect_zone zone %s to "%s"', zone.name, id_str)
    time.sleep(config.reconfigure_delay)

def enable_bucket_sync(zone, bucket_name):
    cmd = ['bucket', 'sync', 'enable', '--bucket', bucket_name] + zone.zone_args()
    zone.cluster.admin(cmd)

def disable_bucket_sync(zone, bucket_name):
    cmd = ['bucket', 'sync', 'disable', '--bucket', bucket_name] + zone.zone_args()
    zone.cluster.admin(cmd)

def check_buckets_sync_status_obj_not_exist(zone, buckets):
    for _ in range(config.checkpoint_retries):
        cmd = ['log', 'list'] + zone.zone_arg()
        log_list, ret = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        log_list = log_list.decode('utf-8') # admin() returns bytes
        for bucket in buckets:
            if log_list.find(':'+bucket+":") >= 0:
                break
        else:
            return
        time.sleep(config.checkpoint_delay)
    assert False

def gen_bucket_name():
    global num_buckets

    num_buckets += 1
    return run_prefix + '-' + str(num_buckets)

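# Bucket names produced here look like '<run_prefix>-<n>', e.g. 'xkzcqd-1';
# the random per-run prefix keeps concurrent test runs from colliding.
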
class ZonegroupConns:
    def __init__(self, zonegroup):
        self.zonegroup = zonegroup
        self.zones = []
        self.ro_zones = []
        self.rw_zones = []
        self.master_zone = None
        for z in zonegroup.zones:
            zone_conn = z.get_conn(user.credentials)
            self.zones.append(zone_conn)
            if z.is_read_only():
                self.ro_zones.append(zone_conn)
            else:
                self.rw_zones.append(zone_conn)

            if z == zonegroup.master_zone:
                self.master_zone = zone_conn

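# Typical use in the tests below (illustrative):
#
#   zonegroup = realm.master_zonegroup()
#   zonegroup_conns = ZonegroupConns(zonegroup)
#   for conn in zonegroup_conns.rw_zones:
#       ...   # S3 connections for the writable zones
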
def check_all_buckets_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            log.critical('zone %s does not contain bucket %s', zone_conn.name, b)
            return False

    return True

def check_all_buckets_dont_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            continue

        log.critical('zone %s contains bucket %s', zone_conn.name, b)
        return False

    return True

def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1):
    buckets = []
    zone_bucket = []
    for zone in zonegroup_conns.rw_zones:
        for i in range(buckets_per_zone):
            bucket_name = gen_bucket_name()
            log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
            bucket = zone.create_bucket(bucket_name)
            buckets.append(bucket_name)
            zone_bucket.append((zone, bucket))

    return buckets, zone_bucket

def create_bucket_per_zone_in_realm():
    buckets = []
    zone_bucket = []
    for zonegroup in realm.current_period.zonegroups:
        zg_conn = ZonegroupConns(zonegroup)
        b, z = create_bucket_per_zone(zg_conn)
        buckets.extend(b)
        zone_bucket.extend(z)
    return buckets, zone_bucket

def test_bucket_create():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_recreate():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    # recreate buckets on all zones, make sure they weren't removed
    for zone in zonegroup_conns.rw_zones:
        for bucket_name in buckets:
            bucket = zone.create_bucket(bucket_name)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    for zone, bucket_name in zone_bucket:
        zone.conn.delete_bucket(bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_dont_exist(zone, buckets)

def get_bucket(zone, bucket_name):
    return zone.conn.get_bucket(bucket_name)

def get_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.get_key(obj_name)

def new_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.new_key(obj_name)

def check_bucket_eq(zone_conn1, zone_conn2, bucket):
    if zone_conn2.zone.has_buckets():
        zone_conn2.check_bucket_eq(zone_conn1, bucket.name)

def test_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'myobj', '_myobj', ':', '&' ]
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket_name in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket_name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def test_object_delete():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objname = 'myobj'
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket, objname)
        k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    # check object exists
    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

    # check object removal
    for source_conn, bucket in zone_bucket:
        k = get_key(source_conn, bucket, objname)
        k.delete()
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def get_latest_object_version(key):
    for k in key.bucket.list_versions(key.name):
        if k.is_latest:
            return k
    return None

def test_versioned_object_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)

    zonegroup_meta_checkpoint(zonegroup)

    # upload a dummy object to each bucket and wait for sync. this forces each
    # bucket to finish a full sync and switch to incremental
    for source_conn, bucket in zone_bucket:
        new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)

    for _, bucket in zone_bucket:
        # create and delete multiple versions of an object from each zone
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket, obj)

            k.set_contents_from_string('version1')
            log.debug('version1 id=%s', k.version_id)
            # don't delete version1 - this tests that the initial version
            # doesn't get squashed into later versions

            # create and delete the following object versions to test that
            # the operations don't race with each other during sync
            k.set_contents_from_string('version2')
            log.debug('version2 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)

            k.set_contents_from_string('version3')
            log.debug('version3 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)

    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    for _, bucket in zone_bucket:
        # overwrite the acls to test that metadata-only entries are applied
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket.name, obj)
            v = get_latest_object_version(k)
            v.make_public()

    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_version_suspended_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone = zonegroup_conns.rw_zones[0]

    # create a non-versioned bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)

    # upload an initial object
    key1 = new_key(zone, bucket, 'obj')
    key1.set_contents_from_string('')
    log.debug('created initial version id=%s', key1.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # enable versioning
    bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)

    # re-upload the object as a new version
    key2 = new_key(zone, bucket, 'obj')
    key2.set_contents_from_string('')
    log.debug('created new version id=%s', key2.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # suspend versioning
    bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)

    # re-upload the object as a 'null' version
    key3 = new_key(zone, bucket, 'obj')
    key3.set_contents_from_string('')
    log.debug('created null version id=%s', key3.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_delete_marker_full_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)

    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')

        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()

    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_suspended_delete_marker_full_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable/suspend versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)

    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')

        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()

    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_bucket_versioning():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        res = bucket.get_versioning_status()
        key = 'Versioning'
        assert(key in res and res[key] == 'Enabled')

def test_bucket_acl():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        assert(len(bucket.get_acl().acl.grants) == 1) # single grant on owner
        bucket.set_acl('public-read')
        assert(len(bucket.get_acl().acl.grants) == 2) # new grant on AllUsers

def test_bucket_cors():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        cors_cfg = CORSConfiguration()
        cors_cfg.add_rule(['DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000)
        bucket.set_cors(cors_cfg)
        assert(bucket.get_cors().to_xml() == cors_cfg.to_xml())

def test_bucket_delete_notempty():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn, bucket_name in zone_bucket:
        # upload an object to each bucket on its own zone
        conn = zone_conn.get_connection()
        bucket = conn.get_bucket(bucket_name)
        k = bucket.new_key('foo')
        k.set_contents_from_string('bar')
        # attempt to delete the bucket before this object can sync
        try:
            conn.delete_bucket(bucket_name)
        except boto.exception.S3ResponseError as e:
            assert(e.error_code == 'BucketNotEmpty')
            continue
        assert False # expected 409 BucketNotEmpty

    # assert that each bucket still exists on the master
    c1 = zonegroup_conns.master_zone.conn
    for _, bucket_name in zone_bucket:
        assert c1.get_bucket(bucket_name)

def test_multi_period_incremental_sync():
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.zones) < 3:
        raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.")

    # periods to include in mdlog comparison
    mdlog_periods = [realm.current_period.id]

    # create a bucket in each zone
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    zonegroup_meta_checkpoint(zonegroup)

    z1, z2, z3 = zonegroup.zones[0:3]
    assert(z1 == zonegroup.master_zone)

    # kill zone 3 gateways to freeze sync status to incremental in first period
    z3.stop()

    # change master to zone 2 -> period 2
    set_master_zone(z2)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, _ in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        bucket = zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # wait for zone 1 to sync
    zone_meta_checkpoint(z1)

    # change master back to zone 1 -> period 3
    set_master_zone(z1)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, bucket_name in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # restart zone 3 gateway and wait for sync
    z3.start()
    zonegroup_meta_checkpoint(zonegroup)

    # verify that we end up with the same objects
    for bucket_name in buckets:
        for source_conn, _ in zone_bucket:
            for target_conn in zonegroup_conns.zones:
                if source_conn.zone == target_conn.zone:
                    continue

                if target_conn.zone.has_buckets():
                    target_conn.check_bucket_eq(source_conn, bucket_name)

    # verify that mdlogs are not empty and match for each period
    for period in mdlog_periods:
        master_mdlog = mdlog_list(z1, period)
        assert len(master_mdlog) > 0
        for zone in zonegroup.zones:
            if zone == z1:
                continue
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == len(master_mdlog)

    # autotrim mdlogs for master zone
    mdlog_autotrim(z1)

    # autotrim mdlogs for peers
    for zone in zonegroup.zones:
        if zone == z1:
            continue
        mdlog_autotrim(zone)

    # verify that mdlogs are empty for each period
    for period in mdlog_periods:
        for zone in zonegroup.zones:
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == 0

def test_datalog_autotrim():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # upload an object to each zone to generate a datalog entry
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket.name, 'key')
        k.set_contents_from_string('body')

    # wait for data sync to catch up
    zonegroup_data_checkpoint(zonegroup_conns)

    # trim each zone's datalog and verify it is empty
    for zone, _ in zone_bucket:
        datalog_autotrim(zone.zone)
        datalog = datalog_list(zone.zone)
        assert len(datalog) == 0

def test_multi_zone_redirect():
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.rw_zones) < 2:
        raise SkipTest("test_multi_zone_redirect skipped. Requires 2 or more rw zones in master zonegroup.")

    zonegroup_conns = ZonegroupConns(zonegroup)
    (zc1, zc2) = zonegroup_conns.rw_zones[0:2]

    z1, z2 = (zc1.zone, zc2.zone)
    set_sync_from_all(z2, False)

    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', z1.name, bucket_name)
    bucket = zc1.conn.create_bucket(bucket_name)
    obj = 'testredirect'

    key = bucket.new_key(obj)
    data = 'A' * 512
    key.set_contents_from_string(data)

    zonegroup_meta_checkpoint(zonegroup)

    # try to read object from second zone (should fail)
    bucket2 = get_bucket(zc2, bucket_name)
    assert_raises(boto.exception.S3ResponseError, bucket2.get_key, obj)

    set_redirect_zone(z2, z1)

    key2 = bucket2.get_key(obj)
    eq(data, key2.get_contents_as_string())

    key = bucket.new_key(obj)

    for x in ['a', 'b', 'c', 'd']:
        data = x * 512
        key.set_contents_from_string(data)
        eq(data, key2.get_contents_as_string())

    # revert config changes
    set_sync_from_all(z2, True)
    set_redirect_zone(z2, None)

def test_zonegroup_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.zones) < 2:
        raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")

    zonegroup_meta_checkpoint(zonegroup)
    z1, z2 = zonegroup.zones[0:2]
    c1, c2 = (z1.cluster, z2.cluster)

    # get admin credentials out of existing zone
    system_key = z1.data['system_key']
    admin_creds = Credentials(system_key['access_key'], system_key['secret_key'])

    # create a new zone in zonegroup on c2 and commit
    zone = Zone('remove', zonegroup, c2)
    zone.create(c2, admin_creds.credential_args())
    zonegroup.zones.append(zone)
    zonegroup.period.update(zone, commit=True)

    zonegroup.remove(c1, zone)

    # another 'zonegroup remove' should fail with ENOENT
    _, retcode = zonegroup.remove(c1, zone, check_retcode=False)
    assert(retcode == 2) # ENOENT

    # delete the new zone
    zone.delete(c2)

    # validate the resulting period
    zonegroup.period.update(z1, commit=True)

def test_zg_master_zone_delete():
    master_zg = realm.master_zonegroup()
    master_zone = master_zg.master_zone

    assert(len(master_zg.zones) >= 1)
    master_cluster = master_zg.zones[0].cluster

    rm_zg = ZoneGroup('remove_zg')
    rm_zg.create(master_cluster)

    rm_zone = Zone('remove', rm_zg, master_cluster)
    rm_zone.create(master_cluster)
    master_zg.period.update(master_zone, commit=True)

    rm_zone.delete(master_cluster)
    # Period update: This should now fail as the zone will be the master zone
    # of its zonegroup
    _, retcode = master_zg.period.update(master_zone, check_retcode=False)
    assert(retcode == errno.EINVAL)

    # Proceed to delete the zonegroup as well, previous period now does not
    # contain a dangling master_zone, this must succeed
    rm_zg.delete(master_cluster)
    master_zg.period.update(master_zone, commit=True)

def test_set_bucket_website():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        website_cfg = WebsiteConfiguration(suffix='index.html', error_key='error.html')
        try:
            bucket.set_website_configuration(website_cfg)
        except boto.exception.S3ResponseError as e:
            if e.error_code == 'MethodNotAllowed':
                raise SkipTest("test_set_bucket_website skipped. Requires rgw_enable_static_website = 1.")
        assert(bucket.get_website_configuration_with_xml()[1] == website_cfg.to_xml())

def test_set_bucket_policy():
    policy = '''{
"Version": "2012-10-17",
"Statement": [{
    "Effect": "Allow",
    "Principal": "*"
}]
}'''
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.set_policy(policy)
        assert(bucket.get_policy() == policy)

def test_bucket_sync_disable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)

    for zone in zonegroup.zones:
        check_buckets_sync_status_obj_not_exist(zone, buckets)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_bucket_sync_enable_right_after_disable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = ['obj1', 'obj2', 'obj3', 'obj4']
    content = 'asdasd'

    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)

    objnames_2 = ['obj5', 'obj6', 'obj7', 'obj8']

    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_bucket_sync_disable_enable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'obj1', 'obj2', 'obj3', 'obj4' ]
    content = 'asdasd'

    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    objnames_2 = [ 'obj5', 'obj6', 'obj7', 'obj8' ]

    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_multipart_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    _, bucket = zone_bucket[0]

    # initiate a multipart upload
    upload = bucket.initiate_multipart_upload('MULTIPART')
    mp = boto.s3.multipart.MultiPartUpload(bucket)
    mp.key_name = upload.key_name
    mp.id = upload.id
    part_size = 5 * 1024 * 1024 # 5M min part size
    mp.upload_part_from_file(StringIO('a' * part_size), 1)
    mp.upload_part_from_file(StringIO('b' * part_size), 2)
    mp.upload_part_from_file(StringIO('c' * part_size), 3)
    mp.upload_part_from_file(StringIO('d' * part_size), 4)
    mp.complete_upload()

    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_encrypted_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    if len(zonegroup.rw_zones) < 2:
        raise SkipTest("test_encrypted_object_sync skipped. Requires 2 or more rw zones in master zonegroup.")

    (zone1, zone2) = zonegroup_conns.rw_zones[0:2]

    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', zone1.name, bucket_name)
    bucket = zone1.conn.create_bucket(bucket_name)

    # upload an object with sse-c encryption
    sse_c_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    key = bucket.new_key('testobj-sse-c')
    data = 'A' * 512
    key.set_contents_from_string(data, headers=sse_c_headers)

    # upload an object with sse-kms encryption
    sse_kms_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        # testkey-1 must be present in 'rgw crypt s3 kms encryption keys' (vstart.sh adds this)
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1',
    }
    key = bucket.new_key('testobj-sse-kms')
    key.set_contents_from_string(data, headers=sse_kms_headers)

    # wait for the bucket metadata and data to sync
    zonegroup_meta_checkpoint(zonegroup)
    zone_bucket_checkpoint(zone2.zone, zone1.zone, bucket_name)

    # read the encrypted objects from the second zone
    bucket2 = get_bucket(zone2, bucket_name)
    key = bucket2.get_key('testobj-sse-c', headers=sse_c_headers)
    eq(data, key.get_contents_as_string(headers=sse_c_headers))

    key = bucket2.get_key('testobj-sse-kms')
    eq(data, key.get_contents_as_string())

def test_bucket_index_log_trim():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone = zonegroup_conns.rw_zones[0]

    # create a test bucket, upload some objects, and wait for sync
    def make_test_bucket():
        name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone.name, name)
        bucket = zone.conn.create_bucket(name)
        for objname in ('a', 'b', 'c', 'd'):
            k = new_key(zone, name, objname)
            k.set_contents_from_string('foo')
        zonegroup_meta_checkpoint(zonegroup)
        zonegroup_bucket_checkpoint(zonegroup_conns, name)
        return bucket

    # create a 'cold' bucket
    cold_bucket = make_test_bucket()

    # trim with max-buckets=0 to clear counters for cold bucket. this should
    # prevent it from being considered 'active' by the next autotrim
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '0',
    ])

    # create an 'active' bucket
    active_bucket = make_test_bucket()

    # trim with max-buckets=1 min-cold-buckets=0 to trim active bucket only
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '1',
        '--rgw-sync-log-trim-min-cold-buckets', '0',
    ])

    # verify active bucket has empty bilog
    active_bilog = bilog_list(zone.zone, active_bucket.name)
    assert(len(active_bilog) == 0)

    # verify cold bucket has nonempty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) > 0)

    # trim with min-cold-buckets=999 to trim all buckets
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '999',
        '--rgw-sync-log-trim-min-cold-buckets', '999',
    ])

    # verify cold bucket has empty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) == 0)

def test_bucket_creation_time():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone_buckets = [zone.get_connection().get_all_buckets() for zone in zonegroup_conns.rw_zones]
    for z1, z2 in combinations(zone_buckets, 2):
        for a, b in zip(z1, z2):
            eq(a.name, b.name)
            eq(a.creation_date, b.creation_date)