# ceph/src/test/rgw/rgw_multi/tests.py
import json
import logging
import random
import string
import time
import errno
import dateutil.parser

from itertools import combinations
from io import StringIO

import boto
import boto.s3.connection
from boto.s3.website import WebsiteConfiguration
from boto.s3.cors import CORSConfiguration

from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest

from .multisite import Zone, ZoneGroup, Credentials

from .conn import get_gateway_connection
from .tools import assert_raises

28 """ test configuration """
29 def __init__(self
, **kwargs
):
30 # by default, wait up to 5 minutes before giving up on a sync checkpoint
31 self
.checkpoint_retries
= kwargs
.get('checkpoint_retries', 60)
32 self
.checkpoint_delay
= kwargs
.get('checkpoint_delay', 5)
33 # allow some time for realm reconfiguration after changing master zone
34 self
.reconfigure_delay
= kwargs
.get('reconfigure_delay', 5)
35 self
.tenant
= kwargs
.get('tenant', '')
# rgw multisite tests, written against the interfaces provided in rgw_multi.
# these tests must be initialized and run by another module that provides
# implementations of these interfaces by calling init_multi()
realm = None
user = None
config = None
def init_multi(_realm, _user, _config=None):
    global realm
    realm = _realm
    global user
    user = _user
    global config
    config = _config or Config()
    realm_meta_checkpoint(realm)

def get_user():
    return user.id if user is not None else ''

def get_tenant():
    return config.tenant if config is not None and config.tenant is not None else ''

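# A hedged sketch of how a runner wires this module up (the realm/user
# construction here is illustrative, not a fixed API; in the ceph tree,
# test_multi.py builds these objects from a running cluster before calling
# init_multi()):
#
#   from rgw_multi import multisite, tests
#
#   realm = multisite.Realm('earth')      # assumed constructor arguments
#   user = multisite.User('tester')
#   tests.init_multi(realm, user, tests.Config(checkpoint_retries=30))
#   tests.test_bucket_create()
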
log = logging.getLogger('rgw_multi.tests')

num_buckets = 0
run_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))

def get_zone_connection(zone, credentials):
    """ connect to the zone's first gateway """
    if isinstance(credentials, list):
        credentials = credentials[0]
    return get_gateway_connection(zone.gateways[0], credentials)

def mdlog_list(zone, period = None):
    cmd = ['mdlog', 'list']
    if period:
        cmd += ['--period', period]
    (mdlog_json, _) = zone.cluster.admin(cmd, read_only=True)
    return json.loads(mdlog_json)

def mdlog_autotrim(zone):
    zone.cluster.admin(['mdlog', 'autotrim'])

def datalog_list(zone, args = None):
    cmd = ['datalog', 'list'] + (args or [])
    (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
    return json.loads(datalog_json)

def datalog_status(zone):
    cmd = ['datalog', 'status']
    (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
    return json.loads(datalog_json)

def datalog_autotrim(zone):
    zone.cluster.admin(['datalog', 'autotrim'])

def bilog_list(zone, bucket, args = None):
    cmd = ['bilog', 'list', '--bucket', bucket] + (args or [])
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    bilog, _ = zone.cluster.admin(cmd, read_only=True)
    return json.loads(bilog)

def bilog_autotrim(zone, args = None):
    zone.cluster.admin(['bilog', 'autotrim'] + (args or []))

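# Each helper above wraps a radosgw-admin command via Cluster.admin() and
# decodes its JSON output. An illustrative (hedged) usage, given a zone from
# a configured realm:
#
#   shards = datalog_status(zone)                    # 'datalog status'
#   entries = bilog_list(zone, 'mybucket',
#                        ['--max-entries', '10'])    # 'bilog list --bucket mybucket --max-entries 10'
#   mdlog_autotrim(zone)                             # 'mdlog autotrim'
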
def parse_meta_sync_status(meta_sync_status_json):
    log.debug('current meta sync status=%s', meta_sync_status_json)
    sync_status = json.loads(meta_sync_status_json)

    sync_info = sync_status['sync_status']['info']
    global_sync_status = sync_info['status']
    num_shards = sync_info['num_shards']
    period = sync_info['period']
    realm_epoch = sync_info['realm_epoch']

    sync_markers = sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers = {}
    for i in range(num_shards):
        # get marker, only if it's an incremental marker for the same realm epoch
        if realm_epoch > sync_markers[i]['val']['realm_epoch'] or sync_markers[i]['val']['state'] == 0:
            markers[i] = ''
        else:
            markers[i] = sync_markers[i]['val']['marker']

    return period, realm_epoch, num_shards, markers

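# For reference, a hedged sketch of the JSON shape this parser consumes
# (field values are illustrative, chosen to match the keys accessed above):
#
#   {"sync_status": {"info": {"status": "...", "num_shards": 2,
#                             "period": "<period-id>", "realm_epoch": 2},
#                    "markers": [{"key": 0, "val": {"state": 1, "realm_epoch": 2,
#                                                   "marker": "00000000001.1.3"}},
#                                ...]}}
#
# A shard that is not yet in incremental sync for the current realm epoch
# contributes an empty marker, so it always compares as behind the master.
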
def meta_sync_status(zone):
    for _ in range(config.checkpoint_retries):
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_meta_sync_status(meta_sync_status_json)
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read metadata sync status for zone=%s' % zone.name

def meta_master_log_status(master_zone):
    cmd = ['mdlog', 'status'] + master_zone.zone_args()
    mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True)
    mdlog_status = json.loads(mdlog_status_json)

    markers = {i: s['marker'] for i, s in enumerate(mdlog_status)}
    log.debug('master meta markers=%s', markers)
    return markers

def compare_meta_status(zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d, len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg):
        log.warning('zone %s behind master: %s', zone.name, msg)
        return False

    return True

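# Shard markers compare lexicographically. An illustrative (hedged) example:
#
#   log_status  = {0: '00000000002.5.3', 1: '00000000001.2.3'}   # master mdlog
#   sync_status = {0: '00000000002.5.3', 1: ''}                  # target zone
#
# compare_meta_status() would report 'shard=1 master=00000000001.2.3 target='
# and return False; once every target marker is >= the master's it returns True.
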
def zone_meta_checkpoint(zone, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zone.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    current_realm_epoch = realm.current_period.data['realm_epoch']

    log.info('starting meta checkpoint for zone=%s', zone.name)

    for _ in range(config.checkpoint_retries):
        period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
        if realm_epoch < current_realm_epoch:
            log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
                        zone.name, realm_epoch, current_realm_epoch)
        else:
            log.debug('log_status=%s', master_status)
            log.debug('sync_status=%s', sync_status)
            if compare_meta_status(zone, master_status, sync_status):
                log.info('finished meta checkpoint for zone=%s', zone.name)
                return

        time.sleep(config.checkpoint_delay)
    assert False, 'failed meta checkpoint for zone=%s' % zone.name

def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zonegroup.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    for zone in zonegroup.zones:
        if zone == meta_master_zone:
            continue
        zone_meta_checkpoint(zone, meta_master_zone, master_status)

def realm_meta_checkpoint(realm):
    log.info('meta checkpoint')

    meta_master_zone = realm.meta_master_zone()
    master_status = meta_master_log_status(meta_master_zone)

    for zonegroup in realm.current_period.zonegroups:
        zonegroup_meta_checkpoint(zonegroup, meta_master_zone, master_status)

def parse_data_sync_status(data_sync_status_json):
    log.debug('current data sync status=%s', data_sync_status_json)
    sync_status = json.loads(data_sync_status_json)

    global_sync_status = sync_status['sync_status']['info']['status']
    num_shards = sync_status['sync_status']['info']['num_shards']

    sync_markers = sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers = {}
    for i in range(num_shards):
        markers[i] = sync_markers[i]['val']['marker']

    return (num_shards, markers)

def data_sync_status(target_zone, source_zone):
    if target_zone == source_zone:
        return None

    for _ in range(config.checkpoint_retries):
        cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
        cmd += ['--source-zone', source_zone.name]
        data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_data_sync_status(data_sync_status_json)

        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
            (target_zone.name, source_zone.name)

def bucket_sync_status(target_zone, source_zone, bucket_name):
    if target_zone == source_zone:
        return None

    cmd = ['bucket', 'sync', 'markers'] + target_zone.zone_args()
    cmd += ['--source-zone', source_zone.name]
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    while True:
        bucket_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break

        assert(retcode == 2) # ENOENT

    sync_status = json.loads(bucket_sync_status_json)

    markers = {}
    for entry in sync_status:
        val = entry['val']
        pos = val['inc_marker']['position'].split('#')[-1] # get rid of shard id; e.g., 6#00000000002.132.3 -> 00000000002.132.3
        markers[entry['key']] = pos

    return markers

def data_source_log_status(source_zone):
    source_cluster = source_zone.cluster
    cmd = ['datalog', 'status'] + source_zone.zone_args()
    datalog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    datalog_status = json.loads(datalog_status_json)

    markers = {i: s['marker'] for i, s in enumerate(datalog_status)}
    log.debug('data markers for zone=%s markers=%s', source_zone.name, markers)
    return markers

def bucket_source_log_status(source_zone, bucket_name):
    cmd = ['bilog', 'status'] + source_zone.zone_args()
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    source_cluster = source_zone.cluster
    bilog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    bilog_status = json.loads(bilog_status_json)

    m = {}
    markers = {}
    try:
        m = bilog_status['markers']
    except:
        pass

    for s in m:
        key = s['key']
        val = s['val']
        markers[key] = val

    log.debug('bilog markers for zone=%s bucket=%s markers=%s', source_zone.name, bucket_name, markers)
    return markers

def compare_data_status(target_zone, source_zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg):
        log.warning('data of zone %s behind zone %s: %s', target_zone.name, source_zone.name, msg)
        return False

    return True

def compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg):
        log.warning('bucket %s zone %s behind zone %s: %s', bucket_name, target_zone.name, source_zone.name, msg)
        return False

    return True

def zone_data_checkpoint(target_zone, source_zone):
    if not target_zone.syncs_from(source_zone.name):
        return

    log_status = data_source_log_status(source_zone)
    log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)

    for _ in range(config.checkpoint_retries):
        num_shards, sync_status = data_sync_status(target_zone, source_zone)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_data_status(target_zone, source_zone, log_status, sync_status):
            log.info('finished data checkpoint for target_zone=%s source_zone=%s',
                     target_zone.name, source_zone.name)
            return

        time.sleep(config.checkpoint_delay)

    assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
            (target_zone.name, source_zone.name)

def zonegroup_data_checkpoint(zonegroup_conns):
    for source_conn in zonegroup_conns.rw_zones:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            log.debug('data checkpoint: source=%s target=%s', source_conn.zone.name, target_conn.zone.name)
            zone_data_checkpoint(target_conn.zone, source_conn.zone)

def zone_bucket_checkpoint(target_zone, source_zone, bucket_name):
    if not target_zone.syncs_from(source_zone.name):
        return

    cmd = ['bucket', 'sync', 'checkpoint']
    cmd += ['--bucket', bucket_name, '--source-zone', source_zone.name]
    retry_delay_ms = config.checkpoint_delay * 1000
    timeout_sec = config.checkpoint_retries * config.checkpoint_delay
    cmd += ['--retry-delay-ms', str(retry_delay_ms), '--timeout-sec', str(timeout_sec)]
    cmd += target_zone.zone_args()
    target_zone.cluster.admin(cmd, debug_rgw=1)

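# With the Config defaults (checkpoint_retries=60, checkpoint_delay=5), this
# passes --retry-delay-ms 5000 and --timeout-sec 300, i.e. the same "wait up
# to 5 minutes" budget that the polling checkpoints above use.
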
def zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name):
    for source_conn in zonegroup_conns.rw_zones:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            log.debug('bucket checkpoint: source=%s target=%s bucket=%s', source_conn.zone.name, target_conn.zone.name, bucket_name)
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket_name)
    for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
        if target_conn.zone.has_buckets():
            target_conn.check_bucket_eq(source_conn, bucket_name)

def set_master_zone(zone):
    zone.modify(zone.cluster, ['--master'])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    zonegroup.master_zone = zone
    log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay)
    time.sleep(config.reconfigure_delay)

def set_sync_from_all(zone, flag):
    s = 'true' if flag else 'false'
    zone.modify(zone.cluster, ['--sync-from-all={}'.format(s)])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    log.info('Set sync_from_all flag on zone %s to %s', zone.name, s)
    time.sleep(config.reconfigure_delay)

def set_redirect_zone(zone, redirect_zone):
    id_str = redirect_zone.id if redirect_zone else ''
    zone.modify(zone.cluster, ['--redirect-zone={}'.format(id_str)])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    log.info('Set redirect_zone zone %s to "%s"', zone.name, id_str)
    time.sleep(config.reconfigure_delay)

def enable_bucket_sync(zone, bucket_name):
    cmd = ['bucket', 'sync', 'enable', '--bucket', bucket_name] + zone.zone_args()
    zone.cluster.admin(cmd)

def disable_bucket_sync(zone, bucket_name):
    cmd = ['bucket', 'sync', 'disable', '--bucket', bucket_name] + zone.zone_args()
    zone.cluster.admin(cmd)

def check_buckets_sync_status_obj_not_exist(zone, buckets):
    for _ in range(config.checkpoint_retries):
        cmd = ['log', 'list'] + zone.zone_arg()
        log_list, ret = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        for bucket in buckets:
            if log_list.find(':'+bucket+":") >= 0:
                break
        else:
            return
        time.sleep(config.checkpoint_delay)
    assert False

def gen_bucket_name():
    global num_buckets

    num_buckets += 1
    return run_prefix + '-' + str(num_buckets)

class ZonegroupConns:
    def __init__(self, zonegroup):
        self.zonegroup = zonegroup
        self.zones = []
        self.ro_zones = []
        self.rw_zones = []
        self.master_zone = None
        for z in zonegroup.zones:
            zone_conn = z.get_conn(user.credentials)
            self.zones.append(zone_conn)
            if z.is_read_only():
                self.ro_zones.append(zone_conn)
            else:
                self.rw_zones.append(zone_conn)

            if z == zonegroup.master_zone:
                self.master_zone = zone_conn

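# A hedged usage sketch; this is the pattern the tests below follow (assumes
# init_multi() has populated the module-level realm/user):
#
#   zonegroup = realm.master_zonegroup()
#   conns = ZonegroupConns(zonegroup)
#   bucket = conns.rw_zones[0].create_bucket('example')  # write on any rw zone
#   zonegroup_meta_checkpoint(zonegroup)                 # wait for metadata sync
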
def check_all_buckets_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            log.critical('zone %s does not contain bucket %s', zone_conn.zone.name, b)
            return False

    return True

def check_all_buckets_dont_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            continue

        log.critical('zone %s contains bucket %s', zone_conn.zone.name, b)
        return False

    return True

def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1):
    buckets = []
    zone_bucket = []
    for zone in zonegroup_conns.rw_zones:
        for i in range(buckets_per_zone):
            bucket_name = gen_bucket_name()
            log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
            bucket = zone.create_bucket(bucket_name)
            buckets.append(bucket_name)
            zone_bucket.append((zone, bucket))

    return buckets, zone_bucket

def create_bucket_per_zone_in_realm():
    buckets = []
    zone_bucket = []
    for zonegroup in realm.current_period.zonegroups:
        zg_conn = ZonegroupConns(zonegroup)
        b, z = create_bucket_per_zone(zg_conn)
        buckets.extend(b)
        zone_bucket.extend(z)
    return buckets, zone_bucket

def test_bucket_create():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_recreate():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    # recreate buckets on all zones, make sure they weren't removed
    for zone in zonegroup_conns.rw_zones:
        for bucket_name in buckets:
            bucket = zone.create_bucket(bucket_name)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    for zone, bucket_name in zone_bucket:
        zone.conn.delete_bucket(bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_dont_exist(zone, buckets)

def get_bucket(zone, bucket_name):
    return zone.conn.get_bucket(bucket_name)

def get_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.get_key(obj_name)

def new_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.new_key(obj_name)

def check_bucket_eq(zone_conn1, zone_conn2, bucket):
    if zone_conn2.zone.has_buckets():
        zone_conn2.check_bucket_eq(zone_conn1, bucket.name)

def test_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'myobj', '_myobj', ':', '&' ]
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket_name in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket_name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def test_object_delete():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objname = 'myobj'
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket, objname)
        k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    # check object exists
    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

    # check object removal
    for source_conn, bucket in zone_bucket:
        k = get_key(source_conn, bucket, objname)
        k.delete()
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def get_latest_object_version(key):
    for k in key.bucket.list_versions(key.name):
        if k.is_latest:
            return k
    return None

def test_versioned_object_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)

    zonegroup_meta_checkpoint(zonegroup)

    # upload a dummy object to each bucket and wait for sync. this forces each
    # bucket to finish a full sync and switch to incremental
    for source_conn, bucket in zone_bucket:
        new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)

    for _, bucket in zone_bucket:
        # create and delete multiple versions of an object from each zone
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket, obj)

            k.set_contents_from_string('version1')
            log.debug('version1 id=%s', k.version_id)
            # don't delete version1 - this tests that the initial version
            # doesn't get squashed into later versions

            # create and delete the following object versions to test that
            # the operations don't race with each other during sync
            k.set_contents_from_string('version2')
            log.debug('version2 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)

            k.set_contents_from_string('version3')
            log.debug('version3 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)

    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    for _, bucket in zone_bucket:
        # overwrite the acls to test that metadata-only entries are applied
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket.name, obj)
            v = get_latest_object_version(k)
            v.make_public()

    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_concurrent_versioned_object_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    zone = zonegroup_conns.rw_zones[0]

    # create a versioned bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    bucket.configure_versioning(True)

    zonegroup_meta_checkpoint(zonegroup)

    # upload a dummy object and wait for sync. this forces each zone to finish
    # a full sync and switch to incremental
    new_key(zone, bucket, 'dummy').set_contents_from_string('')
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # create several concurrent versions on each zone and let them race to sync
    obj = 'obj'
    for zone_conn in zonegroup_conns.rw_zones:
        k = new_key(zone_conn, bucket, obj)
        k.set_contents_from_string('version1')
        log.debug('zone=%s version=%s', zone_conn.zone.name, k.version_id)

    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    zonegroup_data_checkpoint(zonegroup_conns)

def test_version_suspended_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone = zonegroup_conns.rw_zones[0]

    # create a non-versioned bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)

    # upload an initial object
    key1 = new_key(zone, bucket, 'obj')
    key1.set_contents_from_string('')
    log.debug('created initial version id=%s', key1.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # enable versioning
    bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)

    # re-upload the object as a new version
    key2 = new_key(zone, bucket, 'obj')
    key2.set_contents_from_string('')
    log.debug('created new version id=%s', key2.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # suspend versioning
    bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)

    # re-upload the object as a 'null' version
    key3 = new_key(zone, bucket, 'obj')
    key3.set_contents_from_string('')
    log.debug('created null version id=%s', key3.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_delete_marker_full_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)

    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')

        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()

    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_suspended_delete_marker_full_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable/suspend versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)

    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')

        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()

    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_bucket_versioning():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        res = bucket.get_versioning_status()
        key = 'Versioning'
        assert(key in res and res[key] == 'Enabled')

def test_bucket_acl():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        assert(len(bucket.get_acl().acl.grants) == 1) # single grant on owner
        bucket.set_acl('public-read')
        assert(len(bucket.get_acl().acl.grants) == 2) # new grant on AllUsers

def test_bucket_cors():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        cors_cfg = CORSConfiguration()
        cors_cfg.add_rule(['DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000)
        bucket.set_cors(cors_cfg)
        assert(bucket.get_cors().to_xml() == cors_cfg.to_xml())

def test_bucket_delete_notempty():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn, bucket_name in zone_bucket:
        # upload an object to each bucket on its own zone
        conn = zone_conn.get_connection()
        bucket = conn.get_bucket(bucket_name)
        k = bucket.new_key('foo')
        k.set_contents_from_string('bar')
        # attempt to delete the bucket before this object can sync
        try:
            conn.delete_bucket(bucket_name)
        except boto.exception.S3ResponseError as e:
            assert(e.error_code == 'BucketNotEmpty')
            continue
        assert False # expected 409 BucketNotEmpty

    # assert that each bucket still exists on the master
    c1 = zonegroup_conns.master_zone.conn
    for _, bucket_name in zone_bucket:
        assert c1.get_bucket(bucket_name)

def test_multi_period_incremental_sync():
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.zones) < 3:
        raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.")

    # periods to include in mdlog comparison
    mdlog_periods = [realm.current_period.id]

    # create a bucket in each zone
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    zonegroup_meta_checkpoint(zonegroup)

    z1, z2, z3 = zonegroup.zones[0:3]
    assert(z1 == zonegroup.master_zone)

    # kill zone 3 gateways to freeze sync status to incremental in first period
    z3.stop()

    # change master to zone 2 -> period 2
    set_master_zone(z2)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, _ in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        bucket = zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # wait for zone 1 to sync
    zone_meta_checkpoint(z1)

    # change master back to zone 1 -> period 3
    set_master_zone(z1)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, bucket_name in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # restart zone 3 gateway and wait for sync
    z3.start()
    zonegroup_meta_checkpoint(zonegroup)

    # verify that we end up with the same objects
    for bucket_name in buckets:
        for source_conn, _ in zone_bucket:
            for target_conn in zonegroup_conns.zones:
                if source_conn.zone == target_conn.zone:
                    continue

                if target_conn.zone.has_buckets():
                    target_conn.check_bucket_eq(source_conn, bucket_name)

    # verify that mdlogs are not empty and match for each period
    for period in mdlog_periods:
        master_mdlog = mdlog_list(z1, period)
        assert len(master_mdlog) > 0
        for zone in zonegroup.zones:
            if zone == z1:
                continue
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == len(master_mdlog)

    # autotrim mdlogs for master zone
    mdlog_autotrim(z1)

    # autotrim mdlogs for peers
    for zone in zonegroup.zones:
        if zone == z1:
            continue
        mdlog_autotrim(zone)

    # verify that mdlogs are empty for each period
    for period in mdlog_periods:
        for zone in zonegroup.zones:
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == 0

def test_datalog_autotrim():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # upload an object to each zone to generate a datalog entry
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket.name, 'key')
        k.set_contents_from_string('body')

    # wait for metadata and data sync to catch up
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_data_checkpoint(zonegroup_conns)

    # trim each zone's datalog
    for zone, _ in zone_bucket:
        # read max markers for each shard
        status = datalog_status(zone.zone)

        datalog_autotrim(zone.zone)

        for shard_id, shard_status in enumerate(status):
            try:
                before_trim = dateutil.parser.isoparse(shard_status['last_update'])
            except: # empty timestamps look like "0.000000" and will fail here
                continue
            entries = datalog_list(zone.zone, ['--shard-id', str(shard_id), '--max-entries', '1'])
            if not len(entries):
                continue
            after_trim = dateutil.parser.isoparse(entries[0]['timestamp'])
            assert before_trim < after_trim, "any datalog entries must be newer than trim"

def test_multi_zone_redirect():
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.rw_zones) < 2:
        raise SkipTest("test_multi_zone_redirect skipped. Requires 2 or more rw zones in master zonegroup.")

    zonegroup_conns = ZonegroupConns(zonegroup)
    (zc1, zc2) = zonegroup_conns.rw_zones[0:2]

    z1, z2 = (zc1.zone, zc2.zone)
    set_sync_from_all(z2, False)

    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', z1.name, bucket_name)
    bucket = zc1.conn.create_bucket(bucket_name)
    obj = 'testredirect'

    key = bucket.new_key(obj)
    data = 'A' * 512
    key.set_contents_from_string(data)

    zonegroup_meta_checkpoint(zonegroup)

    # try to read object from second zone (should fail)
    bucket2 = get_bucket(zc2, bucket_name)
    assert_raises(boto.exception.S3ResponseError, bucket2.get_key, obj)

    set_redirect_zone(z2, z1)

    key2 = bucket2.get_key(obj)

    eq(data, key2.get_contents_as_string(encoding='ascii'))

    key = bucket.new_key(obj)

    for x in ['a', 'b', 'c', 'd']:
        data = x * 512
        key.set_contents_from_string(data)
        eq(data, key2.get_contents_as_string(encoding='ascii'))

    # revert config changes
    set_sync_from_all(z2, True)
    set_redirect_zone(z2, None)

def test_zonegroup_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.zones) < 2:
        raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")

    zonegroup_meta_checkpoint(zonegroup)
    z1, z2 = zonegroup.zones[0:2]
    c1, c2 = (z1.cluster, z2.cluster)

    # get admin credentials out of existing zone
    system_key = z1.data['system_key']
    admin_creds = Credentials(system_key['access_key'], system_key['secret_key'])

    # create a new zone in zonegroup on c2 and commit
    zone = Zone('remove', zonegroup, c2)
    zone.create(c2, admin_creds.credential_args())
    zonegroup.zones.append(zone)
    zonegroup.period.update(zone, commit=True)

    zonegroup.remove(c1, zone)

    # another 'zonegroup remove' should fail with ENOENT
    _, retcode = zonegroup.remove(c1, zone, check_retcode=False)
    assert(retcode == 2) # ENOENT

    # delete the new zone
    zone.delete(c2)

    # validate the resulting period
    zonegroup.period.update(z1, commit=True)

def test_zg_master_zone_delete():
    master_zg = realm.master_zonegroup()
    master_zone = master_zg.master_zone

    assert(len(master_zg.zones) >= 1)
    master_cluster = master_zg.zones[0].cluster

    rm_zg = ZoneGroup('remove_zg')
    rm_zg.create(master_cluster)

    rm_zone = Zone('remove', rm_zg, master_cluster)
    rm_zone.create(master_cluster)
    master_zg.period.update(master_zone, commit=True)

    # delete the new zone
    rm_zone.delete(master_cluster)
    # period update: this should now fail, as the deleted zone is still the
    # master zone of its zonegroup
    _, retcode = master_zg.period.update(master_zone, check_retcode=False)
    assert(retcode == errno.EINVAL)

    # proceed to delete the zonegroup as well; the resulting period no longer
    # contains a dangling master_zone, so this must succeed
    rm_zg.delete(master_cluster)
    master_zg.period.update(master_zone, commit=True)

def test_set_bucket_website():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        website_cfg = WebsiteConfiguration(suffix='index.html', error_key='error.html')
        try:
            bucket.set_website_configuration(website_cfg)
        except boto.exception.S3ResponseError as e:
            if e.error_code == 'MethodNotAllowed':
                raise SkipTest("test_set_bucket_website skipped. Requires rgw_enable_static_website = 1.")
        assert(bucket.get_website_configuration_with_xml()[1] == website_cfg.to_xml())

def test_set_bucket_policy():
    # a minimal allow-all policy; get_policy() must return the exact string
    policy = '''{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": "*"
}]
}'''
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.set_policy(policy)
        assert(bucket.get_policy().decode('ascii') == policy)

def test_bucket_sync_disable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)

    for zone in zonegroup.zones:
        check_buckets_sync_status_obj_not_exist(zone, buckets)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_bucket_sync_enable_right_after_disable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = ['obj1', 'obj2', 'obj3', 'obj4']
    content = 'asdasd'

    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)

    objnames_2 = ['obj5', 'obj6', 'obj7', 'obj8']

    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_bucket_sync_disable_enable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'obj1', 'obj2', 'obj3', 'obj4' ]
    content = 'asdasd'

    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    objnames_2 = [ 'obj5', 'obj6', 'obj7', 'obj8' ]

    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_multipart_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    _, bucket = zone_bucket[0]

    # initiate a multipart upload
    upload = bucket.initiate_multipart_upload('MULTIPART')
    mp = boto.s3.multipart.MultiPartUpload(bucket)
    mp.key_name = upload.key_name
    mp.id = upload.id
    part_size = 5 * 1024 * 1024 # 5M min part size
    mp.upload_part_from_file(StringIO('a' * part_size), 1)
    mp.upload_part_from_file(StringIO('b' * part_size), 2)
    mp.upload_part_from_file(StringIO('c' * part_size), 3)
    mp.upload_part_from_file(StringIO('d' * part_size), 4)
    mp.complete_upload()

    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_encrypted_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    if len(zonegroup.rw_zones) < 2:
        raise SkipTest("test_encrypted_object_sync skipped. Requires 2 or more rw zones in master zonegroup.")

    (zone1, zone2) = zonegroup_conns.rw_zones[0:2]

    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', zone1.name, bucket_name)
    bucket = zone1.conn.create_bucket(bucket_name)

    # upload an object with sse-c encryption
    sse_c_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    key = bucket.new_key('testobj-sse-c')
    data = 'A' * 512
    key.set_contents_from_string(data, headers=sse_c_headers)

    # upload an object with sse-kms encryption
    sse_kms_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        # testkey-1 must be present in 'rgw crypt s3 kms encryption keys' (vstart.sh adds this)
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1',
    }
    key = bucket.new_key('testobj-sse-kms')
    key.set_contents_from_string(data, headers=sse_kms_headers)

    # wait for the bucket metadata and data to sync
    zonegroup_meta_checkpoint(zonegroup)
    zone_bucket_checkpoint(zone2.zone, zone1.zone, bucket_name)

    # read the encrypted objects from the second zone
    bucket2 = get_bucket(zone2, bucket_name)
    key = bucket2.get_key('testobj-sse-c', headers=sse_c_headers)
    eq(data, key.get_contents_as_string(headers=sse_c_headers, encoding='ascii'))

    key = bucket2.get_key('testobj-sse-kms')
    eq(data, key.get_contents_as_string(encoding='ascii'))

def test_bucket_index_log_trim():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone = zonegroup_conns.rw_zones[0]

    # create a test bucket, upload some objects, and wait for sync
    def make_test_bucket():
        name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone.name, name)
        bucket = zone.conn.create_bucket(name)
        for objname in ('a', 'b', 'c', 'd'):
            k = new_key(zone, name, objname)
            k.set_contents_from_string('foo')
        zonegroup_meta_checkpoint(zonegroup)
        zonegroup_bucket_checkpoint(zonegroup_conns, name)
        return bucket

    # create a 'cold' bucket
    cold_bucket = make_test_bucket()

    # trim with max-buckets=0 to clear counters for cold bucket. this should
    # prevent it from being considered 'active' by the next autotrim
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '0',
    ])

    # create an 'active' bucket
    active_bucket = make_test_bucket()

    # trim with max-buckets=1 min-cold-buckets=0 to trim active bucket only
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '1',
        '--rgw-sync-log-trim-min-cold-buckets', '0',
    ])

    # verify active bucket has empty bilog
    active_bilog = bilog_list(zone.zone, active_bucket.name)
    assert(len(active_bilog) == 0)

    # verify cold bucket has nonempty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) > 0)

    # trim with min-cold-buckets=999 to trim all buckets
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '999',
        '--rgw-sync-log-trim-min-cold-buckets', '999',
    ])

    # verify cold bucket has empty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) == 0)

def test_bucket_creation_time():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zonegroup_meta_checkpoint(zonegroup)

    zone_buckets = [zone.get_connection().get_all_buckets() for zone in zonegroup_conns.rw_zones]
    for z1, z2 in combinations(zone_buckets, 2):
        for a, b in zip(z1, z2):
            eq(a.name, b.name)
            eq(a.creation_date, b.creation_date)