import json
import random
import string
import sys
import time
import logging
import errno

try:
    from itertools import izip_longest as zip_longest
except ImportError:
    from itertools import zip_longest
from itertools import combinations
from six import StringIO

import boto
import boto.s3.connection
from boto.s3.website import WebsiteConfiguration
from boto.s3.cors import CORSConfiguration

from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest

from .multisite import Zone, ZoneGroup, Credentials

from .conn import get_gateway_connection
from .tools import assert_raises

class Config:
    """ test configuration """
    def __init__(self, **kwargs):
        # by default, wait up to 5 minutes before giving up on a sync checkpoint
        self.checkpoint_retries = kwargs.get('checkpoint_retries', 60)
        self.checkpoint_delay = kwargs.get('checkpoint_delay', 5)
        # allow some time for realm reconfiguration after changing master zone
        self.reconfigure_delay = kwargs.get('reconfigure_delay', 5)
        self.tenant = kwargs.get('tenant', '')

# rgw multisite tests, written against the interfaces provided in rgw_multi.
# these tests must be initialized and run by another module that provides
# implementations of these interfaces by calling init_multi()
realm = None
user = None
config = None
def init_multi(_realm, _user, _config=None):
    global realm
    realm = _realm
    global user
    user = _user
    global config
    config = _config or Config()
    realm_meta_checkpoint(realm)

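# A minimal sketch of how a test harness might wire this module up (the
# realm/user objects are hypothetical stand-ins for the rgw_multi
# implementations the harness actually provides):
#
#   from rgw_multi import tests
#   tests.init_multi(my_realm, my_user, tests.Config(checkpoint_retries=120))
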
def get_user():
    return user.id if user is not None else ''

def get_tenant():
    return config.tenant if config is not None and config.tenant is not None else ''

def get_realm():
    return realm

log = logging.getLogger('rgw_multi.tests')

num_buckets = 0
run_prefix = ''.join(random.choice(string.ascii_lowercase) for _ in range(6))

# note: this shadows the same-named helper imported from .conn above
def get_gateway_connection(gateway, credentials):
    """ connect to the given gateway """
    if gateway.connection is None:
        gateway.connection = boto.connect_s3(
            aws_access_key_id = credentials.access_key,
            aws_secret_access_key = credentials.secret,
            host = gateway.host,
            port = gateway.port,
            is_secure = False,
            calling_format = boto.s3.connection.OrdinaryCallingFormat())
    return gateway.connection

def get_zone_connection(zone, credentials):
    """ connect to the zone's first gateway """
    if isinstance(credentials, list):
        credentials = credentials[0]
    return get_gateway_connection(zone.gateways[0], credentials)

def mdlog_list(zone, period = None):
    cmd = ['mdlog', 'list']
    if period:
        cmd += ['--period', period]
    (mdlog_json, _) = zone.cluster.admin(cmd, read_only=True)
    mdlog_json = mdlog_json.decode('utf-8')
    return json.loads(mdlog_json)

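# These log helpers shell out to radosgw-admin on the zone's cluster; the
# call above corresponds roughly to running:
#
#   radosgw-admin mdlog list --period <period-id>
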
# NOTE: shadowed by the meta_sync_status() defined further down, which bounds
# its retries and parses the result; this variant only waits for the status
# command to succeed
def meta_sync_status(zone):
    while True:
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break
        assert(retcode == 2) # ENOENT
        time.sleep(5)

def mdlog_autotrim(zone):
    zone.cluster.admin(['mdlog', 'autotrim'])

def datalog_list(zone, period = None):
    # note: 'period' is accepted for symmetry with mdlog_list() but is unused;
    # the datalog is not period-scoped
    cmd = ['datalog', 'list']
    (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
    datalog_json = datalog_json.decode('utf-8')
    return json.loads(datalog_json)

def datalog_autotrim(zone):
    zone.cluster.admin(['datalog', 'autotrim'])

def bilog_list(zone, bucket, args = None):
    cmd = ['bilog', 'list', '--bucket', bucket] + (args or [])
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    bilog, _ = zone.cluster.admin(cmd, read_only=True)
    bilog = bilog.decode('utf-8')
    return json.loads(bilog)

def bilog_autotrim(zone, args = None):
    zone.cluster.admin(['bilog', 'autotrim'] + (args or []))

def parse_meta_sync_status(meta_sync_status_json):
    meta_sync_status_json = meta_sync_status_json.decode('utf-8')
    log.debug('current meta sync status=%s', meta_sync_status_json)
    sync_status = json.loads(meta_sync_status_json)

    sync_info = sync_status['sync_status']['info']
    global_sync_status = sync_info['status']
    num_shards = sync_info['num_shards']
    period = sync_info['period']
    realm_epoch = sync_info['realm_epoch']

    sync_markers = sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers = {}
    for i in range(num_shards):
        # get marker, only if it's an incremental marker for the same realm epoch
        if realm_epoch > sync_markers[i]['val']['realm_epoch'] or sync_markers[i]['val']['state'] == 0:
            markers[i] = ''
        else:
            markers[i] = sync_markers[i]['val']['marker']

    return period, realm_epoch, num_shards, markers

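# The parser above consumes 'metadata sync status' output shaped roughly like
# this (abridged, illustrative values):
#
#   {
#     "sync_status": {
#       "info": { "status": "...", "num_shards": 64,
#                 "period": "<period-id>", "realm_epoch": 2 },
#       "markers": [
#         { "key": 0,
#           "val": { "state": 1, "marker": "...", "realm_epoch": 2 } },
#         ...
#       ]
#     }
#   }
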
def meta_sync_status(zone):
    for _ in range(config.checkpoint_retries):
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_meta_sync_status(meta_sync_status_json)
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read metadata sync status for zone=%s' % zone.name

def meta_master_log_status(master_zone):
    cmd = ['mdlog', 'status'] + master_zone.zone_args()
    mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True)
    mdlog_status = json.loads(mdlog_status_json.decode('utf-8'))

    markers = {i: s['marker'] for i, s in enumerate(mdlog_status)}
    log.debug('master meta markers=%s', markers)
    return markers

def compare_meta_status(zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d, len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg) > 0:
        log.warning('zone %s behind master: %s', zone.name, msg)
        return False

    return True

def zone_meta_checkpoint(zone, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zone.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    current_realm_epoch = realm.current_period.data['realm_epoch']

    log.info('starting meta checkpoint for zone=%s', zone.name)

    for _ in range(config.checkpoint_retries):
        period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
        if realm_epoch < current_realm_epoch:
            log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
                        zone.name, realm_epoch, current_realm_epoch)
        else:
            log.debug('log_status=%s', master_status)
            log.debug('sync_status=%s', sync_status)
            if compare_meta_status(zone, master_status, sync_status):
                log.info('finished meta checkpoint for zone=%s', zone.name)
                return

        time.sleep(config.checkpoint_delay)
    assert False, 'failed meta checkpoint for zone=%s' % zone.name

def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zonegroup.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    for zone in zonegroup.zones:
        if zone == meta_master_zone:
            continue
        zone_meta_checkpoint(zone, meta_master_zone, master_status)

def realm_meta_checkpoint(realm):
    log.info('meta checkpoint')

    meta_master_zone = realm.meta_master_zone()
    master_status = meta_master_log_status(meta_master_zone)

    for zonegroup in realm.current_period.zonegroups:
        zonegroup_meta_checkpoint(zonegroup, meta_master_zone, master_status)

def parse_data_sync_status(data_sync_status_json):
    data_sync_status_json = data_sync_status_json.decode('utf-8')
    log.debug('current data sync status=%s', data_sync_status_json)
    sync_status = json.loads(data_sync_status_json)

    global_sync_status = sync_status['sync_status']['info']['status']
    num_shards = sync_status['sync_status']['info']['num_shards']

    sync_markers = sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers = {}
    for i in range(num_shards):
        markers[i] = sync_markers[i]['val']['marker']

    return (num_shards, markers)

def data_sync_status(target_zone, source_zone):
    if target_zone == source_zone:
        return None

    for _ in range(config.checkpoint_retries):
        cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
        cmd += ['--source-zone', source_zone.name]
        data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_data_sync_status(data_sync_status_json)

        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)

def bucket_sync_status(target_zone, source_zone, bucket_name):
    if target_zone == source_zone:
        return None

    cmd = ['bucket', 'sync', 'markers'] + target_zone.zone_args()
    cmd += ['--source-zone', source_zone.name]
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    while True:
        bucket_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break

        assert(retcode == 2) # ENOENT

    bucket_sync_status_json = bucket_sync_status_json.decode('utf-8')
    log.debug('current bucket sync markers=%s', bucket_sync_status_json)
    sync_status = json.loads(bucket_sync_status_json)

    markers = {}
    for entry in sync_status:
        val = entry['val']
        if val['status'] == 'incremental-sync':
            pos = val['inc_marker']['position'].split('#')[-1] # get rid of shard id; e.g., 6#00000000002.132.3 -> 00000000002.132.3
        else:
            pos = ''
        markers[entry['key']] = pos

    return markers

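# The dict returned above maps shard id to an incremental-sync position,
# e.g. (illustrative values):
#
#   {0: '00000000002.132.3', 1: '', ...}
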
def data_source_log_status(source_zone):
    source_cluster = source_zone.cluster
    cmd = ['datalog', 'status'] + source_zone.zone_args()
    datalog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    datalog_status = json.loads(datalog_status_json.decode('utf-8'))

    markers = {i: s['marker'] for i, s in enumerate(datalog_status)}
    log.debug('data markers for zone=%s markers=%s', source_zone.name, markers)
    return markers

def bucket_source_log_status(source_zone, bucket_name):
    cmd = ['bilog', 'status'] + source_zone.zone_args()
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    source_cluster = source_zone.cluster
    bilog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    bilog_status = json.loads(bilog_status_json.decode('utf-8'))

    m = {}
    markers = {}
    try:
        m = bilog_status['markers']
    except KeyError:
        pass

    for s in m:
        key = s['key']
        val = s['val']
        markers[key] = val

    log.debug('bilog markers for zone=%s bucket=%s markers=%s', source_zone.name, bucket_name, markers)
    return markers

def compare_data_status(target_zone, source_zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg) > 0:
        log.warning('data of zone %s behind zone %s: %s', target_zone.name, source_zone.name, msg)
        return False

    return True

def compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg) > 0:
        log.warning('bucket %s zone %s behind zone %s: %s', bucket_name, target_zone.name, source_zone.name, msg)
        return False

    return True

def zone_data_checkpoint(target_zone, source_zone):
    if target_zone == source_zone:
        return

    log_status = data_source_log_status(source_zone)
    log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)

    for _ in range(config.checkpoint_retries):
        num_shards, sync_status = data_sync_status(target_zone, source_zone)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_data_status(target_zone, source_zone, log_status, sync_status):
            log.info('finished data checkpoint for target_zone=%s source_zone=%s',
                     target_zone.name, source_zone.name)
            return
        time.sleep(config.checkpoint_delay)

    assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)

def zonegroup_data_checkpoint(zonegroup_conns):
    for source_conn in zonegroup_conns.rw_zones:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            log.debug('data checkpoint: source=%s target=%s', source_conn.zone.name, target_conn.zone.name)
            zone_data_checkpoint(target_conn.zone, source_conn.zone)

def zone_bucket_checkpoint(target_zone, source_zone, bucket_name):
    if target_zone == source_zone:
        return

    log_status = bucket_source_log_status(source_zone, bucket_name)
    log.info('starting bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)

    for _ in range(config.checkpoint_retries):
        sync_status = bucket_sync_status(target_zone, source_zone, bucket_name)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
            log.info('finished bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)
            return

        time.sleep(config.checkpoint_delay)

    assert False, 'failed bucket checkpoint for target_zone=%s source_zone=%s bucket=%s' % \
                  (target_zone.name, source_zone.name, bucket_name)

def zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name):
    for source_conn in zonegroup_conns.rw_zones:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            log.debug('bucket checkpoint: source=%s target=%s bucket=%s', source_conn.zone.name, target_conn.zone.name, bucket_name)
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket_name)
    for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
        if target_conn.zone.has_buckets():
            target_conn.check_bucket_eq(source_conn, bucket_name)

def set_master_zone(zone):
    zone.modify(zone.cluster, ['--master'])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    zonegroup.master_zone = zone
    log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay)
    time.sleep(config.reconfigure_delay)

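# The zone.modify()/period.update() pair above corresponds roughly to the
# radosgw-admin sequence used for a manual multisite failover:
#
#   radosgw-admin zone modify --rgw-zone=<zone> --master
#   radosgw-admin period update --commit
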
def set_sync_from_all(zone, flag):
    s = 'true' if flag else 'false'
    zone.modify(zone.cluster, ['--sync-from-all={}'.format(s)])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    log.info('Set sync_from_all flag on zone %s to %s', zone.name, s)
    time.sleep(config.reconfigure_delay)

def set_redirect_zone(zone, redirect_zone):
    id_str = redirect_zone.id if redirect_zone else ''
    zone.modify(zone.cluster, ['--redirect-zone={}'.format(id_str)])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    log.info('Set redirect_zone zone %s to "%s"', zone.name, id_str)
    time.sleep(config.reconfigure_delay)

def enable_bucket_sync(zone, bucket_name):
    cmd = ['bucket', 'sync', 'enable', '--bucket', bucket_name] + zone.zone_args()
    zone.cluster.admin(cmd)

def disable_bucket_sync(zone, bucket_name):
    cmd = ['bucket', 'sync', 'disable', '--bucket', bucket_name] + zone.zone_args()
    zone.cluster.admin(cmd)

def check_buckets_sync_status_obj_not_exist(zone, buckets):
    for _ in range(config.checkpoint_retries):
        cmd = ['log', 'list'] + zone.zone_args()
        log_list, ret = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        log_list = log_list.decode('utf-8')
        for bucket in buckets:
            if log_list.find(':'+bucket+":") >= 0:
                break
        else:
            return
        time.sleep(config.checkpoint_delay)
    assert False

def gen_bucket_name():
    global num_buckets

    num_buckets += 1
    return run_prefix + '-' + str(num_buckets)

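# e.g. with run_prefix == 'abcdef' (six random lowercase letters), successive
# calls yield 'abcdef-1', 'abcdef-2', ...
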
class ZonegroupConns:
    def __init__(self, zonegroup):
        self.zonegroup = zonegroup
        self.zones = []
        self.ro_zones = []
        self.rw_zones = []
        self.master_zone = None
        for z in zonegroup.zones:
            zone_conn = z.get_conn(user.credentials)
            self.zones.append(zone_conn)
            if z.is_read_only():
                self.ro_zones.append(zone_conn)
            else:
                self.rw_zones.append(zone_conn)

            if z == zonegroup.master_zone:
                self.master_zone = zone_conn

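# Typical test setup, as used throughout the tests below:
#
#   zonegroup = realm.master_zonegroup()
#   zonegroup_conns = ZonegroupConns(zonegroup)
#   buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
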
def check_all_buckets_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            log.critical('zone %s does not contain bucket %s', zone_conn.name, b)
            return False

    return True

def check_all_buckets_dont_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            continue

        log.critical('zone %s contains bucket %s', zone_conn.name, b)
        return False

    return True

def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1):
    buckets = []
    zone_bucket = []
    for zone in zonegroup_conns.rw_zones:
        for i in range(buckets_per_zone):
            bucket_name = gen_bucket_name()
            log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
            bucket = zone.create_bucket(bucket_name)
            buckets.append(bucket_name)
            zone_bucket.append((zone, bucket))

    return buckets, zone_bucket

def create_bucket_per_zone_in_realm():
    buckets = []
    zone_bucket = []
    for zonegroup in realm.current_period.zonegroups:
        zg_conn = ZonegroupConns(zonegroup)
        b, z = create_bucket_per_zone(zg_conn)
        buckets.extend(b)
        zone_bucket.extend(z)
    return buckets, zone_bucket

def test_bucket_create():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_recreate():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    # recreate buckets on all zones, make sure they weren't removed
    for zone in zonegroup_conns.rw_zones:
        for bucket_name in buckets:
            bucket = zone.create_bucket(bucket_name)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    for zone, bucket_name in zone_bucket:
        zone.conn.delete_bucket(bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_dont_exist(zone, buckets)

def get_bucket(zone, bucket_name):
    return zone.conn.get_bucket(bucket_name)

def get_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.get_key(obj_name)

def new_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.new_key(obj_name)

def check_bucket_eq(zone_conn1, zone_conn2, bucket):
    if zone_conn2.zone.has_buckets():
        zone_conn2.check_bucket_eq(zone_conn1, bucket.name)

def test_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'myobj', '_myobj', ':', '&' ]
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket_name in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket_name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def test_object_delete():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objname = 'myobj'
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket, objname)
        k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    # check object exists
    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

    # check object removal
    for source_conn, bucket in zone_bucket:
        k = get_key(source_conn, bucket, objname)
        k.delete()
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def get_latest_object_version(key):
    for k in key.bucket.list_versions(key.name):
        if k.is_latest:
            return k
    return None

def test_versioned_object_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)

    zonegroup_meta_checkpoint(zonegroup)

    # upload a dummy object to each bucket and wait for sync. this forces each
    # bucket to finish a full sync and switch to incremental
    for source_conn, bucket in zone_bucket:
        new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)

    for _, bucket in zone_bucket:
        # create and delete multiple versions of an object from each zone
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket, obj)

            k.set_contents_from_string('version1')
            log.debug('version1 id=%s', k.version_id)
            # don't delete version1 - this tests that the initial version
            # doesn't get squashed into later versions

            # create and delete the following object versions to test that
            # the operations don't race with each other during sync
            k.set_contents_from_string('version2')
            log.debug('version2 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)

            k.set_contents_from_string('version3')
            log.debug('version3 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)

    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    for _, bucket in zone_bucket:
        # overwrite the acls to test that metadata-only entries are applied
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket.name, obj)
            v = get_latest_object_version(k)
            v.make_public()

    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_version_suspended_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone = zonegroup_conns.rw_zones[0]

    # create a non-versioned bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)

    # upload an initial object
    key1 = new_key(zone, bucket, 'obj')
    key1.set_contents_from_string('')
    log.debug('created initial version id=%s', key1.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # enable versioning
    bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)

    # re-upload the object as a new version
    key2 = new_key(zone, bucket, 'obj')
    key2.set_contents_from_string('')
    log.debug('created new version id=%s', key2.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # suspend versioning
    bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)

    # re-upload the object as a 'null' version
    key3 = new_key(zone, bucket, 'obj')
    key3.set_contents_from_string('')
    log.debug('created null version id=%s', key3.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_delete_marker_full_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)

    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')

        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()

    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_suspended_delete_marker_full_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable/suspend versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)

    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')

        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()

    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_bucket_versioning():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        res = bucket.get_versioning_status()
        key = 'Versioning'
        assert(key in res and res[key] == 'Enabled')

def test_bucket_acl():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        assert(len(bucket.get_acl().acl.grants) == 1) # single grant on owner
        bucket.set_acl('public-read')
        assert(len(bucket.get_acl().acl.grants) == 2) # new grant on AllUsers

def test_bucket_cors():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        cors_cfg = CORSConfiguration()
        cors_cfg.add_rule(['DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000)
        bucket.set_cors(cors_cfg)
        assert(bucket.get_cors().to_xml() == cors_cfg.to_xml())

def test_bucket_delete_notempty():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn, bucket_name in zone_bucket:
        # upload an object to each bucket on its own zone
        conn = zone_conn.get_connection()
        bucket = conn.get_bucket(bucket_name)
        k = bucket.new_key('foo')
        k.set_contents_from_string('bar')
        # attempt to delete the bucket before this object can sync
        try:
            conn.delete_bucket(bucket_name)
        except boto.exception.S3ResponseError as e:
            assert(e.error_code == 'BucketNotEmpty')
            continue
        assert False # expected 409 BucketNotEmpty

    # assert that each bucket still exists on the master
    c1 = zonegroup_conns.master_zone.conn
    for _, bucket_name in zone_bucket:
        assert c1.get_bucket(bucket_name)

def test_multi_period_incremental_sync():
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.zones) < 3:
        raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.")

    # periods to include in mdlog comparison
    mdlog_periods = [realm.current_period.id]

    # create a bucket in each zone
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    zonegroup_meta_checkpoint(zonegroup)

    z1, z2, z3 = zonegroup.zones[0:3]
    assert(z1 == zonegroup.master_zone)

    # kill zone 3 gateways to freeze sync status to incremental in first period
    z3.stop()

    # change master to zone 2 -> period 2
    set_master_zone(z2)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, _ in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        bucket = zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # wait for zone 1 to sync
    zone_meta_checkpoint(z1)

    # change master back to zone 1 -> period 3
    set_master_zone(z1)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, bucket_name in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # restart zone 3 gateway and wait for sync
    z3.start()
    zonegroup_meta_checkpoint(zonegroup)

    # verify that we end up with the same objects
    for bucket_name in buckets:
        for source_conn, _ in zone_bucket:
            for target_conn in zonegroup_conns.zones:
                if source_conn.zone == target_conn.zone:
                    continue

                if target_conn.zone.has_buckets():
                    target_conn.check_bucket_eq(source_conn, bucket_name)

    # verify that mdlogs are not empty and match for each period
    for period in mdlog_periods:
        master_mdlog = mdlog_list(z1, period)
        assert len(master_mdlog) > 0
        for zone in zonegroup.zones:
            if zone == z1:
                continue
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == len(master_mdlog)

    # autotrim mdlogs for master zone
    mdlog_autotrim(z1)

    # autotrim mdlogs for peers
    for zone in zonegroup.zones:
        if zone == z1:
            continue
        mdlog_autotrim(zone)

    # verify that mdlogs are empty for each period
    for period in mdlog_periods:
        for zone in zonegroup.zones:
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == 0

def test_datalog_autotrim():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # upload an object to each zone to generate a datalog entry
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket.name, 'key')
        k.set_contents_from_string('body')

    # wait for data sync to catch up
    zonegroup_data_checkpoint(zonegroup_conns)

    # trim each datalog
    for zone, _ in zone_bucket:
        datalog_autotrim(zone.zone)
        datalog = datalog_list(zone.zone)
        assert len(datalog) == 0

def test_multi_zone_redirect():
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.rw_zones) < 2:
        raise SkipTest("test_multi_zone_redirect skipped. Requires 2 or more read-write zones in master zonegroup.")

    zonegroup_conns = ZonegroupConns(zonegroup)
    (zc1, zc2) = zonegroup_conns.rw_zones[0:2]

    z1, z2 = (zc1.zone, zc2.zone)

    set_sync_from_all(z2, False)

    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', z1.name, bucket_name)
    bucket = zc1.conn.create_bucket(bucket_name)
    obj = 'testredirect'

    key = bucket.new_key(obj)
    data = 'A'*512
    key.set_contents_from_string(data)

    zonegroup_meta_checkpoint(zonegroup)

    # try to read object from second zone (should fail)
    bucket2 = get_bucket(zc2, bucket_name)
    assert_raises(boto.exception.S3ResponseError, bucket2.get_key, obj)

    set_redirect_zone(z2, z1)

    key2 = bucket2.get_key(obj)

    eq(data, key2.get_contents_as_string())

    key = bucket.new_key(obj)

    for x in ['a', 'b', 'c', 'd']:
        data = x*512
        key.set_contents_from_string(data)
        eq(data, key2.get_contents_as_string())

    # revert config changes
    set_sync_from_all(z2, True)
    set_redirect_zone(z2, None)

def test_zonegroup_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.zones) < 2:
        raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")

    zonegroup_meta_checkpoint(zonegroup)
    z1, z2 = zonegroup.zones[0:2]
    c1, c2 = (z1.cluster, z2.cluster)

    # get admin credentials out of existing zone
    system_key = z1.data['system_key']
    admin_creds = Credentials(system_key['access_key'], system_key['secret_key'])

    # create a new zone in zonegroup on c2 and commit
    zone = Zone('remove', zonegroup, c2)
    zone.create(c2, admin_creds.credential_args())
    zonegroup.zones.append(zone)
    zonegroup.period.update(zone, commit=True)

    zonegroup.remove(c1, zone)

    # another 'zonegroup remove' should fail with ENOENT
    _, retcode = zonegroup.remove(c1, zone, check_retcode=False)
    assert(retcode == 2) # ENOENT

    # delete the new zone
    zone.delete(c2)

    # validate the resulting period
    zonegroup.period.update(z1, commit=True)

def test_zg_master_zone_delete():
    master_zg = realm.master_zonegroup()
    master_zone = master_zg.master_zone

    assert(len(master_zg.zones) >= 1)
    master_cluster = master_zg.zones[0].cluster

    rm_zg = ZoneGroup('remove_zg')
    rm_zg.create(master_cluster)

    rm_zone = Zone('remove', rm_zg, master_cluster)
    rm_zone.create(master_cluster)
    master_zg.period.update(master_zone, commit=True)

    rm_zone.delete(master_cluster)
    # period update: this should now fail, as the deleted zone is still the
    # master zone of that zonegroup
    _, retcode = master_zg.period.update(master_zone, check_retcode=False)
    assert(retcode == errno.EINVAL)

    # proceed to delete the zonegroup as well; the previous period no longer
    # contains a dangling master_zone, so this must succeed
    rm_zg.delete(master_cluster)
    master_zg.period.update(master_zone, commit=True)

def test_set_bucket_website():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        website_cfg = WebsiteConfiguration(suffix='index.html', error_key='error.html')
        try:
            bucket.set_website_configuration(website_cfg)
        except boto.exception.S3ResponseError as e:
            if e.error_code == 'MethodNotAllowed':
                raise SkipTest("test_set_bucket_website skipped. Requires rgw_enable_static_website = 1.")
        assert(bucket.get_website_configuration_with_xml()[1] == website_cfg.to_xml())

def test_set_bucket_policy():
    policy = '''{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": "*"
  }]
}'''
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.set_policy(policy)
        assert(bucket.get_policy() == policy)

def test_bucket_sync_disable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)

    for zone in zonegroup.zones:
        check_buckets_sync_status_obj_not_exist(zone, buckets)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_bucket_sync_enable_right_after_disable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = ['obj1', 'obj2', 'obj3', 'obj4']
    content = 'asdasd'

    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)

    objnames_2 = ['obj5', 'obj6', 'obj7', 'obj8']

    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_bucket_sync_disable_enable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'obj1', 'obj2', 'obj3', 'obj4' ]
    content = 'asdasd'

    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    objnames_2 = [ 'obj5', 'obj6', 'obj7', 'obj8' ]

    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_multipart_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    _, bucket = zone_bucket[0]

    # initiate a multipart upload
    upload = bucket.initiate_multipart_upload('MULTIPART')
    mp = boto.s3.multipart.MultiPartUpload(bucket)
    mp.key_name = upload.key_name
    mp.id = upload.id
    part_size = 5 * 1024 * 1024 # 5M min part size
    mp.upload_part_from_file(StringIO('a' * part_size), 1)
    mp.upload_part_from_file(StringIO('b' * part_size), 2)
    mp.upload_part_from_file(StringIO('c' * part_size), 3)
    mp.upload_part_from_file(StringIO('d' * part_size), 4)
    mp.complete_upload()

    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_encrypted_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    if len(zonegroup.rw_zones) < 2:
        raise SkipTest("test_encrypted_object_sync skipped. Requires 2 or more read-write zones in master zonegroup.")

    (zone1, zone2) = zonegroup_conns.rw_zones[0:2]

    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', zone1.name, bucket_name)
    bucket = zone1.conn.create_bucket(bucket_name)

    # upload an object with sse-c encryption
    sse_c_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    key = bucket.new_key('testobj-sse-c')
    data = 'A'*512
    key.set_contents_from_string(data, headers=sse_c_headers)

    # upload an object with sse-kms encryption
    sse_kms_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        # testkey-1 must be present in 'rgw crypt s3 kms encryption keys' (vstart.sh adds this)
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1',
    }
    key = bucket.new_key('testobj-sse-kms')
    key.set_contents_from_string(data, headers=sse_kms_headers)

    # wait for the bucket metadata and data to sync
    zonegroup_meta_checkpoint(zonegroup)
    zone_bucket_checkpoint(zone2.zone, zone1.zone, bucket_name)

    # read the encrypted objects from the second zone
    bucket2 = get_bucket(zone2, bucket_name)
    key = bucket2.get_key('testobj-sse-c', headers=sse_c_headers)
    eq(data, key.get_contents_as_string(headers=sse_c_headers))

    key = bucket2.get_key('testobj-sse-kms')
    eq(data, key.get_contents_as_string())

def test_bucket_index_log_trim():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone = zonegroup_conns.rw_zones[0]

    # create a test bucket, upload some objects, and wait for sync
    def make_test_bucket():
        name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone.name, name)
        bucket = zone.conn.create_bucket(name)
        for objname in ('a', 'b', 'c', 'd'):
            k = new_key(zone, name, objname)
            k.set_contents_from_string('foo')
        zonegroup_meta_checkpoint(zonegroup)
        zonegroup_bucket_checkpoint(zonegroup_conns, name)
        return bucket

    # create a 'cold' bucket
    cold_bucket = make_test_bucket()

    # trim with max-buckets=0 to clear counters for cold bucket. this should
    # prevent it from being considered 'active' by the next autotrim
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '0',
    ])

    # create an 'active' bucket
    active_bucket = make_test_bucket()

    # trim with max-buckets=1 min-cold-buckets=0 to trim active bucket only
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '1',
        '--rgw-sync-log-trim-min-cold-buckets', '0',
    ])

    # verify active bucket has empty bilog
    active_bilog = bilog_list(zone.zone, active_bucket.name)
    assert(len(active_bilog) == 0)

    # verify cold bucket has nonempty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) > 0)

    # trim with min-cold-buckets=999 to trim all buckets
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '999',
        '--rgw-sync-log-trim-min-cold-buckets', '999',
    ])

    # verify cold bucket has empty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) == 0)

def test_bucket_creation_time():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone_buckets = [zone.get_connection().get_all_buckets() for zone in zonegroup_conns.rw_zones]
    for z1, z2 in combinations(zone_buckets, 2):
        for a, b in zip(z1, z2):
            eq(a.name, b.name)
            eq(a.creation_date, b.creation_date)