# ceph/src/test/rgw/rgw_multi/tests.py
import json
import random
import string
import sys
import time
import logging
import errno
import dateutil.parser

from itertools import combinations
from io import StringIO

import boto
import boto.s3.connection
from boto.s3.website import WebsiteConfiguration
from boto.s3.cors import CORSConfiguration

from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest

from .multisite import Zone, ZoneGroup, Credentials

from .conn import get_gateway_connection
from .tools import assert_raises

class Config:
    """ test configuration """
    def __init__(self, **kwargs):
        # by default, wait up to 5 minutes before giving up on a sync checkpoint
        self.checkpoint_retries = kwargs.get('checkpoint_retries', 60)
        self.checkpoint_delay = kwargs.get('checkpoint_delay', 5)
        # allow some time for realm reconfiguration after changing master zone
        self.reconfigure_delay = kwargs.get('reconfigure_delay', 5)
        self.tenant = kwargs.get('tenant', '')

# rgw multisite tests, written against the interfaces provided in rgw_multi.
# these tests must be initialized and run by another module that provides
# implementations of these interfaces by calling init_multi()
realm = None
user = None
config = None
def init_multi(_realm, _user, _config=None):
    global realm
    realm = _realm
    global user
    user = _user
    global config
    config = _config or Config()
    realm_meta_checkpoint(realm)

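# A minimal sketch of how a driver module might wire this up (the variable
# names are illustrative, not part of this module): after building a realm and
# a system user with the rgw_multi.multisite helpers, it would call, e.g.,
#
#   init_multi(realm, user, Config(checkpoint_retries=60, checkpoint_delay=5))
#
# which also runs an initial realm_meta_checkpoint() so the tests start from a
# synced metadata state.
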
def get_user():
    return user.id if user is not None else ''

def get_tenant():
    return config.tenant if config is not None and config.tenant is not None else ''

def get_realm():
    return realm

log = logging.getLogger('rgw_multi.tests')

num_buckets = 0
run_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))

def get_zone_connection(zone, credentials):
    """ connect to the zone's first gateway """
    if isinstance(credentials, list):
        credentials = credentials[0]
    return get_gateway_connection(zone.gateways[0], credentials)

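# Illustrative usage (not called in this module directly): connect to the
# first zone of a zonegroup with the test user's credentials; if a list of
# credentials is passed, only the first entry is used.
#
#   conn = get_zone_connection(zonegroup.zones[0], user.credentials)
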
def mdlog_list(zone, period = None):
    cmd = ['mdlog', 'list']
    if period:
        cmd += ['--period', period]
    (mdlog_json, _) = zone.cluster.admin(cmd, read_only=True)
    return json.loads(mdlog_json)

def mdlog_autotrim(zone):
    zone.cluster.admin(['mdlog', 'autotrim'])

def datalog_list(zone, args = None):
    cmd = ['datalog', 'list'] + (args or [])
    (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
    return json.loads(datalog_json)

def datalog_status(zone):
    cmd = ['datalog', 'status']
    (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
    return json.loads(datalog_json)

def datalog_autotrim(zone):
    zone.cluster.admin(['datalog', 'autotrim'])

def bilog_list(zone, bucket, args = None):
    cmd = ['bilog', 'list', '--bucket', bucket] + (args or [])
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    bilog, _ = zone.cluster.admin(cmd, read_only=True)
    return json.loads(bilog)

def bilog_autotrim(zone, args = None):
    zone.cluster.admin(['bilog', 'autotrim'] + (args or []))

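# The helpers above are thin wrappers over zone.cluster.admin(), which is
# expected to run radosgw-admin against that zone's cluster and return its
# output (this is an assumption about the Cluster.admin() interface provided
# by rgw_multi, not something defined in this file). For example,
# mdlog_list(zone, period) corresponds roughly to:
#
#   radosgw-admin mdlog list --period <period-id>
#
# with the JSON output parsed via json.loads().
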
def parse_meta_sync_status(meta_sync_status_json):
    log.debug('current meta sync status=%s', meta_sync_status_json)
    sync_status = json.loads(meta_sync_status_json)

    sync_info = sync_status['sync_status']['info']
    global_sync_status = sync_info['status']
    num_shards = sync_info['num_shards']
    period = sync_info['period']
    realm_epoch = sync_info['realm_epoch']

    sync_markers=sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers={}
    for i in range(num_shards):
        # get marker, only if it's an incremental marker for the same realm epoch
        if realm_epoch > sync_markers[i]['val']['realm_epoch'] or sync_markers[i]['val']['state'] == 0:
            markers[i] = ''
        else:
            markers[i] = sync_markers[i]['val']['marker']

    return period, realm_epoch, num_shards, markers

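# For reference, parse_meta_sync_status() only relies on a subset of the
# 'metadata sync status' output; the shape it expects is roughly (abbreviated,
# field names taken from the code above):
#
#   {
#     "sync_status": {
#       "info": { "status": "...", "num_shards": N, "period": "...", "realm_epoch": E },
#       "markers": [ { "key": 0, "val": { "state": 1, "marker": "...", "realm_epoch": E } }, ... ]
#     }
#   }
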
def meta_sync_status(zone):
    for _ in range(config.checkpoint_retries):
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_meta_sync_status(meta_sync_status_json)
        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read metadata sync status for zone=%s' % zone.name

def meta_master_log_status(master_zone):
    cmd = ['mdlog', 'status'] + master_zone.zone_args()
    mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True)
    mdlog_status = json.loads(mdlog_status_json)

    markers = {i: s['marker'] for i, s in enumerate(mdlog_status)}
    log.debug('master meta markers=%s', markers)
    return markers

def compare_meta_status(zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d, len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg) > 0:
        log.warning('zone %s behind master: %s', zone.name, msg)
        return False

    return True

def zone_meta_checkpoint(zone, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zone.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    current_realm_epoch = realm.current_period.data['realm_epoch']

    log.info('starting meta checkpoint for zone=%s', zone.name)

    for _ in range(config.checkpoint_retries):
        period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
        if realm_epoch < current_realm_epoch:
            log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
                        zone.name, realm_epoch, current_realm_epoch)
        else:
            log.debug('log_status=%s', master_status)
            log.debug('sync_status=%s', sync_status)
            if compare_meta_status(zone, master_status, sync_status):
                log.info('finish meta checkpoint for zone=%s', zone.name)
                return

        time.sleep(config.checkpoint_delay)
    assert False, 'failed meta checkpoint for zone=%s' % zone.name

def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None):
    if not meta_master_zone:
        meta_master_zone = zonegroup.realm().meta_master_zone()
    if not master_status:
        master_status = meta_master_log_status(meta_master_zone)

    for zone in zonegroup.zones:
        if zone == meta_master_zone:
            continue
        zone_meta_checkpoint(zone, meta_master_zone, master_status)

def realm_meta_checkpoint(realm):
    log.info('meta checkpoint')

    meta_master_zone = realm.meta_master_zone()
    master_status = meta_master_log_status(meta_master_zone)

    for zonegroup in realm.current_period.zonegroups:
        zonegroup_meta_checkpoint(zonegroup, meta_master_zone, master_status)

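# Metadata checkpoints are layered: realm_meta_checkpoint() fans out to
# zonegroup_meta_checkpoint() for every zonegroup in the current period, which
# in turn calls zone_meta_checkpoint() for every non-master zone. A test that
# only touched metadata could therefore wait for sync with just:
#
#   realm_meta_checkpoint(realm)
#
# while most tests below call zonegroup_meta_checkpoint(zonegroup) on the
# master zonegroup they operate on.
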
def parse_data_sync_status(data_sync_status_json):
    log.debug('current data sync status=%s', data_sync_status_json)
    sync_status = json.loads(data_sync_status_json)

    global_sync_status=sync_status['sync_status']['info']['status']
    num_shards=sync_status['sync_status']['info']['num_shards']

    sync_markers=sync_status['sync_status']['markers']
    log.debug('sync_markers=%s', sync_markers)
    assert(num_shards == len(sync_markers))

    markers={}
    for i in range(num_shards):
        markers[i] = sync_markers[i]['val']['marker']

    return (num_shards, markers)

def data_sync_status(target_zone, source_zone):
    if target_zone == source_zone:
        return None

    for _ in range(config.checkpoint_retries):
        cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
        cmd += ['--source-zone', source_zone.name]
        data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_data_sync_status(data_sync_status_json)

        assert(retcode == 2) # ENOENT
        time.sleep(config.checkpoint_delay)

    assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)

def bucket_sync_status(target_zone, source_zone, bucket_name):
    if target_zone == source_zone:
        return None

    cmd = ['bucket', 'sync', 'markers'] + target_zone.zone_args()
    cmd += ['--source-zone', source_zone.name]
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    while True:
        bucket_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            break

        assert(retcode == 2) # ENOENT

    sync_status = json.loads(bucket_sync_status_json)

    markers={}
    for entry in sync_status:
        val = entry['val']
        pos = val['inc_marker']['position'].split('#')[-1] # get rid of shard id; e.g., 6#00000000002.132.3 -> 00000000002.132.3
        markers[entry['key']] = pos

    return markers

def data_source_log_status(source_zone):
    source_cluster = source_zone.cluster
    cmd = ['datalog', 'status'] + source_zone.zone_args()
    datalog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    datalog_status = json.loads(datalog_status_json)

    markers = {i: s['marker'] for i, s in enumerate(datalog_status)}
    log.debug('data markers for zone=%s markers=%s', source_zone.name, markers)
    return markers

def bucket_source_log_status(source_zone, bucket_name):
    cmd = ['bilog', 'status'] + source_zone.zone_args()
    cmd += ['--bucket', bucket_name]
    cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
    source_cluster = source_zone.cluster
    bilog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
    bilog_status = json.loads(bilog_status_json)

    m={}
    markers={}
    try:
        m = bilog_status['markers']
    except:
        pass

    for s in m:
        key = s['key']
        val = s['val']
        markers[key] = val

    log.debug('bilog markers for zone=%s bucket=%s markers=%s', source_zone.name, bucket_name, markers)
    return markers

def compare_data_status(target_zone, source_zone, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg) > 0:
        log.warning('data of zone %s behind zone %s: %s', target_zone.name, source_zone.name, msg)
        return False

    return True

def compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
    if len(log_status) != len(sync_status):
        log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
        return False

    msg = ''
    for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
        if l > s:
            if len(msg):
                msg += ', '
            msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s

    if len(msg) > 0:
        log.warning('bucket %s zone %s behind zone %s: %s', bucket_name, target_zone.name, source_zone.name, msg)
        return False

    return True

def zone_data_checkpoint(target_zone, source_zone):
    if not target_zone.syncs_from(source_zone.name):
        return

    log_status = data_source_log_status(source_zone)
    log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)

    for _ in range(config.checkpoint_retries):
        num_shards, sync_status = data_sync_status(target_zone, source_zone)

        log.debug('log_status=%s', log_status)
        log.debug('sync_status=%s', sync_status)

        if compare_data_status(target_zone, source_zone, log_status, sync_status):
            log.info('finished data checkpoint for target_zone=%s source_zone=%s',
                     target_zone.name, source_zone.name)
            return
        time.sleep(config.checkpoint_delay)

    assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)

def zonegroup_data_checkpoint(zonegroup_conns):
    for source_conn in zonegroup_conns.rw_zones:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            log.debug('data checkpoint: source=%s target=%s', source_conn.zone.name, target_conn.zone.name)
            zone_data_checkpoint(target_conn.zone, source_conn.zone)

def zone_bucket_checkpoint(target_zone, source_zone, bucket_name):
    if not target_zone.syncs_from(source_zone.name):
        return

    cmd = ['bucket', 'sync', 'checkpoint']
    cmd += ['--bucket', bucket_name, '--source-zone', source_zone.name]
    retry_delay_ms = config.checkpoint_delay * 1000
    timeout_sec = config.checkpoint_retries * config.checkpoint_delay
    cmd += ['--retry-delay-ms', str(retry_delay_ms), '--timeout-sec', str(timeout_sec)]
    cmd += target_zone.zone_args()
    target_zone.cluster.admin(cmd, debug_rgw=1)

def zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name):
    for source_conn in zonegroup_conns.rw_zones:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            log.debug('bucket checkpoint: source=%s target=%s bucket=%s', source_conn.zone.name, target_conn.zone.name, bucket_name)
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket_name)
    for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
        if target_conn.zone.has_buckets():
            target_conn.check_bucket_eq(source_conn, bucket_name)

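# Typical usage in the tests below: write objects, wait for metadata sync,
# then wait for every bucket (and optionally the datalogs) to converge, e.g.:
#
#   zonegroup_meta_checkpoint(zonegroup)
#   zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
#   zonegroup_data_checkpoint(zonegroup_conns)
#
# zonegroup_bucket_checkpoint() also cross-checks bucket contents between each
# pair of zones via check_bucket_eq().
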
def set_master_zone(zone):
    zone.modify(zone.cluster, ['--master'])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    zonegroup.master_zone = zone
    log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay)
    time.sleep(config.reconfigure_delay)

def set_sync_from_all(zone, flag):
    s = 'true' if flag else 'false'
    zone.modify(zone.cluster, ['--sync-from-all={}'.format(s)])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    log.info('Set sync_from_all flag on zone %s to %s', zone.name, s)
    time.sleep(config.reconfigure_delay)

def set_redirect_zone(zone, redirect_zone):
    id_str = redirect_zone.id if redirect_zone else ''
    zone.modify(zone.cluster, ['--redirect-zone={}'.format(id_str)])
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    log.info('Set redirect_zone zone %s to "%s"', zone.name, id_str)
    time.sleep(config.reconfigure_delay)

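# Each of the three helpers above changes zone configuration and then commits
# a new period (zonegroup.period.update(zone, commit=True)) so the change is
# propagated to the other zones; the reconfigure_delay sleep gives the
# gateways time to pick up the new period. A hedged failover sketch using
# hypothetical zones z1/z2 (mirroring test_multi_period_incremental_sync):
#
#   set_master_zone(z2)   # z2 becomes metadata master, starting a new period
#   ...                   # run operations against the new master
#   set_master_zone(z1)   # fail back to z1
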
def enable_bucket_sync(zone, bucket_name):
    cmd = ['bucket', 'sync', 'enable', '--bucket', bucket_name] + zone.zone_args()
    zone.cluster.admin(cmd)

def disable_bucket_sync(zone, bucket_name):
    cmd = ['bucket', 'sync', 'disable', '--bucket', bucket_name] + zone.zone_args()
    zone.cluster.admin(cmd)

def check_buckets_sync_status_obj_not_exist(zone, buckets):
    for _ in range(config.checkpoint_retries):
        cmd = ['log', 'list'] + zone.zone_arg()
        log_list, ret = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        for bucket in buckets:
            if log_list.find(':'+bucket+":") >= 0:
                break
        else:
            return
        time.sleep(config.checkpoint_delay)
    assert False

def gen_bucket_name():
    global num_buckets

    num_buckets += 1
    return run_prefix + '-' + str(num_buckets)

class ZonegroupConns:
    def __init__(self, zonegroup):
        self.zonegroup = zonegroup
        self.zones = []
        self.ro_zones = []
        self.rw_zones = []
        self.master_zone = None
        for z in zonegroup.zones:
            zone_conn = z.get_conn(user.credentials)
            self.zones.append(zone_conn)
            if z.is_read_only():
                self.ro_zones.append(zone_conn)
            else:
                self.rw_zones.append(zone_conn)

            if z == zonegroup.master_zone:
                self.master_zone = zone_conn

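# Nearly every test below starts from the same two lines, which build per-zone
# S3 connections for the test user:
#
#   zonegroup = realm.master_zonegroup()
#   zonegroup_conns = ZonegroupConns(zonegroup)
#
# zonegroup_conns.zones holds one connection per zone; rw_zones/ro_zones split
# them by whether the zone is read-only, and master_zone points at the
# connection for the zonegroup's master zone.
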
def check_all_buckets_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            log.critical('zone %s does not contain bucket %s', zone_conn.name, b)
            return False

    return True

def check_all_buckets_dont_exist(zone_conn, buckets):
    if not zone_conn.zone.has_buckets():
        return True

    for b in buckets:
        try:
            zone_conn.get_bucket(b)
        except:
            continue

        log.critical('zone %s contains bucket %s', zone_conn.name, b)
        return False

    return True

def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1):
    buckets = []
    zone_bucket = []
    for zone in zonegroup_conns.rw_zones:
        for i in range(buckets_per_zone):
            bucket_name = gen_bucket_name()
            log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
            bucket = zone.create_bucket(bucket_name)
            buckets.append(bucket_name)
            zone_bucket.append((zone, bucket))

    return buckets, zone_bucket

def create_bucket_per_zone_in_realm():
    buckets = []
    zone_bucket = []
    for zonegroup in realm.current_period.zonegroups:
        zg_conn = ZonegroupConns(zonegroup)
        b, z = create_bucket_per_zone(zg_conn)
        buckets.extend(b)
        zone_bucket.extend(z)
    return buckets, zone_bucket

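# Note the two return values differ in type: 'buckets' is a list of bucket
# names, while 'zone_bucket' pairs each zone connection with the boto Bucket
# object it created, e.g.:
#
#   buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
#   for zone_conn, bucket in zone_bucket:
#       ...
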
def test_bucket_create():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_recreate():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, _ = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    # recreate buckets on all zones, make sure they weren't removed
    for zone in zonegroup_conns.rw_zones:
        for bucket_name in buckets:
            bucket = zone.create_bucket(bucket_name)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

def test_bucket_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_exist(zone, buckets)

    for zone, bucket_name in zone_bucket:
        zone.conn.delete_bucket(bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    for zone in zonegroup_conns.zones:
        assert check_all_buckets_dont_exist(zone, buckets)

def get_bucket(zone, bucket_name):
    return zone.conn.get_bucket(bucket_name)

def get_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.get_key(obj_name)

def new_key(zone, bucket_name, obj_name):
    b = get_bucket(zone, bucket_name)
    return b.new_key(obj_name)

def check_bucket_eq(zone_conn1, zone_conn2, bucket):
    if zone_conn2.zone.has_buckets():
        zone_conn2.check_bucket_eq(zone_conn1, bucket.name)

def test_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'myobj', '_myobj', ':', '&' ]
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket_name in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket_name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def test_object_delete():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objname = 'myobj'
    content = 'asdasd'

    # don't wait for meta sync just yet
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket, objname)
        k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    # check object exists
    for source_conn, bucket in zone_bucket:
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

    # check object removal
    for source_conn, bucket in zone_bucket:
        k = get_key(source_conn, bucket, objname)
        k.delete()
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue

            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
            check_bucket_eq(source_conn, target_conn, bucket)

def get_latest_object_version(key):
    for k in key.bucket.list_versions(key.name):
        if k.is_latest:
            return k
    return None

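# get_latest_object_version() walks the bucket's version listing and returns
# the entry flagged is_latest (or None). A usage sketch, mirroring the ACL
# overwrite in the versioned-object test below:
#
#   k = new_key(zone_conn, bucket.name, 'obj')
#   v = get_latest_object_version(k)
#   if v is not None:
#       v.make_public()
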
def test_versioned_object_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)

    zonegroup_meta_checkpoint(zonegroup)

    # upload a dummy object to each bucket and wait for sync. this forces each
    # bucket to finish a full sync and switch to incremental
    for source_conn, bucket in zone_bucket:
        new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
        for target_conn in zonegroup_conns.zones:
            if source_conn.zone == target_conn.zone:
                continue
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)

    for _, bucket in zone_bucket:
        # create and delete multiple versions of an object from each zone
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket, obj)

            k.set_contents_from_string('version1')
            log.debug('version1 id=%s', k.version_id)
            # don't delete version1 - this tests that the initial version
            # doesn't get squashed into later versions

            # create and delete the following object versions to test that
            # the operations don't race with each other during sync
            k.set_contents_from_string('version2')
            log.debug('version2 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)

            k.set_contents_from_string('version3')
            log.debug('version3 id=%s', k.version_id)
            k.bucket.delete_key(obj, version_id=k.version_id)

    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    for _, bucket in zone_bucket:
        # overwrite the acls to test that metadata-only entries are applied
        for zone_conn in zonegroup_conns.rw_zones:
            obj = 'obj-' + zone_conn.name
            k = new_key(zone_conn, bucket.name, obj)
            v = get_latest_object_version(k)
            v.make_public()

    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_concurrent_versioned_object_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    zone = zonegroup_conns.rw_zones[0]

    # create a versioned bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    bucket.configure_versioning(True)

    zonegroup_meta_checkpoint(zonegroup)

    # upload a dummy object and wait for sync. this forces each zone to finish
    # a full sync and switch to incremental
    new_key(zone, bucket, 'dummy').set_contents_from_string('')
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # create several concurrent versions on each zone and let them race to sync
    obj = 'obj'
    for i in range(10):
        for zone_conn in zonegroup_conns.rw_zones:
            k = new_key(zone_conn, bucket, obj)
            k.set_contents_from_string('version1')
            log.debug('zone=%s version=%s', zone_conn.zone.name, k.version_id)

    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
    zonegroup_data_checkpoint(zonegroup_conns)

def test_version_suspended_incremental_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone = zonegroup_conns.rw_zones[0]

    # create a non-versioned bucket
    bucket = zone.create_bucket(gen_bucket_name())
    log.debug('created bucket=%s', bucket.name)
    zonegroup_meta_checkpoint(zonegroup)

    # upload an initial object
    key1 = new_key(zone, bucket, 'obj')
    key1.set_contents_from_string('')
    log.debug('created initial version id=%s', key1.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # enable versioning
    bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)

    # re-upload the object as a new version
    key2 = new_key(zone, bucket, 'obj')
    key2.set_contents_from_string('')
    log.debug('created new version id=%s', key2.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

    # suspend versioning
    bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)

    # re-upload the object as a 'null' version
    key3 = new_key(zone, bucket, 'obj')
    key3.set_contents_from_string('')
    log.debug('created null version id=%s', key3.version_id)
    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_delete_marker_full_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
    zonegroup_meta_checkpoint(zonegroup)

    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')

        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()

    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_suspended_delete_marker_full_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # enable/suspend versioning
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        bucket.configure_versioning(False)
    zonegroup_meta_checkpoint(zonegroup)

    for zone, bucket in zone_bucket:
        # upload an initial object
        key1 = new_key(zone, bucket, 'obj')
        key1.set_contents_from_string('')

        # create a delete marker
        key2 = new_key(zone, bucket, 'obj')
        key2.delete()

    # wait for full sync
    for _, bucket in zone_bucket:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_bucket_versioning():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.configure_versioning(True)
        res = bucket.get_versioning_status()
        key = 'Versioning'
        assert(key in res and res[key] == 'Enabled')

def test_bucket_acl():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        assert(len(bucket.get_acl().acl.grants) == 1) # single grant on owner
        bucket.set_acl('public-read')
        assert(len(bucket.get_acl().acl.grants) == 2) # new grant on AllUsers

def test_bucket_cors():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        cors_cfg = CORSConfiguration()
        cors_cfg.add_rule(['DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000)
        bucket.set_cors(cors_cfg)
        assert(bucket.get_cors().to_xml() == cors_cfg.to_xml())

def test_bucket_delete_notempty():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for zone_conn, bucket_name in zone_bucket:
        # upload an object to each bucket on its own zone
        conn = zone_conn.get_connection()
        bucket = conn.get_bucket(bucket_name)
        k = bucket.new_key('foo')
        k.set_contents_from_string('bar')
        # attempt to delete the bucket before this object can sync
        try:
            conn.delete_bucket(bucket_name)
        except boto.exception.S3ResponseError as e:
            assert(e.error_code == 'BucketNotEmpty')
            continue
        assert False # expected 409 BucketNotEmpty

    # assert that each bucket still exists on the master
    c1 = zonegroup_conns.master_zone.conn
    for _, bucket_name in zone_bucket:
        assert c1.get_bucket(bucket_name)

def test_multi_period_incremental_sync():
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.zones) < 3:
        raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.")

    # periods to include in mdlog comparison
    mdlog_periods = [realm.current_period.id]

    # create a bucket in each zone
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    zonegroup_meta_checkpoint(zonegroup)

    z1, z2, z3 = zonegroup.zones[0:3]
    assert(z1 == zonegroup.master_zone)

    # kill zone 3 gateways to freeze sync status to incremental in first period
    z3.stop()

    # change master to zone 2 -> period 2
    set_master_zone(z2)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, _ in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        bucket = zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # wait for zone 1 to sync
    zone_meta_checkpoint(z1)

    # change master back to zone 1 -> period 3
    set_master_zone(z1)
    mdlog_periods += [realm.current_period.id]

    for zone_conn, bucket_name in zone_bucket:
        if zone_conn.zone == z3:
            continue
        bucket_name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
        zone_conn.conn.create_bucket(bucket_name)
        buckets.append(bucket_name)

    # restart zone 3 gateway and wait for sync
    z3.start()
    zonegroup_meta_checkpoint(zonegroup)

    # verify that we end up with the same objects
    for bucket_name in buckets:
        for source_conn, _ in zone_bucket:
            for target_conn in zonegroup_conns.zones:
                if source_conn.zone == target_conn.zone:
                    continue

                if target_conn.zone.has_buckets():
                    target_conn.check_bucket_eq(source_conn, bucket_name)

    # verify that mdlogs are not empty and match for each period
    for period in mdlog_periods:
        master_mdlog = mdlog_list(z1, period)
        assert len(master_mdlog) > 0
        for zone in zonegroup.zones:
            if zone == z1:
                continue
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == len(master_mdlog)

    # autotrim mdlogs for master zone
    mdlog_autotrim(z1)

    # autotrim mdlogs for peers
    for zone in zonegroup.zones:
        if zone == z1:
            continue
        mdlog_autotrim(zone)

    # verify that mdlogs are empty for each period
    for period in mdlog_periods:
        for zone in zonegroup.zones:
            mdlog = mdlog_list(zone, period)
            assert len(mdlog) == 0

def test_datalog_autotrim():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    # upload an object to each zone to generate a datalog entry
    for zone, bucket in zone_bucket:
        k = new_key(zone, bucket.name, 'key')
        k.set_contents_from_string('body')

    # wait for metadata and data sync to catch up
    zonegroup_meta_checkpoint(zonegroup)
    zonegroup_data_checkpoint(zonegroup_conns)

    # trim each datalog
    for zone, _ in zone_bucket:
        # read max markers for each shard
        status = datalog_status(zone.zone)

        datalog_autotrim(zone.zone)

        for shard_id, shard_status in enumerate(status):
            try:
                before_trim = dateutil.parser.isoparse(shard_status['last_update'])
            except: # empty timestamps look like "0.000000" and will fail here
                continue
            entries = datalog_list(zone.zone, ['--shard-id', str(shard_id), '--max-entries', '1'])
            if not len(entries):
                continue
            after_trim = dateutil.parser.isoparse(entries[0]['timestamp'])
            assert before_trim < after_trim, "any datalog entries must be newer than trim"

def test_multi_zone_redirect():
    zonegroup = realm.master_zonegroup()
    if len(zonegroup.rw_zones) < 2:
        raise SkipTest("test_multi_zone_redirect skipped. Requires 2 or more rw zones in master zonegroup.")

    zonegroup_conns = ZonegroupConns(zonegroup)
    (zc1, zc2) = zonegroup_conns.rw_zones[0:2]

    z1, z2 = (zc1.zone, zc2.zone)

    set_sync_from_all(z2, False)

    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', z1.name, bucket_name)
    bucket = zc1.conn.create_bucket(bucket_name)
    obj = 'testredirect'

    key = bucket.new_key(obj)
    data = 'A'*512
    key.set_contents_from_string(data)

    zonegroup_meta_checkpoint(zonegroup)

    # try to read object from second zone (should fail)
    bucket2 = get_bucket(zc2, bucket_name)
    assert_raises(boto.exception.S3ResponseError, bucket2.get_key, obj)

    set_redirect_zone(z2, z1)

    key2 = bucket2.get_key(obj)

    eq(data, key2.get_contents_as_string(encoding='ascii'))

    key = bucket.new_key(obj)

    for x in ['a', 'b', 'c', 'd']:
        data = x*512
        key.set_contents_from_string(data)
        eq(data, key2.get_contents_as_string(encoding='ascii'))

    # revert config changes
    set_sync_from_all(z2, True)
    set_redirect_zone(z2, None)

def test_zonegroup_remove():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    if len(zonegroup.zones) < 2:
        raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")

    zonegroup_meta_checkpoint(zonegroup)
    z1, z2 = zonegroup.zones[0:2]
    c1, c2 = (z1.cluster, z2.cluster)

    # get admin credentials out of existing zone
    system_key = z1.data['system_key']
    admin_creds = Credentials(system_key['access_key'], system_key['secret_key'])

    # create a new zone in zonegroup on c2 and commit
    zone = Zone('remove', zonegroup, c2)
    zone.create(c2, admin_creds.credential_args())
    zonegroup.zones.append(zone)
    zonegroup.period.update(zone, commit=True)

    zonegroup.remove(c1, zone)

    # another 'zonegroup remove' should fail with ENOENT
    _, retcode = zonegroup.remove(c1, zone, check_retcode=False)
    assert(retcode == 2) # ENOENT

    # delete the new zone
    zone.delete(c2)

    # validate the resulting period
    zonegroup.period.update(z1, commit=True)

def test_zg_master_zone_delete():
    master_zg = realm.master_zonegroup()
    master_zone = master_zg.master_zone

    assert(len(master_zg.zones) >= 1)
    master_cluster = master_zg.zones[0].cluster

    rm_zg = ZoneGroup('remove_zg')
    rm_zg.create(master_cluster)

    rm_zone = Zone('remove', rm_zg, master_cluster)
    rm_zone.create(master_cluster)
    master_zg.period.update(master_zone, commit=True)

    rm_zone.delete(master_cluster)
    # period update: this should now fail, as the deleted zone is still the
    # master zone of that zonegroup
    _, retcode = master_zg.period.update(master_zone, check_retcode=False)
    assert(retcode == errno.EINVAL)

    # proceed to delete the zonegroup as well; the previous period no longer
    # contains a dangling master_zone, so this update must succeed
    rm_zg.delete(master_cluster)
    master_zg.period.update(master_zone, commit=True)

def test_set_bucket_website():
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        website_cfg = WebsiteConfiguration(suffix='index.html',error_key='error.html')
        try:
            bucket.set_website_configuration(website_cfg)
        except boto.exception.S3ResponseError as e:
            if e.error_code == 'MethodNotAllowed':
                raise SkipTest("test_set_bucket_website skipped. Requires rgw_enable_static_website = 1.")
        assert(bucket.get_website_configuration_with_xml()[1] == website_cfg.to_xml())

def test_set_bucket_policy():
    policy = '''{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": "*"
}]
}'''
    buckets, zone_bucket = create_bucket_per_zone_in_realm()
    for _, bucket in zone_bucket:
        bucket.set_policy(policy)
        assert(bucket.get_policy().decode('ascii') == policy)

def test_bucket_sync_disable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
    zonegroup_meta_checkpoint(zonegroup)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)

    for zone in zonegroup.zones:
        check_buckets_sync_status_obj_not_exist(zone, buckets)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_bucket_sync_enable_right_after_disable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = ['obj1', 'obj2', 'obj3', 'obj4']
    content = 'asdasd'

    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)

    objnames_2 = ['obj5', 'obj6', 'obj7', 'obj8']

    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_bucket_sync_disable_enable():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    objnames = [ 'obj1', 'obj2', 'obj3', 'obj4' ]
    content = 'asdasd'

    for zone, bucket in zone_bucket:
        for objname in objnames:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    zonegroup_meta_checkpoint(zonegroup)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    for bucket_name in buckets:
        disable_bucket_sync(realm.meta_master_zone(), bucket_name)

    zonegroup_meta_checkpoint(zonegroup)

    objnames_2 = [ 'obj5', 'obj6', 'obj7', 'obj8' ]

    for zone, bucket in zone_bucket:
        for objname in objnames_2:
            k = new_key(zone, bucket.name, objname)
            k.set_contents_from_string(content)

    for bucket_name in buckets:
        enable_bucket_sync(realm.meta_master_zone(), bucket_name)

    for bucket_name in buckets:
        zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)

    zonegroup_data_checkpoint(zonegroup_conns)

def test_multipart_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)

    _, bucket = zone_bucket[0]

    # initiate a multipart upload
    upload = bucket.initiate_multipart_upload('MULTIPART')
    mp = boto.s3.multipart.MultiPartUpload(bucket)
    mp.key_name = upload.key_name
    mp.id = upload.id
    part_size = 5 * 1024 * 1024 # 5M min part size
    mp.upload_part_from_file(StringIO('a' * part_size), 1)
    mp.upload_part_from_file(StringIO('b' * part_size), 2)
    mp.upload_part_from_file(StringIO('c' * part_size), 3)
    mp.upload_part_from_file(StringIO('d' * part_size), 4)
    mp.complete_upload()

    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)

def test_encrypted_object_sync():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    if len(zonegroup.rw_zones) < 2:
        raise SkipTest("test_encrypted_object_sync skipped. Requires 2 or more rw zones in master zonegroup.")

    (zone1, zone2) = zonegroup_conns.rw_zones[0:2]

    # create a bucket on the first zone
    bucket_name = gen_bucket_name()
    log.info('create bucket zone=%s name=%s', zone1.name, bucket_name)
    bucket = zone1.conn.create_bucket(bucket_name)

    # upload an object with sse-c encryption
    sse_c_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    key = bucket.new_key('testobj-sse-c')
    data = 'A'*512
    key.set_contents_from_string(data, headers=sse_c_headers)

    # upload an object with sse-kms encryption
    sse_kms_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        # testkey-1 must be present in 'rgw crypt s3 kms encryption keys' (vstart.sh adds this)
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1',
    }
    key = bucket.new_key('testobj-sse-kms')
    key.set_contents_from_string(data, headers=sse_kms_headers)

    # wait for the bucket metadata and data to sync
    zonegroup_meta_checkpoint(zonegroup)
    zone_bucket_checkpoint(zone2.zone, zone1.zone, bucket_name)

    # read the encrypted objects from the second zone
    bucket2 = get_bucket(zone2, bucket_name)
    key = bucket2.get_key('testobj-sse-c', headers=sse_c_headers)
    eq(data, key.get_contents_as_string(headers=sse_c_headers, encoding='ascii'))

    key = bucket2.get_key('testobj-sse-kms')
    eq(data, key.get_contents_as_string(encoding='ascii'))

def test_bucket_index_log_trim():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zone = zonegroup_conns.rw_zones[0]

    # create a test bucket, upload some objects, and wait for sync
    def make_test_bucket():
        name = gen_bucket_name()
        log.info('create bucket zone=%s name=%s', zone.name, name)
        bucket = zone.conn.create_bucket(name)
        for objname in ('a', 'b', 'c', 'd'):
            k = new_key(zone, name, objname)
            k.set_contents_from_string('foo')
        zonegroup_meta_checkpoint(zonegroup)
        zonegroup_bucket_checkpoint(zonegroup_conns, name)
        return bucket

    # create a 'cold' bucket
    cold_bucket = make_test_bucket()

    # trim with max-buckets=0 to clear counters for cold bucket. this should
    # prevent it from being considered 'active' by the next autotrim
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '0',
    ])

    # create an 'active' bucket
    active_bucket = make_test_bucket()

    # trim with max-buckets=1 min-cold-buckets=0 to trim active bucket only
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '1',
        '--rgw-sync-log-trim-min-cold-buckets', '0',
    ])

    # verify active bucket has empty bilog
    active_bilog = bilog_list(zone.zone, active_bucket.name)
    assert(len(active_bilog) == 0)

    # verify cold bucket has nonempty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) > 0)

    # trim with min-cold-buckets=999 to trim all buckets
    bilog_autotrim(zone.zone, [
        '--rgw-sync-log-trim-max-buckets', '999',
        '--rgw-sync-log-trim-min-cold-buckets', '999',
    ])

    # verify cold bucket has empty bilog
    cold_bilog = bilog_list(zone.zone, cold_bucket.name)
    assert(len(cold_bilog) == 0)

def test_bucket_creation_time():
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zonegroup_meta_checkpoint(zonegroup)

    zone_buckets = [zone.get_connection().get_all_buckets() for zone in zonegroup_conns.rw_zones]
    for z1, z2 in combinations(zone_buckets, 2):
        for a, b in zip(z1, z2):
            eq(a.name, b.name)
            eq(a.creation_date, b.creation_date)