]> git.proxmox.com Git - ceph.git/blob - ceph/src/test/rgw/rgw_multi/zone_rados.py
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / test / rgw / rgw_multi / zone_rados.py
1 import logging
2 from boto.s3.deletemarker import DeleteMarker
3
4 try:
5 from itertools import izip_longest as zip_longest
6 except ImportError:
7 from itertools import zip_longest
8
9 from nose.tools import eq_ as eq
10
11 from .multisite import *
12
13 log = logging.getLogger(__name__)
14
def check_object_eq(k1, k2, check_extra = True):
    """Assert that the same key, as listed from two zones, is identical.

    Version/listing attributes are compared first; for a delete marker
    only the marker type is verified.  For a regular key the object
    contents and standard attributes are compared as well, plus owner,
    storage class, size and encryption details when ``check_extra`` is
    true (those are unavailable on keys fetched via HEAD).
    """
    assert k1
    assert k2
    log.debug('comparing key name=%s', k1.name)
    # attributes present on both regular keys and delete markers
    for attr in ('name', 'version_id', 'is_latest', 'last_modified'):
        eq(getattr(k1, attr), getattr(k2, attr))
    if isinstance(k1, DeleteMarker):
        # nothing else to compare on a delete marker
        assert isinstance(k2, DeleteMarker)
        return

    eq(k1.get_contents_as_string(), k2.get_contents_as_string())
    # standard object metadata attributes
    for attr in ('metadata', 'cache_control', 'content_type',
                 'content_encoding', 'content_disposition',
                 'content_language', 'etag'):
        eq(getattr(k1, attr), getattr(k2, attr))
    if check_extra:
        eq(k1.owner.id, k2.owner.id)
        eq(k1.owner.display_name, k2.owner.display_name)
        for attr in ('storage_class', 'size', 'encrypted'):
            eq(getattr(k1, attr), getattr(k2, attr))
41
class RadosZone(Zone):
    """A multisite zone backed by the default 'rados' tier type."""

    def __init__(self, name, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
        super(RadosZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)

    def tier_type(self):
        """Return the tier type string for a rados-backed zone."""
        return "rados"


    class Conn(ZoneConn):
        """S3 connection to a rados zone, with bucket comparison helpers."""

        def __init__(self, zone, credentials):
            super(RadosZone.Conn, self).__init__(zone, credentials)

        def get_bucket(self, name):
            """Look up an existing bucket by name."""
            return self.conn.get_bucket(name)

        def create_bucket(self, name):
            """Create and return a bucket named ``name``."""
            return self.conn.create_bucket(name)

        def delete_bucket(self, name):
            """Delete the bucket named ``name``."""
            return self.conn.delete_bucket(name)

        def check_bucket_eq(self, zone_conn, bucket_name):
            """Assert that ``bucket_name`` contains identical object
            versions in this zone and in ``zone_conn``'s zone.

            Every listed version is compared, delete markers are checked
            to be invisible to HEAD, and for versioned keys the object
            logical head (olh) of both zones must agree.  Returns True
            on success; an assertion fires on any mismatch.
            """
            log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
            bucket_a = self.get_bucket(bucket_name)
            bucket_b = zone_conn.get_bucket(bucket_name)

            versions_a = bucket_a.list_versions()
            log.debug('bucket1 objects:')
            for obj in versions_a:
                log.debug('o=%s', obj.name)

            versions_b = bucket_b.list_versions()
            log.debug('bucket2 objects:')
            for obj in versions_b:
                log.debug('o=%s', obj.name)

            # zip_longest pads the shorter listing with None, so a
            # missing key on either side is detected explicitly
            for key_a, key_b in zip_longest(versions_a, versions_b):
                if key_a is None:
                    log.critical('key=%s is missing from zone=%s', key_b.name, self.name)
                    assert False
                if key_b is None:
                    log.critical('key=%s is missing from zone=%s', key_a.name, zone_conn.name)
                    assert False

                check_object_eq(key_a, key_b)

                if isinstance(key_a, DeleteMarker):
                    # verify that HEAD sees a delete marker
                    assert bucket_a.get_key(key_a.name) is None
                    assert bucket_b.get_key(key_b.name) is None
                else:
                    # now get the keys through a HEAD operation, verify that the available data is the same
                    head_a = bucket_a.get_key(key_a.name, version_id=key_a.version_id)
                    head_b = bucket_b.get_key(key_b.name, version_id=key_b.version_id)
                    check_object_eq(head_a, head_b, False)

                    if key_a.version_id:
                        # compare the olh to make sure they agree about the current version
                        olh_a = bucket_a.get_key(key_a.name)
                        olh_b = bucket_b.get_key(key_b.name)
                        # if there's a delete marker, HEAD will return None
                        if olh_a or olh_b:
                            check_object_eq(olh_a, olh_b, False)

            log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)

            return True

    def get_conn(self, credentials):
        """Build a Conn to this zone using the given credentials."""
        return self.Conn(self, credentials)
112