# Multisite test helpers for RADOS-backed RGW zones.
# NOTE(review): recovered from a mangled HTML extraction; the two competing
# itertools imports clearly belonged to a Python 2/3 fallback, restored below.
import logging

from boto.s3.deletemarker import DeleteMarker

try:
    # Python 2 spelling; alias it to the Python 3 name used in this module.
    from itertools import izip_longest as zip_longest
except ImportError:
    from itertools import zip_longest

from nose.tools import eq_ as eq

from .multisite import *

# Module-level logger named after this module (best practice for libraries).
log = logging.getLogger(__name__)
def check_object_eq(k1, k2, check_extra = True):
    """Assert that two listing/HEAD entries describe the same S3 object.

    k1, k2 -- boto Key or DeleteMarker entries taken from matching positions
    of two zones' version listings.  Raises AssertionError (via nose ``eq``)
    on the first mismatching attribute.

    check_extra -- when True, also compare the owner fields; callers comparing
    HEAD results pass False (presumably a HEAD response does not carry owner
    info -- TODO confirm against boto).
    """
    log.debug('comparing key name=%s', k1.name)
    eq(k1.name, k2.name)
    eq(k1.version_id, k2.version_id)
    eq(k1.is_latest, k2.is_latest)
    eq(k1.last_modified, k2.last_modified)
    if isinstance(k1, DeleteMarker):
        # A delete marker has no contents or content-* metadata, so there is
        # nothing further to compare; just require both sides agree on type.
        assert isinstance(k2, DeleteMarker)
        return

    eq(k1.get_contents_as_string(), k2.get_contents_as_string())
    eq(k1.metadata, k2.metadata)
    eq(k1.cache_control, k2.cache_control)
    eq(k1.content_type, k2.content_type)
    eq(k1.content_encoding, k2.content_encoding)
    eq(k1.content_disposition, k2.content_disposition)
    eq(k1.content_language, k2.content_language)
    if check_extra:
        # Owner identity must match across zones when the caller asked for it.
        eq(k1.owner.id, k2.owner.id)
        eq(k1.owner.display_name, k2.owner.display_name)
    eq(k1.storage_class, k2.storage_class)
    eq(k1.encrypted, k2.encrypted)
class RadosZone(Zone):
    """A multisite Zone backed by RADOS (a regular RGW zone)."""

    def __init__(self, name, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
        super(RadosZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)

    # NOTE(review): the nested class header was lost in extraction; restored
    # from the surviving `super(RadosZone.Conn, self)` call.  Base class
    # ZoneConn comes from .multisite and is assumed to set up self.conn.
    class Conn(ZoneConn):
        """An S3 connection to a RadosZone, wrapping a boto connection."""

        def __init__(self, zone, credentials):
            super(RadosZone.Conn, self).__init__(zone, credentials)

        def get_bucket(self, name):
            return self.conn.get_bucket(name)

        def create_bucket(self, name):
            return self.conn.create_bucket(name)

        def delete_bucket(self, name):
            return self.conn.delete_bucket(name)

        def check_bucket_eq(self, zone_conn, bucket_name):
            """Assert that bucket_name has identical contents (all versions)
            in this zone and in zone_conn.  Raises AssertionError on the
            first divergence found."""
            log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
            b1 = self.get_bucket(bucket_name)
            b2 = zone_conn.get_bucket(bucket_name)

            b1_versions = b1.list_versions()
            log.debug('bucket1 objects:')
            for o in b1_versions:
                log.debug('o=%s', o.name)

            b2_versions = b2.list_versions()
            log.debug('bucket2 objects:')
            for o in b2_versions:
                log.debug('o=%s', o.name)

            for k1, k2 in zip_longest(b1_versions, b2_versions):
                # zip_longest pads the shorter listing with None: a None on
                # either side means that zone is missing an object version.
                if k1 is None:
                    log.critical('key=%s is missing from zone=%s', k2.name, self.name)
                    assert False
                if k2 is None:
                    log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name)
                    assert False

                check_object_eq(k1, k2)

                if isinstance(k1, DeleteMarker):
                    # verify that HEAD sees a delete marker
                    assert b1.get_key(k1.name) is None
                    assert b2.get_key(k2.name) is None
                else:
                    # now get the keys through a HEAD operation, verify that the available data is the same
                    k1_head = b1.get_key(k1.name, version_id=k1.version_id)
                    k2_head = b2.get_key(k2.name, version_id=k2.version_id)
                    check_object_eq(k1_head, k2_head, False)

                    # compare the olh to make sure they agree about the current version
                    k1_olh = b1.get_key(k1.name)
                    k2_olh = b2.get_key(k2.name)
                    # if there's a delete marker, HEAD will return None
                    if k1_olh or k2_olh:
                        check_object_eq(k1_olh, k2_olh, False)

            log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)

    def get_conn(self, credentials):
        """Return a Conn for this zone using the given credentials."""
        return self.Conn(self, credentials)