import logging

# Python 2/3 compatibility: itertools.izip_longest was renamed to
# zip_longest in Python 3.
try:
    from itertools import izip_longest as zip_longest
except ImportError:
    from itertools import zip_longest

from nose.tools import eq_ as eq

from .multisite import *

log = logging.getLogger(__name__)
def check_object_eq(k1, k2, check_extra = True):
    """Assert that two boto keys describe the same object.

    Compares name, contents and common metadata of k1 and k2; when
    check_extra is True also compares owner, storage class, size,
    version id and encryption state. Raises AssertionError (via
    nose's eq_) on the first mismatch.
    """
    assert k1
    assert k2
    log.debug('comparing key name=%s', k1.name)
    eq(k1.name, k2.name)
    eq(k1.get_contents_as_string(), k2.get_contents_as_string())
    # attributes available from both a listing and a HEAD response
    for attr in ('metadata', 'cache_control', 'content_type',
                 'content_encoding', 'content_disposition',
                 'content_language', 'etag', 'last_modified'):
        eq(getattr(k1, attr), getattr(k2, attr))
    if check_extra:
        # fields only populated by a full listing
        eq(k1.owner.id, k2.owner.id)
        eq(k1.owner.display_name, k2.owner.display_name)
        for attr in ('storage_class', 'size', 'version_id', 'encrypted'):
            eq(getattr(k1, attr), getattr(k2, attr))
35 | ||
36 | ||
class RadosZone(Zone):
    """A multisite zone backed by the default 'rados' tier type."""
    def __init__(self, name, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
        super(RadosZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)

    def tier_type(self):
        return "rados"

    class Conn(ZoneConn):
        """Per-zone S3 connection with bucket comparison helpers."""
        def __init__(self, zone, credentials):
            super(RadosZone.Conn, self).__init__(zone, credentials)

        def get_bucket(self, name):
            # look up an existing bucket through the boto connection
            return self.conn.get_bucket(name)

        def create_bucket(self, name):
            return self.conn.create_bucket(name)

        def check_bucket_eq(self, zone_conn, bucket_name):
            """Assert bucket_name holds identical objects in this zone and zone_conn.

            Walks the version listings of both buckets in lockstep, comparing
            each pair of keys (full listing attributes, then the attributes
            visible through a HEAD request). Raises AssertionError on any
            missing key or attribute mismatch; returns True on success.
            """
            log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
            b1 = self.get_bucket(bucket_name)
            b2 = zone_conn.get_bucket(bucket_name)

            # List each bucket exactly once and reuse the result: the
            # original listed twice (once for debug logging, once for the
            # comparison), paying an extra round trip per bucket and
            # risking the logged snapshot differing from the compared one.
            versions1 = list(b1.get_all_versions())
            versions2 = list(b2.get_all_versions())

            log.debug('bucket1 objects:')
            for o in versions1:
                log.debug('o=%s', o.name)
            log.debug('bucket2 objects:')
            for o in versions2:
                log.debug('o=%s', o.name)

            for k1, k2 in zip_longest(versions1, versions2):
                # zip_longest pads the shorter listing with None
                if k1 is None:
                    log.critical('key=%s is missing from zone=%s', k2.name, self.name)
                    assert False
                if k2 is None:
                    log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name)
                    assert False

                check_object_eq(k1, k2)

                # now get the keys through a HEAD operation, verify that the available data is the same
                k1_head = b1.get_key(k1.name)
                k2_head = b2.get_key(k2.name)

                check_object_eq(k1_head, k2_head, False)

            log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)

            return True

    def get_conn(self, credentials):
        return self.Conn(self, credentials)
89 |