]>
Commit | Line | Data |
---|---|---|
9f95a23c TL |
1 | import logging |
2 | ||
3 | from nose import SkipTest | |
4 | from nose.tools import assert_not_equal, assert_equal | |
5 | ||
6 | from boto.s3.deletemarker import DeleteMarker | |
7 | ||
8 | from .tests import get_realm, \ | |
9 | ZonegroupConns, \ | |
10 | zonegroup_meta_checkpoint, \ | |
11 | zone_meta_checkpoint, \ | |
12 | zone_bucket_checkpoint, \ | |
13 | zone_data_checkpoint, \ | |
14 | zonegroup_bucket_checkpoint, \ | |
15 | check_bucket_eq, \ | |
16 | gen_bucket_name, \ | |
17 | get_user, \ | |
18 | get_tenant | |
19 | ||
20 | from .zone_az import print_connection_info | |
21 | ||
22 | ||
23 | # configure logging for the tests module | |
24 | log = logging.getLogger(__name__) | |
25 | ||
26 | ||
27 | ########################################## | |
28 | # utility functions for archive zone tests | |
29 | ########################################## | |
30 | ||
def check_az_configured():
    """Skip the test run unless the zonegroup has exactly one archive zone."""
    realm = get_realm()
    zonegroup = realm.master_zonegroup()

    archive_zones = zonegroup.zones_by_type.get("archive")
    # note: the check is for exactly one archive zone, not "at least one"
    if not archive_zones or len(archive_zones) != 1:
        raise SkipTest("Requires one archive zone")
39 | ||
40 | ||
def is_az_zone(zone_conn):
    """Return True when *zone_conn* wraps a zone whose tier type is 'archive'."""
    # a falsy connection can never be an archive zone
    return bool(zone_conn) and zone_conn.zone.tier_type() == "archive"
46 | ||
47 | ||
def init_env():
    """Initialize the environment for an archive-zone test.

    Skips (via check_az_configured) unless exactly one archive zone exists,
    waits for zonegroup metadata sync, then partitions the zonegroup's
    connections into writable regular zones and archive zones.

    Returns:
        (zones, az_zones) -- non-empty lists of writable non-archive
        connections and archive-zone connections, in that order.
    """
    check_az_configured()

    realm = get_realm()
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)

    zonegroup_meta_checkpoint(zonegroup)

    archive_conns = []
    writable_conns = []
    for zone_conn in zonegroup_conns.zones:
        if is_az_zone(zone_conn):
            # make sure the archive zone's metadata is caught up first
            zone_meta_checkpoint(zone_conn.zone)
            archive_conns.append(zone_conn)
        elif not zone_conn.zone.is_read_only():
            writable_conns.append(zone_conn)

    assert_not_equal(len(writable_conns), 0)
    assert_not_equal(len(archive_conns), 0)
    return writable_conns, archive_conns
70 | ||
71 | ||
def zone_full_checkpoint(target_zone, source_zone):
    """Block until *target_zone* has synced both metadata and data from *source_zone*."""
    zone_meta_checkpoint(target_zone)
    zone_data_checkpoint(target_zone, source_zone)
75 | ||
76 | ||
def check_bucket_exists_on_zone(zone, bucket_name):
    """Return True if *bucket_name* is fetchable through *zone*'s connection.

    Any failure to fetch the bucket (boto raises S3ResponseError for a
    missing bucket) is reported as False.
    """
    try:
        zone.conn.get_bucket(bucket_name)
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed
        return False
    return True
83 | ||
84 | ||
def check_key_exists(key):
    """Return True if *key*'s contents can be read, False on any fetch error."""
    try:
        key.get_contents_as_string()
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed
        return False
    return True
91 | ||
92 | ||
def get_versioning_status(bucket):
    """Return the bucket's 'Versioning' status string, or None when unset.

    boto returns a dict without the 'Versioning' key for a bucket whose
    versioning was never configured, so dict.get gives the same result as
    the original membership test with fewer lookups.
    """
    res = bucket.get_versioning_status()
    return res.get('Versioning')
100 | ||
101 | ||
def get_versioned_objs(bucket):
    """Return the non-delete-marker versions in *bucket*.

    Each list element is a single-entry dict mapping the object key to its
    {'version_id', 'size', 'etag', 'is_latest'} attributes.
    """
    entries = []
    for version in bucket.list_versions():
        # delete markers are collected separately by get_versioned_entries()
        if isinstance(version, DeleteMarker):
            continue
        entries.append({version.key: {
            'version_id': version.version_id,
            'size': version.size,
            'etag': version.etag,
            'is_latest': version.is_latest,
        }})
    return entries
114 | ||
115 | ||
def get_versioned_entries(bucket):
    """Split the bucket's version listing into delete markers and versions.

    Returns a (delete_markers, versions) tuple; each element is a list of
    single-entry dicts keyed by the object name/key.
    """
    delete_markers = []
    versions = []
    for entry in bucket.list_versions():
        if isinstance(entry, DeleteMarker):
            # DeleteMarker exposes .name (no size/etag attributes)
            delete_markers.append({entry.name: {
                'version_id': entry.version_id,
                'is_latest': entry.is_latest,
            }})
        else:
            versions.append({entry.key: {
                'version_id': entry.version_id,
                'size': entry.size,
                'etag': entry.etag,
                'is_latest': entry.is_latest,
            }})
    return (delete_markers, versions)
133 | ||
134 | ||
def get_number_buckets_by_zone(zone):
    """Count the buckets visible through *zone*'s connection."""
    buckets = zone.conn.get_all_buckets()
    return len(buckets)
137 | ||
138 | ||
def get_bucket_names_by_zone(zone):
    """List the names of all buckets visible through *zone*'s connection."""
    names = []
    for bucket in zone.conn.get_all_buckets():
        names.append(bucket.name)
    return names
141 | ||
142 | ||
def get_full_bucket_name(partial_bucket_name, bucket_names_az):
    """Return the first name in *bucket_names_az* that starts with
    *partial_bucket_name*, or None when nothing matches."""
    return next(
        (name for name in bucket_names_az
         if name.startswith(partial_bucket_name)),
        None)
150 | ||
151 | ||
152 | #################### | |
153 | # archive zone tests | |
154 | #################### | |
155 | ||
156 | ||
def test_az_info():
    """ log information for manual testing """
    # SkipTest must be *raised* for nose to mark the test as skipped;
    # returning the exception instance made the test pass vacuously.
    # The code below runs only when this skip is removed by hand.
    raise SkipTest("only used in manual testing")
    zones, az_zones = init_env()
    realm = get_realm()
    zonegroup = realm.master_zonegroup()
    bucket_name = gen_bucket_name()
    # create bucket on the first of the rados zones
    bucket = zones[0].create_bucket(bucket_name)
    # create objects in the bucket
    number_of_objects = 3
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        key.set_contents_from_string('bar')
    print('Zonegroup: ' + zonegroup.name)
    print('user: ' + get_user())
    print('tenant: ' + get_tenant())
    print('Master Zone')
    print_connection_info(zones[0].conn)
    print('Archive Zone')
    print_connection_info(az_zones[0].conn)
    print('Bucket: ' + bucket_name)
179 | ||
180 | ||
def test_az_create_empty_bucket():
    """ test empty bucket replication """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # a bucket created on a regular (non-archive) zone ...
    zones[0].create_bucket(bucket_name)
    # ... after a full sync checkpoint ...
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # ... must also exist on the archive zone
    replicated = check_bucket_exists_on_zone(az_zones[0], bucket_name)
    assert_equal(replicated, True)
192 | ||
193 | ||
def test_az_check_empty_bucket_versioning():
    """ test bucket versioning with empty bucket """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create bucket on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # get bucket on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    # check for non bucket versioning: while the bucket holds no objects it
    # stays unversioned on both sides (the archive zone only enables
    # versioning lazily, on the first object write)
    p1 = get_versioning_status(bucket) is None
    assert_equal(p1, True)
    p2 = get_versioning_status(bucket_az) is None
    assert_equal(p2, True)
209 | ||
210 | ||
def test_az_object_replication():
    """ test object replication """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # write one object into a bucket on the non-archive zone
    bucket = zones[0].create_bucket(bucket_name)
    bucket.new_key("foo").set_contents_from_string("bar")
    # wait until the archive zone has caught up
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # the same object body must be readable from the archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    key_az = bucket_az.get_key("foo")
    same_body = key_az.get_contents_as_string() == "bar"
    assert_equal(same_body, True)
226 | ||
227 | ||
def test_az_object_replication_versioning():
    """ test object replication versioning """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create object on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    key = bucket.new_key("foo")
    key.set_contents_from_string("bar")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check object content on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    key_az = bucket_az.get_key("foo")
    p1 = key_az.get_contents_as_string() == "bar"
    assert_equal(p1, True)
    # grab object versioning and etag (single-object bucket, so each loop
    # just binds the attributes of the only listed version)
    for b_version in bucket.list_versions():
        b_version_id = b_version.version_id
        b_version_etag = b_version.etag
    for b_az_version in bucket_az.list_versions():
        b_az_version_id = b_az_version.version_id
        b_az_version_etag = b_az_version.etag
    # check: the source bucket is unversioned ('null' version id) ...
    p2 = b_version_id == 'null'
    assert_equal(p2, True)
    # ... while the archive copy carries a real version id ...
    p3 = b_az_version_id != 'null'
    assert_equal(p3, True)
    # ... and both copies have the same etag
    p4 = b_version_etag == b_az_version_etag
    assert_equal(p4, True)
257 | ||
258 | ||
def test_az_lazy_activation_of_versioned_bucket():
    """ test lazy activation of versioned bucket """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # an empty bucket created on the non-archive zone ...
    bucket = zones[0].create_bucket(bucket_name)
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    # ... is unversioned on both sides while it holds no objects
    assert_equal(get_versioning_status(bucket) is None, True)
    assert_equal(get_versioning_status(bucket_az) is None, True)
    # writing the first object on the non-archive zone ...
    new_key = bucket.new_key("foo")
    new_key.set_contents_from_string("bar")
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # ... lazily enables versioning on the archive copy only
    assert_equal(get_versioning_status(bucket) is None, True)
    assert_equal(get_versioning_status(bucket_az) == 'Enabled', True)
284 | ||
285 | ||
def test_az_archive_zone_double_object_replication_versioning():
    """ test archive zone double object replication versioning """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create object on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    key = bucket.new_key("foo")
    key.set_contents_from_string("bar")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # get bucket on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    # check for non bucket versioning: the first write lazily enabled
    # versioning on the archive side only
    p1 = get_versioning_status(bucket) is None
    assert_equal(p1, True)
    p2 = get_versioning_status(bucket_az) == 'Enabled'
    assert_equal(p2, True)
    # overwrite object on non archive zone ("ouch" is 4 bytes vs 3 for "bar")
    key = bucket.new_key("foo")
    key.set_contents_from_string("ouch")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check lazy versioned buckets
    p3 = get_versioning_status(bucket) is None
    assert_equal(p3, True)
    p4 = get_versioning_status(bucket_az) == 'Enabled'
    assert_equal(p4, True)
    # get versioned objects
    objs = get_versioned_objs(bucket)
    objs_az = get_versioned_objs(bucket_az)
    # check version_id, size, and is_latest on non archive zone: the
    # overwrite left a single 'null'-versioned object of the new size
    p5 = objs[0]['foo']['version_id'] == 'null'
    assert_equal(p5, True)
    p6 = objs[0]['foo']['size'] == 4
    assert_equal(p6, True)
    p7 = objs[0]['foo']['is_latest'] == True
    assert_equal(p7, True)
    # check version_id, size, is_latest on archive zone: both generations
    # must be present, the latest being the 4-byte overwrite
    latest_obj_az_etag = None
    for obj_az in objs_az:
        current_obj_az = obj_az['foo']
        if current_obj_az['is_latest'] == True:
            p8 = current_obj_az['size'] == 4
            assert_equal(p8, True)
            latest_obj_az_etag = current_obj_az['etag']
        else:
            p9 = current_obj_az['size'] == 3
            assert_equal(p9, True)
        # every archive version must carry a real (non-'null') version id
        assert_not_equal(current_obj_az['version_id'], 'null')
    # check last versions' etags
    p10 = objs[0]['foo']['etag'] == latest_obj_az_etag
    assert_equal(p10, True)
338 | ||
339 | ||
def test_az_deleted_object_replication():
    """ test zone deleted object replication """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create object on the non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    key = bucket.new_key("foo")
    key.set_contents_from_string("bar")
    p1 = key.get_contents_as_string() == "bar"
    assert_equal(p1, True)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # update object on non archive zone
    key.set_contents_from_string("soup")
    p2 = key.get_contents_as_string() == "soup"
    assert_equal(p2, True)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # delete object on non archive zone
    key.delete()
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check object on non archive zone: it is gone
    p3 = check_key_exists(key) == False
    assert_equal(p3, True)
    # check objects on archive zone: the archive keeps the object readable,
    # at its last-written content
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    key_az = bucket_az.get_key("foo")
    p4 = check_key_exists(key_az) == True
    assert_equal(p4, True)
    p5 = key_az.get_contents_as_string() == "soup"
    assert_equal(p5, True)
    # both generations ("bar" and "soup") survive as versions on the archive
    b_ver_az = get_versioned_objs(bucket_az)
    p6 = len(b_ver_az) == 2
    assert_equal(p6, True)
375 | ||
376 | ||
def test_az_bucket_renaming_on_empty_bucket_deletion():
    """ test bucket renaming on empty bucket deletion """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # grab number of buckets on non archive zone
    num_buckets = get_number_buckets_by_zone(zones[0])
    # grab number of buckets on archive zone
    num_buckets_az = get_number_buckets_by_zone(az_zones[0])
    # create bucket on non archive zone (the handle was never used, so the
    # unused 'bucket' binding has been dropped)
    zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # delete bucket in non archive zone
    zones[0].delete_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check no new buckets on non archive zone
    p1 = get_number_buckets_by_zone(zones[0]) == num_buckets
    assert_equal(p1, True)
    # check non deletion of bucket on archive zone
    p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
    assert_equal(p2, True)
    # check bucket renaming: the archive copy is renamed with a '-deleted-'
    # suffix (loop variable renamed so it no longer shadows bucket_name)
    bucket_names_az = get_bucket_names_by_zone(az_zones[0])
    new_bucket_name = bucket_name + '-deleted-'
    p3 = any(name.startswith(new_bucket_name) for name in bucket_names_az)
    assert_equal(p3, True)
404 | ||
405 | ||
def test_az_old_object_version_in_archive_zone():
    """ test old object version in archive zone """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # grab number of buckets on non archive zone
    num_buckets = get_number_buckets_by_zone(zones[0])
    # grab number of buckets on archive zone
    num_buckets_az = get_number_buckets_by_zone(az_zones[0])
    # create bucket on non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    # create object on non archive zone
    key = bucket.new_key("foo")
    key.set_contents_from_string("zero")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # save object version on archive zone: needed later to fetch the first
    # generation back out of the renamed archive bucket
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    b_ver_az = get_versioned_objs(bucket_az)
    obj_az_version_id = b_ver_az[0]['foo']['version_id']
    # update object on non archive zone
    key.set_contents_from_string("one")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # delete object on non archive zone
    key.delete()
    # delete bucket on non archive zone
    zones[0].delete_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check same buckets on non archive zone
    p1 = get_number_buckets_by_zone(zones[0]) == num_buckets
    assert_equal(p1, True)
    # check for new bucket on archive zone (preserved instead of deleted)
    p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
    assert_equal(p2, True)
    # get new bucket name on archive zone -- the preserved bucket was
    # renamed with a '-deleted-' infix
    bucket_names_az = get_bucket_names_by_zone(az_zones[0])
    new_bucket_name_az = get_full_bucket_name(bucket_name + '-deleted-', bucket_names_az)
    p3 = new_bucket_name_az is not None
    assert_equal(p3, True)
    # check number of objects on archive zone: both generations remain
    new_bucket_az = az_zones[0].conn.get_bucket(new_bucket_name_az)
    new_b_ver_az = get_versioned_objs(new_bucket_az)
    p4 = len(new_b_ver_az) == 2
    assert_equal(p4, True)
    # check versioned objects on archive zone: the saved version id still
    # resolves to the original body, the latest version to the overwrite
    new_key_az = new_bucket_az.get_key("foo", version_id=obj_az_version_id)
    p5 = new_key_az.get_contents_as_string() == "zero"
    assert_equal(p5, True)
    new_key_latest_az = new_bucket_az.get_key("foo")
    p6 = new_key_latest_az.get_contents_as_string() == "one"
    assert_equal(p6, True)
458 | ||
459 | ||
def test_az_force_bucket_renaming_if_same_bucket_name():
    """ test force bucket renaming if same bucket name """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # grab number of buckets on non archive zone
    num_buckets = get_number_buckets_by_zone(zones[0])
    # grab number of buckets on archive zone
    num_buckets_az = get_number_buckets_by_zone(az_zones[0])
    # create bucket on non archive zone (the handle was never used, so the
    # unused 'bucket' binding has been dropped)
    zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check same buckets on non archive zone
    p1 = get_number_buckets_by_zone(zones[0]) == (num_buckets + 1)
    assert_equal(p1, True)
    # check for new bucket on archive zone
    p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
    assert_equal(p2, True)
    # delete bucket on non archive zone
    zones[0].delete_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check number of buckets on non archive zone
    p3 = get_number_buckets_by_zone(zones[0]) == num_buckets
    assert_equal(p3, True)
    # check number of buckets on archive zone (copy preserved, not deleted)
    p4 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
    assert_equal(p4, True)
    # get new bucket name on archive zone
    bucket_names_az = get_bucket_names_by_zone(az_zones[0])
    new_bucket_name_az = get_full_bucket_name(bucket_name + '-deleted-', bucket_names_az)
    p5 = new_bucket_name_az is not None
    assert_equal(p5, True)
    # create a bucket with the archive's renamed name on the non archive
    # zone: the archive zone must rename again rather than collide
    _ = zones[0].create_bucket(new_bucket_name_az)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check number of buckets on non archive zone
    p6 = get_number_buckets_by_zone(zones[0]) == (num_buckets + 1)
    assert_equal(p6, True)
    # check number of buckets on archive zone
    p7 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 2)
    assert_equal(p7, True)
503 | ||
504 | ||
def test_az_versioning_support_in_zones():
    """ test versioning support on zones """
    zones, az_zones = init_env()
    bucket_name = gen_bucket_name()
    # create bucket on non archive zone
    bucket = zones[0].create_bucket(bucket_name)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # get bucket on archive zone
    bucket_az = az_zones[0].conn.get_bucket(bucket_name)
    # check non versioned buckets
    p1 = get_versioning_status(bucket) is None
    assert_equal(p1, True)
    p2 = get_versioning_status(bucket_az) is None
    assert_equal(p2, True)
    # create object on non archive zone
    key = bucket.new_key("foo")
    key.set_contents_from_string("zero")
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check bucket versioning (lazily enabled on the archive side only)
    p3 = get_versioning_status(bucket) is None
    assert_equal(p3, True)
    p4 = get_versioning_status(bucket_az) == 'Enabled'
    assert_equal(p4, True)
    # enable bucket versioning on non archive zone
    bucket.configure_versioning(True)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check bucket versioning
    p5 = get_versioning_status(bucket) == 'Enabled'
    assert_equal(p5, True)
    p6 = get_versioning_status(bucket_az) == 'Enabled'
    assert_equal(p6, True)
    # delete object on non archive zone
    key.delete()
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check delete-markers and versions on non archive zone
    (b_dm, b_ver) = get_versioned_entries(bucket)
    p7 = len(b_dm) == 1
    assert_equal(p7, True)
    p8 = len(b_ver) == 1
    assert_equal(p8, True)
    # check delete-markers and versions on archive zone
    (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az)
    p9 = len(b_dm_az) == 1
    assert_equal(p9, True)
    p10 = len(b_ver_az) == 1
    assert_equal(p10, True)
    # delete delete-marker on non archive zone
    dm_version_id = b_dm[0]['foo']['version_id']
    bucket.delete_key("foo", version_id=dm_version_id)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check delete-markers and versions on non archive zone
    (b_dm, b_ver) = get_versioned_entries(bucket)
    p11 = len(b_dm) == 0
    assert_equal(p11, True)
    p12 = len(b_ver) == 1
    assert_equal(p12, True)
    # check delete-markers and versions on archive zone: the archive still
    # holds its own delete-marker
    (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az)
    p13 = len(b_dm_az) == 1
    assert_equal(p13, True)
    p14 = len(b_ver_az) == 1
    assert_equal(p14, True)
    # delete delete-marker on archive zone
    dm_az_version_id = b_dm_az[0]['foo']['version_id']
    bucket_az.delete_key("foo", version_id=dm_az_version_id)
    # sync
    zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
    # check delete-markers and versions on non archive zone
    (b_dm, b_ver) = get_versioned_entries(bucket)
    p15 = len(b_dm) == 0
    assert_equal(p15, True)
    p16 = len(b_ver) == 1
    assert_equal(p16, True)
    # check delete-markers and versions on archive zone
    (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az)
    p17 = len(b_dm_az) == 0
    assert_equal(p17, True)
    # the original reused the name 'p17' here; renumbered to keep the
    # check variables unique
    p18 = len(b_ver_az) == 1
    assert_equal(p18, True)
    # check body in zones
    obj_version_id = b_ver[0]['foo']['version_id']
    key = bucket.get_key("foo", version_id=obj_version_id)
    p19 = key.get_contents_as_string() == "zero"
    assert_equal(p19, True)
    obj_az_version_id = b_ver_az[0]['foo']['version_id']
    key_az = bucket_az.get_key("foo", version_id=obj_az_version_id)
    p20 = key_az.get_contents_as_string() == "zero"
    assert_equal(p20, True)