1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
8 #include "common/config.h"
9 #include "common/Formatter.h"
10 #include "common/errno.h"
12 #include "rgw_rados.h"
13 #include "rgw_orphan.h"
15 #define dout_subsys ceph_subsys_rgw
17 #define DEFAULT_NUM_SHARDS 64
// Compute a "fingerprint" for a RADOS oid of the form "<bucket marker>_<obj>".
// The oid is split at the first '_', the object part is re-parsed as an
// rgw_obj_key, and trailing digit/'.'/'_' characters (at most the last 10)
// are stripped so that shadow/multipart tail objects of the same logical
// object fingerprint identically.
// NOTE(review): this extraction is lossy — embedded original line numbers
// jump (21 -> 23 -> 26 ...), so error-handling branches, the force_ns
// handling, and several declarations/braces are missing from view.
static string
obj_fingerprint(const string
& oid
, const char *force_ns
= NULL
)
21 ssize_t pos
= oid
.find('_');
// No '_' marker: the oid is not in the expected <bucket marker>_<obj> form.
23 cerr
<< "ERROR: object does not have a bucket marker: " << oid
<< std::endl
;
26 string obj_marker
= oid
.substr(0, pos
);
30 rgw_obj_key::parse_raw_oid(oid
.substr(pos
+ 1), &key
);
40 rgw_obj
new_obj(b
, key
);
// Rebuild the canonical "<marker>_<oid>" string before stripping the suffix.
41 s
= obj_marker
+ "_" + new_obj
.get_oid();
// Scan backwards over at most the last 10 characters, dropping digits,
// '.' and '_' (e.g. shard/part numbering) from the fingerprint.
45 size_t i
= s
.size() - 1;
46 for (; i
>= s
.size() - 10; --i
) {
48 if (!isdigit(c
) && c
!= '.' && c
!= '_') {
53 return s
.substr(0, i
+ 1);
// Read the persisted state of one orphan-search job.
// Looks up job_name in the omap of the store's index object via
// omap_get_vals_by_keys and decodes the value into `state`; a decode
// failure is caught (buffer::error) and logged.
// NOTE(review): extraction is lossy here (original line numbers jump), so
// the keys-set declaration, return statements and the decode call itself
// are not visible.
56 int RGWOrphanStore::read_job(const string
& job_name
, RGWOrphanSearchState
& state
)
59 map
<string
, bufferlist
> vals
;
60 keys
.insert(job_name
);
61 int r
= ioctx
.omap_get_vals_by_keys(oid
, keys
, &vals
);
// The job's entry must be present in the returned values; otherwise the
// job does not exist (handling of that branch not visible here).
66 map
<string
, bufferlist
>::iterator iter
= vals
.find(job_name
);
67 if (iter
== vals
.end()) {
72 bufferlist
& bl
= iter
->second
;
74 } catch (buffer::error
& err
) {
75 lderr(store
->ctx()) << "ERROR: could not decode buffer" << dendl
;
// Persist the state of one orphan-search job: store it under key job_name
// in the omap of the store's index object via omap_set.
// NOTE(review): the encode of `state` into the bufferlist is not visible in
// this lossy extraction (original line numbers jump 84 -> 88).
82 int RGWOrphanStore::write_job(const string
& job_name
, const RGWOrphanSearchState
& state
)
84 map
<string
, bufferlist
> vals
;
88 int r
= ioctx
.omap_set(oid
, vals
);
// Remove one job's entry from the index object's omap (omap_rm_keys with a
// single key, job_name).
// NOTE(review): the keys-set declaration and the return are not visible in
// this lossy extraction.
96 int RGWOrphanStore::remove_job(const string
& job_name
)
99 keys
.insert(job_name
);
101 int r
= ioctx
.omap_rm_keys(oid
, keys
);
// Enumerate all stored orphan-search jobs into job_list.
// Pages through the index object's omap with omap_get_vals, decoding each
// value into an RGWOrphanSearchState; decode failures are caught and
// logged. The loop repeats while a full batch (MAX_READ) was returned.
// NOTE(review): lossy extraction — the marker/MAX_READ declarations, the
// do { ... } opening, and the marker update are not visible here.
109 int RGWOrphanStore::list_jobs(map
<string
,RGWOrphanSearchState
>& job_list
)
111 map
<string
,bufferlist
> vals
;
116 // loop through all the omap vals from index object, storing them to job_list,
117 // read in batches of 1024, we update the marker every iteration and exit the
118 // loop when we find that total size read out is less than batch size
120 r
= ioctx
.omap_get_vals(oid
, marker
, MAX_READ
, &vals
);
126 for (const auto &it
: vals
) {
128 RGWOrphanSearchState state
;
130 bufferlist bl
= it
.second
;
132 } catch (buffer::error
& err
) {
133 lderr(store
->ctx()) << "ERROR: could not decode buffer" << dendl
;
136 job_list
[it
.first
] = state
;
// Keep reading while the previous batch was full.
138 } while (r
== MAX_READ
);
// Open an IoCtx on the zone's log pool (where the orphan index objects
// live); failure to open the pool is reported to stderr.
// NOTE(review): return statements are not visible in this lossy extraction.
143 int RGWOrphanStore::init()
145 rgw_pool
& log_pool
= store
->get_zone_params().log_pool
;
146 int r
= rgw_init_ioctx(store
->get_rados_handle(), log_pool
, ioctx
);
148 cerr
<< "ERROR: failed to open log pool (" << log_pool
<< " ret=" << r
<< std::endl
;
// Write a batch of omap entries to the given index object with a single
// ObjectWriteOperation (omap_set + operate). Progress goes to stdout, the
// individual keys to the debug log (level 20), and an operate() failure is
// logged with lderr.
// NOTE(review): lossy extraction — the error check guarding the lderr and
// the returns are not visible.
155 int RGWOrphanStore::store_entries(const string
& oid
, const map
<string
, bufferlist
>& entries
)
157 librados::ObjectWriteOperation op
;
158 op
.omap_set(entries
);
159 cout
<< "storing " << entries
.size() << " entries at " << oid
<< std::endl
;
160 ldout(store
->ctx(), 20) << "storing " << entries
.size() << " entries at " << oid
<< ": " << dendl
;
// Log every key being stored, for debugging at high verbosity.
161 for (map
<string
, bufferlist
>::const_iterator iter
= entries
.begin(); iter
!= entries
.end(); ++iter
) {
162 ldout(store
->ctx(), 20) << " > " << iter
->first
<< dendl
;
164 int ret
= ioctx
.operate(oid
, &op
);
166 lderr(store
->ctx()) << "ERROR: " << __func__
<< "(" << oid
<< ") returned ret=" << ret
<< dendl
;
// Read up to MAX_OMAP_GET omap entries from `oid` starting after `marker`.
// -ENOENT is tolerated (treated as no entries); *truncated is set when a
// full page was returned, i.e. more entries may remain.
// NOTE(review): the -ENOENT normalization and returns are not visible in
// this lossy extraction.
172 int RGWOrphanStore::read_entries(const string
& oid
, const string
& marker
, map
<string
, bufferlist
> *entries
, bool *truncated
)
174 #define MAX_OMAP_GET 100
175 int ret
= ioctx
.omap_get_vals(oid
, marker
, MAX_OMAP_GET
, entries
);
176 if (ret
< 0 && ret
!= -ENOENT
) {
177 cerr
<< "ERROR: " << __func__
<< "(" << oid
<< ") returned ret=" << cpp_strerror(-ret
) << std::endl
;
// Heuristic: a full page implies there may be more to read.
180 *truncated
= (entries
->size() == MAX_OMAP_GET
);
// Initialize an orphan search: open the orphan store, then either resume an
// existing job (read_job succeeded — adopt its saved info/stage) or, if
// `info` was provided and the job does not exist (-ENOENT), start a new one
// with num_shards defaulting to DEFAULT_NUM_SHARDS and stage
// ORPHAN_SEARCH_STAGE_INIT. Finally precompute the per-shard index object
// names: "<prefix>.rados.N", "<prefix>.buckets.N" and "<prefix>.linked.N"
// where prefix is RGW_ORPHAN_INDEX_PREFIX "." + job_name.
// NOTE(review): lossy extraction — the save_state call, several returns,
// and the char buf declaration are not visible.
185 int RGWOrphanSearch::init(const string
& job_name
, RGWOrphanSearchInfo
*info
) {
186 int r
= orphan_store
.init();
191 RGWOrphanSearchState state
;
192 r
= orphan_store
.read_job(job_name
, state
);
193 if (r
< 0 && r
!= -ENOENT
) {
194 lderr(store
->ctx()) << "ERROR: failed to read state ret=" << r
<< dendl
;
// Resume: adopt the persisted search info and stage.
199 search_info
= state
.info
;
200 search_stage
= state
.stage
;
201 } else if (info
) { /* r == -ENOENT, initiate a new job if info was provided */
203 search_info
.job_name
= job_name
;
204 search_info
.num_shards
= (info
->num_shards
? info
->num_shards
: DEFAULT_NUM_SHARDS
);
205 search_info
.start_time
= ceph_clock_now();
206 search_stage
= RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_INIT
);
210 lderr(store
->ctx()) << "ERROR: failed to write state ret=" << r
<< dendl
;
// Neither a stored job nor caller-supplied info: nothing to run.
214 lderr(store
->ctx()) << "ERROR: job not found" << dendl
;
218 index_objs_prefix
= RGW_ORPHAN_INDEX_PREFIX
+ string(".");
219 index_objs_prefix
+= job_name
;
// One index object per shard for each of the three index families.
221 for (int i
= 0; i
< search_info
.num_shards
; i
++) {
224 snprintf(buf
, sizeof(buf
), "%s.rados.%d", index_objs_prefix
.c_str(), i
);
225 all_objs_index
[i
] = buf
;
227 snprintf(buf
, sizeof(buf
), "%s.buckets.%d", index_objs_prefix
.c_str(), i
);
228 buckets_instance_index
[i
] = buf
;
230 snprintf(buf
, sizeof(buf
), "%s.linked.%d", index_objs_prefix
.c_str(), i
);
231 linked_objs_index
[i
] = buf
;
// Flush per-shard oid lists into their log-shard index objects.
// Builds a list of (oid, begin, end) iterator records — one per shard —
// then round-robins over them, storing up to MAX_OMAP_SET_ENTRIES keys per
// store_entries() call until every list is drained.
// NOTE(review): lossy extraction — the log_iter_info declaration, the
// error check after store_entries, and the erase/advance of list_iter
// (the `tmp` iterator's use) are not visible.
236 int RGWOrphanSearch::log_oids(map
<int, string
>& log_shards
, map
<int, list
<string
> >& oids
)
238 map
<int, list
<string
> >::iterator miter
= oids
.begin();
240 list
<log_iter_info
> liters
; /* a list of iterator pairs for begin and end */
// One iterator record per shard that has pending oids.
242 for (; miter
!= oids
.end(); ++miter
) {
244 info
.oid
= log_shards
[miter
->first
];
245 info
.cur
= miter
->second
.begin();
246 info
.end
= miter
->second
.end();
247 liters
.push_back(info
);
250 list
<log_iter_info
>::iterator list_iter
;
// Drain all shard lists, batching writes per shard.
251 while (!liters
.empty()) {
252 list_iter
= liters
.begin();
254 while (list_iter
!= liters
.end()) {
255 log_iter_info
& cur_info
= *list_iter
;
257 list
<string
>::iterator
& cur
= cur_info
.cur
;
258 list
<string
>::iterator
& end
= cur_info
.end
;
260 map
<string
, bufferlist
> entries
;
261 #define MAX_OMAP_SET_ENTRIES 100
// Collect up to MAX_OMAP_SET_ENTRIES keys (empty values) for this shard.
262 for (int j
= 0; cur
!= end
&& j
!= MAX_OMAP_SET_ENTRIES
; ++cur
, ++j
) {
263 ldout(store
->ctx(), 20) << "adding obj: " << *cur
<< dendl
;
264 entries
[*cur
] = bufferlist();
267 int ret
= orphan_store
.store_entries(cur_info
.oid
, entries
);
// `tmp` presumably captures the current position before erasing a
// drained record — TODO confirm against the full source.
271 list
<log_iter_info
>::iterator tmp
= list_iter
;
// Stage LSPOOL: iterate over every object in the data pool (all namespaces)
// and log each oid's fingerprint-derived shard entry into the
// all_objs_index shard objects. Oids that are not of the form
// "<bucket marker>_<obj>", that fail rgw_obj_key parsing, or that are head
// objects (empty key namespace) are skipped. Buffered entries are flushed
// via log_oids() every COUNT_BEFORE_FLUSH objects, plus a final flush.
// NOTE(review): lossy extraction — count/total declarations, the pos<0
// check, several skip `continue`s and returns are not visible.
281 int RGWOrphanSearch::build_all_oids_index()
283 librados::IoCtx ioctx
;
285 int ret
= rgw_init_ioctx(store
->get_rados_handle(), search_info
.pool
, ioctx
);
287 lderr(store
->ctx()) << __func__
<< ": rgw_init_ioctx() returned ret=" << ret
<< dendl
;
// Scan every namespace in the pool, not just the default one.
291 ioctx
.set_namespace(librados::all_nspaces
);
292 librados::NObjectIterator i
= ioctx
.nobjects_begin();
293 librados::NObjectIterator i_end
= ioctx
.nobjects_end();
295 map
<int, list
<string
> > oids
;
300 cout
<< "logging all objects in the pool" << std::endl
;
302 for (; i
!= i_end
; ++i
) {
303 string nspace
= i
->get_nspace();
304 string oid
= i
->get_oid();
305 string locator
= i
->get_locator();
307 ssize_t pos
= oid
.find('_');
309 cout
<< "unidentified oid: " << oid
<< ", skipping" << std::endl
;
310 /* what is this object, oids should be in the format of <bucket marker>_<obj>,
315 string stripped_oid
= oid
.substr(pos
+ 1);
317 if (!rgw_obj_key::parse_raw_oid(stripped_oid
, &key
)) {
318 cout
<< "cannot parse oid: " << oid
<< ", skipping" << std::endl
;
322 if (key
.ns
.empty()) {
323 /* skipping head objects, we don't want to remove these as they are mutable and
324 * cleaning them up is racy (can race with object removal and a later recreation)
326 cout
<< "skipping head object: oid=" << oid
<< std::endl
;
330 string oid_fp
= obj_fingerprint(oid
);
332 ldout(store
->ctx(), 20) << "oid_fp=" << oid_fp
<< dendl
;
// The fingerprint determines which shard index the oid is logged to.
334 int shard
= orphan_shard(oid_fp
);
335 oids
[shard
].push_back(oid
);
337 #define COUNT_BEFORE_FLUSH 1000
339 if (++count
>= COUNT_BEFORE_FLUSH
) {
340 ldout(store
->ctx(), 1) << "iterated through " << total
<< " objects" << dendl
;
341 ret
= log_oids(all_objs_index
, oids
);
343 cerr
<< __func__
<< ": ERROR: log_oids() returned ret=" << ret
<< std::endl
;
// Final flush of whatever remains buffered after the scan.
350 ret
= log_oids(all_objs_index
, oids
);
352 cerr
<< __func__
<< ": ERROR: log_oids() returned ret=" << ret
<< std::endl
;
// Stage LSBUCKETS: enumerate all "bucket.instance" metadata keys through
// the metadata manager (list_keys_init/next/complete), shard each instance
// id with orphan_shard(), and log the per-shard lists into
// buckets_instance_index via log_oids(), flushing every
// COUNT_BEFORE_FLUSH keys plus a final flush.
// NOTE(review): lossy extraction — handle/max/keys/truncated/count/total
// declarations, the do/while pagination loop, and returns are not visible.
359 int RGWOrphanSearch::build_buckets_instance_index()
363 string section
= "bucket.instance";
364 int ret
= store
->meta_mgr
->list_keys_init(section
, &handle
);
366 lderr(store
->ctx()) << "ERROR: can't get key: " << cpp_strerror(-ret
) << dendl
;
370 map
<int, list
<string
> > instances
;
// obj_ctx is declared here but its use is not visible in this extraction.
374 RGWObjectCtx
obj_ctx(store
);
381 ret
= store
->meta_mgr
->list_keys_next(handle
, max
, keys
, &truncated
);
383 lderr(store
->ctx()) << "ERROR: lists_keys_next(): " << cpp_strerror(-ret
) << dendl
;
387 for (list
<string
>::iterator iter
= keys
.begin(); iter
!= keys
.end(); ++iter
) {
389 ldout(store
->ctx(), 10) << "bucket_instance=" << *iter
<< " total=" << total
<< dendl
;
390 int shard
= orphan_shard(*iter
);
391 instances
[shard
].push_back(*iter
);
393 if (++count
>= COUNT_BEFORE_FLUSH
) {
394 ret
= log_oids(buckets_instance_index
, instances
);
396 lderr(store
->ctx()) << __func__
<< ": ERROR: log_oids() returned ret=" << ret
<< dendl
;
// Final flush of remaining buffered instance ids.
406 ret
= log_oids(buckets_instance_index
, instances
);
408 lderr(store
->ctx()) << __func__
<< ": ERROR: log_oids() returned ret=" << ret
<< dendl
;
411 store
->meta_mgr
->list_keys_complete(handle
);
// Record the RADOS oids that a stat'ed (linked) RGW object accounts for.
// Objects without a manifest (very old objects, or multipart parts mid-
// upload) contribute the "<bucket_id>_<oid>" fingerprint plus a "shadow"-
// namespace variant; objects with a manifest contribute a fingerprint for
// each manifest location. Each fingerprint is pushed onto the matching
// shard list in `oids`.
// NOTE(review): lossy extraction — the loc->s conversion for manifest
// locations, the else branch structure, and the return are not visible.
416 int RGWOrphanSearch::handle_stat_result(map
<int, list
<string
> >& oids
, RGWRados::Object::Stat::Result
& result
)
418 set
<string
> obj_oids
;
419 rgw_bucket
& bucket
= result
.obj
.bucket
;
420 if (!result
.has_manifest
) { /* a very very old object, or part of a multipart upload during upload */
421 const string loc
= bucket
.bucket_id
+ "_" + result
.obj
.get_oid();
422 obj_oids
.insert(obj_fingerprint(loc
));
425 * multipart parts don't have manifest on them, it's in the meta object. Instead of reading the
426 * meta object, just add a "shadow" object to the mix
428 obj_oids
.insert(obj_fingerprint(loc
, "shadow"));
430 RGWObjManifest
& manifest
= result
.manifest
;
432 RGWObjManifest::obj_iterator miter
;
// One fingerprint per raw-object location in the manifest.
433 for (miter
= manifest
.obj_begin(); miter
!= manifest
.obj_end(); ++miter
) {
434 const rgw_raw_obj
& loc
= miter
.get_location().get_raw_obj(store
);
436 obj_oids
.insert(obj_fingerprint(s
));
// Distribute the collected fingerprints into their shard lists.
440 for (set
<string
>::iterator iter
= obj_oids
.begin(); iter
!= obj_oids
.end(); ++iter
) {
441 ldout(store
->ctx(), 20) << __func__
<< ": oid for obj=" << result
.obj
<< ": " << *iter
<< dendl
;
443 int shard
= orphan_shard(*iter
);
444 oids
[shard
].push_back(*iter
);
// Wait on the oldest queued async stat op, feed its result to
// handle_stat_result() (which shards the object's oids into `oids`), and
// log failures. -ENOENT from the stat is tolerated (object raced away).
// NOTE(review): lossy extraction — the pop_front of `ops` and the returns
// are not visible.
450 int RGWOrphanSearch::pop_and_handle_stat_op(map
<int, list
<string
> >& oids
, std::deque
<RGWRados::Object::Stat
>& ops
)
452 RGWRados::Object::Stat
& front_op
= ops
.front();
454 int ret
= front_op
.wait();
456 if (ret
!= -ENOENT
) {
457 lderr(store
->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret
) << dendl
;
461 ret
= handle_stat_result(oids
, front_op
.result
);
463 lderr(store
->ctx()) << "ERROR: handle_stat_response() returned error: " << cpp_strerror(-ret
) << dendl
;
// For one bucket instance, list every (versioned, all-namespace) object and
// issue async stat ops to collect the RADOS oids each object links to,
// accumulating them into the per-shard `oids` lists.
// Flow: get_bucket_instance_info (tolerating -ENOENT as a raced bucket
// removal), set up a Bucket::List with list_versions=true and
// enforce_ns=false, page through MAX_LIST_OBJS_ENTRIES results, queue a
// Stat per entry, reap ops when max_concurrent_ios is reached, flush
// `oids` into linked_objs_index every COUNT_BEFORE_FLUSH, then drain all
// remaining stat ops.
// NOTE(review): lossy extraction — the marker/count/truncated variables,
// the paging do/while, and many error/return branches are not visible.
470 int RGWOrphanSearch::build_linked_oids_for_bucket(const string
& bucket_instance_id
, map
<int, list
<string
> >& oids
)
472 ldout(store
->ctx(), 10) << "building linked oids for bucket instance: " << bucket_instance_id
<< dendl
;
473 RGWBucketInfo bucket_info
;
474 RGWObjectCtx
obj_ctx(store
);
475 int ret
= store
->get_bucket_instance_info(obj_ctx
, bucket_instance_id
, bucket_info
, NULL
, NULL
);
477 if (ret
== -ENOENT
) {
478 /* probably raced with bucket removal */
481 lderr(store
->ctx()) << __func__
<< ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret
<< dendl
;
485 RGWRados::Bucket
target(store
, bucket_info
);
486 RGWRados::Bucket::List
list_op(&target
);
// List all versions across all namespaces so every linked oid is seen.
489 list_op
.params
.marker
= rgw_obj_key(marker
);
490 list_op
.params
.list_versions
= true;
491 list_op
.params
.enforce_ns
= false;
495 deque
<RGWRados::Object::Stat
> stat_ops
;
500 vector
<rgw_bucket_dir_entry
> result
;
502 #define MAX_LIST_OBJS_ENTRIES 100
503 ret
= list_op
.list_objects(MAX_LIST_OBJS_ENTRIES
, &result
, NULL
, &truncated
);
505 cerr
<< "ERROR: store->list_objects(): " << cpp_strerror(-ret
) << std::endl
;
509 for (vector
<rgw_bucket_dir_entry
>::iterator iter
= result
.begin(); iter
!= result
.end(); ++iter
) {
510 rgw_bucket_dir_entry
& entry
= *iter
;
511 if (entry
.key
.instance
.empty()) {
512 ldout(store
->ctx(), 20) << "obj entry: " << entry
.key
.name
<< dendl
;
514 ldout(store
->ctx(), 20) << "obj entry: " << entry
.key
.name
<< " [" << entry
.key
.instance
<< "]" << dendl
;
517 ldout(store
->ctx(), 20) << __func__
<< ": entry.key.name=" << entry
.key
.name
<< " entry.key.instance=" << entry
.key
.instance
<< dendl
;
518 rgw_obj
obj(bucket_info
.bucket
, entry
.key
);
520 RGWRados::Object
op_target(store
, bucket_info
, obj_ctx
, obj
);
// Queue an async stat; results are reaped below via
// pop_and_handle_stat_op().
522 stat_ops
.push_back(RGWRados::Object::Stat(&op_target
));
523 RGWRados::Object::Stat
& op
= stat_ops
.back();
526 ret
= op
.stat_async();
528 lderr(store
->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret
) << dendl
;
// Throttle: reap the oldest op once the in-flight limit is reached.
531 if (stat_ops
.size() >= max_concurrent_ios
) {
532 ret
= pop_and_handle_stat_op(oids
, stat_ops
);
534 if (ret
!= -ENOENT
) {
535 lderr(store
->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret
) << dendl
;
539 if (++count
>= COUNT_BEFORE_FLUSH
) {
540 ret
= log_oids(linked_objs_index
, oids
);
542 cerr
<< __func__
<< ": ERROR: log_oids() returned ret=" << ret
<< std::endl
;
// Drain any stat ops still in flight after listing completes.
551 while (!stat_ops
.empty()) {
552 ret
= pop_and_handle_stat_op(oids
, stat_ops
);
554 if (ret
!= -ENOENT
) {
555 lderr(store
->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret
) << dendl
;
// Stage ITERATE_BI: resume at search_stage.shard and walk the
// buckets_instance_index shard objects; for each recorded bucket instance
// call build_linked_oids_for_bucket() to accumulate linked oids, paging
// each shard's omap with read_entries() from search_stage.marker.
// Checkpointing: search_stage.shard/marker track progress (marker set to
// the last entry read, cleared when a shard is exhausted); the collected
// oids are flushed into linked_objs_index and the state is saved.
// NOTE(review): lossy extraction — the paging/checkpoint loop structure,
// truncated flag, save_state call and returns are not visible.
563 int RGWOrphanSearch::build_linked_oids_index()
565 map
<int, list
<string
> > oids
;
// Resume from the shard recorded in the persisted search stage.
566 map
<int, string
>::iterator iter
= buckets_instance_index
.find(search_stage
.shard
);
567 for (; iter
!= buckets_instance_index
.end(); ++iter
) {
568 ldout(store
->ctx(), 0) << "building linked oids index: " << iter
->first
<< "/" << buckets_instance_index
.size() << dendl
;
571 string oid
= iter
->second
;
574 map
<string
, bufferlist
> entries
;
575 int ret
= orphan_store
.read_entries(oid
, search_stage
.marker
, &entries
, &truncated
);
// A missing shard object simply means no entries were logged for it.
576 if (ret
== -ENOENT
) {
582 lderr(store
->ctx()) << __func__
<< ": ERROR: read_entries() oid=" << oid
<< " returned ret=" << ret
<< dendl
;
586 if (entries
.empty()) {
590 for (map
<string
, bufferlist
>::iterator eiter
= entries
.begin(); eiter
!= entries
.end(); ++eiter
) {
591 ldout(store
->ctx(), 20) << " indexed entry: " << eiter
->first
<< dendl
;
592 ret
= build_linked_oids_for_bucket(eiter
->first
, oids
);
594 lderr(store
->ctx()) << __func__
<< ": ERROR: build_linked_oids_for_bucket() indexed entry=" << eiter
->first
595 << " returned ret=" << ret
<< dendl
;
// Checkpoint progress within this shard.
600 search_stage
.shard
= iter
->first
;
601 search_stage
.marker
= entries
.rbegin()->first
; /* last entry */
604 search_stage
.marker
.clear();
607 int ret
= log_oids(linked_objs_index
, oids
);
609 cerr
<< __func__
<< ": ERROR: log_oids() returned ret=" << ret
<< std::endl
;
615 cerr
<< __func__
<< ": ERROR: failed to write state ret=" << ret
<< std::endl
;
// Fragment of a helper class (the `class OMAPReader {` header and some
// members — e.g. oid/marker/truncated — are not visible in this lossy
// extraction). It wraps paged iteration over one object's omap: the ctor
// binds an IoCtx and oid, starts with truncated=true and an empty entries
// cache, and get_next() yields one key (and optionally value) at a time.
623 librados::IoCtx ioctx
;
626 map
<string
, bufferlist
> entries
;
627 map
<string
, bufferlist
>::iterator iter
;
632 OMAPReader(librados::IoCtx
& _ioctx
, const string
& _oid
) : ioctx(_ioctx
), oid(_oid
), truncated(true) {
// Start with an exhausted cache so the first get_next() fetches a page.
633 iter
= entries
.end();
636 int get_next(string
*key
, bufferlist
*pbl
, bool *done
);
// Return the next omap key (and value, if pbl is non-NULL) from the
// object, refilling the cache with omap_get_vals pages of
// MAX_OMAP_GET_ENTRIES when the in-memory iterator is exhausted.
// -ENOENT on refill is tolerated; *done is presumably set when no entries
// remain — the branch that sets it is not visible in this lossy
// extraction (lines between the cache-hit check and the refill are
// missing).
639 int OMAPReader::get_next(string
*key
, bufferlist
*pbl
, bool *done
)
641 if (iter
!= entries
.end()) {
657 #define MAX_OMAP_GET_ENTRIES 100
658 int ret
= ioctx
.omap_get_vals(oid
, marker
, MAX_OMAP_GET_ENTRIES
, &entries
);
660 if (ret
== -ENOENT
) {
// A full page implies more entries may follow.
667 truncated
= (entries
.size() == MAX_OMAP_GET_ENTRIES
);
668 iter
= entries
.begin();
// Recurse once to serve the first entry of the freshly loaded page.
669 return get_next(key
, pbl
, done
);
// Stage COMPARE: merge-compare the all-objects index against the
// linked-objects index, shard by shard. Both index sets are read through
// OMAPReader; for each key in the all-objects shard, its fingerprint is
// advanced against the sorted linked-entries stream. Keys whose
// fingerprint matches a linked entry are "linked"; otherwise the object is
// stat'ed in the data pool and, unless its mtime is newer than
// start_time - stale_secs (recently written, possibly mid-upload),
// reported as "leaked" on stdout.
// NOTE(review): lossy extraction — the per-shard loop body's inner while,
// key/done/mtime declarations, continue/return statements are not visible.
672 int RGWOrphanSearch::compare_oid_indexes()
674 assert(linked_objs_index
.size() == all_objs_index
.size());
676 librados::IoCtx
& ioctx
= orphan_store
.get_ioctx();
678 librados::IoCtx data_ioctx
;
680 int ret
= rgw_init_ioctx(store
->get_rados_handle(), search_info
.pool
, data_ioctx
);
682 lderr(store
->ctx()) << __func__
<< ": rgw_init_ioctx() returned ret=" << ret
<< dendl
;
// Objects modified after this threshold are skipped, not reported.
686 uint64_t time_threshold
= search_info
.start_time
.sec() - stale_secs
;
688 map
<int, string
>::iterator liter
= linked_objs_index
.begin();
689 map
<int, string
>::iterator aiter
= all_objs_index
.begin();
// The two index maps are sharded identically (see the assert above), so
// they are walked in lock-step.
691 for (; liter
!= linked_objs_index
.end(); ++liter
, ++aiter
) {
692 OMAPReader
linked_entries(ioctx
, liter
->second
);
693 OMAPReader
all_entries(ioctx
, aiter
->second
);
698 bool linked_done
= false;
703 int r
= all_entries
.get_next(&key
, NULL
, &done
);
711 string key_fp
= obj_fingerprint(key
);
// Advance the sorted linked stream up to this fingerprint.
713 while (cur_linked
< key_fp
&& !linked_done
) {
714 r
= linked_entries
.get_next(&cur_linked
, NULL
, &linked_done
);
720 if (cur_linked
== key_fp
) {
721 ldout(store
->ctx(), 20) << "linked: " << key
<< dendl
;
726 r
= data_ioctx
.stat(key
, NULL
, &mtime
);
729 lderr(store
->ctx()) << "ERROR: ioctx.stat(" << key
<< ") returned ret=" << r
<< dendl
;
733 if (stale_secs
&& (uint64_t)mtime
>= time_threshold
) {
734 ldout(store
->ctx(), 20) << "skipping: " << key
<< " (mtime=" << mtime
<< " threshold=" << time_threshold
<< ")" << dendl
;
737 ldout(store
->ctx(), 20) << "leaked: " << key
<< dendl
;
738 cout
<< "leaked: " << key
<< std::endl
;
// Drive the orphan search state machine. Stages fall through in order:
// INIT -> LSPOOL (build_all_oids_index) -> LSBUCKETS
// (build_buckets_instance_index) -> ITERATE_BI (build_linked_oids_index)
// -> COMPARE (compare_oid_indexes); after each transition the new stage is
// persisted ("failed to save state" is logged on error).
// NOTE(review): lossy extraction — the save_state calls themselves, the
// r<0 checks guarding each lderr, case break/fallthrough markers, the
// default case and returns are not visible.
745 int RGWOrphanSearch::run()
749 switch (search_stage
.stage
) {
751 case ORPHAN_SEARCH_STAGE_INIT
:
752 ldout(store
->ctx(), 0) << __func__
<< "(): initializing state" << dendl
;
753 search_stage
= RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_LSPOOL
);
756 lderr(store
->ctx()) << __func__
<< ": ERROR: failed to save state, ret=" << r
<< dendl
;
760 case ORPHAN_SEARCH_STAGE_LSPOOL
:
761 ldout(store
->ctx(), 0) << __func__
<< "(): building index of all objects in pool" << dendl
;
762 r
= build_all_oids_index();
764 lderr(store
->ctx()) << __func__
<< ": ERROR: build_all_objs_index returned ret=" << r
<< dendl
;
768 search_stage
= RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_LSBUCKETS
);
771 lderr(store
->ctx()) << __func__
<< ": ERROR: failed to save state, ret=" << r
<< dendl
;
776 case ORPHAN_SEARCH_STAGE_LSBUCKETS
:
777 ldout(store
->ctx(), 0) << __func__
<< "(): building index of all bucket indexes" << dendl
;
778 r
= build_buckets_instance_index();
780 lderr(store
->ctx()) << __func__
<< ": ERROR: build_all_objs_index returned ret=" << r
<< dendl
;
784 search_stage
= RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_ITERATE_BI
);
787 lderr(store
->ctx()) << __func__
<< ": ERROR: failed to save state, ret=" << r
<< dendl
;
793 case ORPHAN_SEARCH_STAGE_ITERATE_BI
:
794 ldout(store
->ctx(), 0) << __func__
<< "(): building index of all linked objects" << dendl
;
795 r
= build_linked_oids_index();
797 lderr(store
->ctx()) << __func__
<< ": ERROR: build_all_objs_index returned ret=" << r
<< dendl
;
801 search_stage
= RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_COMPARE
);
804 lderr(store
->ctx()) << __func__
<< ": ERROR: failed to save state, ret=" << r
<< dendl
;
809 case ORPHAN_SEARCH_STAGE_COMPARE
:
810 r
= compare_oid_indexes();
812 lderr(store
->ctx()) << __func__
<< ": ERROR: build_all_objs_index returned ret=" << r
<< dendl
;
// Delete every per-shard index object named in `index` from the orphan
// store's pool; individual remove failures are logged but (per the visible
// code) do not include a -ENOENT filter here — the guarding condition is
// not visible in this lossy extraction.
826 int RGWOrphanSearch::remove_index(map
<int, string
>& index
)
828 librados::IoCtx
& ioctx
= orphan_store
.get_ioctx();
830 for (map
<int, string
>::iterator iter
= index
.begin(); iter
!= index
.end(); ++iter
) {
831 int r
= ioctx
.remove(iter
->second
);
834 ldout(store
->ctx(), 0) << "ERROR: couldn't remove " << iter
->second
<< ": ret=" << r
<< dendl
;
// Clean up after a completed search: remove all three families of shard
// index objects (all_objs, buckets_instance, linked_objs) and then the
// job's own state entry from the orphan store. Failures are logged and the
// cleanup continues best-effort through each step.
// NOTE(review): the error checks guarding each ldout and the final return
// are not visible in this lossy extraction.
841 int RGWOrphanSearch::finish()
843 int r
= remove_index(all_objs_index
);
845 ldout(store
->ctx(), 0) << "ERROR: remove_index(" << all_objs_index
<< ") returned ret=" << r
<< dendl
;
847 r
= remove_index(buckets_instance_index
);
849 ldout(store
->ctx(), 0) << "ERROR: remove_index(" << buckets_instance_index
<< ") returned ret=" << r
<< dendl
;
851 r
= remove_index(linked_objs_index
);
853 ldout(store
->ctx(), 0) << "ERROR: remove_index(" << linked_objs_index
<< ") returned ret=" << r
<< dendl
;
856 r
= orphan_store
.remove_job(search_info
.job_name
);
858 ldout(store
->ctx(), 0) << "ERROR: could not remove job name (" << search_info
.job_name
<< ") ret=" << r
<< dendl
;