1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab ft=cpp
8 #include "rgw_bucket.h"
9 #include "rgw_reshard.h"
11 #include "rgw_sal_rados.h"
12 #include "cls/rgw/cls_rgw_client.h"
13 #include "cls/lock/cls_lock_client.h"
14 #include "common/errno.h"
15 #include "common/ceph_json.h"
17 #include "common/dout.h"
19 #include "services/svc_zone.h"
20 #include "services/svc_sys_obj.h"
21 #include "services/svc_tier_rados.h"
23 #define dout_context g_ceph_context
24 #define dout_subsys ceph_subsys_rgw
// Well-known object-name prefix and cls_lock lock names used by the
// reshard subsystem (shared with radosgw-admin and the reshard worker).
const std::string reshard_oid_prefix = "reshard.";
const std::string reshard_lock_name = "reshard_process";
const std::string bucket_instance_lock_name = "bucket_instance_lock";
30 /* All primes up to 2000 used to attempt to make dynamic sharding use
31 * a prime numbers of shards. Note: this list also includes 1 for when
32 * 1 shard is the most appropriate, even though 1 is not prime.
34 const std::initializer_list
<uint16_t> RGWBucketReshard::reshard_primes
= {
35 1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
36 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
37 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
38 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
39 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
40 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
41 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
42 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
43 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
44 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
45 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
46 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021,
47 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,
48 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181,
49 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259,
50 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321,
51 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433,
52 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493,
53 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579,
54 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657,
55 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741,
56 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831,
57 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913,
58 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999
61 class BucketReshardShard
{
62 rgw::sal::RGWRadosStore
*store
;
63 const RGWBucketInfo
& bucket_info
;
65 const rgw::bucket_index_layout_generation
& idx_layout
;
66 RGWRados::BucketShard bs
;
67 vector
<rgw_cls_bi_entry
> entries
;
68 map
<RGWObjCategory
, rgw_bucket_category_stats
> stats
;
69 deque
<librados::AioCompletion
*>& aio_completions
;
70 uint64_t max_aio_completions
;
71 uint64_t reshard_shard_batch_size
;
73 int wait_next_completion() {
74 librados::AioCompletion
*c
= aio_completions
.front();
75 aio_completions
.pop_front();
77 c
->wait_for_complete();
79 int ret
= c
->get_return_value();
83 derr
<< "ERROR: reshard rados operation failed: " << cpp_strerror(-ret
) << dendl
;
90 int get_completion(librados::AioCompletion
**c
) {
91 if (aio_completions
.size() >= max_aio_completions
) {
92 int ret
= wait_next_completion();
98 *c
= librados::Rados::aio_create_completion(nullptr, nullptr);
99 aio_completions
.push_back(*c
);
105 BucketReshardShard(const DoutPrefixProvider
*dpp
,
106 rgw::sal::RGWRadosStore
*_store
, const RGWBucketInfo
& _bucket_info
,
107 int _num_shard
, const rgw::bucket_index_layout_generation
& _idx_layout
,
108 deque
<librados::AioCompletion
*>& _completions
) :
109 store(_store
), bucket_info(_bucket_info
), idx_layout(_idx_layout
), bs(store
->getRados()),
110 aio_completions(_completions
)
112 num_shard
= (idx_layout
.layout
.normal
.num_shards
> 0 ? _num_shard
: -1);
114 bs
.init(bucket_info
.bucket
, num_shard
, idx_layout
, nullptr /* no RGWBucketInfo */, dpp
);
116 max_aio_completions
=
117 store
->ctx()->_conf
.get_val
<uint64_t>("rgw_reshard_max_aio");
118 reshard_shard_batch_size
=
119 store
->ctx()->_conf
.get_val
<uint64_t>("rgw_reshard_batch_size");
122 int get_num_shard() {
126 int add_entry(rgw_cls_bi_entry
& entry
, bool account
, RGWObjCategory category
,
127 const rgw_bucket_category_stats
& entry_stats
) {
128 entries
.push_back(entry
);
130 rgw_bucket_category_stats
& target
= stats
[category
];
131 target
.num_entries
+= entry_stats
.num_entries
;
132 target
.total_size
+= entry_stats
.total_size
;
133 target
.total_size_rounded
+= entry_stats
.total_size_rounded
;
134 target
.actual_size
+= entry_stats
.actual_size
;
136 if (entries
.size() >= reshard_shard_batch_size
) {
147 if (entries
.size() == 0) {
151 librados::ObjectWriteOperation op
;
152 for (auto& entry
: entries
) {
153 store
->getRados()->bi_put(op
, bs
, entry
);
155 cls_rgw_bucket_update_stats(op
, false, stats
);
157 librados::AioCompletion
*c
;
158 int ret
= get_completion(&c
);
162 ret
= bs
.bucket_obj
.aio_operate(c
, &op
);
164 derr
<< "ERROR: failed to store entries in target bucket shard (bs=" << bs
.bucket
<< "/" << bs
.shard_id
<< ") error=" << cpp_strerror(-ret
) << dendl
;
174 while (!aio_completions
.empty()) {
175 int r
= wait_next_completion();
182 }; // class BucketReshardShard
185 class BucketReshardManager
{
186 rgw::sal::RGWRadosStore
*store
;
187 const RGWBucketInfo
& target_bucket_info
;
188 deque
<librados::AioCompletion
*> completions
;
189 int num_target_shards
;
190 vector
<BucketReshardShard
*> target_shards
;
193 BucketReshardManager(const DoutPrefixProvider
*dpp
,
194 rgw::sal::RGWRadosStore
*_store
,
195 const RGWBucketInfo
& _target_bucket_info
,
196 int _num_target_shards
) :
197 store(_store
), target_bucket_info(_target_bucket_info
),
198 num_target_shards(_num_target_shards
)
200 const auto& idx_layout
= target_bucket_info
.layout
.current_index
;
201 target_shards
.resize(num_target_shards
);
202 for (int i
= 0; i
< num_target_shards
; ++i
) {
203 target_shards
[i
] = new BucketReshardShard(dpp
, store
, target_bucket_info
, i
, idx_layout
, completions
);
207 ~BucketReshardManager() {
208 for (auto& shard
: target_shards
) {
209 int ret
= shard
->wait_all_aio();
211 ldout(store
->ctx(), 20) << __func__
<<
212 ": shard->wait_all_aio() returned ret=" << ret
<< dendl
;
217 int add_entry(int shard_index
,
218 rgw_cls_bi_entry
& entry
, bool account
, RGWObjCategory category
,
219 const rgw_bucket_category_stats
& entry_stats
) {
220 int ret
= target_shards
[shard_index
]->add_entry(entry
, account
, category
,
223 derr
<< "ERROR: target_shards.add_entry(" << entry
.idx
<<
224 ") returned error: " << cpp_strerror(-ret
) << dendl
;
233 for (auto& shard
: target_shards
) {
234 int r
= shard
->flush();
236 derr
<< "ERROR: target_shards[" << shard
->get_num_shard() << "].flush() returned error: " << cpp_strerror(-r
) << dendl
;
240 for (auto& shard
: target_shards
) {
241 int r
= shard
->wait_all_aio();
243 derr
<< "ERROR: target_shards[" << shard
->get_num_shard() << "].wait_all_aio() returned error: " << cpp_strerror(-r
) << dendl
;
248 target_shards
.clear();
251 }; // class BucketReshardManager
253 RGWBucketReshard::RGWBucketReshard(rgw::sal::RGWRadosStore
*_store
,
254 const RGWBucketInfo
& _bucket_info
,
255 const map
<string
, bufferlist
>& _bucket_attrs
,
256 RGWBucketReshardLock
* _outer_reshard_lock
) :
257 store(_store
), bucket_info(_bucket_info
), bucket_attrs(_bucket_attrs
),
258 reshard_lock(store
, bucket_info
, true),
259 outer_reshard_lock(_outer_reshard_lock
)
262 int RGWBucketReshard::set_resharding_status(const DoutPrefixProvider
*dpp
,
263 rgw::sal::RGWRadosStore
* store
,
264 const RGWBucketInfo
& bucket_info
,
265 const string
& new_instance_id
,
267 cls_rgw_reshard_status status
)
269 if (new_instance_id
.empty()) {
270 ldpp_dout(dpp
, 0) << __func__
<< " missing new bucket instance id" << dendl
;
274 cls_rgw_bucket_instance_entry instance_entry
;
275 instance_entry
.set_status(new_instance_id
, num_shards
, status
);
277 int ret
= store
->getRados()->bucket_set_reshard(dpp
, bucket_info
, instance_entry
);
279 ldpp_dout(dpp
, 0) << "RGWReshard::" << __func__
<< " ERROR: error setting bucket resharding flag on bucket index: "
280 << cpp_strerror(-ret
) << dendl
;
286 // reshard lock assumes lock is held
287 int RGWBucketReshard::clear_resharding(const DoutPrefixProvider
*dpp
,
288 rgw::sal::RGWRadosStore
* store
,
289 const RGWBucketInfo
& bucket_info
)
291 int ret
= clear_index_shard_reshard_status(dpp
, store
, bucket_info
);
293 ldpp_dout(dpp
, 0) << "RGWBucketReshard::" << __func__
<<
294 " ERROR: error clearing reshard status from index shard " <<
295 cpp_strerror(-ret
) << dendl
;
299 cls_rgw_bucket_instance_entry instance_entry
;
300 ret
= store
->getRados()->bucket_set_reshard(dpp
, bucket_info
, instance_entry
);
302 ldpp_dout(dpp
, 0) << "RGWReshard::" << __func__
<<
303 " ERROR: error setting bucket resharding flag on bucket index: " <<
304 cpp_strerror(-ret
) << dendl
;
311 int RGWBucketReshard::clear_index_shard_reshard_status(const DoutPrefixProvider
*dpp
,
312 rgw::sal::RGWRadosStore
* store
,
313 const RGWBucketInfo
& bucket_info
)
315 uint32_t num_shards
= bucket_info
.layout
.current_index
.layout
.normal
.num_shards
;
317 if (num_shards
< std::numeric_limits
<uint32_t>::max()) {
318 int ret
= set_resharding_status(dpp
, store
, bucket_info
,
319 bucket_info
.bucket
.bucket_id
,
320 (num_shards
< 1 ? 1 : num_shards
),
321 cls_rgw_reshard_status::NOT_RESHARDING
);
323 ldpp_dout(dpp
, 0) << "RGWBucketReshard::" << __func__
<<
324 " ERROR: error clearing reshard status from index shard " <<
325 cpp_strerror(-ret
) << dendl
;
333 static int create_new_bucket_instance(rgw::sal::RGWRadosStore
*store
,
335 const RGWBucketInfo
& bucket_info
,
336 map
<string
, bufferlist
>& attrs
,
337 RGWBucketInfo
& new_bucket_info
,
338 const DoutPrefixProvider
*dpp
)
340 new_bucket_info
= bucket_info
;
342 store
->getRados()->create_bucket_id(&new_bucket_info
.bucket
.bucket_id
);
344 new_bucket_info
.layout
.current_index
.layout
.normal
.num_shards
= new_num_shards
;
345 new_bucket_info
.objv_tracker
.clear();
347 new_bucket_info
.new_bucket_instance_id
.clear();
348 new_bucket_info
.reshard_status
= cls_rgw_reshard_status::NOT_RESHARDING
;
350 int ret
= store
->svc()->bi
->init_index(dpp
, new_bucket_info
);
352 cerr
<< "ERROR: failed to init new bucket indexes: " << cpp_strerror(-ret
) << std::endl
;
356 ret
= store
->getRados()->put_bucket_instance_info(new_bucket_info
, true, real_time(), &attrs
, dpp
);
358 cerr
<< "ERROR: failed to store new bucket instance info: " << cpp_strerror(-ret
) << std::endl
;
365 int RGWBucketReshard::create_new_bucket_instance(int new_num_shards
,
366 RGWBucketInfo
& new_bucket_info
,
367 const DoutPrefixProvider
*dpp
)
369 return ::create_new_bucket_instance(store
, new_num_shards
,
370 bucket_info
, bucket_attrs
, new_bucket_info
, dpp
);
373 int RGWBucketReshard::cancel(const DoutPrefixProvider
*dpp
)
375 int ret
= reshard_lock
.lock();
380 ret
= clear_resharding(dpp
);
382 reshard_lock
.unlock();
386 class BucketInfoReshardUpdate
388 const DoutPrefixProvider
*dpp
;
389 rgw::sal::RGWRadosStore
*store
;
390 RGWBucketInfo
& bucket_info
;
391 std::map
<string
, bufferlist
> bucket_attrs
;
393 bool in_progress
{false};
395 int set_status(cls_rgw_reshard_status s
, const DoutPrefixProvider
*dpp
) {
396 bucket_info
.reshard_status
= s
;
397 int ret
= store
->getRados()->put_bucket_instance_info(bucket_info
, false, real_time(), &bucket_attrs
, dpp
);
399 ldpp_dout(dpp
, 0) << "ERROR: failed to write bucket info, ret=" << ret
<< dendl
;
406 BucketInfoReshardUpdate(const DoutPrefixProvider
*_dpp
,
407 rgw::sal::RGWRadosStore
*_store
,
408 RGWBucketInfo
& _bucket_info
,
409 map
<string
, bufferlist
>& _bucket_attrs
,
410 const string
& new_bucket_id
) :
413 bucket_info(_bucket_info
),
414 bucket_attrs(_bucket_attrs
)
416 bucket_info
.new_bucket_instance_id
= new_bucket_id
;
419 ~BucketInfoReshardUpdate() {
421 // resharding must not have ended correctly, clean up
423 RGWBucketReshard::clear_index_shard_reshard_status(dpp
, store
, bucket_info
);
425 ldpp_dout(dpp
, -1) << "Error: " << __func__
<<
426 " clear_index_shard_status returned " << ret
<< dendl
;
428 bucket_info
.new_bucket_instance_id
.clear();
430 // clears new_bucket_instance as well
431 set_status(cls_rgw_reshard_status::NOT_RESHARDING
, dpp
);
436 int ret
= set_status(cls_rgw_reshard_status::IN_PROGRESS
, dpp
);
445 int ret
= set_status(cls_rgw_reshard_status::DONE
, dpp
);
455 RGWBucketReshardLock::RGWBucketReshardLock(rgw::sal::RGWRadosStore
* _store
,
456 const std::string
& reshard_lock_oid
,
459 lock_oid(reshard_lock_oid
),
460 ephemeral(_ephemeral
),
461 internal_lock(reshard_lock_name
)
463 const int lock_dur_secs
= store
->ctx()->_conf
.get_val
<uint64_t>(
464 "rgw_reshard_bucket_lock_duration");
465 duration
= std::chrono::seconds(lock_dur_secs
);
467 #define COOKIE_LEN 16
468 char cookie_buf
[COOKIE_LEN
+ 1];
469 gen_rand_alphanumeric(store
->ctx(), cookie_buf
, sizeof(cookie_buf
) - 1);
470 cookie_buf
[COOKIE_LEN
] = '\0';
472 internal_lock
.set_cookie(cookie_buf
);
473 internal_lock
.set_duration(duration
);
476 int RGWBucketReshardLock::lock() {
477 internal_lock
.set_must_renew(false);
480 ret
= internal_lock
.lock_exclusive_ephemeral(&store
->getRados()->reshard_pool_ctx
,
483 ret
= internal_lock
.lock_exclusive(&store
->getRados()->reshard_pool_ctx
, lock_oid
);
486 ldout(store
->ctx(), 0) << "RGWReshardLock::" << __func__
<<
487 " failed to acquire lock on " << lock_oid
<< " ret=" << ret
<< dendl
;
490 reset_time(Clock::now());
495 void RGWBucketReshardLock::unlock() {
496 int ret
= internal_lock
.unlock(&store
->getRados()->reshard_pool_ctx
, lock_oid
);
498 ldout(store
->ctx(), 0) << "WARNING: RGWBucketReshardLock::" << __func__
<<
499 " failed to drop lock on " << lock_oid
<< " ret=" << ret
<< dendl
;
503 int RGWBucketReshardLock::renew(const Clock::time_point
& now
) {
504 internal_lock
.set_must_renew(true);
507 ret
= internal_lock
.lock_exclusive_ephemeral(&store
->getRados()->reshard_pool_ctx
,
510 ret
= internal_lock
.lock_exclusive(&store
->getRados()->reshard_pool_ctx
, lock_oid
);
512 if (ret
< 0) { /* expired or already locked by another processor */
513 std::stringstream error_s
;
514 if (-ENOENT
== ret
) {
515 error_s
<< "ENOENT (lock expired or never initially locked)";
517 error_s
<< ret
<< " (" << cpp_strerror(-ret
) << ")";
519 ldout(store
->ctx(), 5) << __func__
<< "(): failed to renew lock on " <<
520 lock_oid
<< " with error " << error_s
.str() << dendl
;
523 internal_lock
.set_must_renew(false);
526 ldout(store
->ctx(), 20) << __func__
<< "(): successfully renewed lock on " <<
533 int RGWBucketReshard::do_reshard(int num_shards
,
534 RGWBucketInfo
& new_bucket_info
,
538 Formatter
*formatter
,
539 const DoutPrefixProvider
*dpp
)
542 const rgw_bucket
& bucket
= bucket_info
.bucket
;
543 (*out
) << "tenant: " << bucket
.tenant
<< std::endl
;
544 (*out
) << "bucket name: " << bucket
.name
<< std::endl
;
545 (*out
) << "old bucket instance id: " << bucket
.bucket_id
<<
547 (*out
) << "new bucket instance id: " << new_bucket_info
.bucket
.bucket_id
<<
551 /* update bucket info -- in progress*/
552 list
<rgw_cls_bi_entry
> entries
;
554 if (max_entries
< 0) {
555 ldpp_dout(dpp
, 0) << __func__
<<
556 ": can't reshard, negative max_entries" << dendl
;
560 // NB: destructor cleans up sharding state if reshard does not
561 // complete successfully
562 BucketInfoReshardUpdate
bucket_info_updater(dpp
, store
, bucket_info
, bucket_attrs
, new_bucket_info
.bucket
.bucket_id
);
564 int ret
= bucket_info_updater
.start();
566 ldpp_dout(dpp
, 0) << __func__
<< ": failed to update bucket info ret=" << ret
<< dendl
;
570 int num_target_shards
= (new_bucket_info
.layout
.current_index
.layout
.normal
.num_shards
> 0 ? new_bucket_info
.layout
.current_index
.layout
.normal
.num_shards
: 1);
572 BucketReshardManager
target_shards_mgr(dpp
, store
, new_bucket_info
, num_target_shards
);
574 bool verbose_json_out
= verbose
&& (formatter
!= nullptr) && (out
!= nullptr);
576 if (verbose_json_out
) {
577 formatter
->open_array_section("entries");
580 uint64_t total_entries
= 0;
582 if (!verbose_json_out
&& out
) {
583 (*out
) << "total entries:";
586 const int num_source_shards
=
587 (bucket_info
.layout
.current_index
.layout
.normal
.num_shards
> 0 ? bucket_info
.layout
.current_index
.layout
.normal
.num_shards
: 1);
589 for (int i
= 0; i
< num_source_shards
; ++i
) {
590 bool is_truncated
= true;
592 while (is_truncated
) {
594 ret
= store
->getRados()->bi_list(dpp
, bucket_info
, i
, string(), marker
, max_entries
, &entries
, &is_truncated
);
595 if (ret
< 0 && ret
!= -ENOENT
) {
596 derr
<< "ERROR: bi_list(): " << cpp_strerror(-ret
) << dendl
;
600 for (auto iter
= entries
.begin(); iter
!= entries
.end(); ++iter
) {
601 rgw_cls_bi_entry
& entry
= *iter
;
602 if (verbose_json_out
) {
603 formatter
->open_object_section("entry");
605 encode_json("shard_id", i
, formatter
);
606 encode_json("num_entry", total_entries
, formatter
);
607 encode_json("entry", entry
, formatter
);
614 cls_rgw_obj_key cls_key
;
615 RGWObjCategory category
;
616 rgw_bucket_category_stats stats
;
617 bool account
= entry
.get_info(&cls_key
, &category
, &stats
);
618 rgw_obj_key
key(cls_key
);
619 rgw_obj
obj(new_bucket_info
.bucket
, key
);
621 if (key
.ns
== RGW_OBJ_NS_MULTIPART
&& mp
.from_meta(key
.name
)) {
622 // place the multipart .meta object on the same shard as its head object
623 obj
.index_hash_source
= mp
.get_key();
625 int ret
= store
->getRados()->get_target_shard_id(new_bucket_info
.layout
.current_index
.layout
.normal
, obj
.get_hash_object(), &target_shard_id
);
627 ldpp_dout(dpp
, -1) << "ERROR: get_target_shard_id() returned ret=" << ret
<< dendl
;
631 int shard_index
= (target_shard_id
> 0 ? target_shard_id
: 0);
633 ret
= target_shards_mgr
.add_entry(shard_index
, entry
, account
,
639 Clock::time_point now
= Clock::now();
640 if (reshard_lock
.should_renew(now
)) {
641 // assume outer locks have timespans at least the size of ours, so
642 // can call inside conditional
643 if (outer_reshard_lock
) {
644 ret
= outer_reshard_lock
->renew(now
);
649 ret
= reshard_lock
.renew(now
);
651 ldpp_dout(dpp
, -1) << "Error renewing bucket lock: " << ret
<< dendl
;
655 if (verbose_json_out
) {
656 formatter
->close_section();
657 formatter
->flush(*out
);
658 } else if (out
&& !(total_entries
% 1000)) {
659 (*out
) << " " << total_entries
;
665 if (verbose_json_out
) {
666 formatter
->close_section();
667 formatter
->flush(*out
);
669 (*out
) << " " << total_entries
<< std::endl
;
672 ret
= target_shards_mgr
.finish();
674 ldpp_dout(dpp
, -1) << "ERROR: failed to reshard" << dendl
;
678 ret
= store
->ctl()->bucket
->link_bucket(new_bucket_info
.owner
, new_bucket_info
.bucket
, bucket_info
.creation_time
, null_yield
, dpp
);
680 ldpp_dout(dpp
, -1) << "failed to link new bucket instance (bucket_id=" << new_bucket_info
.bucket
.bucket_id
<< ": " << cpp_strerror(-ret
) << ")" << dendl
;
684 ret
= bucket_info_updater
.complete();
686 ldpp_dout(dpp
, 0) << __func__
<< ": failed to update bucket info ret=" << ret
<< dendl
;
687 /* don't error out, reshard process succeeded */
691 // NB: some error clean-up is done by ~BucketInfoReshardUpdate
692 } // RGWBucketReshard::do_reshard
694 int RGWBucketReshard::get_status(const DoutPrefixProvider
*dpp
, list
<cls_rgw_bucket_instance_entry
> *status
)
696 return store
->svc()->bi_rados
->get_reshard_status(dpp
, bucket_info
, status
);
700 int RGWBucketReshard::execute(int num_shards
, int max_op_entries
,
701 const DoutPrefixProvider
*dpp
,
702 bool verbose
, ostream
*out
, Formatter
*formatter
,
703 RGWReshard
* reshard_log
)
705 int ret
= reshard_lock
.lock();
710 RGWBucketInfo new_bucket_info
;
711 ret
= create_new_bucket_instance(num_shards
, new_bucket_info
, dpp
);
713 // shard state is uncertain, but this will attempt to remove them anyway
718 ret
= reshard_log
->update(dpp
, bucket_info
, new_bucket_info
);
724 // set resharding status of current bucket_info & shards with
725 // information about planned resharding
726 ret
= set_resharding_status(dpp
, new_bucket_info
.bucket
.bucket_id
,
727 num_shards
, cls_rgw_reshard_status::IN_PROGRESS
);
732 ret
= do_reshard(num_shards
,
735 verbose
, out
, formatter
, dpp
);
740 // at this point we've done the main work; we'll make a best-effort
741 // to clean-up but will not indicate any errors encountered
743 reshard_lock
.unlock();
745 // resharding successful, so remove old bucket index shards; use
746 // best effort and don't report out an error; the lock isn't needed
747 // at this point since all we're using a best effor to to remove old
749 ret
= store
->svc()->bi
->clean_index(dpp
, bucket_info
);
751 ldpp_dout(dpp
, -1) << "Error: " << __func__
<<
752 " failed to clean up old shards; " <<
753 "RGWRados::clean_bucket_index returned " << ret
<< dendl
;
756 ret
= store
->ctl()->bucket
->remove_bucket_instance_info(bucket_info
.bucket
,
757 bucket_info
, null_yield
, dpp
);
759 ldpp_dout(dpp
, -1) << "Error: " << __func__
<<
760 " failed to clean old bucket info object \"" <<
761 bucket_info
.bucket
.get_key() <<
762 "\"created after successful resharding with error " << ret
<< dendl
;
765 ldpp_dout(dpp
, 1) << __func__
<<
766 " INFO: reshard of bucket \"" << bucket_info
.bucket
.name
<< "\" from \"" <<
767 bucket_info
.bucket
.get_key() << "\" to \"" <<
768 new_bucket_info
.bucket
.get_key() << "\" completed successfully" << dendl
;
774 reshard_lock
.unlock();
776 // since the real problem is the issue that led to this error code
777 // path, we won't touch ret and instead use another variable to
778 // temporarily error codes
779 int ret2
= store
->svc()->bi
->clean_index(dpp
, new_bucket_info
);
781 ldpp_dout(dpp
, -1) << "Error: " << __func__
<<
782 " failed to clean up shards from failed incomplete resharding; " <<
783 "RGWRados::clean_bucket_index returned " << ret2
<< dendl
;
786 ret2
= store
->ctl()->bucket
->remove_bucket_instance_info(new_bucket_info
.bucket
,
790 ldpp_dout(dpp
, -1) << "Error: " << __func__
<<
791 " failed to clean bucket info object \"" <<
792 new_bucket_info
.bucket
.get_key() <<
793 "\"created during incomplete resharding with error " << ret2
<< dendl
;
800 RGWReshard::RGWReshard(rgw::sal::RGWRadosStore
* _store
, bool _verbose
, ostream
*_out
,
801 Formatter
*_formatter
) :
802 store(_store
), instance_lock(bucket_instance_lock_name
),
803 verbose(_verbose
), out(_out
), formatter(_formatter
)
805 num_logshards
= store
->ctx()->_conf
.get_val
<uint64_t>("rgw_reshard_num_logs");
808 string
RGWReshard::get_logshard_key(const string
& tenant
,
809 const string
& bucket_name
)
811 return tenant
+ ":" + bucket_name
;
814 #define MAX_RESHARD_LOGSHARDS_PRIME 7877
816 void RGWReshard::get_bucket_logshard_oid(const string
& tenant
, const string
& bucket_name
, string
*oid
)
818 string key
= get_logshard_key(tenant
, bucket_name
);
820 uint32_t sid
= ceph_str_hash_linux(key
.c_str(), key
.size());
821 uint32_t sid2
= sid
^ ((sid
& 0xFF) << 24);
822 sid
= sid2
% MAX_RESHARD_LOGSHARDS_PRIME
% num_logshards
;
824 get_logshard_oid(int(sid
), oid
);
827 int RGWReshard::add(const DoutPrefixProvider
*dpp
, cls_rgw_reshard_entry
& entry
)
829 if (!store
->svc()->zone
->can_reshard()) {
830 ldpp_dout(dpp
, 20) << __func__
<< " Resharding is disabled" << dendl
;
836 get_bucket_logshard_oid(entry
.tenant
, entry
.bucket_name
, &logshard_oid
);
838 librados::ObjectWriteOperation op
;
839 cls_rgw_reshard_add(op
, entry
);
841 int ret
= rgw_rados_operate(dpp
, store
->getRados()->reshard_pool_ctx
, logshard_oid
, &op
, null_yield
);
843 ldpp_dout(dpp
, -1) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid
<< " tenant=" << entry
.tenant
<< " bucket=" << entry
.bucket_name
<< dendl
;
849 int RGWReshard::update(const DoutPrefixProvider
*dpp
, const RGWBucketInfo
& bucket_info
, const RGWBucketInfo
& new_bucket_info
)
851 cls_rgw_reshard_entry entry
;
852 entry
.bucket_name
= bucket_info
.bucket
.name
;
853 entry
.bucket_id
= bucket_info
.bucket
.bucket_id
;
854 entry
.tenant
= bucket_info
.owner
.tenant
;
856 int ret
= get(entry
);
861 entry
.new_instance_id
= new_bucket_info
.bucket
.name
+ ":" + new_bucket_info
.bucket
.bucket_id
;
863 ret
= add(dpp
, entry
);
865 ldpp_dout(dpp
, 0) << __func__
<< ":Error in updating entry bucket " << entry
.bucket_name
<< ": " <<
866 cpp_strerror(-ret
) << dendl
;
873 int RGWReshard::list(int logshard_num
, string
& marker
, uint32_t max
, std::list
<cls_rgw_reshard_entry
>& entries
, bool *is_truncated
)
877 get_logshard_oid(logshard_num
, &logshard_oid
);
879 int ret
= cls_rgw_reshard_list(store
->getRados()->reshard_pool_ctx
, logshard_oid
, marker
, max
, entries
, is_truncated
);
882 lderr(store
->ctx()) << "ERROR: failed to list reshard log entries, oid=" << logshard_oid
<< " "
883 << "marker=" << marker
<< " " << cpp_strerror(ret
) << dendl
;
884 if (ret
== -ENOENT
) {
885 *is_truncated
= false;
888 if (ret
== -EACCES
) {
889 lderr(store
->ctx()) << "access denied to pool " << store
->svc()->zone
->get_zone_params().reshard_pool
890 << ". Fix the pool access permissions of your client" << dendl
;
898 int RGWReshard::get(cls_rgw_reshard_entry
& entry
)
902 get_bucket_logshard_oid(entry
.tenant
, entry
.bucket_name
, &logshard_oid
);
904 int ret
= cls_rgw_reshard_get(store
->getRados()->reshard_pool_ctx
, logshard_oid
, entry
);
906 if (ret
!= -ENOENT
) {
907 lderr(store
->ctx()) << "ERROR: failed to get entry from reshard log, oid=" << logshard_oid
<< " tenant=" << entry
.tenant
<<
908 " bucket=" << entry
.bucket_name
<< dendl
;
916 int RGWReshard::remove(const DoutPrefixProvider
*dpp
, cls_rgw_reshard_entry
& entry
)
920 get_bucket_logshard_oid(entry
.tenant
, entry
.bucket_name
, &logshard_oid
);
922 librados::ObjectWriteOperation op
;
923 cls_rgw_reshard_remove(op
, entry
);
925 int ret
= rgw_rados_operate(dpp
, store
->getRados()->reshard_pool_ctx
, logshard_oid
, &op
, null_yield
);
927 ldpp_dout(dpp
, -1) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid
<< " tenant=" << entry
.tenant
<< " bucket=" << entry
.bucket_name
<< dendl
;
934 int RGWReshard::clear_bucket_resharding(const string
& bucket_instance_oid
, cls_rgw_reshard_entry
& entry
)
936 int ret
= cls_rgw_clear_bucket_resharding(store
->getRados()->reshard_pool_ctx
, bucket_instance_oid
);
938 lderr(store
->ctx()) << "ERROR: failed to clear bucket resharding, bucket_instance_oid=" << bucket_instance_oid
<< dendl
;
945 int RGWReshardWait::wait(optional_yield y
)
947 std::unique_lock
lock(mutex
);
954 auto& context
= y
.get_io_context();
955 auto& yield
= y
.get_yield_context();
957 Waiter
waiter(context
);
958 waiters
.push_back(waiter
);
961 waiter
.timer
.expires_after(duration
);
963 boost::system::error_code ec
;
964 waiter
.timer
.async_wait(yield
[ec
]);
967 waiters
.erase(waiters
.iterator_to(waiter
));
971 cond
.wait_for(lock
, duration
);
980 void RGWReshardWait::stop()
982 std::scoped_lock
lock(mutex
);
985 for (auto& waiter
: waiters
) {
986 // unblock any waiters with ECANCELED
987 waiter
.timer
.cancel();
991 int RGWReshard::process_single_logshard(int logshard_num
, const DoutPrefixProvider
*dpp
)
994 bool truncated
= true;
996 constexpr uint32_t max_entries
= 1000;
999 get_logshard_oid(logshard_num
, &logshard_oid
);
1001 RGWBucketReshardLock
logshard_lock(store
, logshard_oid
, false);
1003 int ret
= logshard_lock
.lock();
1005 ldpp_dout(dpp
, 5) << __func__
<< "(): failed to acquire lock on " <<
1006 logshard_oid
<< ", ret = " << ret
<<dendl
;
1011 std::list
<cls_rgw_reshard_entry
> entries
;
1012 ret
= list(logshard_num
, marker
, max_entries
, entries
, &truncated
);
1014 ldpp_dout(dpp
, 10) << "cannot list all reshards in logshard oid=" <<
1015 logshard_oid
<< dendl
;
1019 for(auto& entry
: entries
) { // logshard entries
1020 if(entry
.new_instance_id
.empty()) {
1022 ldpp_dout(dpp
, 20) << __func__
<< " resharding " <<
1023 entry
.bucket_name
<< dendl
;
1026 RGWBucketInfo bucket_info
;
1027 map
<string
, bufferlist
> attrs
;
1029 ret
= store
->getRados()->get_bucket_info(store
->svc(),
1030 entry
.tenant
, entry
.bucket_name
,
1031 bucket_info
, nullptr,
1032 null_yield
, dpp
, &attrs
);
1033 if (ret
< 0 || bucket_info
.bucket
.bucket_id
!= entry
.bucket_id
) {
1035 ldpp_dout(dpp
, 0) << __func__
<<
1036 ": Error in get_bucket_info for bucket " << entry
.bucket_name
<<
1037 ": " << cpp_strerror(-ret
) << dendl
;
1038 if (ret
!= -ENOENT
) {
1039 // any error other than ENOENT will abort
1043 ldpp_dout(dpp
, 0) << __func__
<<
1044 ": Bucket: " << entry
.bucket_name
<<
1045 " already resharded by someone, skipping " << dendl
;
1048 // we've encountered a reshard queue entry for an apparently
1049 // non-existent bucket; let's try to recover by cleaning up
1050 ldpp_dout(dpp
, 0) << __func__
<<
1051 ": removing reshard queue entry for a resharded or non-existent bucket" <<
1052 entry
.bucket_name
<< dendl
;
1054 ret
= remove(dpp
, entry
);
1056 ldpp_dout(dpp
, 0) << __func__
<<
1057 ": Error removing non-existent bucket " <<
1058 entry
.bucket_name
<< " from resharding queue: " <<
1059 cpp_strerror(-ret
) << dendl
;
1063 // we cleaned up, move on to the next entry
1064 goto finished_entry
;
1067 RGWBucketReshard
br(store
, bucket_info
, attrs
, nullptr);
1068 ret
= br
.execute(entry
.new_num_shards
, max_entries
, dpp
, false, nullptr,
1071 ldpp_dout(dpp
, 0) << __func__
<<
1072 ": Error during resharding bucket " << entry
.bucket_name
<< ":" <<
1073 cpp_strerror(-ret
)<< dendl
;
1077 ldpp_dout(dpp
, 20) << __func__
<<
1078 " removing reshard queue entry for bucket " << entry
.bucket_name
<<
1081 ret
= remove(dpp
, entry
);
1083 ldpp_dout(dpp
, 0) << __func__
<< ": Error removing bucket " <<
1084 entry
.bucket_name
<< " from resharding queue: " <<
1085 cpp_strerror(-ret
) << dendl
;
1088 } // if new instance id is empty
1092 Clock::time_point now
= Clock::now();
1093 if (logshard_lock
.should_renew(now
)) {
1094 ret
= logshard_lock
.renew(now
);
1100 entry
.get_key(&marker
);
1102 } while (truncated
);
1104 logshard_lock
.unlock();
1109 void RGWReshard::get_logshard_oid(int shard_num
, string
*logshard
)
1112 snprintf(buf
, sizeof(buf
), "%010u", (unsigned)shard_num
);
1114 string
objname(reshard_oid_prefix
);
1115 *logshard
= objname
+ buf
;
1118 int RGWReshard::process_all_logshards(const DoutPrefixProvider
*dpp
)
1120 if (!store
->svc()->zone
->can_reshard()) {
1121 ldpp_dout(dpp
, 20) << __func__
<< " Resharding is disabled" << dendl
;
1126 for (int i
= 0; i
< num_logshards
; i
++) {
1128 get_logshard_oid(i
, &logshard
);
1130 ldpp_dout(dpp
, 20) << "processing logshard = " << logshard
<< dendl
;
1132 ret
= process_single_logshard(i
, dpp
);
1134 ldpp_dout(dpp
, 20) << "finish processing logshard = " << logshard
<< " , ret = " << ret
<< dendl
;
1140 bool RGWReshard::going_down()
1145 void RGWReshard::start_processor()
1147 worker
= new ReshardWorker(store
->ctx(), this);
1148 worker
->create("rgw_reshard");
1151 void RGWReshard::stop_processor()
1162 void *RGWReshard::ReshardWorker::entry() {
1164 utime_t start
= ceph_clock_now();
1165 reshard
->process_all_logshards(this);
1167 if (reshard
->going_down())
1170 utime_t end
= ceph_clock_now();
1172 int secs
= cct
->_conf
.get_val
<uint64_t>("rgw_reshard_thread_interval");
1174 if (secs
<= end
.sec())
1175 continue; // next round
1179 std::unique_lock locker
{lock
};
1180 cond
.wait_for(locker
, std::chrono::seconds(secs
));
1181 } while (!reshard
->going_down());
1186 void RGWReshard::ReshardWorker::stop()
1188 std::lock_guard l
{lock
};
1192 CephContext
*RGWReshard::ReshardWorker::get_cct() const
1197 unsigned RGWReshard::ReshardWorker::get_subsys() const
1202 std::ostream
& RGWReshard::ReshardWorker::gen_prefix(std::ostream
& out
) const
1204 return out
<< "rgw reshard worker thread: ";