]> git.proxmox.com Git - ceph.git/blame - ceph/src/cls/rgw/cls_rgw_client.h
bump version to 18.2.4-pve3
[ceph.git] / ceph / src / cls / rgw / cls_rgw_client.h
CommitLineData
f67539c2 1// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
f64942e4
AA
2// vim: ts=8 sw=2 smarttab
3
#pragma once

#include <cstdint>
#include <list>
#include <map>
#include <string>
#include <string_view>
#include <vector>

#include "include/str_list.h"
#include "include/rados/librados.hpp"
#include "cls_rgw_ops.h"
#include "cls_rgw_const.h"
#include "common/RefCountedObj.h"
#include "common/strtol.h"
#include "include/compat.h"
#include "common/ceph_time.h"
#include "common/ceph_mutex.h"
7c673cae 15
20effc67 16
7c673cae
FG
17// Forward declaration
18class BucketIndexAioManager;
7c673cae
FG
19/*
20 * Bucket index AIO request argument, this is used to pass a argument
21 * to callback.
22 */
23struct BucketIndexAioArg : public RefCountedObject {
24 BucketIndexAioArg(int _id, BucketIndexAioManager* _manager) :
25 id(_id), manager(_manager) {}
26 int id;
27 BucketIndexAioManager* manager;
28};
29
30/*
20effc67
TL
31 * This class manages AIO completions. This class is not completely
32 * thread-safe, methods like *get_next_request_id* is not thread-safe
33 * and is expected to be called from within one thread.
7c673cae
FG
34 */
35class BucketIndexAioManager {
20effc67
TL
36public:
37
38 // allows us to reaccess the shard id and shard's oid during and
39 // after the asynchronous call is made
40 struct RequestObj {
41 int shard_id;
42 std::string oid;
43
44 RequestObj(int _shard_id, const std::string& _oid) :
45 shard_id(_shard_id), oid(_oid)
46 {/* empty */}
47 };
48
49
7c673cae 50private:
20effc67
TL
51 // NB: the following 4 maps use the request_id as the key; this
52 // is not the same as the shard_id!
f67539c2
TL
53 std::map<int, librados::AioCompletion*> pendings;
54 std::map<int, librados::AioCompletion*> completions;
20effc67
TL
55 std::map<int, const RequestObj> pending_objs;
56 std::map<int, const RequestObj> completion_objs;
57
9f95a23c
TL
58 int next = 0;
59 ceph::mutex lock = ceph::make_mutex("BucketIndexAioManager::lock");
60 ceph::condition_variable cond;
7c673cae
FG
61 /*
62 * Callback implementation for AIO request.
63 */
64 static void bucket_index_op_completion_cb(void* cb, void* arg) {
65 BucketIndexAioArg* cb_arg = (BucketIndexAioArg*) arg;
66 cb_arg->manager->do_completion(cb_arg->id);
67 cb_arg->put();
68 }
69
70 /*
71 * Get next request ID. This method is not thread-safe.
72 *
73 * Return next request ID.
74 */
20effc67
TL
75 int get_next_request_id() { return next++; }
76
7c673cae
FG
77 /*
78 * Add a new pending AIO completion instance.
79 *
80 * @param id - the request ID.
81 * @param completion - the AIO completion instance.
82 * @param oid - the object id associated with the object, if it is NULL, we don't
83 * track the object id per callback.
84 */
20effc67
TL
85 void add_pending(int request_id, librados::AioCompletion* completion, const int shard_id, const std::string& oid) {
86 pendings[request_id] = completion;
87 pending_objs.emplace(request_id, RequestObj(shard_id, oid));
7c673cae 88 }
20effc67 89
7c673cae
FG
90public:
91 /*
92 * Create a new instance.
93 */
9f95a23c 94 BucketIndexAioManager() = default;
7c673cae
FG
95
96 /*
97 * Do completion for the given AIO request.
98 */
20effc67 99 void do_completion(int request_id);
7c673cae
FG
100
101 /*
102 * Wait for AIO completions.
103 *
104 * valid_ret_code - valid AIO return code.
105 * num_completions - number of completions.
106 * ret_code - return code of failed AIO.
f67539c2 107 * objs - a std::list of objects that has been finished the AIO.
7c673cae
FG
108 *
109 * Return false if there is no pending AIO, true otherwise.
110 */
20effc67
TL
111 bool wait_for_completions(int valid_ret_code,
112 int *num_completions = nullptr,
113 int *ret_code = nullptr,
114 std::map<int, std::string> *completed_objs = nullptr,
115 std::map<int, std::string> *retry_objs = nullptr);
7c673cae
FG
116
117 /**
118 * Do aio read operation.
119 */
20effc67 120 bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectReadOperation *op) {
9f95a23c 121 std::lock_guard l{lock};
20effc67
TL
122 const int request_id = get_next_request_id();
123 BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
9f95a23c 124 librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
7c673cae
FG
125 int r = io_ctx.aio_operate(oid, c, (librados::ObjectReadOperation*)op, NULL);
126 if (r >= 0) {
20effc67 127 add_pending(arg->id, c, shard_id, oid);
7c673cae 128 } else {
9f95a23c 129 arg->put();
7c673cae
FG
130 c->release();
131 }
132 return r;
133 }
134
135 /**
136 * Do aio write operation.
137 */
20effc67 138 bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectWriteOperation *op) {
9f95a23c 139 std::lock_guard l{lock};
20effc67
TL
140 const int request_id = get_next_request_id();
141 BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
9f95a23c 142 librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
7c673cae
FG
143 int r = io_ctx.aio_operate(oid, c, (librados::ObjectWriteOperation*)op);
144 if (r >= 0) {
20effc67 145 add_pending(arg->id, c, shard_id, oid);
7c673cae 146 } else {
9f95a23c 147 arg->put();
7c673cae
FG
148 c->release();
149 }
150 return r;
151 }
152};
153
154class RGWGetDirHeader_CB : public RefCountedObject {
155public:
156 ~RGWGetDirHeader_CB() override {}
157 virtual void handle_response(int r, rgw_bucket_dir_header& header) = 0;
158};
159
160class BucketIndexShardsManager {
161private:
162 // Per shard setting manager, for example, marker.
f67539c2 163 std::map<int, std::string> value_by_shards;
7c673cae 164public:
f67539c2
TL
165 const static std::string KEY_VALUE_SEPARATOR;
166 const static std::string SHARDS_SEPARATOR;
7c673cae 167
f67539c2 168 void add(int shard, const std::string& value) {
7c673cae
FG
169 value_by_shards[shard] = value;
170 }
171
f67539c2
TL
172 const std::string& get(int shard, const std::string& default_value) const {
173 auto iter = value_by_shards.find(shard);
7c673cae
FG
174 return (iter == value_by_shards.end() ? default_value : iter->second);
175 }
176
f67539c2
TL
177 const std::map<int, std::string>& get() const {
178 return value_by_shards;
179 }
180 std::map<int, std::string>& get() {
7c673cae
FG
181 return value_by_shards;
182 }
183
f67539c2 184 bool empty() const {
7c673cae
FG
185 return value_by_shards.empty();
186 }
187
f67539c2 188 void to_string(std::string *out) const {
7c673cae
FG
189 if (!out) {
190 return;
191 }
192 out->clear();
f67539c2
TL
193 for (auto iter = value_by_shards.begin();
194 iter != value_by_shards.end(); ++iter) {
7c673cae
FG
195 if (out->length()) {
196 // Not the first item, append a separator first
197 out->append(SHARDS_SEPARATOR);
198 }
199 char buf[16];
200 snprintf(buf, sizeof(buf), "%d", iter->first);
201 out->append(buf);
202 out->append(KEY_VALUE_SEPARATOR);
203 out->append(iter->second);
204 }
205 }
206
f67539c2
TL
207 static bool is_shards_marker(const std::string& marker) {
208 return marker.find(KEY_VALUE_SEPARATOR) != std::string::npos;
7c673cae
FG
209 }
210
211 /*
f67539c2 212 * convert from std::string. There are two options of how the std::string looks like:
7c673cae
FG
213 *
214 * 1. Single shard, no shard id specified, e.g. 000001.23.1
215 *
216 * for this case, if passed shard_id >= 0, use this shard id, otherwise assume that it's a
217 * bucket with no shards.
218 *
219 * 2. One or more shards, shard id specified for each shard, e.g., 0#00002.12,1#00003.23.2
220 *
221 */
1e59de90 222 int from_string(std::string_view composed_marker, int shard_id) {
7c673cae 223 value_by_shards.clear();
f67539c2 224 std::vector<std::string> shards;
7c673cae
FG
225 get_str_vec(composed_marker, SHARDS_SEPARATOR.c_str(), shards);
226 if (shards.size() > 1 && shard_id >= 0) {
227 return -EINVAL;
228 }
f67539c2 229 for (auto iter = shards.begin(); iter != shards.end(); ++iter) {
7c673cae 230 size_t pos = iter->find(KEY_VALUE_SEPARATOR);
f67539c2 231 if (pos == std::string::npos) {
7c673cae
FG
232 if (!value_by_shards.empty()) {
233 return -EINVAL;
234 }
235 if (shard_id < 0) {
236 add(0, *iter);
237 } else {
238 add(shard_id, *iter);
239 }
240 return 0;
241 }
f67539c2
TL
242 std::string shard_str = iter->substr(0, pos);
243 std::string err;
7c673cae
FG
244 int shard = (int)strict_strtol(shard_str.c_str(), 10, &err);
245 if (!err.empty()) {
246 return -EINVAL;
247 }
248 add(shard, iter->substr(pos + 1));
249 }
250 return 0;
251 }
b32b8144
FG
252
253 // trim the '<shard-id>#' prefix from a single shard marker if present
254 static std::string get_shard_marker(const std::string& marker) {
255 auto p = marker.find(KEY_VALUE_SEPARATOR);
256 if (p == marker.npos) {
257 return marker;
258 }
259 return marker.substr(p + 1);
260 }
7c673cae
FG
261};
262
263/* bucket index */
f64942e4 264void cls_rgw_bucket_init_index(librados::ObjectWriteOperation& o);
7c673cae
FG
265
266class CLSRGWConcurrentIO {
267protected:
268 librados::IoCtx& io_ctx;
20effc67
TL
269
270 // map of shard # to oid; the shards that are remaining to be processed
f67539c2 271 std::map<int, std::string>& objs_container;
20effc67 272 // iterator to work through objs_container
f67539c2 273 std::map<int, std::string>::iterator iter;
20effc67 274
7c673cae
FG
275 uint32_t max_aio;
276 BucketIndexAioManager manager;
277
f67539c2 278 virtual int issue_op(int shard_id, const std::string& oid) = 0;
7c673cae
FG
279
280 virtual void cleanup() {}
281 virtual int valid_ret_code() { return 0; }
282 // Return true if multiple rounds of OPs might be needed, this happens when
283 // OP needs to be re-send until a certain code is returned.
284 virtual bool need_multiple_rounds() { return false; }
285 // Add a new object to the end of the container.
f67539c2
TL
286 virtual void add_object(int shard, const std::string& oid) {}
287 virtual void reset_container(std::map<int, std::string>& objs) {}
7c673cae
FG
288
289public:
f64942e4
AA
290
291 CLSRGWConcurrentIO(librados::IoCtx& ioc,
f67539c2 292 std::map<int, std::string>& _objs_container,
f64942e4
AA
293 uint32_t _max_aio) :
294 io_ctx(ioc), objs_container(_objs_container), max_aio(_max_aio)
295 {}
296
1e59de90 297 virtual ~CLSRGWConcurrentIO() {}
7c673cae 298
20effc67
TL
299 int operator()();
300}; // class CLSRGWConcurrentIO
7c673cae 301
7c673cae
FG
302
303class CLSRGWIssueBucketIndexInit : public CLSRGWConcurrentIO {
304protected:
f67539c2 305 int issue_op(int shard_id, const std::string& oid) override;
7c673cae
FG
306 int valid_ret_code() override { return -EEXIST; }
307 void cleanup() override;
308public:
20effc67
TL
309 CLSRGWIssueBucketIndexInit(librados::IoCtx& ioc,
310 std::map<int, std::string>& _bucket_objs,
311 uint32_t _max_aio) :
7c673cae 312 CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio) {}
1e59de90 313 virtual ~CLSRGWIssueBucketIndexInit() override {}
7c673cae
FG
314};
315
f64942e4
AA
316
317class CLSRGWIssueBucketIndexClean : public CLSRGWConcurrentIO {
318protected:
f67539c2 319 int issue_op(int shard_id, const std::string& oid) override;
f64942e4
AA
320 int valid_ret_code() override {
321 return -ENOENT;
322 }
323
324public:
325 CLSRGWIssueBucketIndexClean(librados::IoCtx& ioc,
f67539c2 326 std::map<int, std::string>& _bucket_objs,
f64942e4
AA
327 uint32_t _max_aio) :
328 CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio)
329 {}
1e59de90 330 virtual ~CLSRGWIssueBucketIndexClean() override {}
f64942e4
AA
331};
332
333
7c673cae
FG
334class CLSRGWIssueSetTagTimeout : public CLSRGWConcurrentIO {
335 uint64_t tag_timeout;
336protected:
f67539c2 337 int issue_op(int shard_id, const std::string& oid) override;
7c673cae 338public:
f67539c2 339 CLSRGWIssueSetTagTimeout(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
7c673cae
FG
340 uint32_t _max_aio, uint64_t _tag_timeout) :
341 CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), tag_timeout(_tag_timeout) {}
1e59de90 342 virtual ~CLSRGWIssueSetTagTimeout() override {}
7c673cae
FG
343};
344
11fdf7f2
TL
345void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
346 bool absolute,
f67539c2 347 const std::map<RGWObjCategory, rgw_bucket_category_stats>& stats);
7c673cae 348
20effc67 349void cls_rgw_bucket_prepare_op(librados::ObjectWriteOperation& o, RGWModifyOp op, const std::string& tag,
f67539c2 350 const cls_rgw_obj_key& key, const std::string& locator, bool log_op,
20effc67 351 uint16_t bilog_op, const rgw_zone_set& zones_trace);
7c673cae 352
20effc67
TL
353void cls_rgw_bucket_complete_op(librados::ObjectWriteOperation& o, RGWModifyOp op, const std::string& tag,
354 const rgw_bucket_entry_ver& ver,
7c673cae 355 const cls_rgw_obj_key& key,
20effc67
TL
356 const rgw_bucket_dir_entry_meta& dir_meta,
357 const std::list<cls_rgw_obj_key> *remove_objs, bool log_op,
358 uint16_t bilog_op, const rgw_zone_set *zones_trace);
7c673cae 359
f67539c2
TL
360void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, std::list<std::string>& keep_attr_prefixes);
361void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const std::string& attr);
362void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const std::string& prefix, bool fail_if_exist);
7c673cae
FG
363void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const ceph::real_time& mtime, bool high_precision_time, RGWCheckMTimeType type);
364
f67539c2 365int cls_rgw_bi_get(librados::IoCtx& io_ctx, const std::string oid,
20effc67 366 BIIndexType index_type, const cls_rgw_obj_key& key,
7c673cae 367 rgw_cls_bi_entry *entry);
20effc67
TL
368int cls_rgw_bi_put(librados::IoCtx& io_ctx, const std::string oid, const rgw_cls_bi_entry& entry);
369void cls_rgw_bi_put(librados::ObjectWriteOperation& op, const std::string oid, const rgw_cls_bi_entry& entry);
370int cls_rgw_bi_list(librados::IoCtx& io_ctx, const std::string& oid,
f67539c2
TL
371 const std::string& name, const std::string& marker, uint32_t max,
372 std::list<rgw_cls_bi_entry> *entries, bool *is_truncated);
7c673cae
FG
373
374
9f95a23c 375void cls_rgw_bucket_link_olh(librados::ObjectWriteOperation& op,
20effc67
TL
376 const cls_rgw_obj_key& key, const ceph::buffer::list& olh_tag,
377 bool delete_marker, const std::string& op_tag, const rgw_bucket_dir_entry_meta *meta,
378 uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace);
9f95a23c 379void cls_rgw_bucket_unlink_instance(librados::ObjectWriteOperation& op,
f67539c2 380 const cls_rgw_obj_key& key, const std::string& op_tag,
20effc67 381 const std::string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace);
f67539c2
TL
382void cls_rgw_get_olh_log(librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker, const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret, int& op_ret);
383void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const std::string& olh_tag);
384void cls_rgw_clear_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, const std::string& olh_tag);
9f95a23c
TL
385
386// these overloads which call io_ctx.operate() should not be called in the rgw.
387// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
388#ifndef CLS_CLIENT_HIDE_IOCTX
f67539c2 389int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, const std::string& oid,
20effc67
TL
390 const cls_rgw_obj_key& key, const ceph::buffer::list& olh_tag,
391 bool delete_marker, const std::string& op_tag, const rgw_bucket_dir_entry_meta *meta,
392 uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace);
f67539c2
TL
393int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, const std::string& oid,
394 const cls_rgw_obj_key& key, const std::string& op_tag,
20effc67 395 const std::string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace);
f67539c2
TL
396int cls_rgw_get_olh_log(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, uint64_t ver_marker,
397 const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret);
398int cls_rgw_clear_olh(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, const std::string& olh_tag);
399int cls_rgw_usage_log_trim(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket,
9f95a23c
TL
400 uint64_t start_epoch, uint64_t end_epoch);
401#endif
402
7c673cae
FG
403
404/**
f67539c2 405 * Std::list the bucket with the starting object and filter prefix.
7c673cae 406 * NOTE: this method do listing requests for each bucket index shards identified by
f67539c2 407 * the keys of the *list_results* std::map, which means the std::map should be popludated
7c673cae
FG
408 * by the caller to fill with each bucket index object id.
409 *
410 * io_ctx - IO context for rados.
411 * start_obj - marker for the listing.
412 * filter_prefix - filter prefix.
413 * num_entries - number of entries to request for each object (note the total
414 * amount of entries returned depends on the number of shardings).
f67539c2 415 * list_results - the std::list results keyed by bucket index object id.
7c673cae
FG
416 * max_aio - the maximum number of AIO (for throttling).
417 *
418 * Return 0 on success, a failure code otherwise.
419*/
420
421class CLSRGWIssueBucketList : public CLSRGWConcurrentIO {
422 cls_rgw_obj_key start_obj;
f67539c2
TL
423 std::string filter_prefix;
424 std::string delimiter;
7c673cae
FG
425 uint32_t num_entries;
426 bool list_versions;
20effc67
TL
427 std::map<int, rgw_cls_list_ret>& result; // request_id -> return value
428
7c673cae 429protected:
f67539c2 430 int issue_op(int shard_id, const std::string& oid) override;
20effc67
TL
431 void reset_container(std::map<int, std::string>& objs) override;
432
7c673cae 433public:
9f95a23c
TL
434 CLSRGWIssueBucketList(librados::IoCtx& io_ctx,
435 const cls_rgw_obj_key& _start_obj,
f67539c2
TL
436 const std::string& _filter_prefix,
437 const std::string& _delimiter,
9f95a23c 438 uint32_t _num_entries,
7c673cae 439 bool _list_versions,
20effc67
TL
440 std::map<int, std::string>& oids, // shard_id -> shard_oid
441 // shard_id -> return value
f67539c2 442 std::map<int, rgw_cls_list_ret>& list_results,
7c673cae
FG
443 uint32_t max_aio) :
444 CLSRGWConcurrentIO(io_ctx, oids, max_aio),
9f95a23c
TL
445 start_obj(_start_obj), filter_prefix(_filter_prefix), delimiter(_delimiter),
446 num_entries(_num_entries), list_versions(_list_versions),
447 result(list_results)
448 {}
7c673cae
FG
449};
450
81eedcae
TL
451void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op,
452 const cls_rgw_obj_key& start_obj,
453 const std::string& filter_prefix,
9f95a23c 454 const std::string& delimiter,
81eedcae
TL
455 uint32_t num_entries,
456 bool list_versions,
457 rgw_cls_list_ret* result);
458
9f95a23c
TL
459void cls_rgw_bilog_list(librados::ObjectReadOperation& op,
460 const std::string& marker, uint32_t max,
461 cls_rgw_bi_log_list_ret *pdata, int *ret = nullptr);
462
7c673cae 463class CLSRGWIssueBILogList : public CLSRGWConcurrentIO {
f67539c2 464 std::map<int, cls_rgw_bi_log_list_ret>& result;
7c673cae
FG
465 BucketIndexShardsManager& marker_mgr;
466 uint32_t max;
467protected:
f67539c2 468 int issue_op(int shard_id, const std::string& oid) override;
7c673cae
FG
469public:
470 CLSRGWIssueBILogList(librados::IoCtx& io_ctx, BucketIndexShardsManager& _marker_mgr, uint32_t _max,
f67539c2
TL
471 std::map<int, std::string>& oids,
472 std::map<int, cls_rgw_bi_log_list_ret>& bi_log_lists, uint32_t max_aio) :
7c673cae
FG
473 CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(bi_log_lists),
474 marker_mgr(_marker_mgr), max(_max) {}
1e59de90 475 virtual ~CLSRGWIssueBILogList() override {}
7c673cae
FG
476};
477
9f95a23c
TL
478void cls_rgw_bilog_trim(librados::ObjectWriteOperation& op,
479 const std::string& start_marker,
480 const std::string& end_marker);
481
7c673cae
FG
482class CLSRGWIssueBILogTrim : public CLSRGWConcurrentIO {
483 BucketIndexShardsManager& start_marker_mgr;
484 BucketIndexShardsManager& end_marker_mgr;
485protected:
f67539c2 486 int issue_op(int shard_id, const std::string& oid) override;
7c673cae
FG
487 // Trim until -ENODATA is returned.
488 int valid_ret_code() override { return -ENODATA; }
489 bool need_multiple_rounds() override { return true; }
f67539c2
TL
490 void add_object(int shard, const std::string& oid) override { objs_container[shard] = oid; }
491 void reset_container(std::map<int, std::string>& objs) override {
7c673cae
FG
492 objs_container.swap(objs);
493 iter = objs_container.begin();
494 objs.clear();
495 }
496public:
497 CLSRGWIssueBILogTrim(librados::IoCtx& io_ctx, BucketIndexShardsManager& _start_marker_mgr,
f67539c2 498 BucketIndexShardsManager& _end_marker_mgr, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
7c673cae
FG
499 CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio),
500 start_marker_mgr(_start_marker_mgr), end_marker_mgr(_end_marker_mgr) {}
1e59de90 501 virtual ~CLSRGWIssueBILogTrim() override {}
7c673cae
FG
502};
503
504/**
505 * Check the bucket index.
506 *
507 * io_ctx - IO context for rados.
508 * bucket_objs_ret - check result for all shards.
509 * max_aio - the maximum number of AIO (for throttling).
510 *
511 * Return 0 on success, a failure code otherwise.
512 */
f67539c2
TL
513class CLSRGWIssueBucketCheck : public CLSRGWConcurrentIO /*<std::map<std::string, rgw_cls_check_index_ret> >*/ {
514 std::map<int, rgw_cls_check_index_ret>& result;
7c673cae 515protected:
f67539c2 516 int issue_op(int shard_id, const std::string& oid) override;
7c673cae 517public:
f67539c2
TL
518 CLSRGWIssueBucketCheck(librados::IoCtx& ioc, std::map<int, std::string>& oids,
519 std::map<int, rgw_cls_check_index_ret>& bucket_objs_ret,
11fdf7f2 520 uint32_t _max_aio) :
7c673cae 521 CLSRGWConcurrentIO(ioc, oids, _max_aio), result(bucket_objs_ret) {}
1e59de90 522 virtual ~CLSRGWIssueBucketCheck() override {}
7c673cae
FG
523};
524
525class CLSRGWIssueBucketRebuild : public CLSRGWConcurrentIO {
526protected:
f67539c2 527 int issue_op(int shard_id, const std::string& oid) override;
7c673cae 528public:
f67539c2 529 CLSRGWIssueBucketRebuild(librados::IoCtx& io_ctx, std::map<int, std::string>& bucket_objs,
7c673cae 530 uint32_t max_aio) : CLSRGWConcurrentIO(io_ctx, bucket_objs, max_aio) {}
1e59de90 531 virtual ~CLSRGWIssueBucketRebuild() override {}
7c673cae
FG
532};
533
534class CLSRGWIssueGetDirHeader : public CLSRGWConcurrentIO {
f67539c2 535 std::map<int, rgw_cls_list_ret>& result;
7c673cae 536protected:
f67539c2 537 int issue_op(int shard_id, const std::string& oid) override;
7c673cae 538public:
f67539c2 539 CLSRGWIssueGetDirHeader(librados::IoCtx& io_ctx, std::map<int, std::string>& oids, std::map<int, rgw_cls_list_ret>& dir_headers,
7c673cae
FG
540 uint32_t max_aio) :
541 CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(dir_headers) {}
1e59de90 542 virtual ~CLSRGWIssueGetDirHeader() override {}
7c673cae
FG
543};
544
31f18b77
FG
545class CLSRGWIssueSetBucketResharding : public CLSRGWConcurrentIO {
546 cls_rgw_bucket_instance_entry entry;
547protected:
f67539c2 548 int issue_op(int shard_id, const std::string& oid) override;
31f18b77 549public:
f67539c2 550 CLSRGWIssueSetBucketResharding(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
31f18b77
FG
551 const cls_rgw_bucket_instance_entry& _entry,
552 uint32_t _max_aio) : CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), entry(_entry) {}
1e59de90 553 virtual ~CLSRGWIssueSetBucketResharding() override {}
31f18b77
FG
554};
555
c07f9fc5
FG
556class CLSRGWIssueResyncBucketBILog : public CLSRGWConcurrentIO {
557protected:
f67539c2 558 int issue_op(int shard_id, const std::string& oid);
c07f9fc5 559public:
f67539c2 560 CLSRGWIssueResyncBucketBILog(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
c07f9fc5 561 CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
1e59de90 562 virtual ~CLSRGWIssueResyncBucketBILog() override {}
c07f9fc5
FG
563};
564
565class CLSRGWIssueBucketBILogStop : public CLSRGWConcurrentIO {
566protected:
f67539c2 567 int issue_op(int shard_id, const std::string& oid);
c07f9fc5 568public:
f67539c2 569 CLSRGWIssueBucketBILogStop(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
c07f9fc5 570 CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
1e59de90 571 virtual ~CLSRGWIssueBucketBILogStop() override {}
c07f9fc5
FG
572};
573
f67539c2 574int cls_rgw_get_dir_header_async(librados::IoCtx& io_ctx, std::string& oid, RGWGetDirHeader_CB *ctx);
7c673cae 575
f67539c2 576void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, ceph::buffer::list& updates);
7c673cae 577
f67539c2 578void cls_rgw_suggest_changes(librados::ObjectWriteOperation& o, ceph::buffer::list& updates);
7c673cae
FG
579
580/* usage logging */
9f95a23c
TL
581// these overloads which call io_ctx.operate() should not be called in the rgw.
582// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
583#ifndef CLS_CLIENT_HIDE_IOCTX
f67539c2
TL
584int cls_rgw_usage_log_read(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket,
585 uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, std::string& read_iter,
586 std::map<rgw_user_bucket, rgw_usage_log_entry>& usage, bool *is_truncated);
9f95a23c 587#endif
7c673cae 588
f67539c2 589void cls_rgw_usage_log_trim(librados::ObjectWriteOperation& op, const std::string& user, const std::string& bucket, uint64_t start_epoch, uint64_t end_epoch);
7c673cae 590
11fdf7f2 591void cls_rgw_usage_log_clear(librados::ObjectWriteOperation& op);
7c673cae
FG
592void cls_rgw_usage_log_add(librados::ObjectWriteOperation& op, rgw_usage_log_info& info);
593
594/* garbage collection */
595void cls_rgw_gc_set_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info);
f67539c2
TL
596void cls_rgw_gc_defer_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, const std::string& tag);
597void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const std::vector<std::string>& tags);
7c673cae 598
9f95a23c
TL
599// these overloads which call io_ctx.operate() should not be called in the rgw.
600// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
601#ifndef CLS_CLIENT_HIDE_IOCTX
f67539c2
TL
602int cls_rgw_gc_list(librados::IoCtx& io_ctx, std::string& oid, std::string& marker, uint32_t max, bool expired_only,
603 std::list<cls_rgw_gc_obj_info>& entries, bool *truncated, std::string& next_marker);
9f95a23c 604#endif
7c673cae
FG
605
606/* lifecycle */
9f95a23c
TL
607// these overloads which call io_ctx.operate() should not be called in the rgw.
608// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
609#ifndef CLS_CLIENT_HIDE_IOCTX
f67539c2
TL
610int cls_rgw_lc_get_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head);
611int cls_rgw_lc_put_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head);
1e59de90 612int cls_rgw_lc_get_next_entry(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, cls_rgw_lc_entry& entry);
f67539c2
TL
613int cls_rgw_lc_rm_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry);
614int cls_rgw_lc_set_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry);
615int cls_rgw_lc_get_entry(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, cls_rgw_lc_entry& entry);
616int cls_rgw_lc_list(librados::IoCtx& io_ctx, const std::string& oid,
617 const std::string& marker, uint32_t max_entries,
20effc67 618 std::vector<cls_rgw_lc_entry>& entries);
9f95a23c 619#endif
7c673cae 620
1e59de90
TL
621/* multipart */
622void cls_rgw_mp_upload_part_info_update(librados::ObjectWriteOperation& op, const std::string& part_key, const RGWUploadPartInfo& info);
623
31f18b77
FG
624/* resharding */
625void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
9f95a23c
TL
626void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
627// these overloads which call io_ctx.operate() should not be called in the rgw.
628// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
629#ifndef CLS_CLIENT_HIDE_IOCTX
f67539c2
TL
630int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const std::string& oid, std::string& marker, uint32_t max,
631 std::list<cls_rgw_reshard_entry>& entries, bool* is_truncated);
632int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_reshard_entry& entry);
9f95a23c 633#endif
31f18b77 634
f64942e4 635/* resharding attribute on bucket index shard headers */
9f95a23c
TL
636void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err);
637// these overloads which call io_ctx.operate() should not be called in the rgw.
638// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
639#ifndef CLS_CLIENT_HIDE_IOCTX
f67539c2 640int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
31f18b77 641 const cls_rgw_bucket_instance_entry& entry);
f67539c2
TL
642int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid);
643int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
31f18b77 644 cls_rgw_bucket_instance_entry *entry);
9f95a23c 645#endif