]> git.proxmox.com Git - ceph.git/blame_incremental - ceph/src/cls/rgw/cls_rgw_client.h
bump version to 19.2.0-pve1
[ceph.git] / ceph / src / cls / rgw / cls_rgw_client.h
... / ...
CommitLineData
1// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2// vim: ts=8 sw=2 smarttab
3
4#pragma once
5
6#include <boost/intrusive_ptr.hpp>
7#include <boost/smart_ptr/intrusive_ref_counter.hpp>
8#include "include/str_list.h"
9#include "include/rados/librados.hpp"
10#include "cls_rgw_ops.h"
11#include "cls_rgw_const.h"
12#include "common/RefCountedObj.h"
13#include "common/strtol.h"
14#include "include/compat.h"
15#include "common/ceph_time.h"
16#include "common/ceph_mutex.h"
17
18
// Forward declaration
class BucketIndexAioManager;
/*
 * Bucket index AIO request argument; passed as the user argument to the
 * AIO completion callback so the callback can route the result back to
 * the owning manager. Ref-counted (RefCountedObject): the callback drops
 * its reference via put() once the completion has been processed.
 */
struct BucketIndexAioArg : public RefCountedObject {
  BucketIndexAioArg(int _id, BucketIndexAioManager* _manager) :
    id(_id), manager(_manager) {}
  int id;                          // request id assigned by the manager
  BucketIndexAioManager* manager;  // manager to notify on completion
};
31
32/*
33 * This class manages AIO completions. This class is not completely
34 * thread-safe, methods like *get_next_request_id* is not thread-safe
35 * and is expected to be called from within one thread.
36 */
37class BucketIndexAioManager {
38public:
39
40 // allows us to reaccess the shard id and shard's oid during and
41 // after the asynchronous call is made
42 struct RequestObj {
43 int shard_id;
44 std::string oid;
45
46 RequestObj(int _shard_id, const std::string& _oid) :
47 shard_id(_shard_id), oid(_oid)
48 {/* empty */}
49 };
50
51
52private:
53 // NB: the following 4 maps use the request_id as the key; this
54 // is not the same as the shard_id!
55 std::map<int, librados::AioCompletion*> pendings;
56 std::map<int, librados::AioCompletion*> completions;
57 std::map<int, const RequestObj> pending_objs;
58 std::map<int, const RequestObj> completion_objs;
59
60 int next = 0;
61 ceph::mutex lock = ceph::make_mutex("BucketIndexAioManager::lock");
62 ceph::condition_variable cond;
63 /*
64 * Callback implementation for AIO request.
65 */
66 static void bucket_index_op_completion_cb(void* cb, void* arg) {
67 BucketIndexAioArg* cb_arg = (BucketIndexAioArg*) arg;
68 cb_arg->manager->do_completion(cb_arg->id);
69 cb_arg->put();
70 }
71
72 /*
73 * Get next request ID. This method is not thread-safe.
74 *
75 * Return next request ID.
76 */
77 int get_next_request_id() { return next++; }
78
79 /*
80 * Add a new pending AIO completion instance.
81 *
82 * @param id - the request ID.
83 * @param completion - the AIO completion instance.
84 * @param oid - the object id associated with the object, if it is NULL, we don't
85 * track the object id per callback.
86 */
87 void add_pending(int request_id, librados::AioCompletion* completion, const int shard_id, const std::string& oid) {
88 pendings[request_id] = completion;
89 pending_objs.emplace(request_id, RequestObj(shard_id, oid));
90 }
91
92public:
93 /*
94 * Create a new instance.
95 */
96 BucketIndexAioManager() = default;
97
98 /*
99 * Do completion for the given AIO request.
100 */
101 void do_completion(int request_id);
102
103 /*
104 * Wait for AIO completions.
105 *
106 * valid_ret_code - valid AIO return code.
107 * num_completions - number of completions.
108 * ret_code - return code of failed AIO.
109 * objs - a std::list of objects that has been finished the AIO.
110 *
111 * Return false if there is no pending AIO, true otherwise.
112 */
113 bool wait_for_completions(int valid_ret_code,
114 int *num_completions = nullptr,
115 int *ret_code = nullptr,
116 std::map<int, std::string> *completed_objs = nullptr,
117 std::map<int, std::string> *retry_objs = nullptr);
118
119 /**
120 * Do aio read operation.
121 */
122 bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectReadOperation *op) {
123 std::lock_guard l{lock};
124 const int request_id = get_next_request_id();
125 BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
126 librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
127 int r = io_ctx.aio_operate(oid, c, (librados::ObjectReadOperation*)op, NULL);
128 if (r >= 0) {
129 add_pending(arg->id, c, shard_id, oid);
130 } else {
131 arg->put();
132 c->release();
133 }
134 return r;
135 }
136
137 /**
138 * Do aio write operation.
139 */
140 bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectWriteOperation *op) {
141 std::lock_guard l{lock};
142 const int request_id = get_next_request_id();
143 BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
144 librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
145 int r = io_ctx.aio_operate(oid, c, (librados::ObjectWriteOperation*)op);
146 if (r >= 0) {
147 add_pending(arg->id, c, shard_id, oid);
148 } else {
149 arg->put();
150 c->release();
151 }
152 return r;
153 }
154};
155
156class RGWGetDirHeader_CB : public boost::intrusive_ref_counter<RGWGetDirHeader_CB> {
157public:
158 virtual ~RGWGetDirHeader_CB() {}
159 virtual void handle_response(int r, const rgw_bucket_dir_header& header) = 0;
160};
161
162class BucketIndexShardsManager {
163private:
164 // Per shard setting manager, for example, marker.
165 std::map<int, std::string> value_by_shards;
166public:
167 const static std::string KEY_VALUE_SEPARATOR;
168 const static std::string SHARDS_SEPARATOR;
169
170 void add(int shard, const std::string& value) {
171 value_by_shards[shard] = value;
172 }
173
174 const std::string& get(int shard, const std::string& default_value) const {
175 auto iter = value_by_shards.find(shard);
176 return (iter == value_by_shards.end() ? default_value : iter->second);
177 }
178
179 const std::map<int, std::string>& get() const {
180 return value_by_shards;
181 }
182 std::map<int, std::string>& get() {
183 return value_by_shards;
184 }
185
186 bool empty() const {
187 return value_by_shards.empty();
188 }
189
190 void to_string(std::string *out) const {
191 if (!out) {
192 return;
193 }
194 out->clear();
195 for (auto iter = value_by_shards.begin();
196 iter != value_by_shards.end(); ++iter) {
197 if (out->length()) {
198 // Not the first item, append a separator first
199 out->append(SHARDS_SEPARATOR);
200 }
201 char buf[16];
202 snprintf(buf, sizeof(buf), "%d", iter->first);
203 out->append(buf);
204 out->append(KEY_VALUE_SEPARATOR);
205 out->append(iter->second);
206 }
207 }
208
209 static bool is_shards_marker(const std::string& marker) {
210 return marker.find(KEY_VALUE_SEPARATOR) != std::string::npos;
211 }
212
213 /*
214 * convert from std::string. There are two options of how the std::string looks like:
215 *
216 * 1. Single shard, no shard id specified, e.g. 000001.23.1
217 *
218 * for this case, if passed shard_id >= 0, use this shard id, otherwise assume that it's a
219 * bucket with no shards.
220 *
221 * 2. One or more shards, shard id specified for each shard, e.g., 0#00002.12,1#00003.23.2
222 *
223 */
224 int from_string(std::string_view composed_marker, int shard_id) {
225 value_by_shards.clear();
226 std::vector<std::string> shards;
227 get_str_vec(composed_marker, SHARDS_SEPARATOR.c_str(), shards);
228 if (shards.size() > 1 && shard_id >= 0) {
229 return -EINVAL;
230 }
231 for (auto iter = shards.begin(); iter != shards.end(); ++iter) {
232 size_t pos = iter->find(KEY_VALUE_SEPARATOR);
233 if (pos == std::string::npos) {
234 if (!value_by_shards.empty()) {
235 return -EINVAL;
236 }
237 if (shard_id < 0) {
238 add(0, *iter);
239 } else {
240 add(shard_id, *iter);
241 }
242 return 0;
243 }
244 std::string shard_str = iter->substr(0, pos);
245 std::string err;
246 int shard = (int)strict_strtol(shard_str.c_str(), 10, &err);
247 if (!err.empty()) {
248 return -EINVAL;
249 }
250 add(shard, iter->substr(pos + 1));
251 }
252 return 0;
253 }
254
255 // trim the '<shard-id>#' prefix from a single shard marker if present
256 static std::string get_shard_marker(const std::string& marker) {
257 auto p = marker.find(KEY_VALUE_SEPARATOR);
258 if (p == marker.npos) {
259 return marker;
260 }
261 return marker.substr(p + 1);
262 }
263};
264
265/* bucket index */
266void cls_rgw_bucket_init_index(librados::ObjectWriteOperation& o);
267
/*
 * Base class that drives one cls operation per bucket index shard,
 * with at most max_aio requests in flight at a time, using a
 * BucketIndexAioManager. Subclasses implement issue_op() to submit the
 * actual operation for a single shard; the driving loop lives in
 * operator() (implemented in cls_rgw_client.cc).
 */
class CLSRGWConcurrentIO {
protected:
  librados::IoCtx& io_ctx;

  // map of shard # to oid; the shards that are remaining to be processed
  std::map<int, std::string>& objs_container;
  // iterator to work through objs_container
  std::map<int, std::string>::iterator iter;

  // maximum number of AIO requests in flight (throttling)
  uint32_t max_aio;
  BucketIndexAioManager manager;

  // submit the asynchronous operation for one shard
  virtual int issue_op(int shard_id, const std::string& oid) = 0;

  // optional hook for subclasses to undo partial work; default no-op.
  // NOTE(review): invocation point is in cls_rgw_client.cc — presumably
  // on failure; confirm against the implementation.
  virtual void cleanup() {}
  // an additional return code (besides 0) to treat as success
  virtual int valid_ret_code() { return 0; }
  // Return true if multiple rounds of OPs might be needed, this happens when
  // OP needs to be re-send until a certain code is returned.
  virtual bool need_multiple_rounds() { return false; }
  // Add a new object to the end of the container.
  virtual void add_object(int shard, const std::string& oid) {}
  // replace objs_container with the next round's objects (used only when
  // need_multiple_rounds() returns true)
  virtual void reset_container(std::map<int, std::string>& objs) {}

public:

  CLSRGWConcurrentIO(librados::IoCtx& ioc,
		     std::map<int, std::string>& _objs_container,
		     uint32_t _max_aio) :
    io_ctx(ioc), objs_container(_objs_container), max_aio(_max_aio)
  {}

  virtual ~CLSRGWConcurrentIO() {}

  // run all per-shard operations to completion (defined in the .cc)
  int operator()();
}; // class CLSRGWConcurrentIO
303
304
305class CLSRGWIssueBucketIndexInit : public CLSRGWConcurrentIO {
306protected:
307 int issue_op(int shard_id, const std::string& oid) override;
308 int valid_ret_code() override { return -EEXIST; }
309 void cleanup() override;
310public:
311 CLSRGWIssueBucketIndexInit(librados::IoCtx& ioc,
312 std::map<int, std::string>& _bucket_objs,
313 uint32_t _max_aio) :
314 CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio) {}
315 virtual ~CLSRGWIssueBucketIndexInit() override {}
316};
317
318
319class CLSRGWIssueBucketIndexClean : public CLSRGWConcurrentIO {
320protected:
321 int issue_op(int shard_id, const std::string& oid) override;
322 int valid_ret_code() override {
323 return -ENOENT;
324 }
325
326public:
327 CLSRGWIssueBucketIndexClean(librados::IoCtx& ioc,
328 std::map<int, std::string>& _bucket_objs,
329 uint32_t _max_aio) :
330 CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio)
331 {}
332 virtual ~CLSRGWIssueBucketIndexClean() override {}
333};
334
335
336class CLSRGWIssueSetTagTimeout : public CLSRGWConcurrentIO {
337 uint64_t tag_timeout;
338protected:
339 int issue_op(int shard_id, const std::string& oid) override;
340public:
341 CLSRGWIssueSetTagTimeout(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
342 uint32_t _max_aio, uint64_t _tag_timeout) :
343 CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), tag_timeout(_tag_timeout) {}
344 virtual ~CLSRGWIssueSetTagTimeout() override {}
345};
346
347void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
348 bool absolute,
349 const std::map<RGWObjCategory, rgw_bucket_category_stats>& stats);
350
351void cls_rgw_bucket_prepare_op(librados::ObjectWriteOperation& o, RGWModifyOp op, const std::string& tag,
352 const cls_rgw_obj_key& key, const std::string& locator, bool log_op,
353 uint16_t bilog_op, const rgw_zone_set& zones_trace);
354
355void cls_rgw_bucket_complete_op(librados::ObjectWriteOperation& o, RGWModifyOp op, const std::string& tag,
356 const rgw_bucket_entry_ver& ver,
357 const cls_rgw_obj_key& key,
358 const rgw_bucket_dir_entry_meta& dir_meta,
359 const std::list<cls_rgw_obj_key> *remove_objs, bool log_op,
360 uint16_t bilog_op, const rgw_zone_set *zones_trace,
361 const std::string& obj_locator = ""); // ignored if it's the empty string
362
363void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, std::list<std::string>& keep_attr_prefixes);
364void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const std::string& attr);
365void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const std::string& prefix, bool fail_if_exist);
366void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const ceph::real_time& mtime, bool high_precision_time, RGWCheckMTimeType type);
367
368int cls_rgw_bi_get(librados::IoCtx& io_ctx, const std::string oid,
369 BIIndexType index_type, const cls_rgw_obj_key& key,
370 rgw_cls_bi_entry *entry);
371int cls_rgw_bi_put(librados::IoCtx& io_ctx, const std::string oid, const rgw_cls_bi_entry& entry);
372void cls_rgw_bi_put(librados::ObjectWriteOperation& op, const std::string oid, const rgw_cls_bi_entry& entry);
373int cls_rgw_bi_list(librados::IoCtx& io_ctx, const std::string& oid,
374 const std::string& name, const std::string& marker, uint32_t max,
375 std::list<rgw_cls_bi_entry> *entries, bool *is_truncated);
376
377
378void cls_rgw_bucket_link_olh(librados::ObjectWriteOperation& op,
379 const cls_rgw_obj_key& key, const ceph::buffer::list& olh_tag,
380 bool delete_marker, const std::string& op_tag, const rgw_bucket_dir_entry_meta *meta,
381 uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace);
382void cls_rgw_bucket_unlink_instance(librados::ObjectWriteOperation& op,
383 const cls_rgw_obj_key& key, const std::string& op_tag,
384 const std::string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace);
385void cls_rgw_get_olh_log(librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker, const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret, int& op_ret);
386void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const std::string& olh_tag);
387void cls_rgw_clear_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, const std::string& olh_tag);
388
389// these overloads which call io_ctx.operate() should not be called in the rgw.
390// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
391#ifndef CLS_CLIENT_HIDE_IOCTX
392int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, const std::string& oid,
393 const cls_rgw_obj_key& key, const ceph::buffer::list& olh_tag,
394 bool delete_marker, const std::string& op_tag, const rgw_bucket_dir_entry_meta *meta,
395 uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace);
396int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, const std::string& oid,
397 const cls_rgw_obj_key& key, const std::string& op_tag,
398 const std::string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace);
399int cls_rgw_get_olh_log(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, uint64_t ver_marker,
400 const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret);
401int cls_rgw_clear_olh(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, const std::string& olh_tag);
402int cls_rgw_usage_log_trim(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket,
403 uint64_t start_epoch, uint64_t end_epoch);
404#endif
405
406
407/**
408 * Std::list the bucket with the starting object and filter prefix.
409 * NOTE: this method do listing requests for each bucket index shards identified by
410 * the keys of the *list_results* std::map, which means the std::map should be populated
411 * by the caller to fill with each bucket index object id.
412 *
413 * io_ctx - IO context for rados.
414 * start_obj - marker for the listing.
415 * filter_prefix - filter prefix.
416 * num_entries - number of entries to request for each object (note the total
417 * amount of entries returned depends on the number of shardings).
418 * list_results - the std::list results keyed by bucket index object id.
419 * max_aio - the maximum number of AIO (for throttling).
420 *
421 * Return 0 on success, a failure code otherwise.
422*/
423
424class CLSRGWIssueBucketList : public CLSRGWConcurrentIO {
425 cls_rgw_obj_key start_obj;
426 std::string filter_prefix;
427 std::string delimiter;
428 uint32_t num_entries;
429 bool list_versions;
430 std::map<int, rgw_cls_list_ret>& result; // request_id -> return value
431
432protected:
433 int issue_op(int shard_id, const std::string& oid) override;
434 void reset_container(std::map<int, std::string>& objs) override;
435
436public:
437 CLSRGWIssueBucketList(librados::IoCtx& io_ctx,
438 const cls_rgw_obj_key& _start_obj,
439 const std::string& _filter_prefix,
440 const std::string& _delimiter,
441 uint32_t _num_entries,
442 bool _list_versions,
443 std::map<int, std::string>& oids, // shard_id -> shard_oid
444 // shard_id -> return value
445 std::map<int, rgw_cls_list_ret>& list_results,
446 uint32_t max_aio) :
447 CLSRGWConcurrentIO(io_ctx, oids, max_aio),
448 start_obj(_start_obj), filter_prefix(_filter_prefix), delimiter(_delimiter),
449 num_entries(_num_entries), list_versions(_list_versions),
450 result(list_results)
451 {}
452};
453
454void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op,
455 const cls_rgw_obj_key& start_obj,
456 const std::string& filter_prefix,
457 const std::string& delimiter,
458 uint32_t num_entries,
459 bool list_versions,
460 rgw_cls_list_ret* result);
461
462void cls_rgw_bilog_list(librados::ObjectReadOperation& op,
463 const std::string& marker, uint32_t max,
464 cls_rgw_bi_log_list_ret *pdata, int *ret = nullptr);
465
466class CLSRGWIssueBILogList : public CLSRGWConcurrentIO {
467 std::map<int, cls_rgw_bi_log_list_ret>& result;
468 BucketIndexShardsManager& marker_mgr;
469 uint32_t max;
470protected:
471 int issue_op(int shard_id, const std::string& oid) override;
472public:
473 CLSRGWIssueBILogList(librados::IoCtx& io_ctx, BucketIndexShardsManager& _marker_mgr, uint32_t _max,
474 std::map<int, std::string>& oids,
475 std::map<int, cls_rgw_bi_log_list_ret>& bi_log_lists, uint32_t max_aio) :
476 CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(bi_log_lists),
477 marker_mgr(_marker_mgr), max(_max) {}
478 virtual ~CLSRGWIssueBILogList() override {}
479};
480
481void cls_rgw_bilog_trim(librados::ObjectWriteOperation& op,
482 const std::string& start_marker,
483 const std::string& end_marker);
484
485class CLSRGWIssueBILogTrim : public CLSRGWConcurrentIO {
486 BucketIndexShardsManager& start_marker_mgr;
487 BucketIndexShardsManager& end_marker_mgr;
488protected:
489 int issue_op(int shard_id, const std::string& oid) override;
490 // Trim until -ENODATA is returned.
491 int valid_ret_code() override { return -ENODATA; }
492 bool need_multiple_rounds() override { return true; }
493 void add_object(int shard, const std::string& oid) override { objs_container[shard] = oid; }
494 void reset_container(std::map<int, std::string>& objs) override {
495 objs_container.swap(objs);
496 iter = objs_container.begin();
497 objs.clear();
498 }
499public:
500 CLSRGWIssueBILogTrim(librados::IoCtx& io_ctx, BucketIndexShardsManager& _start_marker_mgr,
501 BucketIndexShardsManager& _end_marker_mgr, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
502 CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio),
503 start_marker_mgr(_start_marker_mgr), end_marker_mgr(_end_marker_mgr) {}
504 virtual ~CLSRGWIssueBILogTrim() override {}
505};
506
507/**
508 * Check the bucket index.
509 *
510 * io_ctx - IO context for rados.
511 * bucket_objs_ret - check result for all shards.
512 * max_aio - the maximum number of AIO (for throttling).
513 *
514 * Return 0 on success, a failure code otherwise.
515 */
516class CLSRGWIssueBucketCheck : public CLSRGWConcurrentIO /*<std::map<std::string, rgw_cls_check_index_ret> >*/ {
517 std::map<int, rgw_cls_check_index_ret>& result;
518protected:
519 int issue_op(int shard_id, const std::string& oid) override;
520public:
521 CLSRGWIssueBucketCheck(librados::IoCtx& ioc, std::map<int, std::string>& oids,
522 std::map<int, rgw_cls_check_index_ret>& bucket_objs_ret,
523 uint32_t _max_aio) :
524 CLSRGWConcurrentIO(ioc, oids, _max_aio), result(bucket_objs_ret) {}
525 virtual ~CLSRGWIssueBucketCheck() override {}
526};
527
528class CLSRGWIssueBucketRebuild : public CLSRGWConcurrentIO {
529protected:
530 int issue_op(int shard_id, const std::string& oid) override;
531public:
532 CLSRGWIssueBucketRebuild(librados::IoCtx& io_ctx, std::map<int, std::string>& bucket_objs,
533 uint32_t max_aio) : CLSRGWConcurrentIO(io_ctx, bucket_objs, max_aio) {}
534 virtual ~CLSRGWIssueBucketRebuild() override {}
535};
536
537class CLSRGWIssueGetDirHeader : public CLSRGWConcurrentIO {
538 std::map<int, rgw_cls_list_ret>& result;
539protected:
540 int issue_op(int shard_id, const std::string& oid) override;
541public:
542 CLSRGWIssueGetDirHeader(librados::IoCtx& io_ctx, std::map<int, std::string>& oids, std::map<int, rgw_cls_list_ret>& dir_headers,
543 uint32_t max_aio) :
544 CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(dir_headers) {}
545 virtual ~CLSRGWIssueGetDirHeader() override {}
546};
547
548class CLSRGWIssueSetBucketResharding : public CLSRGWConcurrentIO {
549 cls_rgw_bucket_instance_entry entry;
550protected:
551 int issue_op(int shard_id, const std::string& oid) override;
552public:
553 CLSRGWIssueSetBucketResharding(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
554 const cls_rgw_bucket_instance_entry& _entry,
555 uint32_t _max_aio) : CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), entry(_entry) {}
556 virtual ~CLSRGWIssueSetBucketResharding() override {}
557};
558
559class CLSRGWIssueResyncBucketBILog : public CLSRGWConcurrentIO {
560protected:
561 int issue_op(int shard_id, const std::string& oid);
562public:
563 CLSRGWIssueResyncBucketBILog(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
564 CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
565 virtual ~CLSRGWIssueResyncBucketBILog() override {}
566};
567
568class CLSRGWIssueBucketBILogStop : public CLSRGWConcurrentIO {
569protected:
570 int issue_op(int shard_id, const std::string& oid);
571public:
572 CLSRGWIssueBucketBILogStop(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
573 CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
574 virtual ~CLSRGWIssueBucketBILogStop() override {}
575};
576
577int cls_rgw_get_dir_header_async(librados::IoCtx& io_ctx, const std::string& oid,
578 boost::intrusive_ptr<RGWGetDirHeader_CB> cb);
579
580void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, ceph::buffer::list& updates);
581
582void cls_rgw_suggest_changes(librados::ObjectWriteOperation& o, ceph::buffer::list& updates);
583
584/* usage logging */
585// these overloads which call io_ctx.operate() should not be called in the rgw.
586// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
587#ifndef CLS_CLIENT_HIDE_IOCTX
588int cls_rgw_usage_log_read(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket,
589 uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, std::string& read_iter,
590 std::map<rgw_user_bucket, rgw_usage_log_entry>& usage, bool *is_truncated);
591#endif
592
593void cls_rgw_usage_log_trim(librados::ObjectWriteOperation& op, const std::string& user, const std::string& bucket, uint64_t start_epoch, uint64_t end_epoch);
594
595void cls_rgw_usage_log_clear(librados::ObjectWriteOperation& op);
596void cls_rgw_usage_log_add(librados::ObjectWriteOperation& op, rgw_usage_log_info& info);
597
598/* garbage collection */
599void cls_rgw_gc_set_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info);
600void cls_rgw_gc_defer_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, const std::string& tag);
601void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const std::vector<std::string>& tags);
602
603// these overloads which call io_ctx.operate() should not be called in the rgw.
604// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
605#ifndef CLS_CLIENT_HIDE_IOCTX
606int cls_rgw_gc_list(librados::IoCtx& io_ctx, std::string& oid, std::string& marker, uint32_t max, bool expired_only,
607 std::list<cls_rgw_gc_obj_info>& entries, bool *truncated, std::string& next_marker);
608#endif
609
610/* lifecycle */
611// these overloads which call io_ctx.operate() should not be called in the rgw.
612// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
613#ifndef CLS_CLIENT_HIDE_IOCTX
614int cls_rgw_lc_get_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head);
615int cls_rgw_lc_put_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head);
616int cls_rgw_lc_get_next_entry(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, cls_rgw_lc_entry& entry);
617int cls_rgw_lc_rm_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry);
618int cls_rgw_lc_set_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry);
619int cls_rgw_lc_get_entry(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, cls_rgw_lc_entry& entry);
620int cls_rgw_lc_list(librados::IoCtx& io_ctx, const std::string& oid,
621 const std::string& marker, uint32_t max_entries,
622 std::vector<cls_rgw_lc_entry>& entries);
623#endif
624
625/* multipart */
626void cls_rgw_mp_upload_part_info_update(librados::ObjectWriteOperation& op, const std::string& part_key, const RGWUploadPartInfo& info);
627
628/* resharding */
629void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
630void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
631// these overloads which call io_ctx.operate() should not be called in the rgw.
632// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
633#ifndef CLS_CLIENT_HIDE_IOCTX
634int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const std::string& oid, std::string& marker, uint32_t max,
635 std::list<cls_rgw_reshard_entry>& entries, bool* is_truncated);
636int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_reshard_entry& entry);
637#endif
638
639/* resharding attribute on bucket index shard headers */
640void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err);
641// these overloads which call io_ctx.operate() should not be called in the rgw.
642// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
643#ifndef CLS_CLIENT_HIDE_IOCTX
644int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
645 const cls_rgw_bucket_instance_entry& entry);
646int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid);
647int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
648 cls_rgw_bucket_instance_entry *entry);
649#endif