// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include "include/str_list.h"
#include "include/rados/librados.hpp"
#include "cls_rgw_ops.h"
#include "cls_rgw_const.h"
#include "common/RefCountedObj.h"
#include "common/strtol.h"
#include "include/compat.h"
#include "common/ceph_time.h"
#include "common/ceph_mutex.h"


// Forward declaration
class BucketIndexAioManager;
/*
 * Bucket index AIO request argument; this is used to pass an argument
 * to the callback.
 */
struct BucketIndexAioArg : public RefCountedObject {
  BucketIndexAioArg(int _id, BucketIndexAioManager* _manager) :
    id(_id), manager(_manager) {}
  int id;
  BucketIndexAioManager* manager;
};

/*
 * This class manages AIO completions. This class is not completely
 * thread-safe: methods like *get_next_request_id* are not thread-safe
 * and are expected to be called from within one thread.
 */
class BucketIndexAioManager {
public:

  // allows us to reaccess the shard id and shard's oid during and
  // after the asynchronous call is made
  struct RequestObj {
    int shard_id;
    std::string oid;

    RequestObj(int _shard_id, const std::string& _oid) :
      shard_id(_shard_id), oid(_oid)
    {/* empty */}
  };


private:
  // NB: the following 4 maps use the request_id as the key; this
  // is not the same as the shard_id!
  std::map<int, librados::AioCompletion*> pendings;
  std::map<int, librados::AioCompletion*> completions;
  std::map<int, const RequestObj> pending_objs;
  std::map<int, const RequestObj> completion_objs;

  int next = 0;
  ceph::mutex lock = ceph::make_mutex("BucketIndexAioManager::lock");
  ceph::condition_variable cond;
  /*
   * Callback implementation for AIO request.
   */
  static void bucket_index_op_completion_cb(void* cb, void* arg) {
    BucketIndexAioArg* cb_arg = (BucketIndexAioArg*) arg;
    cb_arg->manager->do_completion(cb_arg->id);
    cb_arg->put();
  }

  /*
   * Get next request ID. This method is not thread-safe.
   *
   * Return next request ID.
   */
  int get_next_request_id() { return next++; }

  /*
   * Add a new pending AIO completion instance.
   *
   * @param request_id - the request ID.
   * @param completion - the AIO completion instance.
   * @param shard_id - the bucket index shard the request targets.
   * @param oid - the object id of that shard's bucket index object.
   */
  void add_pending(int request_id, librados::AioCompletion* completion, const int shard_id, const std::string& oid) {
    pendings[request_id] = completion;
    pending_objs.emplace(request_id, RequestObj(shard_id, oid));
  }

public:
  /*
   * Create a new instance.
   */
  BucketIndexAioManager() = default;

  /*
   * Do completion for the given AIO request.
   */
  void do_completion(int request_id);

  /*
   * Wait for AIO completions.
   *
   * valid_ret_code - a return code that is still considered successful.
   * num_completions - number of completions.
   * ret_code - return code of a failed AIO.
   * completed_objs - shard_id -> oid map of objects whose AIO has completed.
   * retry_objs - shard_id -> oid map of objects that should be issued again.
   *
   * Return false if there is no pending AIO, true otherwise.
   */
  bool wait_for_completions(int valid_ret_code,
                            int *num_completions = nullptr,
                            int *ret_code = nullptr,
                            std::map<int, std::string> *completed_objs = nullptr,
                            std::map<int, std::string> *retry_objs = nullptr);
  /**
   * Do aio read operation.
   */
  bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectReadOperation *op) {
    std::lock_guard l{lock};
    const int request_id = get_next_request_id();
    BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
    librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
    int r = io_ctx.aio_operate(oid, c, (librados::ObjectReadOperation*)op, NULL);
    if (r >= 0) {
      add_pending(arg->id, c, shard_id, oid);
    } else {
      arg->put();
      c->release();
    }
    return r;
  }

  /**
   * Do aio write operation.
   */
  bool aio_operate(librados::IoCtx& io_ctx, const int shard_id, const std::string& oid, librados::ObjectWriteOperation *op) {
    std::lock_guard l{lock};
    const int request_id = get_next_request_id();
    BucketIndexAioArg *arg = new BucketIndexAioArg(request_id, this);
    librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
    int r = io_ctx.aio_operate(oid, c, (librados::ObjectWriteOperation*)op);
    if (r >= 0) {
      add_pending(arg->id, c, shard_id, oid);
    } else {
      arg->put();
      c->release();
    }
    return r;
  }
};
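
/*
 * Illustrative usage sketch (not compiled; everything other than the
 * BucketIndexAioManager calls below is a hypothetical placeholder):
 *
 *   BucketIndexAioManager mgr;
 *   librados::ObjectWriteOperation op;   // e.g. an op.exec(...) against one bucket index shard
 *   mgr.aio_operate(io_ctx, shard_id, shard_oid, &op);
 *   // ... issue ops for the remaining shards ...
 *   int num_completions = 0, ret_code = 0;
 *   std::map<int, std::string> completed, retry;
 *   while (mgr.wait_for_completions(0, &num_completions, &ret_code,
 *                                   &completed, &retry)) {
 *     // check ret_code; completed/retry are filled as shard_id -> oid
 *   }
 */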

class RGWGetDirHeader_CB : public RefCountedObject {
public:
  ~RGWGetDirHeader_CB() override {}
  virtual void handle_response(int r, rgw_bucket_dir_header& header) = 0;
};

class BucketIndexShardsManager {
private:
  // per-shard settings, for example a per-shard marker
  std::map<int, std::string> value_by_shards;
public:
  const static std::string KEY_VALUE_SEPARATOR;
  const static std::string SHARDS_SEPARATOR;

  void add(int shard, const std::string& value) {
    value_by_shards[shard] = value;
  }

  const std::string& get(int shard, const std::string& default_value) const {
    auto iter = value_by_shards.find(shard);
    return (iter == value_by_shards.end() ? default_value : iter->second);
  }

  const std::map<int, std::string>& get() const {
    return value_by_shards;
  }
  std::map<int, std::string>& get() {
    return value_by_shards;
  }

  bool empty() const {
    return value_by_shards.empty();
  }

  void to_string(std::string *out) const {
    if (!out) {
      return;
    }
    out->clear();
    for (auto iter = value_by_shards.begin();
         iter != value_by_shards.end(); ++iter) {
      if (out->length()) {
        // Not the first item, append a separator first
        out->append(SHARDS_SEPARATOR);
      }
      char buf[16];
      snprintf(buf, sizeof(buf), "%d", iter->first);
      out->append(buf);
      out->append(KEY_VALUE_SEPARATOR);
      out->append(iter->second);
    }
  }

  static bool is_shards_marker(const std::string& marker) {
    return marker.find(KEY_VALUE_SEPARATOR) != std::string::npos;
  }

  /*
   * Convert from a string. The string can take one of two forms:
   *
   * 1. Single shard, no shard id specified, e.g. 000001.23.1
   *
   *    In this case, if the passed shard_id >= 0, use that shard id;
   *    otherwise assume that it's a bucket with no shards.
   *
   * 2. One or more shards, with a shard id specified for each shard,
   *    e.g. 0#00002.12,1#00003.23.2
   */
  int from_string(const std::string& composed_marker, int shard_id) {
    value_by_shards.clear();
    std::vector<std::string> shards;
    get_str_vec(composed_marker, SHARDS_SEPARATOR.c_str(), shards);
    if (shards.size() > 1 && shard_id >= 0) {
      return -EINVAL;
    }
    for (auto iter = shards.begin(); iter != shards.end(); ++iter) {
      size_t pos = iter->find(KEY_VALUE_SEPARATOR);
      if (pos == std::string::npos) {
        if (!value_by_shards.empty()) {
          return -EINVAL;
        }
        if (shard_id < 0) {
          add(0, *iter);
        } else {
          add(shard_id, *iter);
        }
        return 0;
      }
      std::string shard_str = iter->substr(0, pos);
      std::string err;
      int shard = (int)strict_strtol(shard_str.c_str(), 10, &err);
      if (!err.empty()) {
        return -EINVAL;
      }
      add(shard, iter->substr(pos + 1));
    }
    return 0;
  }

  // trim the '<shard-id>#' prefix from a single shard marker if present
  static std::string get_shard_marker(const std::string& marker) {
    auto p = marker.find(KEY_VALUE_SEPARATOR);
    if (p == marker.npos) {
      return marker;
    }
    return marker.substr(p + 1);
  }
};
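
/*
 * Illustrative example (not compiled), using the marker form shown in the
 * from_string() comment above ('#' between shard id and value, ',' between
 * shards):
 *
 *   BucketIndexShardsManager mgr;
 *   mgr.from_string("0#00002.12,1#00003.23.2", -1);  // shard 0 -> "00002.12", shard 1 -> "00003.23.2"
 *   std::string composed;
 *   mgr.to_string(&composed);                        // rebuilds the "<shard>#<marker>,..." form
 *   BucketIndexShardsManager::get_shard_marker("1#00003.23.2");  // -> "00003.23.2"
 */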

/* bucket index */
void cls_rgw_bucket_init_index(librados::ObjectWriteOperation& o);

class CLSRGWConcurrentIO {
protected:
  librados::IoCtx& io_ctx;

  // map of shard # to oid; the shards that are remaining to be processed
  std::map<int, std::string>& objs_container;
  // iterator to work through objs_container
  std::map<int, std::string>::iterator iter;

  uint32_t max_aio;
  BucketIndexAioManager manager;

  virtual int issue_op(int shard_id, const std::string& oid) = 0;

  virtual void cleanup() {}
  virtual int valid_ret_code() { return 0; }
  // Return true if multiple rounds of OPs might be needed; this happens when
  // an OP needs to be re-sent until a certain return code is seen.
  virtual bool need_multiple_rounds() { return false; }
  // Add a new object to the end of the container.
  virtual void add_object(int shard, const std::string& oid) {}
  virtual void reset_container(std::map<int, std::string>& objs) {}

public:

  CLSRGWConcurrentIO(librados::IoCtx& ioc,
                     std::map<int, std::string>& _objs_container,
                     uint32_t _max_aio) :
    io_ctx(ioc), objs_container(_objs_container), max_aio(_max_aio)
  {}

  virtual ~CLSRGWConcurrentIO()
  {}

  int operator()();
}; // class CLSRGWConcurrentIO
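
/*
 * Usage pattern (an illustrative sketch, not compiled): a subclass implements
 * issue_op() for one shard, and operator()() drives up to max_aio requests in
 * flight across all shards in objs_container. For example, with the
 * CLSRGWIssueBucketIndexInit subclass declared below:
 *
 *   std::map<int, std::string> bucket_objs;   // shard_id -> bucket index shard oid
 *   // ... populated by the caller ...
 *   int r = CLSRGWIssueBucketIndexInit(io_ctx, bucket_objs, max_aio)();
 *   if (r < 0) {
 *     // a shard failed; the cleanup() hook lets the subclass undo partial work
 *   }
 */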


class CLSRGWIssueBucketIndexInit : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const std::string& oid) override;
  int valid_ret_code() override { return -EEXIST; }
  void cleanup() override;
public:
  CLSRGWIssueBucketIndexInit(librados::IoCtx& ioc,
                             std::map<int, std::string>& _bucket_objs,
                             uint32_t _max_aio) :
    CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio) {}
};


class CLSRGWIssueBucketIndexClean : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const std::string& oid) override;
  int valid_ret_code() override {
    return -ENOENT;
  }

public:
  CLSRGWIssueBucketIndexClean(librados::IoCtx& ioc,
                              std::map<int, std::string>& _bucket_objs,
                              uint32_t _max_aio) :
    CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio)
  {}
};


class CLSRGWIssueSetTagTimeout : public CLSRGWConcurrentIO {
  uint64_t tag_timeout;
protected:
  int issue_op(int shard_id, const std::string& oid) override;
public:
  CLSRGWIssueSetTagTimeout(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
                           uint32_t _max_aio, uint64_t _tag_timeout) :
    CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), tag_timeout(_tag_timeout) {}
};

void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
                                 bool absolute,
                                 const std::map<RGWObjCategory, rgw_bucket_category_stats>& stats);

void cls_rgw_bucket_prepare_op(librados::ObjectWriteOperation& o, RGWModifyOp op, const std::string& tag,
                               const cls_rgw_obj_key& key, const std::string& locator, bool log_op,
                               uint16_t bilog_op, const rgw_zone_set& zones_trace);

void cls_rgw_bucket_complete_op(librados::ObjectWriteOperation& o, RGWModifyOp op, const std::string& tag,
                                const rgw_bucket_entry_ver& ver,
                                const cls_rgw_obj_key& key,
                                const rgw_bucket_dir_entry_meta& dir_meta,
                                const std::list<cls_rgw_obj_key> *remove_objs, bool log_op,
                                uint16_t bilog_op, const rgw_zone_set *zones_trace);

void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, std::list<std::string>& keep_attr_prefixes);
void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const std::string& attr);
void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const std::string& prefix, bool fail_if_exist);
void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const ceph::real_time& mtime, bool high_precision_time, RGWCheckMTimeType type);

int cls_rgw_bi_get(librados::IoCtx& io_ctx, const std::string oid,
                   BIIndexType index_type, const cls_rgw_obj_key& key,
                   rgw_cls_bi_entry *entry);
int cls_rgw_bi_put(librados::IoCtx& io_ctx, const std::string oid, const rgw_cls_bi_entry& entry);
void cls_rgw_bi_put(librados::ObjectWriteOperation& op, const std::string oid, const rgw_cls_bi_entry& entry);
int cls_rgw_bi_list(librados::IoCtx& io_ctx, const std::string& oid,
                    const std::string& name, const std::string& marker, uint32_t max,
                    std::list<rgw_cls_bi_entry> *entries, bool *is_truncated);


void cls_rgw_bucket_link_olh(librados::ObjectWriteOperation& op,
                             const cls_rgw_obj_key& key, const ceph::buffer::list& olh_tag,
                             bool delete_marker, const std::string& op_tag, const rgw_bucket_dir_entry_meta *meta,
                             uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace);
void cls_rgw_bucket_unlink_instance(librados::ObjectWriteOperation& op,
                                    const cls_rgw_obj_key& key, const std::string& op_tag,
                                    const std::string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace);
void cls_rgw_get_olh_log(librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker, const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret, int& op_ret);
void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const std::string& olh_tag);
void cls_rgw_clear_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, const std::string& olh_tag);

// these overloads which call io_ctx.operate() should not be called in the rgw.
// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, const std::string& oid,
                            const cls_rgw_obj_key& key, const ceph::buffer::list& olh_tag,
                            bool delete_marker, const std::string& op_tag, const rgw_bucket_dir_entry_meta *meta,
                            uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, const rgw_zone_set& zones_trace);
int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, const std::string& oid,
                                   const cls_rgw_obj_key& key, const std::string& op_tag,
                                   const std::string& olh_tag, uint64_t olh_epoch, bool log_op, const rgw_zone_set& zones_trace);
int cls_rgw_get_olh_log(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, uint64_t ver_marker,
                        const std::string& olh_tag, rgw_cls_read_olh_log_ret& log_ret);
int cls_rgw_clear_olh(librados::IoCtx& io_ctx, std::string& oid, const cls_rgw_obj_key& olh, const std::string& olh_tag);
int cls_rgw_usage_log_trim(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket,
                           uint64_t start_epoch, uint64_t end_epoch);
#endif


/**
 * List the bucket with the starting object and filter prefix.
 * NOTE: this issues a listing request for each bucket index shard identified
 * by the keys of the *oids* std::map (shard_id -> shard oid), which the caller
 * must populate with the bucket index object ids.
 *
 * io_ctx - IO context for rados.
 * start_obj - marker for the listing.
 * filter_prefix - filter prefix.
 * num_entries - number of entries to request from each shard object (note the
 *               total number of entries returned depends on the number of shards).
 * list_results - the list results, keyed by bucket index shard id.
 * max_aio - the maximum number of AIO requests (for throttling).
 *
 * Return 0 on success, a failure code otherwise.
 */

class CLSRGWIssueBucketList : public CLSRGWConcurrentIO {
  cls_rgw_obj_key start_obj;
  std::string filter_prefix;
  std::string delimiter;
  uint32_t num_entries;
  bool list_versions;
  std::map<int, rgw_cls_list_ret>& result; // shard_id -> return value

protected:
  int issue_op(int shard_id, const std::string& oid) override;
  void reset_container(std::map<int, std::string>& objs) override;

public:
  CLSRGWIssueBucketList(librados::IoCtx& io_ctx,
                        const cls_rgw_obj_key& _start_obj,
                        const std::string& _filter_prefix,
                        const std::string& _delimiter,
                        uint32_t _num_entries,
                        bool _list_versions,
                        std::map<int, std::string>& oids, // shard_id -> shard_oid
                        // shard_id -> return value
                        std::map<int, rgw_cls_list_ret>& list_results,
                        uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, oids, max_aio),
    start_obj(_start_obj), filter_prefix(_filter_prefix), delimiter(_delimiter),
    num_entries(_num_entries), list_versions(_list_versions),
    result(list_results)
  {}
};
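
/*
 * Illustrative usage sketch (not compiled; the shard oid contents shown are
 * hypothetical placeholders):
 *
 *   std::map<int, std::string> shard_oids;          // shard_id -> bucket index shard oid
 *   std::map<int, rgw_cls_list_ret> list_results;   // shard_id -> per-shard listing result
 *   // shard_oids must be populated by the caller, e.g. {0, "<shard-0-oid>"}, {1, "<shard-1-oid>"} ...
 *   int r = CLSRGWIssueBucketList(io_ctx, start_obj, filter_prefix, delimiter,
 *                                 num_entries, list_versions,
 *                                 shard_oids, list_results, max_aio)();
 *   // on success, list_results[shard_id] holds that shard's listing result
 */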

void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op,
                            const cls_rgw_obj_key& start_obj,
                            const std::string& filter_prefix,
                            const std::string& delimiter,
                            uint32_t num_entries,
                            bool list_versions,
                            rgw_cls_list_ret* result);

void cls_rgw_bilog_list(librados::ObjectReadOperation& op,
                        const std::string& marker, uint32_t max,
                        cls_rgw_bi_log_list_ret *pdata, int *ret = nullptr);

class CLSRGWIssueBILogList : public CLSRGWConcurrentIO {
  std::map<int, cls_rgw_bi_log_list_ret>& result;
  BucketIndexShardsManager& marker_mgr;
  uint32_t max;
protected:
  int issue_op(int shard_id, const std::string& oid) override;
public:
  CLSRGWIssueBILogList(librados::IoCtx& io_ctx, BucketIndexShardsManager& _marker_mgr, uint32_t _max,
                       std::map<int, std::string>& oids,
                       std::map<int, cls_rgw_bi_log_list_ret>& bi_log_lists, uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(bi_log_lists),
    marker_mgr(_marker_mgr), max(_max) {}
};

void cls_rgw_bilog_trim(librados::ObjectWriteOperation& op,
                        const std::string& start_marker,
                        const std::string& end_marker);

class CLSRGWIssueBILogTrim : public CLSRGWConcurrentIO {
  BucketIndexShardsManager& start_marker_mgr;
  BucketIndexShardsManager& end_marker_mgr;
protected:
  int issue_op(int shard_id, const std::string& oid) override;
  // Trim until -ENODATA is returned.
  int valid_ret_code() override { return -ENODATA; }
  bool need_multiple_rounds() override { return true; }
  void add_object(int shard, const std::string& oid) override { objs_container[shard] = oid; }
  void reset_container(std::map<int, std::string>& objs) override {
    objs_container.swap(objs);
    iter = objs_container.begin();
    objs.clear();
  }
public:
  CLSRGWIssueBILogTrim(librados::IoCtx& io_ctx, BucketIndexShardsManager& _start_marker_mgr,
                       BucketIndexShardsManager& _end_marker_mgr, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio),
    start_marker_mgr(_start_marker_mgr), end_marker_mgr(_end_marker_mgr) {}
};
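
/*
 * Note on the trim flow (an interpretation of the overrides above, not taken
 * from the driver's implementation): need_multiple_rounds() is true and
 * valid_ret_code() is -ENODATA, so each shard keeps getting re-issued trim ops
 * until it reports -ENODATA (nothing left to trim); reset_container() and
 * add_object() are the hooks the driver uses to rebuild the set of shards that
 * still need another round.
 */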

/**
 * Check the bucket index.
 *
 * io_ctx - IO context for rados.
 * bucket_objs_ret - check result for all shards.
 * max_aio - the maximum number of AIO requests (for throttling).
 *
 * Return 0 on success, a failure code otherwise.
 */
class CLSRGWIssueBucketCheck : public CLSRGWConcurrentIO /*<std::map<int, rgw_cls_check_index_ret>>*/ {
  std::map<int, rgw_cls_check_index_ret>& result;
protected:
  int issue_op(int shard_id, const std::string& oid) override;
public:
  CLSRGWIssueBucketCheck(librados::IoCtx& ioc, std::map<int, std::string>& oids,
                         std::map<int, rgw_cls_check_index_ret>& bucket_objs_ret,
                         uint32_t _max_aio) :
    CLSRGWConcurrentIO(ioc, oids, _max_aio), result(bucket_objs_ret) {}
};

class CLSRGWIssueBucketRebuild : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const std::string& oid) override;
public:
  CLSRGWIssueBucketRebuild(librados::IoCtx& io_ctx, std::map<int, std::string>& bucket_objs,
                           uint32_t max_aio) : CLSRGWConcurrentIO(io_ctx, bucket_objs, max_aio) {}
};

class CLSRGWIssueGetDirHeader : public CLSRGWConcurrentIO {
  std::map<int, rgw_cls_list_ret>& result;
protected:
  int issue_op(int shard_id, const std::string& oid) override;
public:
  CLSRGWIssueGetDirHeader(librados::IoCtx& io_ctx, std::map<int, std::string>& oids, std::map<int, rgw_cls_list_ret>& dir_headers,
                          uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(dir_headers) {}
};

class CLSRGWIssueSetBucketResharding : public CLSRGWConcurrentIO {
  cls_rgw_bucket_instance_entry entry;
protected:
  int issue_op(int shard_id, const std::string& oid) override;
public:
  CLSRGWIssueSetBucketResharding(librados::IoCtx& ioc, std::map<int, std::string>& _bucket_objs,
                                 const cls_rgw_bucket_instance_entry& _entry,
                                 uint32_t _max_aio) : CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), entry(_entry) {}
};

class CLSRGWIssueResyncBucketBILog : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const std::string& oid) override;
public:
  CLSRGWIssueResyncBucketBILog(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
};

class CLSRGWIssueBucketBILogStop : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const std::string& oid) override;
public:
  CLSRGWIssueBucketBILogStop(librados::IoCtx& io_ctx, std::map<int, std::string>& _bucket_objs, uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
};

int cls_rgw_get_dir_header_async(librados::IoCtx& io_ctx, std::string& oid, RGWGetDirHeader_CB *ctx);

void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, ceph::buffer::list& updates);

void cls_rgw_suggest_changes(librados::ObjectWriteOperation& o, ceph::buffer::list& updates);

/* usage logging */
// these overloads which call io_ctx.operate() should not be called in the rgw.
// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_usage_log_read(librados::IoCtx& io_ctx, const std::string& oid, const std::string& user, const std::string& bucket,
                           uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, std::string& read_iter,
                           std::map<rgw_user_bucket, rgw_usage_log_entry>& usage, bool *is_truncated);
#endif

void cls_rgw_usage_log_trim(librados::ObjectWriteOperation& op, const std::string& user, const std::string& bucket, uint64_t start_epoch, uint64_t end_epoch);

void cls_rgw_usage_log_clear(librados::ObjectWriteOperation& op);
void cls_rgw_usage_log_add(librados::ObjectWriteOperation& op, rgw_usage_log_info& info);

/* garbage collection */
void cls_rgw_gc_set_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info);
void cls_rgw_gc_defer_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, const std::string& tag);
void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const std::vector<std::string>& tags);

// these overloads which call io_ctx.operate() should not be called in the rgw.
// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_gc_list(librados::IoCtx& io_ctx, std::string& oid, std::string& marker, uint32_t max, bool expired_only,
                    std::list<cls_rgw_gc_obj_info>& entries, bool *truncated, std::string& next_marker);
#endif

/* lifecycle */
// these overloads which call io_ctx.operate() should not be called in the rgw.
// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_lc_get_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head);
int cls_rgw_lc_put_head(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_lc_obj_head& head);
int cls_rgw_lc_get_next_entry(librados::IoCtx& io_ctx, const std::string& oid, std::string& marker, cls_rgw_lc_entry& entry);
int cls_rgw_lc_rm_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry);
int cls_rgw_lc_set_entry(librados::IoCtx& io_ctx, const std::string& oid, const cls_rgw_lc_entry& entry);
int cls_rgw_lc_get_entry(librados::IoCtx& io_ctx, const std::string& oid, const std::string& marker, cls_rgw_lc_entry& entry);
int cls_rgw_lc_list(librados::IoCtx& io_ctx, const std::string& oid,
                    const std::string& marker, uint32_t max_entries,
                    std::vector<cls_rgw_lc_entry>& entries);
#endif

/* resharding */
void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
// these overloads which call io_ctx.operate() should not be called in the rgw.
// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const std::string& oid, std::string& marker, uint32_t max,
                         std::list<cls_rgw_reshard_entry>& entries, bool* is_truncated);
int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const std::string& oid, cls_rgw_reshard_entry& entry);
#endif

/* resharding attribute on bucket index shard headers */
void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err);
// these overloads which call io_ctx.operate() should not be called in the rgw.
// rgw_rados_operate() should be called after the overloads w/o calls to io_ctx.operate()
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
                                  const cls_rgw_bucket_instance_entry& entry);
int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid);
int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const std::string& oid,
                                  cls_rgw_bucket_instance_entry *entry);
#endif