// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_CLS_RGW_CLIENT_H
#define CEPH_CLS_RGW_CLIENT_H

#include "include/str_list.h"
#include "include/rados/librados.hpp"
#include "cls_rgw_ops.h"
#include "cls_rgw_const.h"
#include "common/RefCountedObj.h"
#include "common/strtol.h"
#include "include/compat.h"
#include "common/ceph_time.h"
#include "common/ceph_mutex.h"

// Forward declaration
class BucketIndexAioManager;
/*
 * Bucket index AIO request argument; used to pass the request id and its
 * manager to the AIO completion callback.
 */
struct BucketIndexAioArg : public RefCountedObject {
  BucketIndexAioArg(int _id, BucketIndexAioManager* _manager) :
    id(_id), manager(_manager) {}
  int id;
  BucketIndexAioManager* manager;
};

/*
 * This class manages AIO completions. It is not fully thread-safe:
 * methods such as get_next() are not thread-safe and are expected to be
 * called from a single thread.
 */
class BucketIndexAioManager {
private:
  map<int, librados::AioCompletion*> pendings;
  map<int, librados::AioCompletion*> completions;
  map<int, string> pending_objs;
  map<int, string> completion_objs;
  int next = 0;
  ceph::mutex lock = ceph::make_mutex("BucketIndexAioManager::lock");
  ceph::condition_variable cond;
  /*
   * Callback implementation for AIO request.
   */
  static void bucket_index_op_completion_cb(void* cb, void* arg) {
    BucketIndexAioArg* cb_arg = (BucketIndexAioArg*) arg;
    cb_arg->manager->do_completion(cb_arg->id);
    cb_arg->put();
  }

  /*
   * Get the next request ID. This method is not thread-safe.
   *
   * Return the next request ID.
   */
  int get_next() { return next++; }

  /*
   * Add a new pending AIO completion instance.
   *
   * @param id - the request ID.
   * @param completion - the AIO completion instance.
   * @param oid - the bucket index object id associated with the request.
   */
  void add_pending(int id, librados::AioCompletion* completion, const string& oid) {
    pendings[id] = completion;
    pending_objs[id] = oid;
  }
public:
  /*
   * Create a new instance.
   */
  BucketIndexAioManager() = default;

  /*
   * Do completion for the given AIO request.
   */
  void do_completion(int id);

  /*
   * Wait for AIO completions.
   *
   * valid_ret_code - an AIO return code that is considered valid (i.e. not a failure).
   * num_completions - number of completions.
   * ret_code - return code of a failed AIO request.
   * objs - the objects whose AIO requests have completed.
   *
   * Return false if there is no pending AIO, true otherwise.
   */
  bool wait_for_completions(int valid_ret_code, int *num_completions, int *ret_code,
      map<int, string> *objs);

  /**
   * Issue an AIO read operation. Return true if the request was issued
   * successfully, false otherwise.
   */
  bool aio_operate(librados::IoCtx& io_ctx, const string& oid, librados::ObjectReadOperation *op) {
    std::lock_guard l{lock};
    BucketIndexAioArg *arg = new BucketIndexAioArg(get_next(), this);
    librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
    int r = io_ctx.aio_operate(oid, c, op, NULL);
    if (r >= 0) {
      add_pending(arg->id, c, oid);
    } else {
      arg->put();
      c->release();
    }
    return r >= 0;
  }

  /**
   * Issue an AIO write operation. Return true if the request was issued
   * successfully, false otherwise.
   */
  bool aio_operate(librados::IoCtx& io_ctx, const string& oid, librados::ObjectWriteOperation *op) {
    std::lock_guard l{lock};
    BucketIndexAioArg *arg = new BucketIndexAioArg(get_next(), this);
    librados::AioCompletion *c = librados::Rados::aio_create_completion((void*)arg, bucket_index_op_completion_cb);
    int r = io_ctx.aio_operate(oid, c, op);
    if (r >= 0) {
      add_pending(arg->id, c, oid);
    } else {
      arg->put();
      c->release();
    }
    return r >= 0;
  }
};
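
/*
 * Example (sketch only; rgw normally drives this manager through the
 * CLSRGWConcurrentIO helpers below rather than using it directly). The read
 * operation is assumed to have been populated by one of the cls_rgw_*()
 * op builders in this header:
 *
 *   BucketIndexAioManager mgr;
 *   librados::ObjectReadOperation op;
 *   // ... fill in op ...
 *   mgr.aio_operate(io_ctx, oid, &op);
 *
 *   int num_completions = 0, ret_code = 0;
 *   while (mgr.wait_for_completions(0, &num_completions, &ret_code, nullptr)) {
 *     // the outstanding requests have completed; issue more requests here
 *     // or inspect ret_code
 *   }
 */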

class RGWGetDirHeader_CB : public RefCountedObject {
public:
  ~RGWGetDirHeader_CB() override {}
  virtual void handle_response(int r, rgw_bucket_dir_header& header) = 0;
};

class BucketIndexShardsManager {
private:
  // Per-shard values, for example per-shard markers.
  map<int, string> value_by_shards;
public:
  const static string KEY_VALUE_SEPARATOR;
  const static string SHARDS_SEPARATOR;

  void add(int shard, const string& value) {
    value_by_shards[shard] = value;
  }

  const string& get(int shard, const string& default_value) {
    map<int, string>::iterator iter = value_by_shards.find(shard);
    return (iter == value_by_shards.end() ? default_value : iter->second);
  }

  map<int, string>& get() {
    return value_by_shards;
  }

  bool empty() {
    return value_by_shards.empty();
  }

  void to_string(string *out) const {
    if (!out) {
      return;
    }
    out->clear();
    map<int, string>::const_iterator iter = value_by_shards.begin();
    for (; iter != value_by_shards.end(); ++iter) {
      if (out->length()) {
        // Not the first item, append a separator first
        out->append(SHARDS_SEPARATOR);
      }
      char buf[16];
      snprintf(buf, sizeof(buf), "%d", iter->first);
      out->append(buf);
      out->append(KEY_VALUE_SEPARATOR);
      out->append(iter->second);
    }
  }

  static bool is_shards_marker(const string& marker) {
    return marker.find(KEY_VALUE_SEPARATOR) != string::npos;
  }

  /*
   * Convert from a string. The string can take one of two forms:
   *
   * 1. A single shard with no shard id specified, e.g. 000001.23.1
   *
   *    In this case, if the passed shard_id >= 0, use that shard id;
   *    otherwise assume the bucket has no shards.
   *
   * 2. One or more shards, with a shard id specified for each shard,
   *    e.g. 0#00002.12,1#00003.23.2
   */
  int from_string(const string& composed_marker, int shard_id) {
    value_by_shards.clear();
    vector<string> shards;
    get_str_vec(composed_marker, SHARDS_SEPARATOR.c_str(), shards);
    if (shards.size() > 1 && shard_id >= 0) {
      return -EINVAL;
    }
    vector<string>::const_iterator iter = shards.begin();
    for (; iter != shards.end(); ++iter) {
      size_t pos = iter->find(KEY_VALUE_SEPARATOR);
      if (pos == string::npos) {
        if (!value_by_shards.empty()) {
          return -EINVAL;
        }
        if (shard_id < 0) {
          add(0, *iter);
        } else {
          add(shard_id, *iter);
        }
        return 0;
      }
      string shard_str = iter->substr(0, pos);
      string err;
      int shard = (int)strict_strtol(shard_str.c_str(), 10, &err);
      if (!err.empty()) {
        return -EINVAL;
      }
      add(shard, iter->substr(pos + 1));
    }
    return 0;
  }

  // trim the '<shard-id>#' prefix from a single shard marker if present
  static std::string get_shard_marker(const std::string& marker) {
    auto p = marker.find(KEY_VALUE_SEPARATOR);
    if (p == marker.npos) {
      return marker;
    }
    return marker.substr(p + 1);
  }
};
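
/*
 * Example (sketch): parsing and re-encoding a composed marker. The marker
 * values are illustrative only:
 *
 *   BucketIndexShardsManager mgr;
 *   int r = mgr.from_string("0#00002.12,1#00003.23.2", -1);  // two shards
 *   // on success: mgr.get(0, "") == "00002.12", mgr.get(1, "") == "00003.23.2"
 *
 *   std::string composed;
 *   mgr.to_string(&composed);  // recreates "0#00002.12,1#00003.23.2"
 */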

/* bucket index */
void cls_rgw_bucket_init_index(librados::ObjectWriteOperation& o);

class CLSRGWConcurrentIO {
protected:
  librados::IoCtx& io_ctx;
  map<int, string>& objs_container;
  map<int, string>::iterator iter;
  uint32_t max_aio;
  BucketIndexAioManager manager;

  virtual int issue_op(int shard_id, const string& oid) = 0;

  virtual void cleanup() {}
  virtual int valid_ret_code() { return 0; }
  // Return true if multiple rounds of OPs might be needed; this happens when
  // an OP needs to be re-sent until a certain return code is received.
  virtual bool need_multiple_rounds() { return false; }
  // Add a new object to the end of the container.
  virtual void add_object(int shard, const string& oid) {}
  virtual void reset_container(map<int, string>& objs) {}

public:

  CLSRGWConcurrentIO(librados::IoCtx& ioc,
                     map<int, string>& _objs_container,
                     uint32_t _max_aio) :
    io_ctx(ioc), objs_container(_objs_container), max_aio(_max_aio)
  {}

  virtual ~CLSRGWConcurrentIO()
  {}

  int operator()() {
    int ret = 0;
    iter = objs_container.begin();
    for (; iter != objs_container.end() && max_aio-- > 0; ++iter) {
      ret = issue_op(iter->first, iter->second);
      if (ret < 0)
        break;
    }

    int num_completions = 0, r = 0;
    map<int, string> objs;
    map<int, string> *pobjs = (need_multiple_rounds() ? &objs : NULL);
    while (manager.wait_for_completions(valid_ret_code(), &num_completions, &r, pobjs)) {
      if (r >= 0 && ret >= 0) {
        for (; num_completions && iter != objs_container.end(); --num_completions, ++iter) {
          int issue_ret = issue_op(iter->first, iter->second);
          if (issue_ret < 0) {
            ret = issue_ret;
            break;
          }
        }
      } else if (ret >= 0) {
        ret = r;
      }
      if (need_multiple_rounds() && iter == objs_container.end() && !objs.empty()) {
        // For those objects which need another round, use them to reset
        // the container
        reset_container(objs);
        iter = objs_container.begin();
        for (; num_completions && iter != objs_container.end(); --num_completions, ++iter) {
          int issue_ret = issue_op(iter->first, iter->second);
          if (issue_ret < 0) {
            ret = issue_ret;
            break;
          }
        }
      }
    }

    if (ret < 0) {
      cleanup();
    }
    return ret;
  }
};

class CLSRGWIssueBucketIndexInit : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const string& oid) override;
  int valid_ret_code() override { return -EEXIST; }
  void cleanup() override;
public:
  CLSRGWIssueBucketIndexInit(librados::IoCtx& ioc, map<int, string>& _bucket_objs,
                             uint32_t _max_aio) :
    CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio) {}
};
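
/*
 * Example (sketch): the CLSRGWIssue* classes are used as one-shot functors.
 * bucket_objs maps shard id -> bucket index object id and max_aio throttles
 * the number of in-flight requests; both are assumed to be set up by the
 * caller:
 *
 *   map<int, string> bucket_objs;  // shard id -> bucket index oid
 *   int r = CLSRGWIssueBucketIndexInit(io_ctx, bucket_objs, max_aio)();
 *   if (r < 0) {
 *     // cleanup() has already been invoked for the failed run
 *   }
 */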


class CLSRGWIssueBucketIndexClean : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const string& oid) override;
  int valid_ret_code() override {
    return -ENOENT;
  }

public:
  CLSRGWIssueBucketIndexClean(librados::IoCtx& ioc,
                              map<int, string>& _bucket_objs,
                              uint32_t _max_aio) :
    CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio)
  {}
};


class CLSRGWIssueSetTagTimeout : public CLSRGWConcurrentIO {
  uint64_t tag_timeout;
protected:
  int issue_op(int shard_id, const string& oid) override;
public:
  CLSRGWIssueSetTagTimeout(librados::IoCtx& ioc, map<int, string>& _bucket_objs,
                           uint32_t _max_aio, uint64_t _tag_timeout) :
    CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), tag_timeout(_tag_timeout) {}
};

void cls_rgw_bucket_update_stats(librados::ObjectWriteOperation& o,
                                 bool absolute,
                                 const map<RGWObjCategory, rgw_bucket_category_stats>& stats);

void cls_rgw_bucket_prepare_op(librados::ObjectWriteOperation& o, RGWModifyOp op, string& tag,
                               const cls_rgw_obj_key& key, const string& locator, bool log_op,
                               uint16_t bilog_op, rgw_zone_set& zones_trace);

void cls_rgw_bucket_complete_op(librados::ObjectWriteOperation& o, RGWModifyOp op, string& tag,
                                rgw_bucket_entry_ver& ver,
                                const cls_rgw_obj_key& key,
                                rgw_bucket_dir_entry_meta& dir_meta,
                                list<cls_rgw_obj_key> *remove_objs, bool log_op,
                                uint16_t bilog_op, rgw_zone_set *zones_trace);

void cls_rgw_remove_obj(librados::ObjectWriteOperation& o, list<string>& keep_attr_prefixes);
void cls_rgw_obj_store_pg_ver(librados::ObjectWriteOperation& o, const string& attr);
void cls_rgw_obj_check_attrs_prefix(librados::ObjectOperation& o, const string& prefix, bool fail_if_exist);
void cls_rgw_obj_check_mtime(librados::ObjectOperation& o, const ceph::real_time& mtime, bool high_precision_time, RGWCheckMTimeType type);

int cls_rgw_bi_get(librados::IoCtx& io_ctx, const string oid,
                   BIIndexType index_type, cls_rgw_obj_key& key,
                   rgw_cls_bi_entry *entry);
int cls_rgw_bi_put(librados::IoCtx& io_ctx, const string oid, rgw_cls_bi_entry& entry);
void cls_rgw_bi_put(librados::ObjectWriteOperation& op, const string oid, rgw_cls_bi_entry& entry);
int cls_rgw_bi_list(librados::IoCtx& io_ctx, const string oid,
                    const string& name, const string& marker, uint32_t max,
                    list<rgw_cls_bi_entry> *entries, bool *is_truncated);


void cls_rgw_bucket_link_olh(librados::ObjectWriteOperation& op,
                             const cls_rgw_obj_key& key, bufferlist& olh_tag,
                             bool delete_marker, const string& op_tag, rgw_bucket_dir_entry_meta *meta,
                             uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, rgw_zone_set& zones_trace);
void cls_rgw_bucket_unlink_instance(librados::ObjectWriteOperation& op,
                                    const cls_rgw_obj_key& key, const string& op_tag,
                                    const string& olh_tag, uint64_t olh_epoch, bool log_op, rgw_zone_set& zones_trace);
void cls_rgw_get_olh_log(librados::ObjectReadOperation& op, const cls_rgw_obj_key& olh, uint64_t ver_marker, const string& olh_tag, rgw_cls_read_olh_log_ret& log_ret, int& op_ret);
void cls_rgw_trim_olh_log(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, uint64_t ver, const string& olh_tag);
void cls_rgw_clear_olh(librados::ObjectWriteOperation& op, const cls_rgw_obj_key& olh, const string& olh_tag);

// These overloads call io_ctx.operate() directly and should not be used within rgw;
// instead, build the operation with the variants that do not call io_ctx.operate()
// and submit it via rgw_rados_operate().
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_bucket_link_olh(librados::IoCtx& io_ctx, const string& oid,
                            const cls_rgw_obj_key& key, bufferlist& olh_tag,
                            bool delete_marker, const string& op_tag, rgw_bucket_dir_entry_meta *meta,
                            uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time, bool log_op, rgw_zone_set& zones_trace);
int cls_rgw_bucket_unlink_instance(librados::IoCtx& io_ctx, const string& oid,
                                   const cls_rgw_obj_key& key, const string& op_tag,
                                   const string& olh_tag, uint64_t olh_epoch, bool log_op, rgw_zone_set& zones_trace);
int cls_rgw_get_olh_log(librados::IoCtx& io_ctx, string& oid, const cls_rgw_obj_key& olh, uint64_t ver_marker,
                        const string& olh_tag, rgw_cls_read_olh_log_ret& log_ret);
int cls_rgw_clear_olh(librados::IoCtx& io_ctx, string& oid, const cls_rgw_obj_key& olh, const string& olh_tag);
int cls_rgw_usage_log_trim(librados::IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
                           uint64_t start_epoch, uint64_t end_epoch);
#endif


/**
 * List the bucket with the starting object and filter prefix.
 * NOTE: this issues a listing request for each bucket index shard object
 * identified by the keys of the *oids* map, which must be populated by the
 * caller with the bucket index object ids.
 *
 * io_ctx - IO context for rados.
 * start_obj - marker for the listing.
 * filter_prefix - filter prefix.
 * num_entries - number of entries to request from each shard object (note the
 *               total number of entries returned depends on the number of shards).
 * list_results - the list results keyed by bucket index shard id.
 * max_aio - the maximum number of AIO requests (for throttling).
 *
 * Return 0 on success, a failure code otherwise.
 */

class CLSRGWIssueBucketList : public CLSRGWConcurrentIO {
  cls_rgw_obj_key start_obj;
  string filter_prefix;
  string delimiter;
  uint32_t num_entries;
  bool list_versions;
  map<int, rgw_cls_list_ret>& result;
protected:
  int issue_op(int shard_id, const string& oid) override;
public:
  CLSRGWIssueBucketList(librados::IoCtx& io_ctx,
                        const cls_rgw_obj_key& _start_obj,
                        const string& _filter_prefix,
                        const string& _delimiter,
                        uint32_t _num_entries,
                        bool _list_versions,
                        map<int, string>& oids,
                        map<int, rgw_cls_list_ret>& list_results,
                        uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, oids, max_aio),
    start_obj(_start_obj), filter_prefix(_filter_prefix), delimiter(_delimiter),
    num_entries(_num_entries), list_versions(_list_versions),
    result(list_results)
  {}
};
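
/*
 * Example (sketch): listing across all shards of a bucket. oids must map
 * shard id -> bucket index object id; one rgw_cls_list_ret is produced per
 * shard, and merging the per-shard entries is left to the caller:
 *
 *   map<int, string> oids;                   // populated by the caller
 *   map<int, rgw_cls_list_ret> list_results;
 *   int r = CLSRGWIssueBucketList(io_ctx, start_obj, filter_prefix, delimiter,
 *                                 num_entries, list_versions, oids,
 *                                 list_results, max_aio)();
 */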

void cls_rgw_bucket_list_op(librados::ObjectReadOperation& op,
                            const cls_rgw_obj_key& start_obj,
                            const std::string& filter_prefix,
                            const std::string& delimiter,
                            uint32_t num_entries,
                            bool list_versions,
                            rgw_cls_list_ret* result);

void cls_rgw_bilog_list(librados::ObjectReadOperation& op,
                        const std::string& marker, uint32_t max,
                        cls_rgw_bi_log_list_ret *pdata, int *ret = nullptr);

class CLSRGWIssueBILogList : public CLSRGWConcurrentIO {
  map<int, cls_rgw_bi_log_list_ret>& result;
  BucketIndexShardsManager& marker_mgr;
  uint32_t max;
protected:
  int issue_op(int shard_id, const string& oid) override;
public:
  CLSRGWIssueBILogList(librados::IoCtx& io_ctx, BucketIndexShardsManager& _marker_mgr, uint32_t _max,
                       map<int, string>& oids,
                       map<int, cls_rgw_bi_log_list_ret>& bi_log_lists, uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(bi_log_lists),
    marker_mgr(_marker_mgr), max(_max) {}
};

void cls_rgw_bilog_trim(librados::ObjectWriteOperation& op,
                        const std::string& start_marker,
                        const std::string& end_marker);

class CLSRGWIssueBILogTrim : public CLSRGWConcurrentIO {
  BucketIndexShardsManager& start_marker_mgr;
  BucketIndexShardsManager& end_marker_mgr;
protected:
  int issue_op(int shard_id, const string& oid) override;
  // Trim until -ENODATA is returned.
  int valid_ret_code() override { return -ENODATA; }
  bool need_multiple_rounds() override { return true; }
  void add_object(int shard, const string& oid) override { objs_container[shard] = oid; }
  void reset_container(map<int, string>& objs) override {
    objs_container.swap(objs);
    iter = objs_container.begin();
    objs.clear();
  }
public:
  CLSRGWIssueBILogTrim(librados::IoCtx& io_ctx, BucketIndexShardsManager& _start_marker_mgr,
                       BucketIndexShardsManager& _end_marker_mgr, map<int, string>& _bucket_objs, uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio),
    start_marker_mgr(_start_marker_mgr), end_marker_mgr(_end_marker_mgr) {}
};
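
/*
 * Example (sketch): trimming the bucket index log on every shard. Because
 * need_multiple_rounds() returns true, the trim op is re-issued to each
 * shard until that shard returns -ENODATA:
 *
 *   int r = CLSRGWIssueBILogTrim(io_ctx, start_marker_mgr, end_marker_mgr,
 *                                bucket_objs, max_aio)();
 */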

/**
 * Check the bucket index.
 *
 * io_ctx - IO context for rados.
 * bucket_objs_ret - check results for all shards.
 * max_aio - the maximum number of AIO requests (for throttling).
 *
 * Return 0 on success, a failure code otherwise.
 */
class CLSRGWIssueBucketCheck : public CLSRGWConcurrentIO {
  map<int, rgw_cls_check_index_ret>& result;
protected:
  int issue_op(int shard_id, const string& oid) override;
public:
  CLSRGWIssueBucketCheck(librados::IoCtx& ioc, map<int, string>& oids,
                         map<int, rgw_cls_check_index_ret>& bucket_objs_ret,
                         uint32_t _max_aio) :
    CLSRGWConcurrentIO(ioc, oids, _max_aio), result(bucket_objs_ret) {}
};

class CLSRGWIssueBucketRebuild : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const string& oid) override;
public:
  CLSRGWIssueBucketRebuild(librados::IoCtx& io_ctx, map<int, string>& bucket_objs,
                           uint32_t max_aio) : CLSRGWConcurrentIO(io_ctx, bucket_objs, max_aio) {}
};

class CLSRGWIssueGetDirHeader : public CLSRGWConcurrentIO {
  map<int, rgw_cls_list_ret>& result;
protected:
  int issue_op(int shard_id, const string& oid) override;
public:
  CLSRGWIssueGetDirHeader(librados::IoCtx& io_ctx, map<int, string>& oids, map<int, rgw_cls_list_ret>& dir_headers,
                          uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, oids, max_aio), result(dir_headers) {}
};

class CLSRGWIssueSetBucketResharding : public CLSRGWConcurrentIO {
  cls_rgw_bucket_instance_entry entry;
protected:
  int issue_op(int shard_id, const string& oid) override;
public:
  CLSRGWIssueSetBucketResharding(librados::IoCtx& ioc, map<int, string>& _bucket_objs,
                                 const cls_rgw_bucket_instance_entry& _entry,
                                 uint32_t _max_aio) : CLSRGWConcurrentIO(ioc, _bucket_objs, _max_aio), entry(_entry) {}
};

class CLSRGWIssueResyncBucketBILog : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const string& oid) override;
public:
  CLSRGWIssueResyncBucketBILog(librados::IoCtx& io_ctx, map<int, string>& _bucket_objs, uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
};

class CLSRGWIssueBucketBILogStop : public CLSRGWConcurrentIO {
protected:
  int issue_op(int shard_id, const string& oid) override;
public:
  CLSRGWIssueBucketBILogStop(librados::IoCtx& io_ctx, map<int, string>& _bucket_objs, uint32_t max_aio) :
    CLSRGWConcurrentIO(io_ctx, _bucket_objs, max_aio) {}
};

int cls_rgw_get_dir_header_async(librados::IoCtx& io_ctx, string& oid, RGWGetDirHeader_CB *ctx);

void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, bufferlist& updates);

void cls_rgw_suggest_changes(librados::ObjectWriteOperation& o, bufferlist& updates);

/* usage logging */
// These overloads call io_ctx.operate() directly and should not be used within rgw;
// instead, build the operation with the variants that do not call io_ctx.operate()
// and submit it via rgw_rados_operate().
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_usage_log_read(librados::IoCtx& io_ctx, const string& oid, const string& user, const string& bucket,
                           uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, string& read_iter,
                           map<rgw_user_bucket, rgw_usage_log_entry>& usage, bool *is_truncated);
#endif

void cls_rgw_usage_log_trim(librados::ObjectWriteOperation& op, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch);

void cls_rgw_usage_log_clear(librados::ObjectWriteOperation& op);
void cls_rgw_usage_log_add(librados::ObjectWriteOperation& op, rgw_usage_log_info& info);

/* garbage collection */
void cls_rgw_gc_set_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, cls_rgw_gc_obj_info& info);
void cls_rgw_gc_defer_entry(librados::ObjectWriteOperation& op, uint32_t expiration_secs, const string& tag);
void cls_rgw_gc_remove(librados::ObjectWriteOperation& op, const vector<string>& tags);

// These overloads call io_ctx.operate() directly and should not be used within rgw;
// instead, build the operation with the variants that do not call io_ctx.operate()
// and submit it via rgw_rados_operate().
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_gc_list(librados::IoCtx& io_ctx, string& oid, string& marker, uint32_t max, bool expired_only,
                    list<cls_rgw_gc_obj_info>& entries, bool *truncated, string& next_marker);
#endif

/* lifecycle */
// These overloads call io_ctx.operate() directly and should not be used within rgw;
// instead, build the operation with the variants that do not call io_ctx.operate()
// and submit it via rgw_rados_operate().
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_lc_get_head(librados::IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head);
int cls_rgw_lc_put_head(librados::IoCtx& io_ctx, const string& oid, cls_rgw_lc_obj_head& head);
int cls_rgw_lc_get_next_entry(librados::IoCtx& io_ctx, const string& oid, string& marker, pair<string, int>& entry);
int cls_rgw_lc_rm_entry(librados::IoCtx& io_ctx, const string& oid, const pair<string, int>& entry);
int cls_rgw_lc_set_entry(librados::IoCtx& io_ctx, const string& oid, const pair<string, int>& entry);
int cls_rgw_lc_get_entry(librados::IoCtx& io_ctx, const string& oid, const std::string& marker, rgw_lc_entry_t& entry);
int cls_rgw_lc_list(librados::IoCtx& io_ctx, const string& oid,
                    const string& marker,
                    uint32_t max_entries,
                    map<string, int>& entries);
#endif

/* resharding */
void cls_rgw_reshard_add(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
void cls_rgw_reshard_remove(librados::ObjectWriteOperation& op, const cls_rgw_reshard_entry& entry);
// These overloads call io_ctx.operate() directly and should not be used within rgw;
// instead, build the operation with the variants that do not call io_ctx.operate()
// and submit it via rgw_rados_operate().
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_reshard_list(librados::IoCtx& io_ctx, const string& oid, string& marker, uint32_t max,
                         list<cls_rgw_reshard_entry>& entries, bool* is_truncated);
int cls_rgw_reshard_get(librados::IoCtx& io_ctx, const string& oid, cls_rgw_reshard_entry& entry);
#endif

/* resharding attribute on bucket index shard headers */
void cls_rgw_guard_bucket_resharding(librados::ObjectOperation& op, int ret_err);
// These overloads call io_ctx.operate() directly and should not be used within rgw;
// instead, build the operation with the variants that do not call io_ctx.operate()
// and submit it via rgw_rados_operate().
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_rgw_set_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
                                  const cls_rgw_bucket_instance_entry& entry);
int cls_rgw_clear_bucket_resharding(librados::IoCtx& io_ctx, const string& oid);
int cls_rgw_get_bucket_resharding(librados::IoCtx& io_ctx, const string& oid,
                                  cls_rgw_bucket_instance_entry *entry);
#endif

#endif