1// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2// vim: ts=2 sw=2 expandtab ft=cpp
3
4/*
5 * Ceph - scalable distributed file system
6 *
7 * SAL implementation for the CORTX DAOS backend
8 *
9 * Copyright (C) 2022 Seagate Technology LLC and/or its Affiliates
10 *
11 * This is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License version 2.1, as published by the Free Software
14 * Foundation. See file COPYING.
15 *
16 */
17
18#include "rgw_sal_daos.h"
19
20#include <errno.h>
21#include <stdlib.h>
22#include <unistd.h>
23
24#include <filesystem>
25#include <system_error>
26
27#include "common/Clock.h"
28#include "common/errno.h"
29#include "rgw_bucket.h"
30#include "rgw_compression.h"
31#include "rgw_sal.h"
32
33#define dout_subsys ceph_subsys_rgw
34
35using std::list;
36using std::map;
37using std::set;
38using std::string;
39using std::vector;
40
41namespace fs = std::filesystem;
42
43namespace rgw::sal {
44
45using ::ceph::decode;
46using ::ceph::encode;
47
48int DaosUser::list_buckets(const DoutPrefixProvider* dpp, const string& marker,
49 const string& end_marker, uint64_t max,
50 bool need_stats, BucketList& buckets,
51 optional_yield y) {
52 ldpp_dout(dpp, 20) << "DEBUG: list_user_buckets: marker=" << marker
53 << " end_marker=" << end_marker << " max=" << max << dendl;
54 int ret = 0;
55 bool is_truncated = false;
56 buckets.clear();
57 vector<struct ds3_bucket_info> bucket_infos(max);
58 daos_size_t bcount = bucket_infos.size();
59 vector<vector<uint8_t>> values(bcount, vector<uint8_t>(DS3_MAX_ENCODED_LEN));
60 for (daos_size_t i = 0; i < bcount; i++) {
61 bucket_infos[i].encoded = values[i].data();
62 bucket_infos[i].encoded_length = values[i].size();
63 }
64
65 char daos_marker[DS3_MAX_BUCKET_NAME];
66 std::strncpy(daos_marker, marker.c_str(), sizeof(daos_marker));
67 ret = ds3_bucket_list(&bcount, bucket_infos.data(), daos_marker,
68 &is_truncated, store->ds3, nullptr);
69 ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_list: bcount=" << bcount
70 << " ret=" << ret << dendl;
71 if (ret != 0) {
72 ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_list failed! ret=" << ret << dendl;
73 return ret;
74 }
75
76 bucket_infos.resize(bcount);
77 values.resize(bcount);
78
79 for (const auto& bi : bucket_infos) {
80 DaosBucketInfo dbinfo;
81 bufferlist bl;
82 bl.append(reinterpret_cast<char*>(bi.encoded), bi.encoded_length);
83 auto iter = bl.cbegin();
84 dbinfo.decode(iter);
85 buckets.add(std::make_unique<DaosBucket>(this->store, dbinfo.info, this));
86 }
87
88 buckets.set_truncated(is_truncated);
89 return 0;
90}
91
92int DaosUser::create_bucket(
93 const DoutPrefixProvider* dpp, const rgw_bucket& b,
94 const std::string& zonegroup_id, rgw_placement_rule& placement_rule,
95 std::string& swift_ver_location, const RGWQuotaInfo* pquota_info,
96 const RGWAccessControlPolicy& policy, Attrs& attrs, RGWBucketInfo& info,
97 obj_version& ep_objv, bool exclusive, bool obj_lock_enabled, bool* existed,
98 req_info& req_info, std::unique_ptr<Bucket>* bucket_out, optional_yield y) {
99 ldpp_dout(dpp, 20) << "DEBUG: create_bucket: " << b.name << dendl;
100 int ret;
101 std::unique_ptr<Bucket> bucket;
102
103 // Look up the bucket. Create it if it doesn't exist.
104 ret = this->store->get_bucket(dpp, this, b, &bucket, y);
105 if (ret != 0 && ret != -ENOENT) {
106 return ret;
107 }
108
109 if (ret != -ENOENT) {
110 *existed = true;
111 if (swift_ver_location.empty()) {
112 swift_ver_location = bucket->get_info().swift_ver_location;
113 }
114 placement_rule.inherit_from(bucket->get_info().placement_rule);
115
116 // TODO: ACL policy
117 // // don't allow changes to the acl policy
118 // RGWAccessControlPolicy old_policy(ctx());
119 // int rc = rgw_op_get_bucket_policy_from_attr(
120 // dpp, this, u, bucket->get_attrs(), &old_policy, y);
121 // if (rc >= 0 && old_policy != policy) {
122 // bucket_out->swap(bucket);
123 // return -EEXIST;
124 //}
125 } else {
126 placement_rule.name = "default";
127 placement_rule.storage_class = "STANDARD";
128 bucket = std::make_unique<DaosBucket>(store, b, this);
129 bucket->set_attrs(attrs);
130
131 *existed = false;
132 }
133
134 // TODO: how to handle zone and multi-site.
135
136 if (!*existed) {
137 info.placement_rule = placement_rule;
138 info.bucket = b;
139 info.owner = this->get_info().user_id;
140 info.zonegroup = zonegroup_id;
141 info.creation_time = ceph::real_clock::now();
142 if (obj_lock_enabled)
143 info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
144 bucket->set_version(ep_objv);
145 bucket->get_info() = info;
146
147 // Create a new bucket:
148 DaosBucket* daos_bucket = static_cast<DaosBucket*>(bucket.get());
149 bufferlist bl;
150 std::unique_ptr<struct ds3_bucket_info> bucket_info =
151 daos_bucket->get_encoded_info(bl, ceph::real_time());
152 ret = ds3_bucket_create(bucket->get_name().c_str(), bucket_info.get(),
153 nullptr, store->ds3, nullptr);
154 if (ret != 0) {
155 ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_create failed! ret=" << ret
156 << dendl;
157 return ret;
158 }
159 } else {
160 bucket->set_version(ep_objv);
161 bucket->get_info() = info;
162 }
163
164 bucket_out->swap(bucket);
165
166 return ret;
167}
168
169int DaosUser::read_attrs(const DoutPrefixProvider* dpp, optional_yield y) {
170 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
171}
172
173int DaosUser::read_stats(const DoutPrefixProvider* dpp, optional_yield y,
174 RGWStorageStats* stats,
175 ceph::real_time* last_stats_sync,
176 ceph::real_time* last_stats_update) {
177 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
178}
179
180/* stats - Not for first pass */
181int DaosUser::read_stats_async(const DoutPrefixProvider* dpp,
182 RGWGetUserStats_CB* cb) {
183 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
184}
185
186int DaosUser::complete_flush_stats(const DoutPrefixProvider* dpp,
187 optional_yield y) {
188 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
189}
190
191int DaosUser::read_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
192 uint64_t end_epoch, uint32_t max_entries,
193 bool* is_truncated, RGWUsageIter& usage_iter,
194 map<rgw_user_bucket, rgw_usage_log_entry>& usage) {
195 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
196}
197
198int DaosUser::trim_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
199 uint64_t end_epoch) {
200 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
201}
202
203int DaosUser::load_user(const DoutPrefixProvider* dpp, optional_yield y) {
204 const string name = info.user_id.to_str();
205 ldpp_dout(dpp, 20) << "DEBUG: load_user, name=" << name << dendl;
206
207 DaosUserInfo duinfo;
208 int ret = read_user(dpp, name, &duinfo);
209 if (ret != 0) {
210 ldpp_dout(dpp, 0) << "ERROR: load_user failed, name=" << name << dendl;
211 return ret;
212 }
213
214 info = duinfo.info;
215 attrs = duinfo.attrs;
216 objv_tracker.read_version = duinfo.user_version;
217 return 0;
218}
219
220int DaosUser::merge_and_store_attrs(const DoutPrefixProvider* dpp,
221 Attrs& new_attrs, optional_yield y) {
222 ldpp_dout(dpp, 20) << "DEBUG: merge_and_store_attrs, new_attrs=" << new_attrs
223 << dendl;
224 for (auto& it : new_attrs) {
225 attrs[it.first] = it.second;
226 }
227 return store_user(dpp, y, false);
228}
229
230int DaosUser::store_user(const DoutPrefixProvider* dpp, optional_yield y,
231 bool exclusive, RGWUserInfo* old_info) {
232 const string name = info.user_id.to_str();
233 ldpp_dout(dpp, 10) << "DEBUG: Store_user(): User name=" << name << dendl;
234
235 // Read user
236 int ret = 0;
237 struct DaosUserInfo duinfo;
238 ret = read_user(dpp, name, &duinfo);
239 obj_version obj_ver = duinfo.user_version;
240 std::unique_ptr<struct ds3_user_info> old_user_info;
241 std::vector<const char*> old_access_ids;
242
243 // Check if the user already exists
244 if (ret == 0 && obj_ver.ver) {
245 // already exists.
246
247 if (old_info) {
248 *old_info = duinfo.info;
249 }
250
251 if (objv_tracker.read_version.ver != obj_ver.ver) {
252 // Object version mismatch; return ECANCELED
253 ret = -ECANCELED;
254 ldpp_dout(dpp, 0) << "User Read version mismatch read_version="
255 << objv_tracker.read_version.ver
256 << " obj_ver=" << obj_ver.ver << dendl;
257 return ret;
258 }
259
260 if (exclusive) {
261 // return
262 return ret;
263 }
264 obj_ver.ver++;
265
266 for (auto const& [id, key] : duinfo.info.access_keys) {
267 old_access_ids.push_back(id.c_str());
268 }
269 old_user_info.reset(
270 new ds3_user_info{.name = duinfo.info.user_id.to_str().c_str(),
271 .email = duinfo.info.user_email.c_str(),
272 .access_ids = old_access_ids.data(),
273 .access_ids_nr = old_access_ids.size()});
274 } else {
275 obj_ver.ver = 1;
276 obj_ver.tag = "UserTAG";
277 }
278
279 bufferlist bl;
280 std::unique_ptr<struct ds3_user_info> user_info =
281 get_encoded_info(bl, obj_ver);
282
283 ret = ds3_user_set(name.c_str(), user_info.get(), old_user_info.get(),
284 store->ds3, nullptr);
285
286 if (ret != 0) {
287 ldpp_dout(dpp, 0) << "Error: ds3_user_set failed, name=" << name
288 << " ret=" << ret << dendl;
289 }
290
291 return ret;
292}
293
294int DaosUser::read_user(const DoutPrefixProvider* dpp, std::string name,
295 DaosUserInfo* duinfo) {
296 // Initialize ds3_user_info
297 bufferlist bl;
298 uint64_t size = DS3_MAX_ENCODED_LEN;
299 struct ds3_user_info user_info = {.encoded = bl.append_hole(size).c_str(),
300 .encoded_length = size};
301
302 int ret = ds3_user_get(name.c_str(), &user_info, store->ds3, nullptr);
303
304 if (ret != 0) {
305 ldpp_dout(dpp, 0) << "Error: ds3_user_get failed, name=" << name
306 << " ret=" << ret << dendl;
307 return ret;
308 }
309
310 // Decode
311 bufferlist& blr = bl;
312 auto iter = blr.cbegin();
313 duinfo->decode(iter);
314 return ret;
315}
316
317std::unique_ptr<struct ds3_user_info> DaosUser::get_encoded_info(
318 bufferlist& bl, obj_version& obj_ver) {
319 // Encode user data
320 struct DaosUserInfo duinfo;
321 duinfo.info = info;
322 duinfo.attrs = attrs;
323 duinfo.user_version = obj_ver;
324 duinfo.encode(bl);
325
326 // Initialize ds3_user_info
327 access_ids.clear();
328 for (auto const& [id, key] : info.access_keys) {
329 access_ids.push_back(id.c_str());
330 }
331 return std::unique_ptr<struct ds3_user_info>(
332 new ds3_user_info{.name = info.user_id.to_str().c_str(),
333 .email = info.user_email.c_str(),
334 .access_ids = access_ids.data(),
335 .access_ids_nr = access_ids.size(),
336 .encoded = bl.c_str(),
337 .encoded_length = bl.length()});
338}
339
340int DaosUser::remove_user(const DoutPrefixProvider* dpp, optional_yield y) {
341 const string name = info.user_id.to_str();
342
343 // TODO: the expectation is that the object version needs to be passed in as a
344 // method arg; see int DB::remove_user(const DoutPrefixProvider *dpp,
345 // RGWUserInfo& uinfo, RGWObjVersionTracker *pobjv)
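  // A minimal sketch of how that could look (illustrative only; `pobjv` is a
  // hypothetical parameter, not part of this POC). The caller's tracked version
  // would replace the default-constructed obj_version below:
  //
  //   obj_version ver =
  //       pobjv ? pobjv->read_version : objv_tracker.read_version;
  //   std::unique_ptr<struct ds3_user_info> user_info =
  //       get_encoded_info(bl, ver);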
346 obj_version obj_ver;
347 bufferlist bl;
348 std::unique_ptr<struct ds3_user_info> user_info =
349 get_encoded_info(bl, obj_ver);
350
351 // Remove user
352 int ret = ds3_user_remove(name.c_str(), user_info.get(), store->ds3, nullptr);
353 if (ret != 0) {
354 ldpp_dout(dpp, 0) << "Error: ds3_user_remove failed, name=" << name
355 << " ret=" << ret << dendl;
356 }
357 return ret;
358}
359
360DaosBucket::~DaosBucket() { close(nullptr); }
361
362int DaosBucket::open(const DoutPrefixProvider* dpp) {
363 ldpp_dout(dpp, 20) << "DEBUG: open, name=" << info.bucket.name.c_str()
364 << dendl;
365 // Idempotent
366 if (is_open()) {
367 return 0;
368 }
369
370 int ret = ds3_bucket_open(get_name().c_str(), &ds3b, store->ds3, nullptr);
371 ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_open, name=" << get_name()
372 << ", ret=" << ret << dendl;
373
374 return ret;
375}
376
377int DaosBucket::close(const DoutPrefixProvider* dpp) {
378 ldpp_dout(dpp, 20) << "DEBUG: close" << dendl;
379 // Idempotent
380 if (!is_open()) {
381 return 0;
382 }
383
384 int ret = ds3_bucket_close(ds3b, nullptr);
385 ds3b = nullptr;
386 ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_close ret=" << ret << dendl;
387
388 return ret;
389}
390
391std::unique_ptr<struct ds3_bucket_info> DaosBucket::get_encoded_info(
392 bufferlist& bl, ceph::real_time _mtime) {
393 DaosBucketInfo dbinfo;
394 dbinfo.info = info;
395 dbinfo.bucket_attrs = attrs;
396 dbinfo.mtime = _mtime;
397 dbinfo.bucket_version = bucket_version;
398 dbinfo.encode(bl);
399
400 auto bucket_info = std::make_unique<struct ds3_bucket_info>();
401 bucket_info->encoded = bl.c_str();
402 bucket_info->encoded_length = bl.length();
403 std::strncpy(bucket_info->name, get_name().c_str(), sizeof(bucket_info->name));
404 return bucket_info;
405}
406
407int DaosBucket::remove_bucket(const DoutPrefixProvider* dpp,
408 bool delete_children, bool forward_to_master,
409 req_info* req_info, optional_yield y) {
410 ldpp_dout(dpp, 20) << "DEBUG: remove_bucket, delete_children="
411
412 << delete_children
413
414 << " forward_to_master=" << forward_to_master << dendl;
415
416 return ds3_bucket_destroy(get_name().c_str(), delete_children, store->ds3,
417 nullptr);
418}
419
420int DaosBucket::remove_bucket_bypass_gc(int concurrent_max,
421 bool keep_index_consistent,
422 optional_yield y,
423 const DoutPrefixProvider* dpp) {
424 ldpp_dout(dpp, 20) << "DEBUG: remove_bucket_bypass_gc, concurrent_max="
425
426 << concurrent_max
427
428 << " keep_index_consistent=" << keep_index_consistent
429
430 << dendl;
431 return ds3_bucket_destroy(get_name().c_str(), true, store->ds3, nullptr);
432}
433
434int DaosBucket::put_info(const DoutPrefixProvider* dpp, bool exclusive,
435 ceph::real_time _mtime) {
436 ldpp_dout(dpp, 20) << "DEBUG: put_info(): bucket name=" << get_name()
437 << dendl;
438
439 int ret = open(dpp);
440 if (ret != 0) {
441 return ret;
442 }
443
444 bufferlist bl;
445 std::unique_ptr<struct ds3_bucket_info> bucket_info =
446 get_encoded_info(bl, ceph::real_time());
447
448 ret = ds3_bucket_set_info(bucket_info.get(), ds3b, nullptr);
449 if (ret != 0) {
450 ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_set_info failed: " << ret << dendl;
451 }
452 return ret;
453}
454
455int DaosBucket::load_bucket(const DoutPrefixProvider* dpp, optional_yield y,
456 bool get_stats) {
457 ldpp_dout(dpp, 20) << "DEBUG: load_bucket(): bucket name=" << get_name()
458 << dendl;
459 int ret = open(dpp);
460 if (ret != 0) {
461 return ret;
462 }
463
464 bufferlist bl;
465 DaosBucketInfo dbinfo;
466 uint64_t size = DS3_MAX_ENCODED_LEN;
467 struct ds3_bucket_info bucket_info = {.encoded = bl.append_hole(size).c_str(),
468 .encoded_length = size};
469
470 ret = ds3_bucket_get_info(&bucket_info, ds3b, nullptr);
471 if (ret != 0) {
472 ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_get_info failed: " << ret << dendl;
473 return ret;
474 }
475
476 auto iter = bl.cbegin();
477 dbinfo.decode(iter);
478 info = dbinfo.info;
479 rgw_placement_rule placement_rule;
480 placement_rule.name = "default";
481 placement_rule.storage_class = "STANDARD";
482 info.placement_rule = placement_rule;
483
484 attrs = dbinfo.bucket_attrs;
485 mtime = dbinfo.mtime;
486 bucket_version = dbinfo.bucket_version;
487 return ret;
488}
489
490/* stats - Not for first pass */
491int DaosBucket::read_stats(const DoutPrefixProvider* dpp,
492 const bucket_index_layout_generation& idx_layout,
493 int shard_id, std::string* bucket_ver,
494 std::string* master_ver,
495 std::map<RGWObjCategory, RGWStorageStats>& stats,
496 std::string* max_marker, bool* syncstopped) {
497 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
498}
499
500int DaosBucket::read_stats_async(
501 const DoutPrefixProvider* dpp,
502 const bucket_index_layout_generation& idx_layout, int shard_id,
503 RGWGetBucketStats_CB* ctx) {
504 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
505}
506
507int DaosBucket::sync_user_stats(const DoutPrefixProvider* dpp,
508 optional_yield y) {
509 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
510}
511
512int DaosBucket::update_container_stats(const DoutPrefixProvider* dpp) {
513 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
514}
515
516int DaosBucket::check_bucket_shards(const DoutPrefixProvider* dpp) {
517 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
518}
519
520int DaosBucket::chown(const DoutPrefixProvider* dpp, User& new_user,
521 optional_yield y) {
522 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
523}
524
525/* Make sure to call load_bucket() if you need it first */
526bool DaosBucket::is_owner(User* user) {
527 return (info.owner.compare(user->get_id()) == 0);
528}
529
530int DaosBucket::check_empty(const DoutPrefixProvider* dpp, optional_yield y) {
531 /* XXX: Check if bucket contains any objects */
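  /* A possible sketch (illustrative only, not wired up): ask
   * ds3_bucket_list_obj() for a single entry and report -ENOTEMPTY if anything
   * comes back. The names below mirror the listing code in DaosBucket::list().
   *
   *   int ret = open(dpp);
   *   if (ret != 0) return ret;
   *   vector<uint8_t> value(DS3_MAX_ENCODED_LEN);
   *   struct ds3_object_info oi = {.encoded = value.data(),
   *                                .encoded_length = value.size()};
   *   struct ds3_common_prefix_info cp = {};
   *   uint32_t nobj = 1, ncp = 1;
   *   bool is_truncated = false;
   *   char marker[DS3_MAX_KEY_BUFF] = "";
   *   ret = ds3_bucket_list_obj(&nobj, &oi, &ncp, &cp, "", "", marker,
   *                             true, &is_truncated, ds3b);
   *   if (ret != 0) return ret;
   *   return nobj > 0 ? -ENOTEMPTY : 0;
   */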
532 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
533}
534
535int DaosBucket::check_quota(const DoutPrefixProvider* dpp, RGWQuota& quota,
536 uint64_t obj_size, optional_yield y,
537 bool check_size_only) {
538 /* Not Handled in the first pass as stats are also needed */
539 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
540}
541
542int DaosBucket::merge_and_store_attrs(const DoutPrefixProvider* dpp,
543 Attrs& new_attrs, optional_yield y) {
544 ldpp_dout(dpp, 20) << "DEBUG: merge_and_store_attrs, new_attrs=" << new_attrs
545 << dendl;
546 for (auto& it : new_attrs) {
547 attrs[it.first] = it.second;
548 }
549
550 return put_info(dpp, y, ceph::real_time());
551}
552
553int DaosBucket::try_refresh_info(const DoutPrefixProvider* dpp,
554 ceph::real_time* pmtime) {
555 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
556}
557
558/* XXX: usage and stats not supported in the first pass */
559int DaosBucket::read_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
560 uint64_t end_epoch, uint32_t max_entries,
561 bool* is_truncated, RGWUsageIter& usage_iter,
562 map<rgw_user_bucket, rgw_usage_log_entry>& usage) {
563 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
564}
565
566int DaosBucket::trim_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
567 uint64_t end_epoch) {
568 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
569}
570
571int DaosBucket::remove_objs_from_index(
572 const DoutPrefixProvider* dpp,
573 std::list<rgw_obj_index_key>& objs_to_unlink) {
574 /* XXX: CHECK: Unlike RadosStore, there is no separate bucket index table.
575 * Delete all the objects in the list from the object table of this
576 * bucket.
577 */
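  /* One possible sketch (illustrative only, not enabled): open the bucket and
   * destroy each listed key with ds3_obj_destroy(), e.g.
   *
   *   int ret = open(dpp);
   *   if (ret != 0) return ret;
   *   for (const auto& key : objs_to_unlink) {
   *     ret = ds3_obj_destroy(key.name.c_str(), ds3b);
   *     if (ret != 0 && ret != -ENOENT) return ret;
   *   }
   *   return 0;
   */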
578 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
579}
580
581int DaosBucket::check_index(
582 const DoutPrefixProvider* dpp,
583 std::map<RGWObjCategory, RGWStorageStats>& existing_stats,
584 std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) {
585 /* XXX: stats not supported yet */
586 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
587}
588
589int DaosBucket::rebuild_index(const DoutPrefixProvider* dpp) {
590 /* there is no index table in DAOS. Not applicable */
591 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
592}
593
594int DaosBucket::set_tag_timeout(const DoutPrefixProvider* dpp,
595 uint64_t timeout) {
596 /* XXX: CHECK: set tag timeout for all the bucket objects? */
597 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
598}
599
600int DaosBucket::purge_instance(const DoutPrefixProvider* dpp) {
601 /* XXX: CHECK: for DAOS only single instance supported.
602 * Remove all the objects for that instance? Anything extra needed?
603 */
604 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
605}
606
607int DaosBucket::set_acl(const DoutPrefixProvider* dpp,
608 RGWAccessControlPolicy& acl, optional_yield y) {
609 ldpp_dout(dpp, 20) << "DEBUG: set_acl" << dendl;
610 int ret = 0;
611 bufferlist aclbl;
612
613 acls = acl;
614 acl.encode(aclbl);
615
616 Attrs attrs = get_attrs();
617 attrs[RGW_ATTR_ACL] = aclbl;
618
619 return ret;
620}
621
622std::unique_ptr<Object> DaosBucket::get_object(const rgw_obj_key& k) {
623 return std::make_unique<DaosObject>(this->store, k, this);
624}
625
626bool compare_rgw_bucket_dir_entry(rgw_bucket_dir_entry& entry1,
627 rgw_bucket_dir_entry& entry2) {
628 return (entry1.key < entry2.key);
629}
630
631bool compare_multipart_upload(std::unique_ptr<MultipartUpload>& upload1,
632 std::unique_ptr<MultipartUpload>& upload2) {
633 return (upload1->get_key() < upload2->get_key());
634}
635
636int DaosBucket::list(const DoutPrefixProvider* dpp, ListParams& params, int max,
637 ListResults& results, optional_yield y) {
638 ldpp_dout(dpp, 20) << "DEBUG: list bucket=" << get_name() << " max=" << max
639 << " params=" << params << dendl;
640 // Nothing to list
641 if (max == 0) {
642 return 0;
643 }
644
645 int ret = open(dpp);
646 if (ret != 0) {
647 return ret;
648 }
649
650 // Init needed structures
651 vector<struct ds3_object_info> object_infos(max);
652 uint32_t nobj = object_infos.size();
653 vector<vector<uint8_t>> values(nobj, vector<uint8_t>(DS3_MAX_ENCODED_LEN));
654 for (uint32_t i = 0; i < nobj; i++) {
655 object_infos[i].encoded = values[i].data();
656 object_infos[i].encoded_length = values[i].size();
657 }
658
659 vector<struct ds3_common_prefix_info> common_prefixes(max);
660 uint32_t ncp = common_prefixes.size();
661
662 char daos_marker[DS3_MAX_KEY_BUFF];
663 std::strncpy(daos_marker, params.marker.get_oid().c_str(), sizeof(daos_marker));
664
665 ret = ds3_bucket_list_obj(&nobj, object_infos.data(), &ncp,
666 common_prefixes.data(), params.prefix.c_str(),
667 params.delim.c_str(), daos_marker,
668 params.list_versions, &results.is_truncated, ds3b);
669
670 if (ret != 0) {
671 ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_list_obj failed, name="
672 << get_name() << ", ret=" << ret << dendl;
673 return ret;
674 }
675
676 object_infos.resize(nobj);
677 values.resize(nobj);
678 common_prefixes.resize(ncp);
679
680 // Fill common prefixes
681 for (auto const& cp : common_prefixes) {
682 results.common_prefixes[cp.prefix] = true;
683 }
684
685 // Decode objs
686 for (auto const& obj : object_infos) {
687 bufferlist bl;
688 rgw_bucket_dir_entry ent;
689 bl.append(reinterpret_cast<char*>(obj.encoded), obj.encoded_length);
690 auto iter = bl.cbegin();
691 ent.decode(iter);
692 if (params.list_versions || ent.is_visible()) {
693 results.objs.emplace_back(std::move(ent));
694 }
695 }
696
697 if (!params.allow_unordered) {
698 std::sort(results.objs.begin(), results.objs.end(),
699 compare_rgw_bucket_dir_entry);
700 }
701
702 return ret;
703}
704
705int DaosBucket::list_multiparts(
706 const DoutPrefixProvider* dpp, const string& prefix, string& marker,
707 const string& delim, const int& max_uploads,
708 vector<std::unique_ptr<MultipartUpload>>& uploads,
709 map<string, bool>* common_prefixes, bool* is_truncated) {
710 ldpp_dout(dpp, 20) << "DEBUG: list_multiparts" << dendl;
711 // Nothing to list
712 if (max_uploads == 0) {
713 *is_truncated = false;
714 return 0;
715 }
716
717 // Init needed structures
718 vector<struct ds3_multipart_upload_info> multipart_upload_infos(max_uploads);
719 uint32_t nmp = multipart_upload_infos.size();
720 vector<vector<uint8_t>> values(nmp, vector<uint8_t>(DS3_MAX_ENCODED_LEN));
721 for (uint32_t i = 0; i < nmp; i++) {
722 multipart_upload_infos[i].encoded = values[i].data();
723 multipart_upload_infos[i].encoded_length = values[i].size();
724 }
725
726 vector<struct ds3_common_prefix_info> cps(max_uploads);
727 uint32_t ncp = cps.size();
728
729 char daos_marker[DS3_MAX_KEY_BUFF];
730 std::strncpy(daos_marker, marker.c_str(), sizeof(daos_marker));
731
732 int ret = ds3_bucket_list_multipart(
733 get_name().c_str(), &nmp, multipart_upload_infos.data(), &ncp, cps.data(),
734 prefix.c_str(), delim.c_str(), daos_marker, is_truncated, store->ds3);
735
736 multipart_upload_infos.resize(nmp);
737 values.resize(nmp);
738 cps.resize(ncp);
739
740 // Fill common prefixes
741 for (auto const& cp : cps) {
742 (*common_prefixes)[cp.prefix] = true;
743 }
744
745 for (auto const& mp : multipart_upload_infos) {
746 // Decode the xattr
747 bufferlist bl;
748 rgw_bucket_dir_entry ent;
749 bl.append(reinterpret_cast<char*>(mp.encoded), mp.encoded_length);
750 auto iter = bl.cbegin();
751 ent.decode(iter);
752 string name = ent.key.name;
753
754 ACLOwner owner(rgw_user(ent.meta.owner));
755 owner.set_name(ent.meta.owner_display_name);
756 uploads.push_back(this->get_multipart_upload(
757 name, mp.upload_id, std::move(owner), ent.meta.mtime));
758 }
759
760 // Sort uploads
761 std::sort(uploads.begin(), uploads.end(), compare_multipart_upload);
762
763 return ret;
764}
765
766int DaosBucket::abort_multiparts(const DoutPrefixProvider* dpp,
767 CephContext* cct) {
768 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
769}
770
771void DaosStore::finalize(void) {
772 ldout(cctx, 20) << "DEBUG: finalize" << dendl;
773 int ret;
774
775 ret = ds3_disconnect(ds3, nullptr);
776 if (ret != 0) {
777 ldout(cctx, 0) << "ERROR: ds3_disconnect() failed: " << ret << dendl;
778 }
779 ds3 = nullptr;
780
781 ret = ds3_fini();
782 if (ret != 0) {
783 ldout(cctx, 0) << "ERROR: ds3_fini() failed: " << ret << dendl;
784 }
785}
786
787int DaosStore::initialize(CephContext* cct, const DoutPrefixProvider* dpp) {
788 ldpp_dout(dpp, 20) << "DEBUG: initialize" << dendl;
789 int ret = ds3_init();
790
791 // DS3 init failed, allow the case where init is already done
792 if (ret != 0 && ret != DER_ALREADY) {
793 ldout(cct, 0) << "ERROR: ds3_init() failed: " << ret << dendl;
794 return ret;
795 }
796
797 // XXX: these params should be taken from config settings and
798 // cct somehow?
799 const auto& daos_pool = cct->_conf.get_val<std::string>("daos_pool");
800 ldout(cct, 20) << "INFO: daos pool: " << daos_pool << dendl;
801
802 ret = ds3_connect(daos_pool.c_str(), nullptr, &ds3, nullptr);
803
804 if (ret != 0) {
805 ldout(cct, 0) << "ERROR: ds3_connect() failed: " << ret << dendl;
806 ds3_fini();
807 }
808
809 return ret;
810}
811
812const std::string& DaosZoneGroup::get_endpoint() const {
813 if (!group.endpoints.empty()) {
814 return group.endpoints.front();
815 } else {
816 // use zonegroup's master zone endpoints
817 auto z = group.zones.find(group.master_zone);
818 if (z != group.zones.end() && !z->second.endpoints.empty()) {
819 return z->second.endpoints.front();
820 }
821 }
822 return empty;
823}
824
825bool DaosZoneGroup::placement_target_exists(std::string& target) const {
826 return !!group.placement_targets.count(target);
827}
828
829void DaosZoneGroup::get_placement_target_names(
830 std::set<std::string>& names) const {
831 for (const auto& target : group.placement_targets) {
832 names.emplace(target.second.name);
833 }
834}
835
836int DaosZoneGroup::get_placement_tier(const rgw_placement_rule& rule,
837 std::unique_ptr<PlacementTier>* tier) {
838 std::map<std::string, RGWZoneGroupPlacementTarget>::const_iterator titer;
839 titer = group.placement_targets.find(rule.name);
840 if (titer == group.placement_targets.end()) {
841 return -ENOENT;
842 }
843
844 const auto& target_rule = titer->second;
845 std::map<std::string, RGWZoneGroupPlacementTier>::const_iterator ttier;
846 ttier = target_rule.tier_targets.find(rule.storage_class);
847 if (ttier == target_rule.tier_targets.end()) {
848 // not found
849 return -ENOENT;
850 }
851
852 PlacementTier* t;
853 t = new DaosPlacementTier(store, ttier->second);
854 if (!t) return -ENOMEM;
855
856 tier->reset(t);
857 return 0;
858}
859
860ZoneGroup& DaosZone::get_zonegroup() { return zonegroup; }
861
862int DaosZone::get_zonegroup(const std::string& id,
863 std::unique_ptr<ZoneGroup>* group) {
864 /* XXX: for now only one zonegroup supported */
865 ZoneGroup* zg;
866 zg = new DaosZoneGroup(store, zonegroup.get_group());
867
868 group->reset(zg);
869 return 0;
870}
871
872const rgw_zone_id& DaosZone::get_id() { return cur_zone_id; }
873
874const std::string& DaosZone::get_name() const {
875 return zone_params->get_name();
876}
877
878bool DaosZone::is_writeable() { return true; }
879
880bool DaosZone::get_redirect_endpoint(std::string* endpoint) { return false; }
881
882bool DaosZone::has_zonegroup_api(const std::string& api) const { return false; }
883
884const std::string& DaosZone::get_current_period_id() {
885 return current_period->get_id();
886}
887
888std::unique_ptr<LuaManager> DaosStore::get_lua_manager() {
889 return std::make_unique<DaosLuaManager>(this);
890}
891
892int DaosObject::get_obj_state(const DoutPrefixProvider* dpp,
893 RGWObjState** _state, optional_yield y,
894 bool follow_olh) {
895 // Get object's metadata (those stored in rgw_bucket_dir_entry)
896 ldpp_dout(dpp, 20) << "DEBUG: get_obj_state" << dendl;
897 rgw_bucket_dir_entry ent;
898 *_state = &state; // state is required even if a failure occurs
899
900 int ret = get_dir_entry_attrs(dpp, &ent);
901 if (ret != 0) {
902 return ret;
903 }
904
905 // Set object state.
906 state.exists = true;
907 state.size = ent.meta.size;
908 state.accounted_size = ent.meta.size;
909 state.mtime = ent.meta.mtime;
910
911 state.has_attrs = true;
912 bufferlist etag_bl;
913 string& etag = ent.meta.etag;
914 ldpp_dout(dpp, 20) << __func__ << ": object's etag: " << ent.meta.etag
915 << dendl;
916 etag_bl.append(etag);
917 state.attrset[RGW_ATTR_ETAG] = etag_bl;
918 return 0;
919}
920
921DaosObject::~DaosObject() { close(nullptr); }
922
923int DaosObject::set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs,
924 Attrs* delattrs, optional_yield y) {
925 ldpp_dout(dpp, 20) << "DEBUG: DaosObject::set_obj_attrs()" << dendl;
926 // TODO handle target_obj
927 // Get object's metadata (those stored in rgw_bucket_dir_entry)
928 rgw_bucket_dir_entry ent;
929 int ret = get_dir_entry_attrs(dpp, &ent);
930 if (ret != 0) {
931 return ret;
932 }
933
934 // Update object metadata
935 Attrs updateattrs = setattrs == nullptr ? attrs : *setattrs;
936 if (delattrs) {
937 for (auto const& [attr, attrval] : *delattrs) {
938 updateattrs.erase(attr);
939 }
940 }
941
942 ret = set_dir_entry_attrs(dpp, &ent, &updateattrs);
943 return ret;
944}
945
946int DaosObject::get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp,
947 rgw_obj* target_obj) {
948 ldpp_dout(dpp, 20) << "DEBUG: DaosObject::get_obj_attrs()" << dendl;
949 // TODO handle target_obj
950 // Get object's metadata (those stored in rgw_bucket_dir_entry)
951 rgw_bucket_dir_entry ent;
952 int ret = get_dir_entry_attrs(dpp, &ent, &attrs);
953 return ret;
954}
955
956int DaosObject::modify_obj_attrs(const char* attr_name, bufferlist& attr_val,
957 optional_yield y,
958 const DoutPrefixProvider* dpp) {
959 // Get object's metadata (those stored in rgw_bucket_dir_entry)
960 ldpp_dout(dpp, 20) << "DEBUG: modify_obj_attrs" << dendl;
961 rgw_bucket_dir_entry ent;
962 int ret = get_dir_entry_attrs(dpp, &ent, &attrs);
963 if (ret != 0) {
964 return ret;
965 }
966
967 // Update object attrs
968 set_atomic();
969 attrs[attr_name] = attr_val;
970
971 ret = set_dir_entry_attrs(dpp, &ent, &attrs);
972 return ret;
973}
974
975int DaosObject::delete_obj_attrs(const DoutPrefixProvider* dpp,
976 const char* attr_name, optional_yield y) {
977 ldpp_dout(dpp, 20) << "DEBUG: delete_obj_attrs" << dendl;
978 rgw_obj target = get_obj();
979 Attrs rmattr;
980 bufferlist bl;
981
982 rmattr[attr_name] = bl;
983 return set_obj_attrs(dpp, nullptr, &rmattr, y);
984}
985
986bool DaosObject::is_expired() {
987 auto iter = attrs.find(RGW_ATTR_DELETE_AT);
988 if (iter != attrs.end()) {
989 utime_t delete_at;
990 try {
991 auto bufit = iter->second.cbegin();
992 decode(delete_at, bufit);
993 } catch (buffer::error& err) {
994 ldout(store->ctx(), 0)
995 << "ERROR: " << __func__
996 << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
997 return false;
998 }
999
1000 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
1001 return true;
1002 }
1003 }
1004
1005 return false;
1006}
1007
1008// Taken from rgw_rados.cc
1009void DaosObject::gen_rand_obj_instance_name() {
1010 enum { OBJ_INSTANCE_LEN = 32 };
1011 char buf[OBJ_INSTANCE_LEN + 1];
1012
1013 gen_rand_alphanumeric_no_underscore(store->ctx(), buf, OBJ_INSTANCE_LEN);
1014 state.obj.key.set_instance(buf);
1015}
1016
1017int DaosObject::omap_get_vals(const DoutPrefixProvider* dpp,
1018 const std::string& marker, uint64_t count,
1019 std::map<std::string, bufferlist>* m, bool* pmore,
1020 optional_yield y) {
1021 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1022}
1023
1024int DaosObject::omap_get_all(const DoutPrefixProvider* dpp,
1025 std::map<std::string, bufferlist>* m,
1026 optional_yield y) {
1027 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1028}
1029
1030int DaosObject::omap_get_vals_by_keys(const DoutPrefixProvider* dpp,
1031 const std::string& oid,
1032 const std::set<std::string>& keys,
1033 Attrs* vals) {
1034 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1035}
1036
1037int DaosObject::omap_set_val_by_key(const DoutPrefixProvider* dpp,
1038 const std::string& key, bufferlist& val,
1039 bool must_exist, optional_yield y) {
1040 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1041}
1042
1043int DaosObject::chown(User& new_user, const DoutPrefixProvider* dpp, optional_yield y) {
1044 return 0;
1045}
1046
1047std::unique_ptr<MPSerializer> DaosObject::get_serializer(
1048 const DoutPrefixProvider* dpp, const std::string& lock_name) {
1049 return std::make_unique<MPDaosSerializer>(dpp, store, this, lock_name);
1050}
1051
1052int DaosObject::transition(Bucket* bucket,
1053 const rgw_placement_rule& placement_rule,
1054 const real_time& mtime, uint64_t olh_epoch,
1055 const DoutPrefixProvider* dpp, optional_yield y) {
1056 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1057}
1058
1059int DaosObject::transition_to_cloud(
1060 Bucket* bucket, rgw::sal::PlacementTier* tier, rgw_bucket_dir_entry& o,
1061 std::set<std::string>& cloud_targets, CephContext* cct, bool update_object,
1062 const DoutPrefixProvider* dpp, optional_yield y) {
1063 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1064}
1065
1066bool DaosObject::placement_rules_match(rgw_placement_rule& r1,
1067 rgw_placement_rule& r2) {
1068 /* XXX: support single default zone and zonegroup for now */
1069 return true;
1070}
1071
1072int DaosObject::dump_obj_layout(const DoutPrefixProvider* dpp, optional_yield y,
1073 Formatter* f) {
1074 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1075}
1076
1077std::unique_ptr<Object::ReadOp> DaosObject::get_read_op() {
1078 return std::make_unique<DaosObject::DaosReadOp>(this);
1079}
1080
1081DaosObject::DaosReadOp::DaosReadOp(DaosObject* _source) : source(_source) {}
1082
1083int DaosObject::DaosReadOp::prepare(optional_yield y,
1084 const DoutPrefixProvider* dpp) {
1085 ldpp_dout(dpp, 20) << __func__
1086 << ": bucket=" << source->get_bucket()->get_name()
1087 << dendl;
1088
1089 if (source->get_bucket()->versioned() && !source->have_instance()) {
1090 // If the bucket is versioned and no version is specified, get the latest
1091 // version
1092 source->set_instance(DS3_LATEST_INSTANCE);
1093 }
1094
1095 rgw_bucket_dir_entry ent;
1096 int ret = source->get_dir_entry_attrs(dpp, &ent);
1097
1098 // Set the source object's attrs. attrs is a key/value map and is used
1099 // in send_response_data() to set attributes, including the etag.
1100 bufferlist etag_bl;
1101 string& etag = ent.meta.etag;
1102 ldpp_dout(dpp, 20) << __func__ << ": object's etag: " << ent.meta.etag
1103 << dendl;
1104 etag_bl.append(etag.c_str(), etag.size());
1105 source->get_attrs().emplace(std::move(RGW_ATTR_ETAG), std::move(etag_bl));
1106
1107 source->set_key(ent.key);
1108 source->set_obj_size(ent.meta.size);
1109 ldpp_dout(dpp, 20) << __func__ << ": object's size: " << ent.meta.size
1110 << dendl;
1111
1112 return ret;
1113}
1114
1115int DaosObject::DaosReadOp::read(int64_t off, int64_t end, bufferlist& bl,
1116 optional_yield y,
1117 const DoutPrefixProvider* dpp) {
1118 ldpp_dout(dpp, 20) << __func__ << ": off=" << off << " end=" << end << dendl;
1119 int ret = source->lookup(dpp);
1120 if (ret != 0) {
1121 return ret;
1122 }
1123
1124 // Calculate size, end is inclusive
1125 uint64_t size = end - off + 1;
1126
1127 // Read
1128 ret = source->read(dpp, bl, off, size);
1129 if (ret != 0) {
1130 return ret;
1131 }
1132
1133 return ret;
1134}
1135
1136// RGWGetObj::execute() calls ReadOp::iterate() to read the object from 'off' to
1137// 'end'. The returned data is processed by 'cb', which is a chain of
1138// post-processing filters such as decompression, decryption and sending the
1139// data back to the client (RGWGetObj_CB::handle_data, which in turn calls
1140// RGWGetObj::get_data_cb() to send the data back).
1141//
1142// The POC implements a simple synchronous version of iterate() that reads the
1143// whole requested range in one call and passes it to 'cb' for post-processing.
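//
// A chunked variant would be a straightforward extension. A rough sketch (the
// 4 MiB block size is arbitrary and purely illustrative):
//
//   for (int64_t pos = off; pos <= end;) {
//     uint64_t len = std::min<uint64_t>(4 << 20, end - pos + 1);
//     bufferlist chunk;
//     int r = source->read(dpp, chunk, pos, len);  // len is updated in place
//     if (r != 0) return r;
//     r = cb->handle_data(chunk, pos, len);
//     if (r != 0) return r;
//     pos += len;
//   }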
1144int DaosObject::DaosReadOp::iterate(const DoutPrefixProvider* dpp, int64_t off,
1145 int64_t end, RGWGetDataCB* cb,
1146 optional_yield y) {
1147 ldpp_dout(dpp, 20) << __func__ << ": off=" << off << " end=" << end << dendl;
1148 int ret = source->lookup(dpp);
1149 if (ret != 0) {
1150 return ret;
1151 }
1152
1153 // Calculate size, end is inclusive
1154 uint64_t size = end - off + 1;
1155
1156 // Reserve buffers and read
1157 bufferlist bl;
1158 ret = source->read(dpp, bl, off, size);
1159 if (ret != 0) {
1160 return ret;
1161 }
1162
1163 // Call cb to process returned data.
1164 ldpp_dout(dpp, 20) << __func__ << ": call cb to process data, actual=" << size
1165 << dendl;
1166 cb->handle_data(bl, off, size);
1167 return ret;
1168}
1169
1170int DaosObject::DaosReadOp::get_attr(const DoutPrefixProvider* dpp,
1171 const char* name, bufferlist& dest,
1172 optional_yield y) {
1173 Attrs attrs;
1174 int ret = source->get_dir_entry_attrs(dpp, nullptr, &attrs);
1175 if (ret != 0) {
1176 return -ENODATA;
1177 }
1178
1179 auto search = attrs.find(name);
1180 if (search == attrs.end()) {
1181 return -ENODATA;
1182 }
1183
1184 dest = search->second;
1185 return 0;
1186}
1187
1188std::unique_ptr<Object::DeleteOp> DaosObject::get_delete_op() {
1189 return std::make_unique<DaosObject::DaosDeleteOp>(this);
1190}
1191
1192DaosObject::DaosDeleteOp::DaosDeleteOp(DaosObject* _source) : source(_source) {}
1193
1194// Implementation of DELETE OBJ also requires DaosObject::get_obj_state()
1195// to retrieve and set object's state from object's metadata.
1196//
1197// TODO:
1198// 1. The POC only deletes the Daos objects. It doesn't handle the
1199// DeleteOp::params. Delete::delete_obj() in rgw_rados.cc shows how the rados
1200// backend processes the params.
1201// 2. Delete an object when its versioning is turned on.
1202// 3. Handle empty directories
1203// 4. Fail when file doesn't exist
1204int DaosObject::DaosDeleteOp::delete_obj(const DoutPrefixProvider* dpp,
1205 optional_yield y) {
1206 ldpp_dout(dpp, 20) << "DaosDeleteOp::delete_obj "
1207 << source->get_key().get_oid() << " from "
1208 << source->get_bucket()->get_name() << dendl;
1209 if (source->get_instance() == "null") {
1210 source->clear_instance();
1211 }
1212
1213 // Open bucket
1214 int ret = 0;
1215 std::string key = source->get_key().get_oid();
1216 DaosBucket* daos_bucket = source->get_daos_bucket();
1217 ret = daos_bucket->open(dpp);
1218 if (ret != 0) {
1219 return ret;
1220 }
1221
1222 // Remove the daos object
1223 ret = ds3_obj_destroy(key.c_str(), daos_bucket->ds3b);
1224 ldpp_dout(dpp, 20) << "DEBUG: ds3_obj_destroy key=" << key << " ret=" << ret
1225 << dendl;
1226
1227 // result.delete_marker = parent_op.result.delete_marker;
1228 // result.version_id = parent_op.result.version_id;
1229
1230 return ret;
1231}
1232
1233int DaosObject::delete_object(const DoutPrefixProvider* dpp, optional_yield y,
1234 bool prevent_versioning) {
1235 ldpp_dout(dpp, 20) << "DEBUG: delete_object" << dendl;
1236 DaosObject::DaosDeleteOp del_op(this);
1237 del_op.params.bucket_owner = bucket->get_info().owner;
1238 del_op.params.versioning_status = bucket->get_info().versioning_status();
1239
1240 return del_op.delete_obj(dpp, y);
1241}
1242
1243int DaosObject::delete_obj_aio(const DoutPrefixProvider* dpp,
1244 RGWObjState* astate, Completions* aio,
1245 bool keep_index_consistent, optional_yield y) {
1246 /* XXX: Make it async */
1247 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1248}
1249
1250int DaosObject::copy_object(
1251 User* user, req_info* info, const rgw_zone_id& source_zone,
1252 rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
1253 rgw::sal::Bucket* src_bucket, const rgw_placement_rule& dest_placement,
1254 ceph::real_time* src_mtime, ceph::real_time* mtime,
1255 const ceph::real_time* mod_ptr, const ceph::real_time* unmod_ptr,
1256 bool high_precision_time, const char* if_match, const char* if_nomatch,
1257 AttrsMod attrs_mod, bool copy_if_newer, Attrs& attrs,
1258 RGWObjCategory category, uint64_t olh_epoch,
1259 boost::optional<ceph::real_time> delete_at, std::string* version_id,
1260 std::string* tag, std::string* etag, void (*progress_cb)(off_t, void*),
1261 void* progress_data, const DoutPrefixProvider* dpp, optional_yield y) {
1262 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1263}
1264
1265int DaosObject::swift_versioning_restore(bool& restored,
1266 const DoutPrefixProvider* dpp) {
1267 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1268}
1269
1270int DaosObject::swift_versioning_copy(const DoutPrefixProvider* dpp,
1271 optional_yield y) {
1272 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
1273}
1274
1275int DaosObject::lookup(const DoutPrefixProvider* dpp) {
1276 ldpp_dout(dpp, 20) << "DEBUG: lookup" << dendl;
1277 if (is_open()) {
1278 return 0;
1279 }
1280
1281 if (get_instance() == "null") {
1282 clear_instance();
1283 }
1284
1285 int ret = 0;
1286 DaosBucket* daos_bucket = get_daos_bucket();
1287 ret = daos_bucket->open(dpp);
1288 if (ret != 0) {
1289 return ret;
1290 }
1291
1292 ret = ds3_obj_open(get_key().get_oid().c_str(), &ds3o, daos_bucket->ds3b);
1293
1294 if (ret == -ENOENT) {
1295 ldpp_dout(dpp, 20) << "DEBUG: daos object (" << get_bucket()->get_name()
1296 << ", " << get_key().get_oid()
1297 << ") does not exist: ret=" << ret << dendl;
1298 } else if (ret != 0) {
1299 ldpp_dout(dpp, 0) << "ERROR: failed to open daos object ("
1300 << get_bucket()->get_name() << ", " << get_key().get_oid()
1301 << "): ret=" << ret << dendl;
1302 }
1303 return ret;
1304}
1305
1306int DaosObject::create(const DoutPrefixProvider* dpp) {
1307 ldpp_dout(dpp, 20) << "DEBUG: create" << dendl;
1308 if (is_open()) {
1309 return 0;
1310 }
1311
1312 if (get_instance() == "null") {
1313 clear_instance();
1314 }
1315
1316 int ret = 0;
1317 DaosBucket* daos_bucket = get_daos_bucket();
1318 ret = daos_bucket->open(dpp);
1319 if (ret != 0) {
1320 return ret;
1321 }
1322
1323 ret = ds3_obj_create(get_key().get_oid().c_str(), &ds3o, daos_bucket->ds3b);
1324
1325 if (ret != 0) {
1326 ldpp_dout(dpp, 0) << "ERROR: failed to create daos object ("
1327 << get_bucket()->get_name() << ", " << get_key().get_oid()
1328 << "): ret=" << ret << dendl;
1329 }
1330 return ret;
1331}
1332
1333int DaosObject::close(const DoutPrefixProvider* dpp) {
1334 ldpp_dout(dpp, 20) << "DEBUG: close" << dendl;
1335 if (!is_open()) {
1336 return 0;
1337 }
1338
1339 int ret = ds3_obj_close(ds3o);
1340 ds3o = nullptr;
1341 ldpp_dout(dpp, 20) << "DEBUG: ds3_obj_close ret=" << ret << dendl;
1342 return ret;
1343}
1344
1345int DaosObject::write(const DoutPrefixProvider* dpp, bufferlist&& data,
1346 uint64_t offset) {
1347 ldpp_dout(dpp, 20) << "DEBUG: write" << dendl;
1348 uint64_t size = data.length();
1349 int ret = ds3_obj_write(data.c_str(), offset, &size, get_daos_bucket()->ds3b,
1350 ds3o, nullptr);
1351 if (ret != 0) {
1352 ldpp_dout(dpp, 0) << "ERROR: failed to write into daos object ("
1353 << get_bucket()->get_name() << ", " << get_key().get_oid()
1354 << "): ret=" << ret << dendl;
1355 }
1356 return ret;
1357}
1358
1359int DaosObject::read(const DoutPrefixProvider* dpp, bufferlist& data,
1360 uint64_t offset, uint64_t& size) {
1361 ldpp_dout(dpp, 20) << "DEBUG: read" << dendl;
1362 int ret = ds3_obj_read(data.append_hole(size).c_str(), offset, &size,
1363 get_daos_bucket()->ds3b, ds3o, nullptr);
1364 if (ret != 0) {
1365 ldpp_dout(dpp, 0) << "ERROR: failed to read from daos object ("
1366 << get_bucket()->get_name() << ", " << get_key().get_oid()
1367 << "): ret=" << ret << dendl;
1368 }
1369 return ret;
1370}
1371
1372// Get the object's dirent and attrs
1373int DaosObject::get_dir_entry_attrs(const DoutPrefixProvider* dpp,
1374 rgw_bucket_dir_entry* ent,
1375 Attrs* getattrs) {
1376 ldpp_dout(dpp, 20) << "DEBUG: get_dir_entry_attrs" << dendl;
1377 int ret = 0;
1378 vector<uint8_t> value(DS3_MAX_ENCODED_LEN);
1379 uint32_t size = value.size();
1380
1381 if (get_key().ns == RGW_OBJ_NS_MULTIPART) {
1382 struct ds3_multipart_upload_info ui = {.encoded = value.data(),
1383 .encoded_length = size};
1384 ret = ds3_upload_get_info(&ui, bucket->get_name().c_str(),
1385 get_key().get_oid().c_str(), store->ds3);
1386 } else {
1387 ret = lookup(dpp);
1388 if (ret != 0) {
1389 return ret;
1390 }
1391
1392 auto object_info = std::make_unique<struct ds3_object_info>();
1393 object_info->encoded = value.data();
1394 object_info->encoded_length = size;
1395 ret = ds3_obj_get_info(object_info.get(), get_daos_bucket()->ds3b, ds3o);
1396 size = object_info->encoded_length;
1397 }
1398
1399 if (ret != 0) {
1400 ldpp_dout(dpp, 0) << "ERROR: failed to get info of daos object ("
1401 << get_bucket()->get_name() << ", " << get_key().get_oid()
1402 << "): ret=" << ret << dendl;
1403 return ret;
1404 }
1405
1406 rgw_bucket_dir_entry dummy_ent;
1407 if (!ent) {
1408 // if ent is not passed, use a dummy ent
1409 ent = &dummy_ent;
1410 }
1411
1412 bufferlist bl;
1413 bl.append(reinterpret_cast<char*>(value.data()), size);
1414 auto iter = bl.cbegin();
1415 ent->decode(iter);
1416 if (getattrs) {
1417 decode(*getattrs, iter);
1418 }
1419
1420 return ret;
1421}
1422// Set the object's dirent and attrs
1423int DaosObject::set_dir_entry_attrs(const DoutPrefixProvider* dpp,
1424 rgw_bucket_dir_entry* ent,
1425 Attrs* setattrs) {
1426 ldpp_dout(dpp, 20) << "DEBUG: set_dir_entry_attrs" << dendl;
1427 int ret = lookup(dpp);
1428 if (ret != 0) {
1429 return ret;
1430 }
1431
1432 // Set defaults
1433 if (!ent) {
1434 // if ent is not passed, return an error
1435 return -EINVAL;
1436 }
1437
1438 if (!setattrs) {
1439 // if setattrs is not passed, use object attrs
1440 setattrs = &attrs;
1441 }
1442
1443 bufferlist wbl;
1444 ent->encode(wbl);
1445 encode(*setattrs, wbl);
1446
1447 // Write rgw_bucket_dir_entry into object xattr
1448 auto object_info = std::make_unique<struct ds3_object_info>();
1449 object_info->encoded = wbl.c_str();
1450 object_info->encoded_length = wbl.length();
1451 ret = ds3_obj_set_info(object_info.get(), get_daos_bucket()->ds3b, ds3o);
1452 if (ret != 0) {
1453 ldpp_dout(dpp, 0) << "ERROR: failed to set info of daos object ("
1454 << get_bucket()->get_name() << ", " << get_key().get_oid()
1455 << "): ret=" << ret << dendl;
1456 }
1457 return ret;
1458}
1459
1460int DaosObject::mark_as_latest(const DoutPrefixProvider* dpp,
1461 ceph::real_time set_mtime) {
1462 // TODO handle deletion
1463 // TODO understand race conditions
1464 ldpp_dout(dpp, 20) << "DEBUG: mark_as_latest" << dendl;
1465
1466 // Get latest version so far
1467 std::unique_ptr<DaosObject> latest_object = std::make_unique<DaosObject>(
1468 store, rgw_obj_key(get_name(), DS3_LATEST_INSTANCE), get_bucket());
1469
1470 ldpp_dout(dpp, 20) << __func__ << ": key=" << get_key().get_oid()
1471 << " latest_object_key= "
1472 << latest_object->get_key().get_oid() << dendl;
1473
1474 int ret = latest_object->lookup(dpp);
1475 if (ret == 0) {
1476 // Get metadata only if file exists
1477 rgw_bucket_dir_entry latest_ent;
1478 Attrs latest_attrs;
1479 ret = latest_object->get_dir_entry_attrs(dpp, &latest_ent, &latest_attrs);
1480 if (ret != 0) {
1481 return ret;
1482 }
1483
1484 // Update flags
1485 latest_ent.flags = rgw_bucket_dir_entry::FLAG_VER;
1486 latest_ent.meta.mtime = set_mtime;
1487 ret = latest_object->set_dir_entry_attrs(dpp, &latest_ent, &latest_attrs);
1488 if (ret != 0) {
1489 return ret;
1490 }
1491 }
1492
1493 // Get or create the link [latest], make it link to the current latest
1494 // version.
1495 ret =
1496 ds3_obj_mark_latest(get_key().get_oid().c_str(), get_daos_bucket()->ds3b);
1497 ldpp_dout(dpp, 20) << "DEBUG: ds3_obj_mark_latest ret=" << ret << dendl;
1498 return ret;
1499}
1500
1501DaosAtomicWriter::DaosAtomicWriter(
1502 const DoutPrefixProvider* dpp, optional_yield y,
1503 rgw::sal::Object* obj, DaosStore* _store,
1504 const rgw_user& _owner, const rgw_placement_rule* _ptail_placement_rule,
1505 uint64_t _olh_epoch, const std::string& _unique_tag)
1506 : StoreWriter(dpp, y),
1507 store(_store),
1508 owner(_owner),
1509 ptail_placement_rule(_ptail_placement_rule),
1510 olh_epoch(_olh_epoch),
1511 unique_tag(_unique_tag),
1512 obj(_store, obj->get_key(), obj->get_bucket()) {}
1513
1514int DaosAtomicWriter::prepare(optional_yield y) {
1515 ldpp_dout(dpp, 20) << "DEBUG: prepare" << dendl;
1516 int ret = obj.create(dpp);
1517 return ret;
1518}
1519
1520// TODO: Handle concurrent writes; a unique object id is a possible solution,
1521// or use DAOS transactions.
1522// XXX: Do we need to accumulate writes as motr does?
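//
// If accumulation does turn out to be worthwhile, a minimal sketch (using a
// hypothetical `accum` bufferlist member, an `accum_off` offset and an
// arbitrary flush threshold, none of which exist in this POC; a final flush on
// the last, empty process() call would still be needed):
//
//   accum.claim_append(data);
//   if (accum.length() >= kFlushThreshold) {
//     uint64_t len = accum.length();
//     int r = obj.write(dpp, std::move(accum), accum_off);
//     if (r != 0) return r;
//     accum.clear();
//     accum_off += len;
//     total_data_size += len;
//   }
//   return 0;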
1523int DaosAtomicWriter::process(bufferlist&& data, uint64_t offset) {
1524 ldpp_dout(dpp, 20) << "DEBUG: process" << dendl;
1525 if (data.length() == 0) {
1526 return 0;
1527 }
1528
1529 int ret = 0;
1530 if (!obj.is_open()) {
1531 ret = obj.lookup(dpp);
1532 if (ret != 0) {
1533 return ret;
1534 }
1535 }
1536
1537 // XXX: Combine multiple streams into one as motr does
1538 uint64_t data_size = data.length();
1539 ret = obj.write(dpp, std::move(data), offset);
1540 if (ret == 0) {
1541 total_data_size += data_size;
1542 }
1543 return ret;
1544}
1545
1546int DaosAtomicWriter::complete(
1547 size_t accounted_size, const std::string& etag, ceph::real_time* mtime,
1548 ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs,
1549 ceph::real_time delete_at, const char* if_match, const char* if_nomatch,
1550 const std::string* user_data, rgw_zone_set* zones_trace, bool* canceled,
1551 optional_yield y) {
1552 ldpp_dout(dpp, 20) << "DEBUG: complete" << dendl;
1553 bufferlist bl;
1554 rgw_bucket_dir_entry ent;
1555 int ret;
1556
1557 // Set rgw_bucket_dir_entry. Some of the members of this structure may not
1558 // apply to daos.
1559 //
1560 // Check out AtomicObjectProcessor::complete() in rgw_putobj_processor.cc
1561 // and RGWRados::Object::Write::write_meta() in rgw_rados.cc for what and
1562 // how to set the dir entry. Only set the basic ones for POC, no ACLs and
1563 // other attrs.
1564 obj.get_key().get_index_key(&ent.key);
1565 ent.meta.size = total_data_size;
1566 ent.meta.accounted_size = accounted_size;
1567 ent.meta.mtime =
1568 real_clock::is_zero(set_mtime) ? ceph::real_clock::now() : set_mtime;
1569 ent.meta.etag = etag;
1570 ent.meta.owner = owner.to_str();
1571 ent.meta.owner_display_name =
1572 obj.get_bucket()->get_owner()->get_display_name();
1573 bool is_versioned = obj.get_bucket()->versioned();
1574 if (is_versioned)
1575 ent.flags =
1576 rgw_bucket_dir_entry::FLAG_VER | rgw_bucket_dir_entry::FLAG_CURRENT;
1577 ldpp_dout(dpp, 20) << __func__ << ": key=" << obj.get_key().get_oid()
1578 << " etag: " << etag << dendl;
1579 if (user_data) ent.meta.user_data = *user_data;
1580
1581 RGWBucketInfo& info = obj.get_bucket()->get_info();
1582 if (info.obj_lock_enabled() && info.obj_lock.has_rule()) {
1583 auto iter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
1584 if (iter == attrs.end()) {
1585 real_time lock_until_date =
1586 info.obj_lock.get_lock_until_date(ent.meta.mtime);
1587 string mode = info.obj_lock.get_mode();
1588 RGWObjectRetention obj_retention(mode, lock_until_date);
1589 bufferlist retention_bl;
1590 obj_retention.encode(retention_bl);
1591 attrs[RGW_ATTR_OBJECT_RETENTION] = retention_bl;
1592 }
1593 }
1594
1595 ret = obj.set_dir_entry_attrs(dpp, &ent, &attrs);
1596
1597 if (is_versioned) {
1598 ret = obj.mark_as_latest(dpp, set_mtime);
1599 if (ret != 0) {
1600 return ret;
1601 }
1602 }
1603
1604 return ret;
1605}
1606
1607int DaosMultipartUpload::abort(const DoutPrefixProvider* dpp,
1608 CephContext* cct) {
1609 // Remove upload from bucket multipart index
1610 ldpp_dout(dpp, 20) << "DEBUG: abort" << dendl;
1611 return ds3_upload_remove(bucket->get_name().c_str(), get_upload_id().c_str(),
1612 store->ds3);
1613}
1614
1615std::unique_ptr<rgw::sal::Object> DaosMultipartUpload::get_meta_obj() {
1616 return bucket->get_object(
1617 rgw_obj_key(get_upload_id(), string(), RGW_OBJ_NS_MULTIPART));
1618}
1619
1620int DaosMultipartUpload::init(const DoutPrefixProvider* dpp, optional_yield y,
1621 ACLOwner& _owner,
1622 rgw_placement_rule& dest_placement,
1623 rgw::sal::Attrs& attrs) {
1624 ldpp_dout(dpp, 20) << "DEBUG: init" << dendl;
1625 int ret;
1626 std::string oid = mp_obj.get_key();
1627
1628 // Create an initial entry in the bucket. The entry will be
1629 // updated when multipart upload is completed, for example,
1630 // size, etag etc.
1631 bufferlist bl;
1632 rgw_bucket_dir_entry ent;
1633 ent.key.name = oid;
1634 ent.meta.owner = owner.get_id().to_str();
1635 ent.meta.category = RGWObjCategory::MultiMeta;
1636 ent.meta.mtime = ceph::real_clock::now();
1637
1638 multipart_upload_info upload_info;
1639 upload_info.dest_placement = dest_placement;
1640
1641 ent.encode(bl);
1642 encode(attrs, bl);
1643 encode(upload_info, bl);
1644
1645 struct ds3_multipart_upload_info ui;
1646 std::strcpy(ui.upload_id, MULTIPART_UPLOAD_ID_PREFIX);
1647 std::strncpy(ui.key, oid.c_str(), sizeof(ui.key));
1648 ui.encoded = bl.c_str();
1649 ui.encoded_length = bl.length();
1650 int prefix_length = strlen(ui.upload_id);
1651
1652 do {
1653 gen_rand_alphanumeric(store->ctx(), ui.upload_id + prefix_length,
1654 sizeof(ui.upload_id) - 1 - prefix_length);
1655 mp_obj.init(oid, ui.upload_id);
1656 ret = ds3_upload_init(&ui, bucket->get_name().c_str(), store->ds3);
1657 } while (ret == -EEXIST);
1658
1659 if (ret != 0) {
1660 ldpp_dout(dpp, 0) << "ERROR: failed to create multipart upload dir ("
1661 << bucket->get_name() << "/" << get_upload_id()
1662 << "): ret=" << ret << dendl;
1663 }
1664 return ret;
1665}
1666
1667int DaosMultipartUpload::list_parts(const DoutPrefixProvider* dpp,
1668 CephContext* cct, int num_parts, int marker,
1669 int* next_marker, bool* truncated,
1670 bool assume_unsorted) {
1671 ldpp_dout(dpp, 20) << "DEBUG: list_parts" << dendl;
1672 // Init needed structures
1673 vector<struct ds3_multipart_part_info> multipart_part_infos(num_parts);
1674 uint32_t npart = multipart_part_infos.size();
1675 vector<vector<uint8_t>> values(npart, vector<uint8_t>(DS3_MAX_ENCODED_LEN));
1676 for (uint32_t i = 0; i < npart; i++) {
1677 multipart_part_infos[i].encoded = values[i].data();
1678 multipart_part_infos[i].encoded_length = values[i].size();
1679 }
1680
1681 uint32_t daos_marker = marker;
1682 int ret = ds3_upload_list_parts(
1683 bucket->get_name().c_str(), get_upload_id().c_str(), &npart,
1684 multipart_part_infos.data(), &daos_marker, truncated, store->ds3);
1685
1686 if (ret != 0) {
1687 if (ret == -ENOENT) {
1688 ret = -ERR_NO_SUCH_UPLOAD;
1689 }
1690 return ret;
1691 }
1692
1693 multipart_part_infos.resize(npart);
1694 values.resize(npart);
1695 parts.clear();
1696
1697 for (auto const& pi : multipart_part_infos) {
1698 bufferlist bl;
1699 bl.append(reinterpret_cast<char*>(pi.encoded), pi.encoded_length);
1700
1701 std::unique_ptr<DaosMultipartPart> part =
1702 std::make_unique<DaosMultipartPart>();
1703 auto iter = bl.cbegin();
1704 decode(part->info, iter);
1705 parts[pi.part_num] = std::move(part);
1706 }
1707
1708 if (next_marker) {
1709 *next_marker = daos_marker;
1710 }
1711 return ret;
1712}
1713
1714// Heavily copied from rgw_sal_rados.cc
1715int DaosMultipartUpload::complete(
1716 const DoutPrefixProvider* dpp, optional_yield y, CephContext* cct,
1717 map<int, string>& part_etags, list<rgw_obj_index_key>& remove_objs,
1718 uint64_t& accounted_size, bool& compressed, RGWCompressionInfo& cs_info,
1719 off_t& off, std::string& tag, ACLOwner& owner, uint64_t olh_epoch,
1720 rgw::sal::Object* target_obj) {
1721 ldpp_dout(dpp, 20) << "DEBUG: complete" << dendl;
1722 char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
1723 char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
1724 std::string etag;
1725 bufferlist etag_bl;
1726 MD5 hash;
1727 // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
1728 hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
1729 bool truncated;
1730 int ret;
1731
1732 ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): enter" << dendl;
1733 int total_parts = 0;
1734 int handled_parts = 0;
1735 int max_parts = 1000;
1736 int marker = 0;
1737 uint64_t min_part_size = cct->_conf->rgw_multipart_min_part_size;
1738 auto etags_iter = part_etags.begin();
1739 rgw::sal::Attrs attrs = target_obj->get_attrs();
1740
1741 do {
1742 ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): list_parts()"
1743 << dendl;
1744 ret = list_parts(dpp, cct, max_parts, marker, &marker, &truncated);
1745 if (ret == -ENOENT) {
1746 ret = -ERR_NO_SUCH_UPLOAD;
1747 }
1748 if (ret != 0) return ret;
1749
1750 total_parts += parts.size();
1751 if (!truncated && total_parts != (int)part_etags.size()) {
1752 ldpp_dout(dpp, 0) << "NOTICE: total parts mismatch: have: " << total_parts
1753 << " expected: " << part_etags.size() << dendl;
1754 ret = -ERR_INVALID_PART;
1755 return ret;
1756 }
1757 ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): parts.size()="
1758 << parts.size() << dendl;
1759
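    // Cross-check each client-supplied part against the stored part:
    // the part numbers and ETags must match, and every part except the
    // last must meet the configured minimum part size.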
1760 for (auto obj_iter = parts.begin();
1761 etags_iter != part_etags.end() && obj_iter != parts.end();
1762 ++etags_iter, ++obj_iter, ++handled_parts) {
1763 DaosMultipartPart* part =
1764 dynamic_cast<rgw::sal::DaosMultipartPart*>(obj_iter->second.get());
1765 uint64_t part_size = part->get_size();
1766 ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): part_size="
1767 << part_size << dendl;
1768 if (handled_parts < (int)part_etags.size() - 1 &&
1769 part_size < min_part_size) {
1770 ret = -ERR_TOO_SMALL;
1771 return ret;
1772 }
1773
1774 char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
1775 if (etags_iter->first != (int)obj_iter->first) {
1776 ldpp_dout(dpp, 0) << "NOTICE: parts num mismatch: next requested: "
1777 << etags_iter->first
1778 << " next uploaded: " << obj_iter->first << dendl;
1779 ret = -ERR_INVALID_PART;
1780 return ret;
1781 }
1782 string part_etag = rgw_string_unquote(etags_iter->second);
1783 if (part_etag.compare(part->get_etag()) != 0) {
1784 ldpp_dout(dpp, 0) << "NOTICE: etag mismatch: part: "
1785 << etags_iter->first
1786 << " etag: " << etags_iter->second << dendl;
1787 ret = -ERR_INVALID_PART;
1788 return ret;
1789 }
1790
1791 hex_to_buf(part->get_etag().c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE);
1792 hash.Update((const unsigned char*)petag, sizeof(petag));
1793 ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): calc etag "
1794 << dendl;
1795
1796 RGWUploadPartInfo& obj_part = part->info;
1797 string oid = mp_obj.get_part(obj_part.num);
1798 rgw_obj src_obj;
1799 src_obj.init_ns(bucket->get_key(), oid, RGW_OBJ_NS_MULTIPART);
1800
1801 bool part_compressed = (obj_part.cs_info.compression_type != "none");
1802 if ((handled_parts > 0) &&
1803 ((part_compressed != compressed) ||
1804 (cs_info.compression_type != obj_part.cs_info.compression_type))) {
1805 ldpp_dout(dpp, 0)
1806 << "ERROR: compression type was changed during multipart upload ("
1807 << cs_info.compression_type << ">>"
1808 << obj_part.cs_info.compression_type << ")" << dendl;
1809 ret = -ERR_INVALID_PART;
1810 return ret;
1811 }
1812
1813 ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): part compression"
1814 << dendl;
1815 if (part_compressed) {
1816 int64_t new_ofs; // offset in compression data for new part
1817 if (cs_info.blocks.size() > 0)
1818 new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
1819 else
1820 new_ofs = 0;
1821 for (const auto& block : obj_part.cs_info.blocks) {
1822 compression_block cb;
1823 cb.old_ofs = block.old_ofs + cs_info.orig_size;
1824 cb.new_ofs = new_ofs;
1825 cb.len = block.len;
1826 cs_info.blocks.push_back(cb);
1827 new_ofs = cb.new_ofs + cb.len;
1828 }
1829 if (!compressed)
1830 cs_info.compression_type = obj_part.cs_info.compression_type;
1831 cs_info.orig_size += obj_part.cs_info.orig_size;
1832 compressed = true;
1833 }
1834
1835       // We may not need to do the following, as remove_objs holds the
1836       // entries that should not show up when listing a bucket. Since the
1837       // metadata of in-progress uploads is kept in a separate index, those
1838       // entries are already hidden from bucket listings.
1839 rgw_obj_index_key remove_key;
1840 src_obj.key.get_index_key(&remove_key);
1841
1842 remove_objs.push_back(remove_key);
1843
1844 off += obj_part.size;
1845 accounted_size += obj_part.accounted_size;
1846 ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): off=" << off
1847 << ", accounted_size = " << accounted_size << dendl;
1848 }
1849 } while (truncated);
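  // Finalize the S3-style multipart ETag: the MD5 of the concatenated
  // binary part ETags, suffixed with "-<part count>", e.g.
  // "<32 hex digits>-3" for a three-part upload.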
1850 hash.Final((unsigned char*)final_etag);
1851
1852 buf_to_hex((unsigned char*)final_etag, sizeof(final_etag), final_etag_str);
1853 snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2],
1854 sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2, "-%lld",
1855 (long long)part_etags.size());
1856 etag = final_etag_str;
1857 ldpp_dout(dpp, 10) << "calculated etag: " << etag << dendl;
1858
1859 etag_bl.append(etag);
1860
1861 attrs[RGW_ATTR_ETAG] = etag_bl;
1862
1863 if (compressed) {
1864 // write compression attribute to full object
1865 bufferlist tmp;
1866 encode(cs_info, tmp);
1867 attrs[RGW_ATTR_COMPRESSION] = tmp;
1868 }
1869
1870   // The code below diverges from rgw_sal_rados.cc
1871   // Read the object's multipart upload info
1872 bufferlist bl;
1873 uint64_t size = DS3_MAX_ENCODED_LEN;
1874 struct ds3_multipart_upload_info ui = {
1875 .encoded = bl.append_hole(size).c_str(), .encoded_length = size};
1876 ret = ds3_upload_get_info(&ui, bucket->get_name().c_str(),
1877 get_upload_id().c_str(), store->ds3);
1878 ldpp_dout(dpp, 20) << "DEBUG: ds3_upload_get_info entry="
1879 << bucket->get_name() << "/" << get_upload_id() << dendl;
1880 if (ret != 0) {
1881 if (ret == -ENOENT) {
1882 ret = -ERR_NO_SUCH_UPLOAD;
1883 }
1884 return ret;
1885 }
1886
1887 rgw_bucket_dir_entry ent;
1888 auto iter = bl.cbegin();
1889 ent.decode(iter);
1890
1891 // Update entry data and name
1892 target_obj->get_key().get_index_key(&ent.key);
1893 ent.meta.size = off;
1894 ent.meta.accounted_size = accounted_size;
1895 ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): obj size="
1896 << ent.meta.size
1897 << " obj accounted size=" << ent.meta.accounted_size
1898 << dendl;
1899 ent.meta.category = RGWObjCategory::Main;
1900 ent.meta.mtime = ceph::real_clock::now();
1901 bool is_versioned = target_obj->get_bucket()->versioned();
1902 if (is_versioned)
1903 ent.flags =
1904 rgw_bucket_dir_entry::FLAG_VER | rgw_bucket_dir_entry::FLAG_CURRENT;
1905 ent.meta.etag = etag;
1906
1907 // Open object
1908 DaosObject* obj = static_cast<DaosObject*>(target_obj);
1909 ret = obj->create(dpp);
1910 if (ret != 0) {
1911 return ret;
1912 }
1913
1914 // Copy data from parts to object
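  // Each part is opened, read fully into a bufferlist and appended to the
  // head object at the running write offset, i.e. a plain whole-part copy
  // rather than any server-side stitching of the part objects.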
1915 uint64_t write_off = 0;
1916 for (auto const& [part_num, part] : get_parts()) {
1917 ds3_part_t* ds3p;
1918 ret = ds3_part_open(get_bucket_name().c_str(), get_upload_id().c_str(),
1919 part_num, false, &ds3p, store->ds3);
1920 if (ret != 0) {
1921 return ret;
1922 }
1923
1924 // Reserve buffers and read
1925 uint64_t size = part->get_size();
1926 bufferlist bl;
1927 ret = ds3_part_read(bl.append_hole(size).c_str(), 0, &size, ds3p,
1928 store->ds3, nullptr);
1929 if (ret != 0) {
1930 ds3_part_close(ds3p);
1931 return ret;
1932 }
1933
1934 ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): part " << part_num
1935 << " size is " << size << dendl;
1936
1937     // write to obj and propagate any write error after closing the part
1938     ret = obj->write(dpp, std::move(bl), write_off);
1939     ds3_part_close(ds3p);
     if (ret != 0) return ret;
1940 write_off += part->get_size();
1941 }
1942
1943 // Set attributes
1944   ret = obj->set_dir_entry_attrs(dpp, &ent, &attrs);
   if (ret != 0) return ret;
1945
1946 if (is_versioned) {
1947 ret = obj->mark_as_latest(dpp, ent.meta.mtime);
1948 if (ret != 0) {
1949 return ret;
1950 }
1951 }
1952
1953 // Remove upload from bucket multipart index
1954 ret = ds3_upload_remove(get_bucket_name().c_str(), get_upload_id().c_str(),
1955 store->ds3);
1956 return ret;
1957}
1958
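// Return the placement rule and/or attributes recorded for this upload.
// The placement rule is cached in `placement` after the first read; the
// attributes are decoded from the upload entry stored in the bucket's
// multipart index.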
1959int DaosMultipartUpload::get_info(const DoutPrefixProvider* dpp,
1960 optional_yield y, rgw_placement_rule** rule,
1961 rgw::sal::Attrs* attrs) {
1962 ldpp_dout(dpp, 20) << "DaosMultipartUpload::get_info(): enter" << dendl;
1963 if (!rule && !attrs) {
1964 return 0;
1965 }
1966
1967 if (rule) {
1968 if (!placement.empty()) {
1969 *rule = &placement;
1970 if (!attrs) {
1971 // Don't need attrs, done
1972 return 0;
1973 }
1974 } else {
1975 *rule = nullptr;
1976 }
1977 }
1978
1979 // Read the multipart upload dirent from index
1980 bufferlist bl;
1981 uint64_t size = DS3_MAX_ENCODED_LEN;
1982 struct ds3_multipart_upload_info ui = {
1983 .encoded = bl.append_hole(size).c_str(), .encoded_length = size};
1984 int ret = ds3_upload_get_info(&ui, bucket->get_name().c_str(),
1985 get_upload_id().c_str(), store->ds3);
1986
1987 if (ret != 0) {
1988 if (ret == -ENOENT) {
1989 ret = -ERR_NO_SUCH_UPLOAD;
1990 }
1991 return ret;
1992 }
1993
1994 multipart_upload_info upload_info;
1995 rgw_bucket_dir_entry ent;
1996 Attrs decoded_attrs;
1997 auto iter = bl.cbegin();
1998 ent.decode(iter);
1999 decode(decoded_attrs, iter);
2000   ldpp_dout(dpp, 20) << "DEBUG: decoded_attrs count=" << decoded_attrs.size() << dendl;
2001
2002 if (attrs) {
2003 *attrs = decoded_attrs;
2004 if (!rule || *rule != nullptr) {
2005 // placement was cached; don't actually read
2006 return 0;
2007 }
2008 }
2009
2010 // Now decode the placement rule
2011 decode(upload_info, iter);
2012 placement = upload_info.dest_placement;
2013 *rule = &placement;
2014
2015 return 0;
2016}
2017
2018std::unique_ptr<Writer> DaosMultipartUpload::get_writer(
2019 const DoutPrefixProvider* dpp, optional_yield y,
2020 rgw::sal::Object* obj, const rgw_user& owner,
2021 const rgw_placement_rule* ptail_placement_rule, uint64_t part_num,
2022 const std::string& part_num_str) {
2023 ldpp_dout(dpp, 20) << "DaosMultipartUpload::get_writer(): enter part="
2024                      << part_num << " obj=" << obj->get_name() << dendl;
2025 return std::make_unique<DaosMultipartWriter>(
2026 dpp, y, this, obj, store, owner, ptail_placement_rule,
2027 part_num, part_num_str);
2028}
2029
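// DaosMultipartWriter: prepare() opens the backing DAOS part object,
// process() writes incoming data at the given offset and tracks the actual
// part size, and complete() encodes the part's metadata (size, ETag,
// compression info) into the upload's part index via ds3_part_set_info().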
2030DaosMultipartWriter::~DaosMultipartWriter() {
2031 if (is_open()) ds3_part_close(ds3p);
2032}
2033
2034int DaosMultipartWriter::prepare(optional_yield y) {
2035 ldpp_dout(dpp, 20) << "DaosMultipartWriter::prepare(): enter part="
2036 << part_num_str << dendl;
2037 int ret = ds3_part_open(get_bucket_name().c_str(), upload_id.c_str(),
2038 part_num, true, &ds3p, store->ds3);
2039 if (ret == -ENOENT) {
2040 ret = -ERR_NO_SUCH_UPLOAD;
2041 }
2042 return ret;
2043}
2044
2045const std::string& DaosMultipartWriter::get_bucket_name() {
2046 return static_cast<DaosMultipartUpload*>(upload)->get_bucket_name();
2047}
2048
2049int DaosMultipartWriter::process(bufferlist&& data, uint64_t offset) {
2050 ldpp_dout(dpp, 20) << "DaosMultipartWriter::process(): enter part="
2051 << part_num_str << " offset=" << offset << dendl;
2052 if (data.length() == 0) {
2053 return 0;
2054 }
2055
2056 uint64_t size = data.length();
2057 int ret =
2058 ds3_part_write(data.c_str(), offset, &size, ds3p, store->ds3, nullptr);
2059 if (ret == 0) {
2060 // XXX: Combine multiple streams into one as motr does
2061 actual_part_size += size;
2062 } else {
2063 ldpp_dout(dpp, 0) << "ERROR: failed to write into part ("
2064 << get_bucket_name() << ", " << upload_id << ", "
2065 << part_num << "): ret=" << ret << dendl;
2066 }
2067 return ret;
2068}
2069
2070int DaosMultipartWriter::complete(
2071 size_t accounted_size, const std::string& etag, ceph::real_time* mtime,
2072 ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs,
2073 ceph::real_time delete_at, const char* if_match, const char* if_nomatch,
2074 const std::string* user_data, rgw_zone_set* zones_trace, bool* canceled,
2075 optional_yield y) {
2076 ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): enter part="
2077 << part_num_str << dendl;
2078
2079 // Add an entry into part index
2080 bufferlist bl;
2081 RGWUploadPartInfo info;
2082 info.num = part_num;
2083 info.etag = etag;
2084 info.size = actual_part_size;
2085 info.accounted_size = accounted_size;
2086 info.modified = real_clock::now();
2087
2088 bool compressed;
2089 int ret = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
2090 ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): compression ret="
2091 << ret << dendl;
2092 if (ret != 0) {
2093 ldpp_dout(dpp, 1) << "cannot get compression info" << dendl;
2094 return ret;
2095 }
2096 encode(info, bl);
2097 encode(attrs, bl);
2098   ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): entry size="
2099 << bl.length() << dendl;
2100
2101 struct ds3_multipart_part_info part_info = {.part_num = part_num,
2102 .encoded = bl.c_str(),
2103 .encoded_length = bl.length()};
2104
2105 ret = ds3_part_set_info(&part_info, ds3p, store->ds3, nullptr);
2106
2107 if (ret != 0) {
2108 ldpp_dout(dpp, 0) << "ERROR: failed to set part info (" << get_bucket_name()
2109 << ", " << upload_id << ", " << part_num
2110 << "): ret=" << ret << dendl;
2111     if (ret == -ENOENT) {
2112 ret = -ERR_NO_SUCH_UPLOAD;
2113 }
2114 }
2115
2116 return ret;
2117}
2118
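// IAM roles and OIDC providers are not implemented for the DAOS backend;
// the stubs below return empty pointers or DAOS_NOT_IMPLEMENTED_LOG().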
2119std::unique_ptr<RGWRole> DaosStore::get_role(
2120 std::string name, std::string tenant, std::string path,
2121 std::string trust_policy, std::string max_session_duration_str,
2122 std::multimap<std::string, std::string> tags) {
2123 RGWRole* p = nullptr;
2124 return std::unique_ptr<RGWRole>(p);
2125}
2126
2127std::unique_ptr<RGWRole> DaosStore::get_role(const RGWRoleInfo& info) {
2128 RGWRole* p = nullptr;
2129 return std::unique_ptr<RGWRole>(p);
2130}
2131
2132std::unique_ptr<RGWRole> DaosStore::get_role(std::string id) {
2133 RGWRole* p = nullptr;
2134 return std::unique_ptr<RGWRole>(p);
2135}
2136
2137int DaosStore::get_roles(const DoutPrefixProvider* dpp, optional_yield y,
2138 const std::string& path_prefix,
2139 const std::string& tenant,
2140 vector<std::unique_ptr<RGWRole>>& roles) {
2141 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2142}
2143
2144std::unique_ptr<RGWOIDCProvider> DaosStore::get_oidc_provider() {
2145 RGWOIDCProvider* p = nullptr;
2146 return std::unique_ptr<RGWOIDCProvider>(p);
2147}
2148
2149int DaosStore::get_oidc_providers(
2150 const DoutPrefixProvider* dpp, const std::string& tenant,
2151 vector<std::unique_ptr<RGWOIDCProvider>>& providers) {
2152 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2153}
2154
2155std::unique_ptr<MultipartUpload> DaosBucket::get_multipart_upload(
2156 const std::string& oid, std::optional<std::string> upload_id,
2157 ACLOwner owner, ceph::real_time mtime) {
2158 return std::make_unique<DaosMultipartUpload>(store, this, oid, upload_id,
2159 owner, mtime);
2160}
2161
2162std::unique_ptr<Writer> DaosStore::get_append_writer(
2163 const DoutPrefixProvider* dpp, optional_yield y,
2164 rgw::sal::Object* obj, const rgw_user& owner,
2165 const rgw_placement_rule* ptail_placement_rule,
2166 const std::string& unique_tag, uint64_t position,
2167 uint64_t* cur_accounted_size) {
2168 DAOS_NOT_IMPLEMENTED_LOG(dpp);
2169 return nullptr;
2170}
2171
2172std::unique_ptr<Writer> DaosStore::get_atomic_writer(
2173 const DoutPrefixProvider* dpp, optional_yield y,
2174 rgw::sal::Object* obj, const rgw_user& owner,
2175 const rgw_placement_rule* ptail_placement_rule, uint64_t olh_epoch,
2176 const std::string& unique_tag) {
2177 ldpp_dout(dpp, 20) << "get_atomic_writer" << dendl;
2178 return std::make_unique<DaosAtomicWriter>(dpp, y, obj, this,
2179 owner, ptail_placement_rule,
2180 olh_epoch, unique_tag);
2181}
2182
2183const std::string& DaosStore::get_compression_type(
2184 const rgw_placement_rule& rule) {
2185 return zone.zone_params->get_compression_type(rule);
2186}
2187
2188bool DaosStore::valid_placement(const rgw_placement_rule& rule) {
2189 return zone.zone_params->valid_placement(rule);
2190}
2191
2192std::unique_ptr<User> DaosStore::get_user(const rgw_user& u) {
2193   ldout(cctx, 20) << "DEBUG: get_user: user=" << u.to_str() << dendl;
2194 return std::make_unique<DaosUser>(this, u);
2195}
2196
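// User lookups by access key and by email follow the same pattern: the
// ds3 call fills a buffer with an encoded DaosUserInfo, which is decoded
// and wrapped in a DaosUser.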
2197int DaosStore::get_user_by_access_key(const DoutPrefixProvider* dpp,
2198 const std::string& key, optional_yield y,
2199 std::unique_ptr<User>* user) {
2200 // Initialize ds3_user_info
2201 bufferlist bl;
2202 uint64_t size = DS3_MAX_ENCODED_LEN;
2203 struct ds3_user_info user_info = {.encoded = bl.append_hole(size).c_str(),
2204 .encoded_length = size};
2205
2206 int ret = ds3_user_get_by_key(key.c_str(), &user_info, ds3, nullptr);
2207
2208 if (ret != 0) {
2209 ldpp_dout(dpp, 0) << "Error: ds3_user_get_by_key failed, key=" << key
2210 << " ret=" << ret << dendl;
2211 return ret;
2212 }
2213
2214 // Decode
2215 DaosUserInfo duinfo;
2216 bufferlist& blr = bl;
2217 auto iter = blr.cbegin();
2218 duinfo.decode(iter);
2219
2220   user->reset(new DaosUser(this, duinfo.info));
2226 return 0;
2227}
2228
2229int DaosStore::get_user_by_email(const DoutPrefixProvider* dpp,
2230 const std::string& email, optional_yield y,
2231 std::unique_ptr<User>* user) {
2232 // Initialize ds3_user_info
2233 bufferlist bl;
2234 uint64_t size = DS3_MAX_ENCODED_LEN;
2235 struct ds3_user_info user_info = {.encoded = bl.append_hole(size).c_str(),
2236 .encoded_length = size};
2237
2238 int ret = ds3_user_get_by_email(email.c_str(), &user_info, ds3, nullptr);
2239
2240 if (ret != 0) {
2241 ldpp_dout(dpp, 0) << "Error: ds3_user_get_by_email failed, email=" << email
2242 << " ret=" << ret << dendl;
2243 return ret;
2244 }
2245
2246 // Decode
2247 DaosUserInfo duinfo;
2248 bufferlist& blr = bl;
2249 auto iter = blr.cbegin();
2250 duinfo.decode(iter);
2251
2252   user->reset(new DaosUser(this, duinfo.info));
2258 return 0;
2259}
2260
2261int DaosStore::get_user_by_swift(const DoutPrefixProvider* dpp,
2262 const std::string& user_str, optional_yield y,
2263 std::unique_ptr<User>* user) {
2264 /* Swift keys and subusers are not supported for now */
2265 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2266}
2267
2268std::unique_ptr<Object> DaosStore::get_object(const rgw_obj_key& k) {
2269 return std::make_unique<DaosObject>(this, k);
2270}
2271
2272inline std::ostream& operator<<(std::ostream& out, const rgw_user* u) {
2273 std::string s;
2274 if (u != nullptr)
2275 u->to_str(s);
2276 else
2277 s = "(nullptr)";
2278 return out << s;
2279}
2280
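// get_bucket(): construct a DaosBucket and load its info from the bucket
// index; the RGWBucketInfo overload trusts the caller-provided info and
// skips the load.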
2281int DaosStore::get_bucket(const DoutPrefixProvider* dpp, User* u,
2282 const rgw_bucket& b, std::unique_ptr<Bucket>* bucket,
2283 optional_yield y) {
2284 ldpp_dout(dpp, 20) << "DEBUG: get_bucket1: User: " << u << dendl;
2285 int ret;
2286 Bucket* bp;
2287
2288 bp = new DaosBucket(this, b, u);
2289 ret = bp->load_bucket(dpp, y);
2290 if (ret != 0) {
2291 delete bp;
2292 return ret;
2293 }
2294
2295 bucket->reset(bp);
2296 return 0;
2297}
2298
2299int DaosStore::get_bucket(User* u, const RGWBucketInfo& i,
2300 std::unique_ptr<Bucket>* bucket) {
2301 DaosBucket* bp;
2302
2303 bp = new DaosBucket(this, i, u);
2304 /* Don't need to fetch the bucket info, use the provided one */
2305
2306 bucket->reset(bp);
2307 return 0;
2308}
2309
2310int DaosStore::get_bucket(const DoutPrefixProvider* dpp, User* u,
2311 const std::string& tenant, const std::string& name,
2312 std::unique_ptr<Bucket>* bucket, optional_yield y) {
2313 ldpp_dout(dpp, 20) << "get_bucket" << dendl;
2314 rgw_bucket b;
2315
2316 b.tenant = tenant;
2317 b.name = name;
2318
2319 return get_bucket(dpp, u, b, bucket, y);
2320}
2321
2322bool DaosStore::is_meta_master() { return true; }
2323
2324int DaosStore::forward_request_to_master(const DoutPrefixProvider* dpp,
2325 User* user, obj_version* objv,
2326 bufferlist& in_data, JSONParser* jp,
2327 req_info& info, optional_yield y) {
2328 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2329}
2330
2331int DaosStore::forward_iam_request_to_master(const DoutPrefixProvider* dpp,
2332 const RGWAccessKey& key,
2333 obj_version* objv,
2334 bufferlist& in_data,
2335 RGWXMLDecoder::XMLParser* parser,
2336 req_info& info, optional_yield y) {
2337 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2338}
2339
2340std::string DaosStore::zone_unique_id(uint64_t unique_num) { return ""; }
2341
2342std::string DaosStore::zone_unique_trans_id(const uint64_t unique_num) {
2343 return "";
2344}
2345
2346int DaosStore::cluster_stat(RGWClusterStat& stats) {
2347 return DAOS_NOT_IMPLEMENTED_LOG(nullptr);
2348}
2349
2350std::unique_ptr<Lifecycle> DaosStore::get_lifecycle(void) {
2351 DAOS_NOT_IMPLEMENTED_LOG(nullptr);
2352   return nullptr;
2353}
2354
2355std::unique_ptr<Completions> DaosStore::get_completions(void) {
2356 DAOS_NOT_IMPLEMENTED_LOG(nullptr);
2357   return nullptr;
2358}
2359
2360std::unique_ptr<Notification> DaosStore::get_notification(
2361 rgw::sal::Object* obj, rgw::sal::Object* src_obj, struct req_state* s,
2362 rgw::notify::EventType event_type, const std::string* object_name) {
2363 return std::make_unique<DaosNotification>(obj, src_obj, event_type);
2364}
2365
2366std::unique_ptr<Notification> DaosStore::get_notification(
2367 const DoutPrefixProvider* dpp, Object* obj, Object* src_obj,
2368 rgw::notify::EventType event_type, rgw::sal::Bucket* _bucket,
2369 std::string& _user_id, std::string& _user_tenant, std::string& _req_id,
2370 optional_yield y) {
2371 ldpp_dout(dpp, 20) << "get_notification" << dendl;
2372 return std::make_unique<DaosNotification>(obj, src_obj, event_type);
2373}
2374
2375int DaosStore::log_usage(const DoutPrefixProvider* dpp,
2376 map<rgw_user_bucket, RGWUsageBatch>& usage_info) {
2377 DAOS_NOT_IMPLEMENTED_LOG(dpp);
2378 return 0;
2379}
2380
2381int DaosStore::log_op(const DoutPrefixProvider* dpp, string& oid,
2382 bufferlist& bl) {
2383 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2384}
2385
2386int DaosStore::register_to_service_map(const DoutPrefixProvider* dpp,
2387 const string& daemon_type,
2388 const map<string, string>& meta) {
2389 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2390}
2391
2392void DaosStore::get_quota(RGWQuota& quota) {
2393 // XXX: Not handled for the first pass
2394 return;
2395}
2396
2397void DaosStore::get_ratelimit(RGWRateLimitInfo& bucket_ratelimit,
2398 RGWRateLimitInfo& user_ratelimit,
2399 RGWRateLimitInfo& anon_ratelimit) {
2400 return;
2401}
2402
2403int DaosStore::set_buckets_enabled(const DoutPrefixProvider* dpp,
2404 std::vector<rgw_bucket>& buckets,
2405 bool enabled) {
2406 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2407}
2408
2409int DaosStore::get_sync_policy_handler(const DoutPrefixProvider* dpp,
2410 std::optional<rgw_zone_id> zone,
2411 std::optional<rgw_bucket> bucket,
2412 RGWBucketSyncPolicyHandlerRef* phandler,
2413 optional_yield y) {
2414 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2415}
2416
2417RGWDataSyncStatusManager* DaosStore::get_data_sync_manager(
2418 const rgw_zone_id& source_zone) {
2419 DAOS_NOT_IMPLEMENTED_LOG(nullptr);
2420 return 0;
2421}
2422
2423int DaosStore::read_all_usage(
2424 const DoutPrefixProvider* dpp, uint64_t start_epoch, uint64_t end_epoch,
2425 uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter,
2426 map<rgw_user_bucket, rgw_usage_log_entry>& usage) {
2427 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2428}
2429
2430int DaosStore::trim_all_usage(const DoutPrefixProvider* dpp,
2431 uint64_t start_epoch, uint64_t end_epoch) {
2432 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2433}
2434
2435int DaosStore::get_config_key_val(string name, bufferlist* bl) {
2436 return DAOS_NOT_IMPLEMENTED_LOG(nullptr);
2437}
2438
2439int DaosStore::meta_list_keys_init(const DoutPrefixProvider* dpp,
2440 const string& section, const string& marker,
2441 void** phandle) {
2442 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2443}
2444
2445int DaosStore::meta_list_keys_next(const DoutPrefixProvider* dpp, void* handle,
2446 int max, list<string>& keys,
2447 bool* truncated) {
2448 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2449}
2450
2451void DaosStore::meta_list_keys_complete(void* handle) { return; }
2452
2453std::string DaosStore::meta_get_marker(void* handle) { return ""; }
2454
2455int DaosStore::meta_remove(const DoutPrefixProvider* dpp, string& metadata_key,
2456 optional_yield y) {
2457 return DAOS_NOT_IMPLEMENTED_LOG(dpp);
2458}
2459
2460std::string DaosStore::get_cluster_id(const DoutPrefixProvider* dpp,
2461 optional_yield y) {
2462 DAOS_NOT_IMPLEMENTED_LOG(dpp);
2463 return "";
2464}
2465
2466} // namespace rgw::sal
2467
2468extern "C" {
2469
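// C factory hook, presumably invoked by the SAL layer when the DAOS
// backend is selected, returning a new DaosStore for the given CephContext.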
2470void* newDaosStore(CephContext* cct) {
2471 return new rgw::sal::DaosStore(cct);
2472}
2473}