]>
Commit | Line | Data |
---|---|---|
1e59de90 TL |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=2 sw=2 expandtab ft=cpp | |
3 | ||
4 | /* | |
5 | * Ceph - scalable distributed file system | |
6 | * | |
7 | * SAL implementation for the CORTX DAOS backend | |
8 | * | |
9 | * Copyright (C) 2022 Seagate Technology LLC and/or its Affiliates | |
10 | * | |
11 | * This is free software; you can redistribute it and/or | |
12 | * modify it under the terms of the GNU Lesser General Public | |
13 | * License version 2.1, as published by the Free Software | |
14 | * Foundation. See file COPYING. | |
15 | * | |
16 | */ | |
17 | ||
18 | #include "rgw_sal_daos.h" | |
19 | ||
20 | #include <errno.h> | |
21 | #include <stdlib.h> | |
22 | #include <unistd.h> | |
23 | ||
24 | #include <filesystem> | |
25 | #include <system_error> | |
26 | ||
27 | #include "common/Clock.h" | |
28 | #include "common/errno.h" | |
29 | #include "rgw_bucket.h" | |
30 | #include "rgw_compression.h" | |
31 | #include "rgw_sal.h" | |
32 | ||
33 | #define dout_subsys ceph_subsys_rgw | |
34 | ||
35 | using std::list; | |
36 | using std::map; | |
37 | using std::set; | |
38 | using std::string; | |
39 | using std::vector; | |
40 | ||
41 | namespace fs = std::filesystem; | |
42 | ||
43 | namespace rgw::sal { | |
44 | ||
45 | using ::ceph::decode; | |
46 | using ::ceph::encode; | |
47 | ||
48 | int DaosUser::list_buckets(const DoutPrefixProvider* dpp, const string& marker, | |
49 | const string& end_marker, uint64_t max, | |
50 | bool need_stats, BucketList& buckets, | |
51 | optional_yield y) { | |
52 | ldpp_dout(dpp, 20) << "DEBUG: list_user_buckets: marker=" << marker | |
53 | << " end_marker=" << end_marker << " max=" << max << dendl; | |
54 | int ret = 0; | |
55 | bool is_truncated = false; | |
56 | buckets.clear(); | |
57 | vector<struct ds3_bucket_info> bucket_infos(max); | |
58 | daos_size_t bcount = bucket_infos.size(); | |
59 | vector<vector<uint8_t>> values(bcount, vector<uint8_t>(DS3_MAX_ENCODED_LEN)); | |
60 | for (daos_size_t i = 0; i < bcount; i++) { | |
61 | bucket_infos[i].encoded = values[i].data(); | |
62 | bucket_infos[i].encoded_length = values[i].size(); | |
63 | } | |
64 | ||
65 | char daos_marker[DS3_MAX_BUCKET_NAME]; | |
66 | std::strncpy(daos_marker, marker.c_str(), sizeof(daos_marker)); | |
67 | ret = ds3_bucket_list(&bcount, bucket_infos.data(), daos_marker, | |
68 | &is_truncated, store->ds3, nullptr); | |
69 | ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_list: bcount=" << bcount | |
70 | << " ret=" << ret << dendl; | |
71 | if (ret != 0) { | |
72 | ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_list failed!" << ret << dendl; | |
73 | return ret; | |
74 | } | |
75 | ||
76 | bucket_infos.resize(bcount); | |
77 | values.resize(bcount); | |
78 | ||
79 | for (const auto& bi : bucket_infos) { | |
80 | DaosBucketInfo dbinfo; | |
81 | bufferlist bl; | |
82 | bl.append(reinterpret_cast<char*>(bi.encoded), bi.encoded_length); | |
83 | auto iter = bl.cbegin(); | |
84 | dbinfo.decode(iter); | |
85 | buckets.add(std::make_unique<DaosBucket>(this->store, dbinfo.info, this)); | |
86 | } | |
87 | ||
88 | buckets.set_truncated(is_truncated); | |
89 | return 0; | |
90 | } | |
91 | ||
// Create (or adopt) the bucket `b` for this user.
//
// If the bucket already exists, `*existed` is set and the existing bucket's
// swift versioning location / placement rule are inherited instead of the
// caller-supplied ones; no backend create call is made.  Otherwise a new
// DaosBucket is built from the caller-supplied info and persisted through
// ds3_bucket_create().  On success `*bucket_out` receives the bucket.
int DaosUser::create_bucket(
    const DoutPrefixProvider* dpp, const rgw_bucket& b,
    const std::string& zonegroup_id, rgw_placement_rule& placement_rule,
    std::string& swift_ver_location, const RGWQuotaInfo* pquota_info,
    const RGWAccessControlPolicy& policy, Attrs& attrs, RGWBucketInfo& info,
    obj_version& ep_objv, bool exclusive, bool obj_lock_enabled, bool* existed,
    req_info& req_info, std::unique_ptr<Bucket>* bucket_out, optional_yield y) {
  ldpp_dout(dpp, 20) << "DEBUG: create_bucket:" << b.name << dendl;
  int ret;
  std::unique_ptr<Bucket> bucket;

  // Look up the bucket. Create it if it doesn't exist.
  ret = this->store->get_bucket(dpp, this, b, &bucket, y);
  if (ret != 0 && ret != -ENOENT) {
    return ret;
  }

  if (ret != -ENOENT) {
    // Bucket already exists: keep its stored swift versioning location and
    // merge its placement rule into the caller's.
    // NOTE(review): even with `exclusive` set this path falls through and
    // returns 0 — confirm whether S3 create-exclusive should yield -EEXIST.
    *existed = true;
    if (swift_ver_location.empty()) {
      swift_ver_location = bucket->get_info().swift_ver_location;
    }
    placement_rule.inherit_from(bucket->get_info().placement_rule);

    // TODO: ACL policy
    // // don't allow changes to the acl policy
    // RGWAccessControlPolicy old_policy(ctx());
    // int rc = rgw_op_get_bucket_policy_from_attr(
    // dpp, this, u, bucket->get_attrs(), &old_policy, y);
    // if (rc >= 0 && old_policy != policy) {
    // bucket_out->swap(bucket);
    // return -EEXIST;
    //}
  } else {
    // New bucket: this backend only supports the default placement.
    placement_rule.name = "default";
    placement_rule.storage_class = "STANDARD";
    bucket = std::make_unique<DaosBucket>(store, b, this);
    bucket->set_attrs(attrs);

    *existed = false;
  }

  // TODO: how to handle zone and multi-site.

  if (!*existed) {
    // Fill in the authoritative bucket info before encoding it for DAOS.
    info.placement_rule = placement_rule;
    info.bucket = b;
    info.owner = this->get_info().user_id;
    info.zonegroup = zonegroup_id;
    info.creation_time = ceph::real_clock::now();
    if (obj_lock_enabled)
      info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
    bucket->set_version(ep_objv);
    bucket->get_info() = info;

    // Create a new bucket:
    DaosBucket* daos_bucket = static_cast<DaosBucket*>(bucket.get());
    bufferlist bl;
    std::unique_ptr<struct ds3_bucket_info> bucket_info =
        daos_bucket->get_encoded_info(bl, ceph::real_time());
    ret = ds3_bucket_create(bucket->get_name().c_str(), bucket_info.get(),
                            nullptr, store->ds3, nullptr);
    if (ret != 0) {
      ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_create failed! ret=" << ret
                        << dendl;
      return ret;
    }
  } else {
    // Existing bucket: just refresh the in-memory version/info.
    bucket->set_version(ep_objv);
    bucket->get_info() = info;
  }

  bucket_out->swap(bucket);

  return ret;
}
168 | ||
// Reading user attributes is not supported by the DAOS backend yet.
int DaosUser::read_attrs(const DoutPrefixProvider* dpp, optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
172 | ||
// User storage stats are not supported by the DAOS backend yet.
int DaosUser::read_stats(const DoutPrefixProvider* dpp, optional_yield y,
                         RGWStorageStats* stats,
                         ceph::real_time* last_stats_sync,
                         ceph::real_time* last_stats_update) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
179 | ||
/* stats - Not for first pass */
// Async user stats are not supported by the DAOS backend yet.
int DaosUser::read_stats_async(const DoutPrefixProvider* dpp,
                               RGWGetUserStats_CB* cb) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
185 | ||
// Stats flushing is not supported by the DAOS backend yet.
int DaosUser::complete_flush_stats(const DoutPrefixProvider* dpp,
                                   optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
190 | ||
// Usage-log queries are not supported by the DAOS backend yet.
int DaosUser::read_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
                         uint64_t end_epoch, uint32_t max_entries,
                         bool* is_truncated, RGWUsageIter& usage_iter,
                         map<rgw_user_bucket, rgw_usage_log_entry>& usage) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
197 | ||
// Usage-log trimming is not supported by the DAOS backend yet.
int DaosUser::trim_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
                         uint64_t end_epoch) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
202 | ||
203 | int DaosUser::load_user(const DoutPrefixProvider* dpp, optional_yield y) { | |
204 | const string name = info.user_id.to_str(); | |
205 | ldpp_dout(dpp, 20) << "DEBUG: load_user, name=" << name << dendl; | |
206 | ||
207 | DaosUserInfo duinfo; | |
208 | int ret = read_user(dpp, name, &duinfo); | |
209 | if (ret != 0) { | |
210 | ldpp_dout(dpp, 0) << "ERROR: load_user failed, name=" << name << dendl; | |
211 | return ret; | |
212 | } | |
213 | ||
214 | info = duinfo.info; | |
215 | attrs = duinfo.attrs; | |
216 | objv_tracker.read_version = duinfo.user_version; | |
217 | return 0; | |
218 | } | |
219 | ||
220 | int DaosUser::merge_and_store_attrs(const DoutPrefixProvider* dpp, | |
221 | Attrs& new_attrs, optional_yield y) { | |
222 | ldpp_dout(dpp, 20) << "DEBUG: merge_and_store_attrs, new_attrs=" << new_attrs | |
223 | << dendl; | |
224 | for (auto& it : new_attrs) { | |
225 | attrs[it.first] = it.second; | |
226 | } | |
227 | return store_user(dpp, y, false); | |
228 | } | |
229 | ||
230 | int DaosUser::store_user(const DoutPrefixProvider* dpp, optional_yield y, | |
231 | bool exclusive, RGWUserInfo* old_info) { | |
232 | const string name = info.user_id.to_str(); | |
233 | ldpp_dout(dpp, 10) << "DEBUG: Store_user(): User name=" << name << dendl; | |
234 | ||
235 | // Read user | |
236 | int ret = 0; | |
237 | struct DaosUserInfo duinfo; | |
238 | ret = read_user(dpp, name, &duinfo); | |
239 | obj_version obj_ver = duinfo.user_version; | |
240 | std::unique_ptr<struct ds3_user_info> old_user_info; | |
241 | std::vector<const char*> old_access_ids; | |
242 | ||
243 | // Check if the user already exists | |
244 | if (ret == 0 && obj_ver.ver) { | |
245 | // already exists. | |
246 | ||
247 | if (old_info) { | |
248 | *old_info = duinfo.info; | |
249 | } | |
250 | ||
251 | if (objv_tracker.read_version.ver != obj_ver.ver) { | |
252 | // Object version mismatch.. return ECANCELED | |
253 | ret = -ECANCELED; | |
254 | ldpp_dout(dpp, 0) << "User Read version mismatch read_version=" | |
255 | << objv_tracker.read_version.ver | |
256 | << " obj_ver=" << obj_ver.ver << dendl; | |
257 | return ret; | |
258 | } | |
259 | ||
260 | if (exclusive) { | |
261 | // return | |
262 | return ret; | |
263 | } | |
264 | obj_ver.ver++; | |
265 | ||
266 | for (auto const& [id, key] : duinfo.info.access_keys) { | |
267 | old_access_ids.push_back(id.c_str()); | |
268 | } | |
269 | old_user_info.reset( | |
270 | new ds3_user_info{.name = duinfo.info.user_id.to_str().c_str(), | |
271 | .email = duinfo.info.user_email.c_str(), | |
272 | .access_ids = old_access_ids.data(), | |
273 | .access_ids_nr = old_access_ids.size()}); | |
274 | } else { | |
275 | obj_ver.ver = 1; | |
276 | obj_ver.tag = "UserTAG"; | |
277 | } | |
278 | ||
279 | bufferlist bl; | |
280 | std::unique_ptr<struct ds3_user_info> user_info = | |
281 | get_encoded_info(bl, obj_ver); | |
282 | ||
283 | ret = ds3_user_set(name.c_str(), user_info.get(), old_user_info.get(), | |
284 | store->ds3, nullptr); | |
285 | ||
286 | if (ret != 0) { | |
287 | ldpp_dout(dpp, 0) << "Error: ds3_user_set failed, name=" << name | |
288 | << " ret=" << ret << dendl; | |
289 | } | |
290 | ||
291 | return ret; | |
292 | } | |
293 | ||
294 | int DaosUser::read_user(const DoutPrefixProvider* dpp, std::string name, | |
295 | DaosUserInfo* duinfo) { | |
296 | // Initialize ds3_user_info | |
297 | bufferlist bl; | |
298 | uint64_t size = DS3_MAX_ENCODED_LEN; | |
299 | struct ds3_user_info user_info = {.encoded = bl.append_hole(size).c_str(), | |
300 | .encoded_length = size}; | |
301 | ||
302 | int ret = ds3_user_get(name.c_str(), &user_info, store->ds3, nullptr); | |
303 | ||
304 | if (ret != 0) { | |
305 | ldpp_dout(dpp, 0) << "Error: ds3_user_get failed, name=" << name | |
306 | << " ret=" << ret << dendl; | |
307 | return ret; | |
308 | } | |
309 | ||
310 | // Decode | |
311 | bufferlist& blr = bl; | |
312 | auto iter = blr.cbegin(); | |
313 | duinfo->decode(iter); | |
314 | return ret; | |
315 | } | |
316 | ||
// Encode this user (info + attrs + version) into `bl` and build the
// ds3_user_info the DAOS C API expects.  `bl` must outlive the returned
// struct, since `encoded` points into it; `access_ids` is a class member
// for the same reason.
//
// NOTE(review): `.name` is taken from a temporary — info.user_id.to_str()
// returns a std::string by value whose c_str() dangles as soon as this
// full expression ends, yet the returned struct (and its callers) keep the
// pointer.  Needs a persistent home for the name string — confirm and fix.
std::unique_ptr<struct ds3_user_info> DaosUser::get_encoded_info(
    bufferlist& bl, obj_version& obj_ver) {
  // Encode user data
  struct DaosUserInfo duinfo;
  duinfo.info = info;
  duinfo.attrs = attrs;
  duinfo.user_version = obj_ver;
  duinfo.encode(bl);

  // Initialize ds3_user_info
  access_ids.clear();
  for (auto const& [id, key] : info.access_keys) {
    access_ids.push_back(id.c_str());
  }
  return std::unique_ptr<struct ds3_user_info>(
      new ds3_user_info{.name = info.user_id.to_str().c_str(),
                        .email = info.user_email.c_str(),
                        .access_ids = access_ids.data(),
                        .access_ids_nr = access_ids.size(),
                        .encoded = bl.c_str(),
                        .encoded_length = bl.length()});
}
339 | ||
340 | int DaosUser::remove_user(const DoutPrefixProvider* dpp, optional_yield y) { | |
341 | const string name = info.user_id.to_str(); | |
342 | ||
343 | // TODO: the expectation is that the object version needs to be passed in as a | |
344 | // method arg see int DB::remove_user(const DoutPrefixProvider *dpp, | |
345 | // RGWUserInfo& uinfo, RGWObjVersionTracker *pobjv) | |
346 | obj_version obj_ver; | |
347 | bufferlist bl; | |
348 | std::unique_ptr<struct ds3_user_info> user_info = | |
349 | get_encoded_info(bl, obj_ver); | |
350 | ||
351 | // Remove user | |
352 | int ret = ds3_user_remove(name.c_str(), user_info.get(), store->ds3, nullptr); | |
353 | if (ret != 0) { | |
354 | ldpp_dout(dpp, 0) << "Error: ds3_user_set failed, name=" << name | |
355 | << " ret=" << ret << dendl; | |
356 | } | |
357 | return ret; | |
358 | } | |
359 | ||
// Ensure the underlying DAOS bucket handle is released on destruction.
DaosBucket::~DaosBucket() { close(nullptr); }
361 | ||
362 | int DaosBucket::open(const DoutPrefixProvider* dpp) { | |
363 | ldpp_dout(dpp, 20) << "DEBUG: open, name=" << info.bucket.name.c_str() | |
364 | << dendl; | |
365 | // Idempotent | |
366 | if (is_open()) { | |
367 | return 0; | |
368 | } | |
369 | ||
370 | int ret = ds3_bucket_open(get_name().c_str(), &ds3b, store->ds3, nullptr); | |
371 | ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_open, name=" << get_name() | |
372 | << ", ret=" << ret << dendl; | |
373 | ||
374 | return ret; | |
375 | } | |
376 | ||
377 | int DaosBucket::close(const DoutPrefixProvider* dpp) { | |
378 | ldpp_dout(dpp, 20) << "DEBUG: close" << dendl; | |
379 | // Idempotent | |
380 | if (!is_open()) { | |
381 | return 0; | |
382 | } | |
383 | ||
384 | int ret = ds3_bucket_close(ds3b, nullptr); | |
385 | ds3b = nullptr; | |
386 | ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_close ret=" << ret << dendl; | |
387 | ||
388 | return ret; | |
389 | } | |
390 | ||
391 | std::unique_ptr<struct ds3_bucket_info> DaosBucket::get_encoded_info( | |
392 | bufferlist& bl, ceph::real_time _mtime) { | |
393 | DaosBucketInfo dbinfo; | |
394 | dbinfo.info = info; | |
395 | dbinfo.bucket_attrs = attrs; | |
396 | dbinfo.mtime = _mtime; | |
397 | dbinfo.bucket_version = bucket_version; | |
398 | dbinfo.encode(bl); | |
399 | ||
400 | auto bucket_info = std::make_unique<struct ds3_bucket_info>(); | |
401 | bucket_info->encoded = bl.c_str(); | |
402 | bucket_info->encoded_length = bl.length(); | |
403 | std::strncpy(bucket_info->name, get_name().c_str(), sizeof(bucket_info->name)); | |
404 | return bucket_info; | |
405 | } | |
406 | ||
407 | int DaosBucket::remove_bucket(const DoutPrefixProvider* dpp, | |
408 | bool delete_children, bool forward_to_master, | |
409 | req_info* req_info, optional_yield y) { | |
410 | ldpp_dout(dpp, 20) << "DEBUG: remove_bucket, delete_children=" | |
411 | ||
412 | << delete_children | |
413 | ||
414 | << " forward_to_master=" << forward_to_master << dendl; | |
415 | ||
416 | return ds3_bucket_destroy(get_name().c_str(), delete_children, store->ds3, | |
417 | nullptr); | |
418 | } | |
419 | ||
420 | int DaosBucket::remove_bucket_bypass_gc(int concurrent_max, | |
421 | bool keep_index_consistent, | |
422 | optional_yield y, | |
423 | const DoutPrefixProvider* dpp) { | |
424 | ldpp_dout(dpp, 20) << "DEBUG: remove_bucket_bypass_gc, concurrent_max=" | |
425 | ||
426 | << concurrent_max | |
427 | ||
428 | << " keep_index_consistent=" << keep_index_consistent | |
429 | ||
430 | << dendl; | |
431 | return ds3_bucket_destroy(get_name().c_str(), true, store->ds3, nullptr); | |
432 | } | |
433 | ||
434 | int DaosBucket::put_info(const DoutPrefixProvider* dpp, bool exclusive, | |
435 | ceph::real_time _mtime) { | |
436 | ldpp_dout(dpp, 20) << "DEBUG: put_info(): bucket name=" << get_name() | |
437 | << dendl; | |
438 | ||
439 | int ret = open(dpp); | |
440 | if (ret != 0) { | |
441 | return ret; | |
442 | } | |
443 | ||
444 | bufferlist bl; | |
445 | std::unique_ptr<struct ds3_bucket_info> bucket_info = | |
446 | get_encoded_info(bl, ceph::real_time()); | |
447 | ||
448 | ret = ds3_bucket_set_info(bucket_info.get(), ds3b, nullptr); | |
449 | if (ret != 0) { | |
450 | ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_set_info failed: " << ret << dendl; | |
451 | } | |
452 | return ret; | |
453 | } | |
454 | ||
// Load this bucket's stored record from DAOS into the in-memory state
// (info, attrs, mtime, version).  `get_stats` is currently unused.
int DaosBucket::load_bucket(const DoutPrefixProvider* dpp, optional_yield y,
                            bool get_stats) {
  ldpp_dout(dpp, 20) << "DEBUG: load_bucket(): bucket name=" << get_name()
                     << dendl;
  int ret = open(dpp);
  if (ret != 0) {
    return ret;
  }

  // Reserve a decode buffer inside the bufferlist and let the backend fill
  // it with the encoded bucket record.
  bufferlist bl;
  DaosBucketInfo dbinfo;
  uint64_t size = DS3_MAX_ENCODED_LEN;
  struct ds3_bucket_info bucket_info = {.encoded = bl.append_hole(size).c_str(),
                                        .encoded_length = size};

  ret = ds3_bucket_get_info(&bucket_info, ds3b, nullptr);
  if (ret != 0) {
    ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_get_info failed: " << ret << dendl;
    return ret;
  }

  auto iter = bl.cbegin();
  dbinfo.decode(iter);
  info = dbinfo.info;
  // This backend only supports the default placement; override whatever was
  // stored in the decoded record.
  rgw_placement_rule placement_rule;
  placement_rule.name = "default";
  placement_rule.storage_class = "STANDARD";
  info.placement_rule = placement_rule;

  attrs = dbinfo.bucket_attrs;
  mtime = dbinfo.mtime;
  bucket_version = dbinfo.bucket_version;
  return ret;
}
489 | ||
/* stats - Not for first pass */
// Bucket stats are not supported by the DAOS backend yet.
int DaosBucket::read_stats(const DoutPrefixProvider* dpp,
                           const bucket_index_layout_generation& idx_layout,
                           int shard_id, std::string* bucket_ver,
                           std::string* master_ver,
                           std::map<RGWObjCategory, RGWStorageStats>& stats,
                           std::string* max_marker, bool* syncstopped) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
499 | ||
// Async bucket stats are not supported by the DAOS backend yet.
int DaosBucket::read_stats_async(
    const DoutPrefixProvider* dpp,
    const bucket_index_layout_generation& idx_layout, int shard_id,
    RGWGetBucketStats_CB* ctx) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
506 | ||
// User-stats syncing is not supported by the DAOS backend yet.
int DaosBucket::sync_user_stats(const DoutPrefixProvider* dpp,
                                optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
511 | ||
// Container stats updates are not supported by the DAOS backend yet.
int DaosBucket::update_container_stats(const DoutPrefixProvider* dpp) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
515 | ||
// Shard checking is not supported by the DAOS backend yet.
int DaosBucket::check_bucket_shards(const DoutPrefixProvider* dpp) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
519 | ||
// Changing bucket ownership is not supported by the DAOS backend yet.
int DaosBucket::chown(const DoutPrefixProvider* dpp, User& new_user,
                      optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
524 | ||
525 | /* Make sure to call load_bucket() if you need it first */ | |
526 | bool DaosBucket::is_owner(User* user) { | |
527 | return (info.owner.compare(user->get_id()) == 0); | |
528 | } | |
529 | ||
int DaosBucket::check_empty(const DoutPrefixProvider* dpp, optional_yield y) {
  /* XXX: Check if bucket contains any objects */
  // Emptiness checking is not supported by the DAOS backend yet.
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
534 | ||
// Quota enforcement is not supported by the DAOS backend yet.
int DaosBucket::check_quota(const DoutPrefixProvider* dpp, RGWQuota& quota,
                            uint64_t obj_size, optional_yield y,
                            bool check_size_only) {
  /* Not Handled in the first pass as stats are also needed */
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
541 | ||
// Overlay `new_attrs` onto the cached bucket attributes and persist the
// bucket record.
//
// NOTE(review): `y` (an optional_yield) is passed into put_info()'s bool
// `exclusive` parameter slot — looks unintended; confirm against the
// put_info signature and callers.
int DaosBucket::merge_and_store_attrs(const DoutPrefixProvider* dpp,
                                      Attrs& new_attrs, optional_yield y) {
  ldpp_dout(dpp, 20) << "DEBUG: merge_and_store_attrs, new_attrs=" << new_attrs
                     << dendl;
  for (auto& it : new_attrs) {
    attrs[it.first] = it.second;
  }

  return put_info(dpp, y, ceph::real_time());
}
552 | ||
// Info refresh is not supported by the DAOS backend yet.
int DaosBucket::try_refresh_info(const DoutPrefixProvider* dpp,
                                 ceph::real_time* pmtime) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
557 | ||
/* XXX: usage and stats not supported in the first pass */
int DaosBucket::read_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
                           uint64_t end_epoch, uint32_t max_entries,
                           bool* is_truncated, RGWUsageIter& usage_iter,
                           map<rgw_user_bucket, rgw_usage_log_entry>& usage) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
565 | ||
// Usage-log trimming is not supported by the DAOS backend yet.
int DaosBucket::trim_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
                           uint64_t end_epoch) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
570 | ||
int DaosBucket::remove_objs_from_index(
    const DoutPrefixProvider* dpp,
    std::list<rgw_obj_index_key>& objs_to_unlink) {
  /* XXX: CHECK: Unlike RadosStore, there is no seperate bucket index table.
   * Delete all the object in the list from the object table of this
   * bucket
   */
  // Not implemented for the DAOS backend yet.
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
580 | ||
int DaosBucket::check_index(
    const DoutPrefixProvider* dpp,
    std::map<RGWObjCategory, RGWStorageStats>& existing_stats,
    std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) {
  /* XXX: stats not supported yet */
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
588 | ||
int DaosBucket::rebuild_index(const DoutPrefixProvider* dpp) {
  /* there is no index table in DAOS. Not applicable */
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
593 | ||
int DaosBucket::set_tag_timeout(const DoutPrefixProvider* dpp,
                                uint64_t timeout) {
  /* XXX: CHECK: set tag timeout for all the bucket objects? */
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
599 | ||
int DaosBucket::purge_instance(const DoutPrefixProvider* dpp) {
  /* XXX: CHECK: for DAOS only single instance supported.
   * Remove all the objects for that instance? Anything extra needed?
   */
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
606 | ||
607 | int DaosBucket::set_acl(const DoutPrefixProvider* dpp, | |
608 | RGWAccessControlPolicy& acl, optional_yield y) { | |
609 | ldpp_dout(dpp, 20) << "DEBUG: set_acl" << dendl; | |
610 | int ret = 0; | |
611 | bufferlist aclbl; | |
612 | ||
613 | acls = acl; | |
614 | acl.encode(aclbl); | |
615 | ||
616 | Attrs attrs = get_attrs(); | |
617 | attrs[RGW_ATTR_ACL] = aclbl; | |
618 | ||
619 | return ret; | |
620 | } | |
621 | ||
// Factory for objects belonging to this bucket.
std::unique_ptr<Object> DaosBucket::get_object(const rgw_obj_key& k) {
  return std::make_unique<DaosObject>(this->store, k, this);
}
625 | ||
// Orders directory entries by key; used to sort ordered list results.
bool compare_rgw_bucket_dir_entry(rgw_bucket_dir_entry& entry1,
                                  rgw_bucket_dir_entry& entry2) {
  return (entry1.key < entry2.key);
}
630 | ||
// Orders multipart uploads by key; used to sort list_multiparts results.
bool compare_multipart_upload(std::unique_ptr<MultipartUpload>& upload1,
                              std::unique_ptr<MultipartUpload>& upload2) {
  return (upload1->get_key() < upload2->get_key());
}
635 | ||
636 | int DaosBucket::list(const DoutPrefixProvider* dpp, ListParams& params, int max, | |
637 | ListResults& results, optional_yield y) { | |
638 | ldpp_dout(dpp, 20) << "DEBUG: list bucket=" << get_name() << " max=" << max | |
639 | << " params=" << params << dendl; | |
640 | // End | |
641 | if (max == 0) { | |
642 | return 0; | |
643 | } | |
644 | ||
645 | int ret = open(dpp); | |
646 | if (ret != 0) { | |
647 | return ret; | |
648 | } | |
649 | ||
650 | // Init needed structures | |
651 | vector<struct ds3_object_info> object_infos(max); | |
652 | uint32_t nobj = object_infos.size(); | |
653 | vector<vector<uint8_t>> values(nobj, vector<uint8_t>(DS3_MAX_ENCODED_LEN)); | |
654 | for (uint32_t i = 0; i < nobj; i++) { | |
655 | object_infos[i].encoded = values[i].data(); | |
656 | object_infos[i].encoded_length = values[i].size(); | |
657 | } | |
658 | ||
659 | vector<struct ds3_common_prefix_info> common_prefixes(max); | |
660 | uint32_t ncp = common_prefixes.size(); | |
661 | ||
662 | char daos_marker[DS3_MAX_KEY_BUFF]; | |
663 | std::strncpy(daos_marker, params.marker.get_oid().c_str(), sizeof(daos_marker)); | |
664 | ||
665 | ret = ds3_bucket_list_obj(&nobj, object_infos.data(), &ncp, | |
666 | common_prefixes.data(), params.prefix.c_str(), | |
667 | params.delim.c_str(), daos_marker, | |
668 | params.list_versions, &results.is_truncated, ds3b); | |
669 | ||
670 | if (ret != 0) { | |
671 | ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_list_obj failed, name=" | |
672 | << get_name() << ", ret=" << ret << dendl; | |
673 | return ret; | |
674 | } | |
675 | ||
676 | object_infos.resize(nobj); | |
677 | values.resize(nobj); | |
678 | common_prefixes.resize(ncp); | |
679 | ||
680 | // Fill common prefixes | |
681 | for (auto const& cp : common_prefixes) { | |
682 | results.common_prefixes[cp.prefix] = true; | |
683 | } | |
684 | ||
685 | // Decode objs | |
686 | for (auto const& obj : object_infos) { | |
687 | bufferlist bl; | |
688 | rgw_bucket_dir_entry ent; | |
689 | bl.append(reinterpret_cast<char*>(obj.encoded), obj.encoded_length); | |
690 | auto iter = bl.cbegin(); | |
691 | ent.decode(iter); | |
692 | if (params.list_versions || ent.is_visible()) { | |
693 | results.objs.emplace_back(std::move(ent)); | |
694 | } | |
695 | } | |
696 | ||
697 | if (!params.allow_unordered) { | |
698 | std::sort(results.objs.begin(), results.objs.end(), | |
699 | compare_rgw_bucket_dir_entry); | |
700 | } | |
701 | ||
702 | return ret; | |
703 | } | |
704 | ||
705 | int DaosBucket::list_multiparts( | |
706 | const DoutPrefixProvider* dpp, const string& prefix, string& marker, | |
707 | const string& delim, const int& max_uploads, | |
708 | vector<std::unique_ptr<MultipartUpload>>& uploads, | |
709 | map<string, bool>* common_prefixes, bool* is_truncated) { | |
710 | ldpp_dout(dpp, 20) << "DEBUG: list_multiparts" << dendl; | |
711 | // End of uploading | |
712 | if (max_uploads == 0) { | |
713 | *is_truncated = false; | |
714 | return 0; | |
715 | } | |
716 | ||
717 | // Init needed structures | |
718 | vector<struct ds3_multipart_upload_info> multipart_upload_infos(max_uploads); | |
719 | uint32_t nmp = multipart_upload_infos.size(); | |
720 | vector<vector<uint8_t>> values(nmp, vector<uint8_t>(DS3_MAX_ENCODED_LEN)); | |
721 | for (uint32_t i = 0; i < nmp; i++) { | |
722 | multipart_upload_infos[i].encoded = values[i].data(); | |
723 | multipart_upload_infos[i].encoded_length = values[i].size(); | |
724 | } | |
725 | ||
726 | vector<struct ds3_common_prefix_info> cps(max_uploads); | |
727 | uint32_t ncp = cps.size(); | |
728 | ||
729 | char daos_marker[DS3_MAX_KEY_BUFF]; | |
730 | std::strncpy(daos_marker, marker.c_str(), sizeof(daos_marker)); | |
731 | ||
732 | int ret = ds3_bucket_list_multipart( | |
733 | get_name().c_str(), &nmp, multipart_upload_infos.data(), &ncp, cps.data(), | |
734 | prefix.c_str(), delim.c_str(), daos_marker, is_truncated, store->ds3); | |
735 | ||
736 | multipart_upload_infos.resize(nmp); | |
737 | values.resize(nmp); | |
738 | cps.resize(ncp); | |
739 | ||
740 | // Fill common prefixes | |
741 | for (auto const& cp : cps) { | |
742 | (*common_prefixes)[cp.prefix] = true; | |
743 | } | |
744 | ||
745 | for (auto const& mp : multipart_upload_infos) { | |
746 | // Decode the xattr | |
747 | bufferlist bl; | |
748 | rgw_bucket_dir_entry ent; | |
749 | bl.append(reinterpret_cast<char*>(mp.encoded), mp.encoded_length); | |
750 | auto iter = bl.cbegin(); | |
751 | ent.decode(iter); | |
752 | string name = ent.key.name; | |
753 | ||
754 | ACLOwner owner(rgw_user(ent.meta.owner)); | |
755 | owner.set_name(ent.meta.owner_display_name); | |
756 | uploads.push_back(this->get_multipart_upload( | |
757 | name, mp.upload_id, std::move(owner), ent.meta.mtime)); | |
758 | } | |
759 | ||
760 | // Sort uploads | |
761 | std::sort(uploads.begin(), uploads.end(), compare_multipart_upload); | |
762 | ||
763 | return ret; | |
764 | } | |
765 | ||
// Aborting all multipart uploads is not supported by the DAOS backend yet.
int DaosBucket::abort_multiparts(const DoutPrefixProvider* dpp,
                                 CephContext* cct) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
770 | ||
771 | void DaosStore::finalize(void) { | |
772 | ldout(cctx, 20) << "DEBUG: finalize" << dendl; | |
773 | int ret; | |
774 | ||
775 | ret = ds3_disconnect(ds3, nullptr); | |
776 | if (ret != 0) { | |
777 | ldout(cctx, 0) << "ERROR: ds3_disconnect() failed: " << ret << dendl; | |
778 | } | |
779 | ds3 = nullptr; | |
780 | ||
781 | ret = ds3_fini(); | |
782 | if (ret != 0) { | |
783 | ldout(cctx, 0) << "ERROR: daos_fini() failed: " << ret << dendl; | |
784 | } | |
785 | } | |
786 | ||
787 | int DaosStore::initialize(CephContext* cct, const DoutPrefixProvider* dpp) { | |
788 | ldpp_dout(dpp, 20) << "DEBUG: initialize" << dendl; | |
789 | int ret = ds3_init(); | |
790 | ||
791 | // DS3 init failed, allow the case where init is already done | |
792 | if (ret != 0 && ret != DER_ALREADY) { | |
793 | ldout(cct, 0) << "ERROR: ds3_init() failed: " << ret << dendl; | |
794 | return ret; | |
795 | } | |
796 | ||
797 | // XXX: these params should be taken from config settings and | |
798 | // cct somehow? | |
799 | const auto& daos_pool = cct->_conf.get_val<std::string>("daos_pool"); | |
800 | ldout(cct, 20) << "INFO: daos pool: " << daos_pool << dendl; | |
801 | ||
802 | ret = ds3_connect(daos_pool.c_str(), nullptr, &ds3, nullptr); | |
803 | ||
804 | if (ret != 0) { | |
805 | ldout(cct, 0) << "ERROR: ds3_connect() failed: " << ret << dendl; | |
806 | ds3_fini(); | |
807 | } | |
808 | ||
809 | return ret; | |
810 | } | |
811 | ||
812 | const std::string& DaosZoneGroup::get_endpoint() const { | |
813 | if (!group.endpoints.empty()) { | |
814 | return group.endpoints.front(); | |
815 | } else { | |
816 | // use zonegroup's master zone endpoints | |
817 | auto z = group.zones.find(group.master_zone); | |
818 | if (z != group.zones.end() && !z->second.endpoints.empty()) { | |
819 | return z->second.endpoints.front(); | |
820 | } | |
821 | } | |
822 | return empty; | |
823 | } | |
824 | ||
825 | bool DaosZoneGroup::placement_target_exists(std::string& target) const { | |
826 | return !!group.placement_targets.count(target); | |
827 | } | |
828 | ||
829 | int DaosZoneGroup::get_placement_target_names( | |
830 | std::set<std::string>& names) const { | |
831 | for (const auto& target : group.placement_targets) { | |
832 | names.emplace(target.second.name); | |
833 | } | |
834 | ||
835 | return 0; | |
836 | } | |
837 | ||
838 | int DaosZoneGroup::get_placement_tier(const rgw_placement_rule& rule, | |
839 | std::unique_ptr<PlacementTier>* tier) { | |
840 | std::map<std::string, RGWZoneGroupPlacementTarget>::const_iterator titer; | |
841 | titer = group.placement_targets.find(rule.name); | |
842 | if (titer == group.placement_targets.end()) { | |
843 | return -ENOENT; | |
844 | } | |
845 | ||
846 | const auto& target_rule = titer->second; | |
847 | std::map<std::string, RGWZoneGroupPlacementTier>::const_iterator ttier; | |
848 | ttier = target_rule.tier_targets.find(rule.storage_class); | |
849 | if (ttier == target_rule.tier_targets.end()) { | |
850 | // not found | |
851 | return -ENOENT; | |
852 | } | |
853 | ||
854 | PlacementTier* t; | |
855 | t = new DaosPlacementTier(store, ttier->second); | |
856 | if (!t) return -ENOMEM; | |
857 | ||
858 | tier->reset(t); | |
859 | return 0; | |
860 | } | |
861 | ||
862 | ZoneGroup& DaosZone::get_zonegroup() { return zonegroup; } | |
863 | ||
864 | int DaosZone::get_zonegroup(const std::string& id, | |
865 | std::unique_ptr<ZoneGroup>* group) { | |
866 | /* XXX: for now only one zonegroup supported */ | |
867 | ZoneGroup* zg; | |
868 | zg = new DaosZoneGroup(store, zonegroup.get_group()); | |
869 | ||
870 | group->reset(zg); | |
871 | return 0; | |
872 | } | |
873 | ||
874 | const rgw_zone_id& DaosZone::get_id() { return cur_zone_id; } | |
875 | ||
// The zone name is taken from the zone params held by this zone.
const std::string& DaosZone::get_name() const {
  return zone_params->get_name();
}
879 | ||
880 | bool DaosZone::is_writeable() { return true; } | |
881 | ||
882 | bool DaosZone::get_redirect_endpoint(std::string* endpoint) { return false; } | |
883 | ||
884 | bool DaosZone::has_zonegroup_api(const std::string& api) const { return false; } | |
885 | ||
// Delegate to the store's current period object for the period id.
const std::string& DaosZone::get_current_period_id() {
  return current_period->get_id();
}
889 | ||
// Hand out a Lua script manager bound to this store instance.
std::unique_ptr<LuaManager> DaosStore::get_lua_manager() {
  return std::make_unique<DaosLuaManager>(this);
}
893 | ||
894 | int DaosObject::get_obj_state(const DoutPrefixProvider* dpp, | |
895 | RGWObjState** _state, optional_yield y, | |
896 | bool follow_olh) { | |
897 | // Get object's metadata (those stored in rgw_bucket_dir_entry) | |
898 | ldpp_dout(dpp, 20) << "DEBUG: get_obj_state" << dendl; | |
899 | rgw_bucket_dir_entry ent; | |
900 | *_state = &state; // state is required even if a failure occurs | |
901 | ||
902 | int ret = get_dir_entry_attrs(dpp, &ent); | |
903 | if (ret != 0) { | |
904 | return ret; | |
905 | } | |
906 | ||
907 | // Set object state. | |
908 | state.exists = true; | |
909 | state.size = ent.meta.size; | |
910 | state.accounted_size = ent.meta.size; | |
911 | state.mtime = ent.meta.mtime; | |
912 | ||
913 | state.has_attrs = true; | |
914 | bufferlist etag_bl; | |
915 | string& etag = ent.meta.etag; | |
916 | ldpp_dout(dpp, 20) << __func__ << ": object's etag: " << ent.meta.etag | |
917 | << dendl; | |
918 | etag_bl.append(etag); | |
919 | state.attrset[RGW_ATTR_ETAG] = etag_bl; | |
920 | return 0; | |
921 | } | |
922 | ||
923 | DaosObject::~DaosObject() { close(nullptr); } | |
924 | ||
925 | int DaosObject::set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, | |
926 | Attrs* delattrs, optional_yield y) { | |
927 | ldpp_dout(dpp, 20) << "DEBUG: DaosObject::set_obj_attrs()" << dendl; | |
928 | // TODO handle target_obj | |
929 | // Get object's metadata (those stored in rgw_bucket_dir_entry) | |
930 | rgw_bucket_dir_entry ent; | |
931 | int ret = get_dir_entry_attrs(dpp, &ent); | |
932 | if (ret != 0) { | |
933 | return ret; | |
934 | } | |
935 | ||
936 | // Update object metadata | |
937 | Attrs updateattrs = setattrs == nullptr ? attrs : *setattrs; | |
938 | if (delattrs) { | |
939 | for (auto const& [attr, attrval] : *delattrs) { | |
940 | updateattrs.erase(attr); | |
941 | } | |
942 | } | |
943 | ||
944 | ret = set_dir_entry_attrs(dpp, &ent, &updateattrs); | |
945 | return ret; | |
946 | } | |
947 | ||
948 | int DaosObject::get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, | |
949 | rgw_obj* target_obj) { | |
950 | ldpp_dout(dpp, 20) << "DEBUG: DaosObject::get_obj_attrs()" << dendl; | |
951 | // TODO handle target_obj | |
952 | // Get object's metadata (those stored in rgw_bucket_dir_entry) | |
953 | rgw_bucket_dir_entry ent; | |
954 | int ret = get_dir_entry_attrs(dpp, &ent, &attrs); | |
955 | return ret; | |
956 | } | |
957 | ||
958 | int DaosObject::modify_obj_attrs(const char* attr_name, bufferlist& attr_val, | |
959 | optional_yield y, | |
960 | const DoutPrefixProvider* dpp) { | |
961 | // Get object's metadata (those stored in rgw_bucket_dir_entry) | |
962 | ldpp_dout(dpp, 20) << "DEBUG: modify_obj_attrs" << dendl; | |
963 | rgw_bucket_dir_entry ent; | |
964 | int ret = get_dir_entry_attrs(dpp, &ent, &attrs); | |
965 | if (ret != 0) { | |
966 | return ret; | |
967 | } | |
968 | ||
969 | // Update object attrs | |
970 | set_atomic(); | |
971 | attrs[attr_name] = attr_val; | |
972 | ||
973 | ret = set_dir_entry_attrs(dpp, &ent, &attrs); | |
974 | return ret; | |
975 | } | |
976 | ||
977 | int DaosObject::delete_obj_attrs(const DoutPrefixProvider* dpp, | |
978 | const char* attr_name, optional_yield y) { | |
979 | ldpp_dout(dpp, 20) << "DEBUG: delete_obj_attrs" << dendl; | |
980 | rgw_obj target = get_obj(); | |
981 | Attrs rmattr; | |
982 | bufferlist bl; | |
983 | ||
984 | rmattr[attr_name] = bl; | |
985 | return set_obj_attrs(dpp, nullptr, &rmattr, y); | |
986 | } | |
987 | ||
988 | bool DaosObject::is_expired() { | |
989 | auto iter = attrs.find(RGW_ATTR_DELETE_AT); | |
990 | if (iter != attrs.end()) { | |
991 | utime_t delete_at; | |
992 | try { | |
993 | auto bufit = iter->second.cbegin(); | |
994 | decode(delete_at, bufit); | |
995 | } catch (buffer::error& err) { | |
996 | ldout(store->ctx(), 0) | |
997 | << "ERROR: " << __func__ | |
998 | << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl; | |
999 | return false; | |
1000 | } | |
1001 | ||
1002 | if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) { | |
1003 | return true; | |
1004 | } | |
1005 | } | |
1006 | ||
1007 | return false; | |
1008 | } | |
1009 | ||
// Taken from rgw_rados.cc
// Generate a random 32-character alphanumeric version id and install it as
// the object key's instance.
void DaosObject::gen_rand_obj_instance_name() {
  enum { OBJ_INSTANCE_LEN = 32 };
  char buf[OBJ_INSTANCE_LEN + 1];

  gen_rand_alphanumeric_no_underscore(store->ctx(), buf, OBJ_INSTANCE_LEN);
  state.obj.key.set_instance(buf);
}
1018 | ||
// Not implemented for the DAOS backend; logs and returns the
// not-implemented error code.
int DaosObject::omap_get_vals(const DoutPrefixProvider* dpp,
                              const std::string& marker, uint64_t count,
                              std::map<std::string, bufferlist>* m, bool* pmore,
                              optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1025 | ||
// Not implemented for the DAOS backend.
int DaosObject::omap_get_all(const DoutPrefixProvider* dpp,
                             std::map<std::string, bufferlist>* m,
                             optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1031 | ||
// Not implemented for the DAOS backend.
int DaosObject::omap_get_vals_by_keys(const DoutPrefixProvider* dpp,
                                      const std::string& oid,
                                      const std::set<std::string>& keys,
                                      Attrs* vals) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1038 | ||
// Not implemented for the DAOS backend.
int DaosObject::omap_set_val_by_key(const DoutPrefixProvider* dpp,
                                    const std::string& key, bufferlist& val,
                                    bool must_exist, optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1044 | ||
// Currently a no-op: the ownership change is accepted but nothing is
// recorded by this backend.
int DaosObject::chown(User& new_user, const DoutPrefixProvider* dpp, optional_yield y) {
  return 0;
}
1048 | ||
// Build a multipart serializer (lock helper) for this object.
std::unique_ptr<MPSerializer> DaosObject::get_serializer(
    const DoutPrefixProvider* dpp, const std::string& lock_name) {
  return std::make_unique<MPDaosSerializer>(dpp, store, this, lock_name);
}
1053 | ||
// Storage-class transition is not implemented for the DAOS backend.
int DaosObject::transition(Bucket* bucket,
                           const rgw_placement_rule& placement_rule,
                           const real_time& mtime, uint64_t olh_epoch,
                           const DoutPrefixProvider* dpp, optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1060 | ||
// Cloud-tier transition is not implemented for the DAOS backend.
int DaosObject::transition_to_cloud(
    Bucket* bucket, rgw::sal::PlacementTier* tier, rgw_bucket_dir_entry& o,
    std::set<std::string>& cloud_targets, CephContext* cct, bool update_object,
    const DoutPrefixProvider* dpp, optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1067 | ||
bool DaosObject::placement_rules_match(rgw_placement_rule& r1,
                                       rgw_placement_rule& r2) {
  /* XXX: support single default zone and zonegroup for now */
  // With only one placement rule supported, any two rules are treated as
  // matching.
  return true;
}
1073 | ||
// Layout dumping is not implemented for the DAOS backend.
int DaosObject::dump_obj_layout(const DoutPrefixProvider* dpp, optional_yield y,
                                Formatter* f) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1078 | ||
// Factory for read operations against this object.
std::unique_ptr<Object::ReadOp> DaosObject::get_read_op() {
  return std::make_unique<DaosObject::DaosReadOp>(this);
}
1082 | ||
1083 | DaosObject::DaosReadOp::DaosReadOp(DaosObject* _source) : source(_source) {} | |
1084 | ||
1085 | int DaosObject::DaosReadOp::prepare(optional_yield y, | |
1086 | const DoutPrefixProvider* dpp) { | |
1087 | ldpp_dout(dpp, 20) << __func__ | |
1088 | << ": bucket=" << source->get_bucket()->get_name() | |
1089 | << dendl; | |
1090 | ||
1091 | if (source->get_bucket()->versioned() && !source->have_instance()) { | |
1092 | // If the bucket is versioned and no version is specified, get the latest | |
1093 | // version | |
1094 | source->set_instance(DS3_LATEST_INSTANCE); | |
1095 | } | |
1096 | ||
1097 | rgw_bucket_dir_entry ent; | |
1098 | int ret = source->get_dir_entry_attrs(dpp, &ent); | |
1099 | ||
1100 | // Set source object's attrs. The attrs is key/value map and is used | |
1101 | // in send_response_data() to set attributes, including etag. | |
1102 | bufferlist etag_bl; | |
1103 | string& etag = ent.meta.etag; | |
1104 | ldpp_dout(dpp, 20) << __func__ << ": object's etag: " << ent.meta.etag | |
1105 | << dendl; | |
1106 | etag_bl.append(etag.c_str(), etag.size()); | |
1107 | source->get_attrs().emplace(std::move(RGW_ATTR_ETAG), std::move(etag_bl)); | |
1108 | ||
1109 | source->set_key(ent.key); | |
1110 | source->set_obj_size(ent.meta.size); | |
1111 | ldpp_dout(dpp, 20) << __func__ << ": object's size: " << ent.meta.size | |
1112 | << dendl; | |
1113 | ||
1114 | return ret; | |
1115 | } | |
1116 | ||
1117 | int DaosObject::DaosReadOp::read(int64_t off, int64_t end, bufferlist& bl, | |
1118 | optional_yield y, | |
1119 | const DoutPrefixProvider* dpp) { | |
1120 | ldpp_dout(dpp, 20) << __func__ << ": off=" << off << " end=" << end << dendl; | |
1121 | int ret = source->lookup(dpp); | |
1122 | if (ret != 0) { | |
1123 | return ret; | |
1124 | } | |
1125 | ||
1126 | // Calculate size, end is inclusive | |
1127 | uint64_t size = end - off + 1; | |
1128 | ||
1129 | // Read | |
1130 | ret = source->read(dpp, bl, off, size); | |
1131 | if (ret != 0) { | |
1132 | return ret; | |
1133 | } | |
1134 | ||
1135 | return ret; | |
1136 | } | |
1137 | ||
1138 | // RGWGetObj::execute() calls ReadOp::iterate() to read object from 'off' to | |
1139 | // 'end'. The returned data is processed in 'cb' which is a chain of | |
1140 | // post-processing filters such as decompression, de-encryption and sending back | |
1141 | // data to client (RGWGetObj_CB::handle_dta which in turn calls | |
1142 | // RGWGetObj::get_data_cb() to send data back.). | |
1143 | // | |
1144 | // POC implements a simple sync version of iterate() function in which it reads | |
1145 | // a block of data each time and call 'cb' for post-processing. | |
1146 | int DaosObject::DaosReadOp::iterate(const DoutPrefixProvider* dpp, int64_t off, | |
1147 | int64_t end, RGWGetDataCB* cb, | |
1148 | optional_yield y) { | |
1149 | ldpp_dout(dpp, 20) << __func__ << ": off=" << off << " end=" << end << dendl; | |
1150 | int ret = source->lookup(dpp); | |
1151 | if (ret != 0) { | |
1152 | return ret; | |
1153 | } | |
1154 | ||
1155 | // Calculate size, end is inclusive | |
1156 | uint64_t size = end - off + 1; | |
1157 | ||
1158 | // Reserve buffers and read | |
1159 | bufferlist bl; | |
1160 | ret = source->read(dpp, bl, off, size); | |
1161 | if (ret != 0) { | |
1162 | return ret; | |
1163 | } | |
1164 | ||
1165 | // Call cb to process returned data. | |
1166 | ldpp_dout(dpp, 20) << __func__ << ": call cb to process data, actual=" << size | |
1167 | << dendl; | |
1168 | cb->handle_data(bl, off, size); | |
1169 | return ret; | |
1170 | } | |
1171 | ||
1172 | int DaosObject::DaosReadOp::get_attr(const DoutPrefixProvider* dpp, | |
1173 | const char* name, bufferlist& dest, | |
1174 | optional_yield y) { | |
1175 | Attrs attrs; | |
1176 | int ret = source->get_dir_entry_attrs(dpp, nullptr, &attrs); | |
1177 | if (!ret) { | |
1178 | return -ENODATA; | |
1179 | } | |
1180 | ||
1181 | auto search = attrs.find(name); | |
1182 | if (search == attrs.end()) { | |
1183 | return -ENODATA; | |
1184 | } | |
1185 | ||
1186 | dest = search->second; | |
1187 | return 0; | |
1188 | } | |
1189 | ||
// Factory for delete operations against this object.
std::unique_ptr<Object::DeleteOp> DaosObject::get_delete_op() {
  return std::make_unique<DaosObject::DaosDeleteOp>(this);
}
1193 | ||
1194 | DaosObject::DaosDeleteOp::DaosDeleteOp(DaosObject* _source) : source(_source) {} | |
1195 | ||
1196 | // Implementation of DELETE OBJ also requires DaosObject::get_obj_state() | |
1197 | // to retrieve and set object's state from object's metadata. | |
1198 | // | |
1199 | // TODO: | |
1200 | // 1. The POC only deletes the Daos objects. It doesn't handle the | |
1201 | // DeleteOp::params. Delete::delete_obj() in rgw_rados.cc shows how rados | |
1202 | // backend process the params. | |
1203 | // 2. Delete an object when its versioning is turned on. | |
1204 | // 3. Handle empty directories | |
1205 | // 4. Fail when file doesn't exist | |
1206 | int DaosObject::DaosDeleteOp::delete_obj(const DoutPrefixProvider* dpp, | |
1207 | optional_yield y) { | |
1208 | ldpp_dout(dpp, 20) << "DaosDeleteOp::delete_obj " | |
1209 | << source->get_key().get_oid() << " from " | |
1210 | << source->get_bucket()->get_name() << dendl; | |
1211 | if (source->get_instance() == "null") { | |
1212 | source->clear_instance(); | |
1213 | } | |
1214 | ||
1215 | // Open bucket | |
1216 | int ret = 0; | |
1217 | std::string key = source->get_key().get_oid(); | |
1218 | DaosBucket* daos_bucket = source->get_daos_bucket(); | |
1219 | ret = daos_bucket->open(dpp); | |
1220 | if (ret != 0) { | |
1221 | return ret; | |
1222 | } | |
1223 | ||
1224 | // Remove the daos object | |
1225 | ret = ds3_obj_destroy(key.c_str(), daos_bucket->ds3b); | |
1226 | ldpp_dout(dpp, 20) << "DEBUG: ds3_obj_destroy key=" << key << " ret=" << ret | |
1227 | << dendl; | |
1228 | ||
1229 | // result.delete_marker = parent_op.result.delete_marker; | |
1230 | // result.version_id = parent_op.result.version_id; | |
1231 | ||
1232 | return ret; | |
1233 | } | |
1234 | ||
1235 | int DaosObject::delete_object(const DoutPrefixProvider* dpp, optional_yield y, | |
1236 | bool prevent_versioning) { | |
1237 | ldpp_dout(dpp, 20) << "DEBUG: delete_object" << dendl; | |
1238 | DaosObject::DaosDeleteOp del_op(this); | |
1239 | del_op.params.bucket_owner = bucket->get_info().owner; | |
1240 | del_op.params.versioning_status = bucket->get_info().versioning_status(); | |
1241 | ||
1242 | return del_op.delete_obj(dpp, y); | |
1243 | } | |
1244 | ||
// Asynchronous delete is not implemented for the DAOS backend.
int DaosObject::delete_obj_aio(const DoutPrefixProvider* dpp,
                               RGWObjState* astate, Completions* aio,
                               bool keep_index_consistent, optional_yield y) {
  /* XXX: Make it async */
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1251 | ||
// Server-side object copy is not implemented for the DAOS backend.
int DaosObject::copy_object(
    User* user, req_info* info, const rgw_zone_id& source_zone,
    rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
    rgw::sal::Bucket* src_bucket, const rgw_placement_rule& dest_placement,
    ceph::real_time* src_mtime, ceph::real_time* mtime,
    const ceph::real_time* mod_ptr, const ceph::real_time* unmod_ptr,
    bool high_precision_time, const char* if_match, const char* if_nomatch,
    AttrsMod attrs_mod, bool copy_if_newer, Attrs& attrs,
    RGWObjCategory category, uint64_t olh_epoch,
    boost::optional<ceph::real_time> delete_at, std::string* version_id,
    std::string* tag, std::string* etag, void (*progress_cb)(off_t, void*),
    void* progress_data, const DoutPrefixProvider* dpp, optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1266 | ||
// Swift versioning restore is not implemented for the DAOS backend.
int DaosObject::swift_versioning_restore(bool& restored,
                                         const DoutPrefixProvider* dpp) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1271 | ||
// Swift versioning copy is not implemented for the DAOS backend.
int DaosObject::swift_versioning_copy(const DoutPrefixProvider* dpp,
                                      optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
1276 | ||
1277 | int DaosObject::lookup(const DoutPrefixProvider* dpp) { | |
1278 | ldpp_dout(dpp, 20) << "DEBUG: lookup" << dendl; | |
1279 | if (is_open()) { | |
1280 | return 0; | |
1281 | } | |
1282 | ||
1283 | if (get_instance() == "null") { | |
1284 | clear_instance(); | |
1285 | } | |
1286 | ||
1287 | int ret = 0; | |
1288 | DaosBucket* daos_bucket = get_daos_bucket(); | |
1289 | ret = daos_bucket->open(dpp); | |
1290 | if (ret != 0) { | |
1291 | return ret; | |
1292 | } | |
1293 | ||
1294 | ret = ds3_obj_open(get_key().get_oid().c_str(), &ds3o, daos_bucket->ds3b); | |
1295 | ||
1296 | if (ret == -ENOENT) { | |
1297 | ldpp_dout(dpp, 20) << "DEBUG: daos object (" << get_bucket()->get_name() | |
1298 | << ", " << get_key().get_oid() | |
1299 | << ") does not exist: ret=" << ret << dendl; | |
1300 | } else if (ret != 0) { | |
1301 | ldpp_dout(dpp, 0) << "ERROR: failed to open daos object (" | |
1302 | << get_bucket()->get_name() << ", " << get_key().get_oid() | |
1303 | << "): ret=" << ret << dendl; | |
1304 | } | |
1305 | return ret; | |
1306 | } | |
1307 | ||
1308 | int DaosObject::create(const DoutPrefixProvider* dpp) { | |
1309 | ldpp_dout(dpp, 20) << "DEBUG: create" << dendl; | |
1310 | if (is_open()) { | |
1311 | return 0; | |
1312 | } | |
1313 | ||
1314 | if (get_instance() == "null") { | |
1315 | clear_instance(); | |
1316 | } | |
1317 | ||
1318 | int ret = 0; | |
1319 | DaosBucket* daos_bucket = get_daos_bucket(); | |
1320 | ret = daos_bucket->open(dpp); | |
1321 | if (ret != 0) { | |
1322 | return ret; | |
1323 | } | |
1324 | ||
1325 | ret = ds3_obj_create(get_key().get_oid().c_str(), &ds3o, daos_bucket->ds3b); | |
1326 | ||
1327 | if (ret != 0) { | |
1328 | ldpp_dout(dpp, 0) << "ERROR: failed to create daos object (" | |
1329 | << get_bucket()->get_name() << ", " << get_key().get_oid() | |
1330 | << "): ret=" << ret << dendl; | |
1331 | } | |
1332 | return ret; | |
1333 | } | |
1334 | ||
1335 | int DaosObject::close(const DoutPrefixProvider* dpp) { | |
1336 | ldpp_dout(dpp, 20) << "DEBUG: close" << dendl; | |
1337 | if (!is_open()) { | |
1338 | return 0; | |
1339 | } | |
1340 | ||
1341 | int ret = ds3_obj_close(ds3o); | |
1342 | ds3o = nullptr; | |
1343 | ldpp_dout(dpp, 20) << "DEBUG: ds3_obj_close ret=" << ret << dendl; | |
1344 | return ret; | |
1345 | } | |
1346 | ||
// Write the full bufferlist at the given offset into the open ds3 object.
// NOTE(review): ds3_obj_write receives the size by pointer — presumably it
// may update it in place; confirm against the ds3 API.
int DaosObject::write(const DoutPrefixProvider* dpp, bufferlist&& data,
                      uint64_t offset) {
  ldpp_dout(dpp, 20) << "DEBUG: write" << dendl;
  uint64_t size = data.length();
  int ret = ds3_obj_write(data.c_str(), offset, &size, get_daos_bucket()->ds3b,
                          ds3o, nullptr);
  if (ret != 0) {
    ldpp_dout(dpp, 0) << "ERROR: failed to write into daos object ("
                      << get_bucket()->get_name() << ", " << get_key().get_oid()
                      << "): ret=" << ret << dendl;
  }
  return ret;
}
1360 | ||
// Read up to `size` bytes at `offset` into `data`. append_hole() reserves
// `size` writable bytes in the bufferlist, which ds3_obj_read fills
// directly; `size` is passed by pointer so the call can report the actual
// length — TODO confirm against the ds3 API.
int DaosObject::read(const DoutPrefixProvider* dpp, bufferlist& data,
                     uint64_t offset, uint64_t& size) {
  ldpp_dout(dpp, 20) << "DEBUG: read" << dendl;
  int ret = ds3_obj_read(data.append_hole(size).c_str(), offset, &size,
                         get_daos_bucket()->ds3b, ds3o, nullptr);
  if (ret != 0) {
    ldpp_dout(dpp, 0) << "ERROR: failed to read from daos object ("
                      << get_bucket()->get_name() << ", " << get_key().get_oid()
                      << "): ret=" << ret << dendl;
  }
  return ret;
}
1373 | ||
// Get the object's dirent and attrs
// Decodes the rgw_bucket_dir_entry (and optionally the trailing Attrs map)
// from the encoded info blob stored with the object. Multipart meta objects
// read their blob from the upload index instead of the object handle.
int DaosObject::get_dir_entry_attrs(const DoutPrefixProvider* dpp,
                                    rgw_bucket_dir_entry* ent,
                                    Attrs* getattrs) {
  ldpp_dout(dpp, 20) << "DEBUG: get_dir_entry_attrs" << dendl;
  int ret = 0;
  vector<uint8_t> value(DS3_MAX_ENCODED_LEN);
  uint32_t size = value.size();

  if (get_key().ns == RGW_OBJ_NS_MULTIPART) {
    struct ds3_multipart_upload_info ui = {.encoded = value.data(),
                                           .encoded_length = size};
    ret = ds3_upload_get_info(&ui, bucket->get_name().c_str(),
                              get_key().get_oid().c_str(), store->ds3);
    // NOTE(review): unlike the object branch below, `size` is not refreshed
    // from ui.encoded_length here, so the whole buffer is handed to the
    // decoder — verify whether ds3_upload_get_info updates the length.
  } else {
    ret = lookup(dpp);
    if (ret != 0) {
      return ret;
    }

    auto object_info = std::make_unique<struct ds3_object_info>();
    object_info->encoded = value.data();
    object_info->encoded_length = size;
    ret = ds3_obj_get_info(object_info.get(), get_daos_bucket()->ds3b, ds3o);
    // Shrink to the encoded length reported back by the call.
    size = object_info->encoded_length;
  }

  if (ret != 0) {
    ldpp_dout(dpp, 0) << "ERROR: failed to get info of daos object ("
                      << get_bucket()->get_name() << ", " << get_key().get_oid()
                      << "): ret=" << ret << dendl;
    return ret;
  }

  rgw_bucket_dir_entry dummy_ent;
  if (!ent) {
    // if ent is not passed, use a dummy ent
    ent = &dummy_ent;
  }

  // Blob layout: dirent encoding followed by the attrs encoding.
  bufferlist bl;
  bl.append(reinterpret_cast<char*>(value.data()), size);
  auto iter = bl.cbegin();
  ent->decode(iter);
  if (getattrs) {
    decode(*getattrs, iter);
  }

  return ret;
}
1424 | // Set the object's dirent and attrs | |
1425 | int DaosObject::set_dir_entry_attrs(const DoutPrefixProvider* dpp, | |
1426 | rgw_bucket_dir_entry* ent, | |
1427 | Attrs* setattrs) { | |
1428 | ldpp_dout(dpp, 20) << "DEBUG: set_dir_entry_attrs" << dendl; | |
1429 | int ret = lookup(dpp); | |
1430 | if (ret != 0) { | |
1431 | return ret; | |
1432 | } | |
1433 | ||
1434 | // Set defaults | |
1435 | if (!ent) { | |
1436 | // if ent is not passed, return an error | |
1437 | return -EINVAL; | |
1438 | } | |
1439 | ||
1440 | if (!setattrs) { | |
1441 | // if setattrs is not passed, use object attrs | |
1442 | setattrs = &attrs; | |
1443 | } | |
1444 | ||
1445 | bufferlist wbl; | |
1446 | ent->encode(wbl); | |
1447 | encode(*setattrs, wbl); | |
1448 | ||
1449 | // Write rgw_bucket_dir_entry into object xattr | |
1450 | auto object_info = std::make_unique<struct ds3_object_info>(); | |
1451 | object_info->encoded = wbl.c_str(); | |
1452 | object_info->encoded_length = wbl.length(); | |
1453 | ret = ds3_obj_set_info(object_info.get(), get_daos_bucket()->ds3b, ds3o); | |
1454 | if (ret != 0) { | |
1455 | ldpp_dout(dpp, 0) << "ERROR: failed to set info of daos object (" | |
1456 | << get_bucket()->get_name() << ", " << get_key().get_oid() | |
1457 | << "): ret=" << ret << dendl; | |
1458 | } | |
1459 | return ret; | |
1460 | } | |
1461 | ||
// Make this version the one the [latest] link points at. If a previous
// latest version exists, its dirent flags are first downgraded from
// VER|CURRENT to plain VER.
int DaosObject::mark_as_latest(const DoutPrefixProvider* dpp,
                               ceph::real_time set_mtime) {
  // TODO handle deletion
  // TODO understand race conditions
  ldpp_dout(dpp, 20) << "DEBUG: mark_as_latest" << dendl;

  // Get latest version so far
  std::unique_ptr<DaosObject> latest_object = std::make_unique<DaosObject>(
      store, rgw_obj_key(get_name(), DS3_LATEST_INSTANCE), get_bucket());

  ldpp_dout(dpp, 20) << __func__ << ": key=" << get_key().get_oid()
                     << " latest_object_key= "
                     << latest_object->get_key().get_oid() << dendl;

  int ret = latest_object->lookup(dpp);
  if (ret == 0) {
    // Get metadata only if file exists
    rgw_bucket_dir_entry latest_ent;
    Attrs latest_attrs;
    ret = latest_object->get_dir_entry_attrs(dpp, &latest_ent, &latest_attrs);
    if (ret != 0) {
      return ret;
    }

    // Update flags: the previous latest keeps FLAG_VER but loses
    // FLAG_CURRENT, and its mtime is refreshed to set_mtime.
    latest_ent.flags = rgw_bucket_dir_entry::FLAG_VER;
    latest_ent.meta.mtime = set_mtime;
    ret = latest_object->set_dir_entry_attrs(dpp, &latest_ent, &latest_attrs);
    if (ret != 0) {
      return ret;
    }
  }

  // Get or create the link [latest], make it link to the current latest
  // version.
  ret =
      ds3_obj_mark_latest(get_key().get_oid().c_str(), get_daos_bucket()->ds3b);
  ldpp_dout(dpp, 20) << "DEBUG: ds3_obj_mark_latest ret=" << ret << dendl;
  return ret;
}
1502 | ||
// An atomic writer targets its own DaosObject built from the caller's key
// and bucket, so the upload proceeds independently of the caller's Object
// instance.
DaosAtomicWriter::DaosAtomicWriter(
    const DoutPrefixProvider* dpp, optional_yield y,
    rgw::sal::Object* obj, DaosStore* _store,
    const rgw_user& _owner, const rgw_placement_rule* _ptail_placement_rule,
    uint64_t _olh_epoch, const std::string& _unique_tag)
    : StoreWriter(dpp, y),
      store(_store),
      owner(_owner),
      ptail_placement_rule(_ptail_placement_rule),
      olh_epoch(_olh_epoch),
      unique_tag(_unique_tag),
      obj(_store, obj->get_key(), obj->get_bucket()) {}
1515 | ||
1516 | int DaosAtomicWriter::prepare(optional_yield y) { | |
1517 | ldpp_dout(dpp, 20) << "DEBUG: prepare" << dendl; | |
1518 | int ret = obj.create(dpp); | |
1519 | return ret; | |
1520 | } | |
1521 | ||
1522 | // TODO: Handle concurrent writes, a unique object id is a possible solution, or | |
1523 | // use DAOS transactions | |
1524 | // XXX: Do we need to accumulate writes as motr does? | |
1525 | int DaosAtomicWriter::process(bufferlist&& data, uint64_t offset) { | |
1526 | ldpp_dout(dpp, 20) << "DEBUG: process" << dendl; | |
1527 | if (data.length() == 0) { | |
1528 | return 0; | |
1529 | } | |
1530 | ||
1531 | int ret = 0; | |
1532 | if (!obj.is_open()) { | |
1533 | ret = obj.lookup(dpp); | |
1534 | if (ret != 0) { | |
1535 | return ret; | |
1536 | } | |
1537 | } | |
1538 | ||
1539 | // XXX: Combine multiple streams into one as motr does | |
1540 | uint64_t data_size = data.length(); | |
1541 | ret = obj.write(dpp, std::move(data), offset); | |
1542 | if (ret == 0) { | |
1543 | total_data_size += data_size; | |
1544 | } | |
1545 | return ret; | |
1546 | } | |
1547 | ||
1548 | int DaosAtomicWriter::complete( | |
1549 | size_t accounted_size, const std::string& etag, ceph::real_time* mtime, | |
1550 | ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs, | |
1551 | ceph::real_time delete_at, const char* if_match, const char* if_nomatch, | |
1552 | const std::string* user_data, rgw_zone_set* zones_trace, bool* canceled, | |
1553 | optional_yield y) { | |
1554 | ldpp_dout(dpp, 20) << "DEBUG: complete" << dendl; | |
1555 | bufferlist bl; | |
1556 | rgw_bucket_dir_entry ent; | |
1557 | int ret; | |
1558 | ||
1559 | // Set rgw_bucet_dir_entry. Some of the members of this structure may not | |
1560 | // apply to daos. | |
1561 | // | |
1562 | // Checkout AtomicObjectProcessor::complete() in rgw_putobj_processor.cc | |
1563 | // and RGWRados::Object::Write::write_meta() in rgw_rados.cc for what and | |
1564 | // how to set the dir entry. Only set the basic ones for POC, no ACLs and | |
1565 | // other attrs. | |
1566 | obj.get_key().get_index_key(&ent.key); | |
1567 | ent.meta.size = total_data_size; | |
1568 | ent.meta.accounted_size = accounted_size; | |
1569 | ent.meta.mtime = | |
1570 | real_clock::is_zero(set_mtime) ? ceph::real_clock::now() : set_mtime; | |
1571 | ent.meta.etag = etag; | |
1572 | ent.meta.owner = owner.to_str(); | |
1573 | ent.meta.owner_display_name = | |
1574 | obj.get_bucket()->get_owner()->get_display_name(); | |
1575 | bool is_versioned = obj.get_bucket()->versioned(); | |
1576 | if (is_versioned) | |
1577 | ent.flags = | |
1578 | rgw_bucket_dir_entry::FLAG_VER | rgw_bucket_dir_entry::FLAG_CURRENT; | |
1579 | ldpp_dout(dpp, 20) << __func__ << ": key=" << obj.get_key().get_oid() | |
1580 | << " etag: " << etag << dendl; | |
1581 | if (user_data) ent.meta.user_data = *user_data; | |
1582 | ||
1583 | RGWBucketInfo& info = obj.get_bucket()->get_info(); | |
1584 | if (info.obj_lock_enabled() && info.obj_lock.has_rule()) { | |
1585 | auto iter = attrs.find(RGW_ATTR_OBJECT_RETENTION); | |
1586 | if (iter == attrs.end()) { | |
1587 | real_time lock_until_date = | |
1588 | info.obj_lock.get_lock_until_date(ent.meta.mtime); | |
1589 | string mode = info.obj_lock.get_mode(); | |
1590 | RGWObjectRetention obj_retention(mode, lock_until_date); | |
1591 | bufferlist retention_bl; | |
1592 | obj_retention.encode(retention_bl); | |
1593 | attrs[RGW_ATTR_OBJECT_RETENTION] = retention_bl; | |
1594 | } | |
1595 | } | |
1596 | ||
1597 | ret = obj.set_dir_entry_attrs(dpp, &ent, &attrs); | |
1598 | ||
1599 | if (is_versioned) { | |
1600 | ret = obj.mark_as_latest(dpp, set_mtime); | |
1601 | if (ret != 0) { | |
1602 | return ret; | |
1603 | } | |
1604 | } | |
1605 | ||
1606 | return ret; | |
1607 | } | |
1608 | ||
1609 | int DaosMultipartUpload::abort(const DoutPrefixProvider* dpp, | |
1610 | CephContext* cct) { | |
1611 | // Remove upload from bucket multipart index | |
1612 | ldpp_dout(dpp, 20) << "DEBUG: abort" << dendl; | |
1613 | return ds3_upload_remove(bucket->get_name().c_str(), get_upload_id().c_str(), | |
1614 | store->ds3); | |
1615 | } | |
1616 | ||
1617 | std::unique_ptr<rgw::sal::Object> DaosMultipartUpload::get_meta_obj() { | |
1618 | return bucket->get_object( | |
1619 | rgw_obj_key(get_upload_id(), string(), RGW_OBJ_NS_MULTIPART)); | |
1620 | } | |
1621 | ||
1622 | int DaosMultipartUpload::init(const DoutPrefixProvider* dpp, optional_yield y, | |
1623 | ACLOwner& _owner, | |
1624 | rgw_placement_rule& dest_placement, | |
1625 | rgw::sal::Attrs& attrs) { | |
1626 | ldpp_dout(dpp, 20) << "DEBUG: init" << dendl; | |
1627 | int ret; | |
1628 | std::string oid = mp_obj.get_key(); | |
1629 | ||
1630 | // Create an initial entry in the bucket. The entry will be | |
1631 | // updated when multipart upload is completed, for example, | |
1632 | // size, etag etc. | |
1633 | bufferlist bl; | |
1634 | rgw_bucket_dir_entry ent; | |
1635 | ent.key.name = oid; | |
1636 | ent.meta.owner = owner.get_id().to_str(); | |
1637 | ent.meta.category = RGWObjCategory::MultiMeta; | |
1638 | ent.meta.mtime = ceph::real_clock::now(); | |
1639 | ||
1640 | multipart_upload_info upload_info; | |
1641 | upload_info.dest_placement = dest_placement; | |
1642 | ||
1643 | ent.encode(bl); | |
1644 | encode(attrs, bl); | |
1645 | encode(upload_info, bl); | |
1646 | ||
1647 | struct ds3_multipart_upload_info ui; | |
1648 | std::strcpy(ui.upload_id, MULTIPART_UPLOAD_ID_PREFIX); | |
1649 | std::strncpy(ui.key, oid.c_str(), sizeof(ui.key)); | |
1650 | ui.encoded = bl.c_str(); | |
1651 | ui.encoded_length = bl.length(); | |
1652 | int prefix_length = strlen(ui.upload_id); | |
1653 | ||
1654 | do { | |
1655 | gen_rand_alphanumeric(store->ctx(), ui.upload_id + prefix_length, | |
1656 | sizeof(ui.upload_id) - 1 - prefix_length); | |
1657 | mp_obj.init(oid, ui.upload_id); | |
1658 | ret = ds3_upload_init(&ui, bucket->get_name().c_str(), store->ds3); | |
1659 | } while (ret == -EEXIST); | |
1660 | ||
1661 | if (ret != 0) { | |
1662 | ldpp_dout(dpp, 0) << "ERROR: failed to create multipart upload dir (" | |
1663 | << bucket->get_name() << "/" << get_upload_id() | |
1664 | << "): ret=" << ret << dendl; | |
1665 | } | |
1666 | return ret; | |
1667 | } | |
1668 | ||
// List up to num_parts parts of this upload starting after `marker`,
// decoding each part's info blob into the `parts` map keyed by part number.
int DaosMultipartUpload::list_parts(const DoutPrefixProvider* dpp,
                                    CephContext* cct, int num_parts, int marker,
                                    int* next_marker, bool* truncated,
                                    bool assume_unsorted) {
  ldpp_dout(dpp, 20) << "DEBUG: list_parts" << dendl;
  // Init needed structures: one info slot, each with its own encode buffer,
  // per requested part.
  vector<struct ds3_multipart_part_info> multipart_part_infos(num_parts);
  uint32_t npart = multipart_part_infos.size();
  vector<vector<uint8_t>> values(npart, vector<uint8_t>(DS3_MAX_ENCODED_LEN));
  for (uint32_t i = 0; i < npart; i++) {
    multipart_part_infos[i].encoded = values[i].data();
    multipart_part_infos[i].encoded_length = values[i].size();
  }

  uint32_t daos_marker = marker;
  int ret = ds3_upload_list_parts(
      bucket->get_name().c_str(), get_upload_id().c_str(), &npart,
      multipart_part_infos.data(), &daos_marker, truncated, store->ds3);

  if (ret != 0) {
    // Map a missing upload to the RGW-specific error code.
    if (ret == -ENOENT) {
      ret = -ERR_NO_SUCH_UPLOAD;
    }
    return ret;
  }

  // npart now holds the number of parts actually returned.
  multipart_part_infos.resize(npart);
  values.resize(npart);
  parts.clear();

  for (auto const& pi : multipart_part_infos) {
    bufferlist bl;
    bl.append(reinterpret_cast<char*>(pi.encoded), pi.encoded_length);

    std::unique_ptr<DaosMultipartPart> part =
        std::make_unique<DaosMultipartPart>();
    auto iter = bl.cbegin();
    decode(part->info, iter);
    parts[pi.part_num] = std::move(part);
  }

  if (next_marker) {
    *next_marker = daos_marker;
  }
  return ret;
}
1715 | ||
1716 | // Heavily copied from rgw_sal_rados.cc | |
1717 | int DaosMultipartUpload::complete( | |
1718 | const DoutPrefixProvider* dpp, optional_yield y, CephContext* cct, | |
1719 | map<int, string>& part_etags, list<rgw_obj_index_key>& remove_objs, | |
1720 | uint64_t& accounted_size, bool& compressed, RGWCompressionInfo& cs_info, | |
1721 | off_t& off, std::string& tag, ACLOwner& owner, uint64_t olh_epoch, | |
1722 | rgw::sal::Object* target_obj) { | |
1723 | ldpp_dout(dpp, 20) << "DEBUG: complete" << dendl; | |
1724 | char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE]; | |
1725 | char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16]; | |
1726 | std::string etag; | |
1727 | bufferlist etag_bl; | |
1728 | MD5 hash; | |
1729 | // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes | |
1730 | hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); | |
1731 | bool truncated; | |
1732 | int ret; | |
1733 | ||
1734 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): enter" << dendl; | |
1735 | int total_parts = 0; | |
1736 | int handled_parts = 0; | |
1737 | int max_parts = 1000; | |
1738 | int marker = 0; | |
1739 | uint64_t min_part_size = cct->_conf->rgw_multipart_min_part_size; | |
1740 | auto etags_iter = part_etags.begin(); | |
1741 | rgw::sal::Attrs attrs = target_obj->get_attrs(); | |
1742 | ||
1743 | do { | |
1744 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): list_parts()" | |
1745 | << dendl; | |
1746 | ret = list_parts(dpp, cct, max_parts, marker, &marker, &truncated); | |
1747 | if (ret == -ENOENT) { | |
1748 | ret = -ERR_NO_SUCH_UPLOAD; | |
1749 | } | |
1750 | if (ret != 0) return ret; | |
1751 | ||
1752 | total_parts += parts.size(); | |
1753 | if (!truncated && total_parts != (int)part_etags.size()) { | |
1754 | ldpp_dout(dpp, 0) << "NOTICE: total parts mismatch: have: " << total_parts | |
1755 | << " expected: " << part_etags.size() << dendl; | |
1756 | ret = -ERR_INVALID_PART; | |
1757 | return ret; | |
1758 | } | |
1759 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): parts.size()=" | |
1760 | << parts.size() << dendl; | |
1761 | ||
1762 | for (auto obj_iter = parts.begin(); | |
1763 | etags_iter != part_etags.end() && obj_iter != parts.end(); | |
1764 | ++etags_iter, ++obj_iter, ++handled_parts) { | |
1765 | DaosMultipartPart* part = | |
1766 | dynamic_cast<rgw::sal::DaosMultipartPart*>(obj_iter->second.get()); | |
1767 | uint64_t part_size = part->get_size(); | |
1768 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): part_size=" | |
1769 | << part_size << dendl; | |
1770 | if (handled_parts < (int)part_etags.size() - 1 && | |
1771 | part_size < min_part_size) { | |
1772 | ret = -ERR_TOO_SMALL; | |
1773 | return ret; | |
1774 | } | |
1775 | ||
1776 | char petag[CEPH_CRYPTO_MD5_DIGESTSIZE]; | |
1777 | if (etags_iter->first != (int)obj_iter->first) { | |
1778 | ldpp_dout(dpp, 0) << "NOTICE: parts num mismatch: next requested: " | |
1779 | << etags_iter->first | |
1780 | << " next uploaded: " << obj_iter->first << dendl; | |
1781 | ret = -ERR_INVALID_PART; | |
1782 | return ret; | |
1783 | } | |
1784 | string part_etag = rgw_string_unquote(etags_iter->second); | |
1785 | if (part_etag.compare(part->get_etag()) != 0) { | |
1786 | ldpp_dout(dpp, 0) << "NOTICE: etag mismatch: part: " | |
1787 | << etags_iter->first | |
1788 | << " etag: " << etags_iter->second << dendl; | |
1789 | ret = -ERR_INVALID_PART; | |
1790 | return ret; | |
1791 | } | |
1792 | ||
1793 | hex_to_buf(part->get_etag().c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE); | |
1794 | hash.Update((const unsigned char*)petag, sizeof(petag)); | |
1795 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): calc etag " | |
1796 | << dendl; | |
1797 | ||
1798 | RGWUploadPartInfo& obj_part = part->info; | |
1799 | string oid = mp_obj.get_part(obj_part.num); | |
1800 | rgw_obj src_obj; | |
1801 | src_obj.init_ns(bucket->get_key(), oid, RGW_OBJ_NS_MULTIPART); | |
1802 | ||
1803 | bool part_compressed = (obj_part.cs_info.compression_type != "none"); | |
1804 | if ((handled_parts > 0) && | |
1805 | ((part_compressed != compressed) || | |
1806 | (cs_info.compression_type != obj_part.cs_info.compression_type))) { | |
1807 | ldpp_dout(dpp, 0) | |
1808 | << "ERROR: compression type was changed during multipart upload (" | |
1809 | << cs_info.compression_type << ">>" | |
1810 | << obj_part.cs_info.compression_type << ")" << dendl; | |
1811 | ret = -ERR_INVALID_PART; | |
1812 | return ret; | |
1813 | } | |
1814 | ||
1815 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): part compression" | |
1816 | << dendl; | |
1817 | if (part_compressed) { | |
1818 | int64_t new_ofs; // offset in compression data for new part | |
1819 | if (cs_info.blocks.size() > 0) | |
1820 | new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len; | |
1821 | else | |
1822 | new_ofs = 0; | |
1823 | for (const auto& block : obj_part.cs_info.blocks) { | |
1824 | compression_block cb; | |
1825 | cb.old_ofs = block.old_ofs + cs_info.orig_size; | |
1826 | cb.new_ofs = new_ofs; | |
1827 | cb.len = block.len; | |
1828 | cs_info.blocks.push_back(cb); | |
1829 | new_ofs = cb.new_ofs + cb.len; | |
1830 | } | |
1831 | if (!compressed) | |
1832 | cs_info.compression_type = obj_part.cs_info.compression_type; | |
1833 | cs_info.orig_size += obj_part.cs_info.orig_size; | |
1834 | compressed = true; | |
1835 | } | |
1836 | ||
1837 | // We may not need to do the following as remove_objs are those | |
1838 | // don't show when listing a bucket. As we store in-progress uploaded | |
1839 | // object's metadata in a separate index, they are not shown when | |
1840 | // listing a bucket. | |
1841 | rgw_obj_index_key remove_key; | |
1842 | src_obj.key.get_index_key(&remove_key); | |
1843 | ||
1844 | remove_objs.push_back(remove_key); | |
1845 | ||
1846 | off += obj_part.size; | |
1847 | accounted_size += obj_part.accounted_size; | |
1848 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): off=" << off | |
1849 | << ", accounted_size = " << accounted_size << dendl; | |
1850 | } | |
1851 | } while (truncated); | |
1852 | hash.Final((unsigned char*)final_etag); | |
1853 | ||
1854 | buf_to_hex((unsigned char*)final_etag, sizeof(final_etag), final_etag_str); | |
1855 | snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], | |
1856 | sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2, "-%lld", | |
1857 | (long long)part_etags.size()); | |
1858 | etag = final_etag_str; | |
1859 | ldpp_dout(dpp, 10) << "calculated etag: " << etag << dendl; | |
1860 | ||
1861 | etag_bl.append(etag); | |
1862 | ||
1863 | attrs[RGW_ATTR_ETAG] = etag_bl; | |
1864 | ||
1865 | if (compressed) { | |
1866 | // write compression attribute to full object | |
1867 | bufferlist tmp; | |
1868 | encode(cs_info, tmp); | |
1869 | attrs[RGW_ATTR_COMPRESSION] = tmp; | |
1870 | } | |
1871 | ||
1872 | // Different from rgw_sal_rados.cc starts here | |
1873 | // Read the object's multipart info | |
1874 | bufferlist bl; | |
1875 | uint64_t size = DS3_MAX_ENCODED_LEN; | |
1876 | struct ds3_multipart_upload_info ui = { | |
1877 | .encoded = bl.append_hole(size).c_str(), .encoded_length = size}; | |
1878 | ret = ds3_upload_get_info(&ui, bucket->get_name().c_str(), | |
1879 | get_upload_id().c_str(), store->ds3); | |
1880 | ldpp_dout(dpp, 20) << "DEBUG: ds3_upload_get_info entry=" | |
1881 | << bucket->get_name() << "/" << get_upload_id() << dendl; | |
1882 | if (ret != 0) { | |
1883 | if (ret == -ENOENT) { | |
1884 | ret = -ERR_NO_SUCH_UPLOAD; | |
1885 | } | |
1886 | return ret; | |
1887 | } | |
1888 | ||
1889 | rgw_bucket_dir_entry ent; | |
1890 | auto iter = bl.cbegin(); | |
1891 | ent.decode(iter); | |
1892 | ||
1893 | // Update entry data and name | |
1894 | target_obj->get_key().get_index_key(&ent.key); | |
1895 | ent.meta.size = off; | |
1896 | ent.meta.accounted_size = accounted_size; | |
1897 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): obj size=" | |
1898 | << ent.meta.size | |
1899 | << " obj accounted size=" << ent.meta.accounted_size | |
1900 | << dendl; | |
1901 | ent.meta.category = RGWObjCategory::Main; | |
1902 | ent.meta.mtime = ceph::real_clock::now(); | |
1903 | bool is_versioned = target_obj->get_bucket()->versioned(); | |
1904 | if (is_versioned) | |
1905 | ent.flags = | |
1906 | rgw_bucket_dir_entry::FLAG_VER | rgw_bucket_dir_entry::FLAG_CURRENT; | |
1907 | ent.meta.etag = etag; | |
1908 | ||
1909 | // Open object | |
1910 | DaosObject* obj = static_cast<DaosObject*>(target_obj); | |
1911 | ret = obj->create(dpp); | |
1912 | if (ret != 0) { | |
1913 | return ret; | |
1914 | } | |
1915 | ||
1916 | // Copy data from parts to object | |
1917 | uint64_t write_off = 0; | |
1918 | for (auto const& [part_num, part] : get_parts()) { | |
1919 | ds3_part_t* ds3p; | |
1920 | ret = ds3_part_open(get_bucket_name().c_str(), get_upload_id().c_str(), | |
1921 | part_num, false, &ds3p, store->ds3); | |
1922 | if (ret != 0) { | |
1923 | return ret; | |
1924 | } | |
1925 | ||
1926 | // Reserve buffers and read | |
1927 | uint64_t size = part->get_size(); | |
1928 | bufferlist bl; | |
1929 | ret = ds3_part_read(bl.append_hole(size).c_str(), 0, &size, ds3p, | |
1930 | store->ds3, nullptr); | |
1931 | if (ret != 0) { | |
1932 | ds3_part_close(ds3p); | |
1933 | return ret; | |
1934 | } | |
1935 | ||
1936 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): part " << part_num | |
1937 | << " size is " << size << dendl; | |
1938 | ||
1939 | // write to obj | |
1940 | obj->write(dpp, std::move(bl), write_off); | |
1941 | ds3_part_close(ds3p); | |
1942 | write_off += part->get_size(); | |
1943 | } | |
1944 | ||
1945 | // Set attributes | |
1946 | ret = obj->set_dir_entry_attrs(dpp, &ent, &attrs); | |
1947 | ||
1948 | if (is_versioned) { | |
1949 | ret = obj->mark_as_latest(dpp, ent.meta.mtime); | |
1950 | if (ret != 0) { | |
1951 | return ret; | |
1952 | } | |
1953 | } | |
1954 | ||
1955 | // Remove upload from bucket multipart index | |
1956 | ret = ds3_upload_remove(get_bucket_name().c_str(), get_upload_id().c_str(), | |
1957 | store->ds3); | |
1958 | return ret; | |
1959 | } | |
1960 | ||
1961 | int DaosMultipartUpload::get_info(const DoutPrefixProvider* dpp, | |
1962 | optional_yield y, rgw_placement_rule** rule, | |
1963 | rgw::sal::Attrs* attrs) { | |
1964 | ldpp_dout(dpp, 20) << "DaosMultipartUpload::get_info(): enter" << dendl; | |
1965 | if (!rule && !attrs) { | |
1966 | return 0; | |
1967 | } | |
1968 | ||
1969 | if (rule) { | |
1970 | if (!placement.empty()) { | |
1971 | *rule = &placement; | |
1972 | if (!attrs) { | |
1973 | // Don't need attrs, done | |
1974 | return 0; | |
1975 | } | |
1976 | } else { | |
1977 | *rule = nullptr; | |
1978 | } | |
1979 | } | |
1980 | ||
1981 | // Read the multipart upload dirent from index | |
1982 | bufferlist bl; | |
1983 | uint64_t size = DS3_MAX_ENCODED_LEN; | |
1984 | struct ds3_multipart_upload_info ui = { | |
1985 | .encoded = bl.append_hole(size).c_str(), .encoded_length = size}; | |
1986 | int ret = ds3_upload_get_info(&ui, bucket->get_name().c_str(), | |
1987 | get_upload_id().c_str(), store->ds3); | |
1988 | ||
1989 | if (ret != 0) { | |
1990 | if (ret == -ENOENT) { | |
1991 | ret = -ERR_NO_SUCH_UPLOAD; | |
1992 | } | |
1993 | return ret; | |
1994 | } | |
1995 | ||
1996 | multipart_upload_info upload_info; | |
1997 | rgw_bucket_dir_entry ent; | |
1998 | Attrs decoded_attrs; | |
1999 | auto iter = bl.cbegin(); | |
2000 | ent.decode(iter); | |
2001 | decode(decoded_attrs, iter); | |
2002 | ldpp_dout(dpp, 20) << "DEBUG: decoded_attrs=" << attrs << dendl; | |
2003 | ||
2004 | if (attrs) { | |
2005 | *attrs = decoded_attrs; | |
2006 | if (!rule || *rule != nullptr) { | |
2007 | // placement was cached; don't actually read | |
2008 | return 0; | |
2009 | } | |
2010 | } | |
2011 | ||
2012 | // Now decode the placement rule | |
2013 | decode(upload_info, iter); | |
2014 | placement = upload_info.dest_placement; | |
2015 | *rule = &placement; | |
2016 | ||
2017 | return 0; | |
2018 | } | |
2019 | ||
// Factory for the writer used to upload a single part of this multipart
// upload; simply forwards all arguments to DaosMultipartWriter.
// NOTE(review): the log references `_head_obj`, presumably a member of this
// class (not visible here), while the writer is built from the `obj`
// parameter — confirm the log prints what was intended.
std::unique_ptr<Writer> DaosMultipartUpload::get_writer(
    const DoutPrefixProvider* dpp, optional_yield y,
    rgw::sal::Object* obj, const rgw_user& owner,
    const rgw_placement_rule* ptail_placement_rule, uint64_t part_num,
    const std::string& part_num_str) {
  ldpp_dout(dpp, 20) << "DaosMultipartUpload::get_writer(): enter part="
                     << part_num << " head_obj=" << _head_obj << dendl;
  return std::make_unique<DaosMultipartWriter>(
      dpp, y, this, obj, store, owner, ptail_placement_rule,
      part_num, part_num_str);
}
2031 | ||
// Close the underlying ds3 part handle if prepare() opened one.
DaosMultipartWriter::~DaosMultipartWriter() {
  if (is_open()) ds3_part_close(ds3p);
}

// Opens (creating if needed — `true` flag) the ds3 part handle this writer
// streams into. Maps a missing upload (-ENOENT) to -ERR_NO_SUCH_UPLOAD.
int DaosMultipartWriter::prepare(optional_yield y) {
  ldpp_dout(dpp, 20) << "DaosMultipartWriter::prepare(): enter part="
                     << part_num_str << dendl;
  int ret = ds3_part_open(get_bucket_name().c_str(), upload_id.c_str(),
                          part_num, true, &ds3p, store->ds3);
  if (ret == -ENOENT) {
    ret = -ERR_NO_SUCH_UPLOAD;
  }
  return ret;
}
2046 | ||
// Convenience accessor: the bucket name comes from the owning upload.
const std::string& DaosMultipartWriter::get_bucket_name() {
  return static_cast<DaosMultipartUpload*>(upload)->get_bucket_name();
}

// Writes one chunk of part data at `offset` and accumulates the number of
// bytes actually written into actual_part_size. Empty chunks are a no-op.
int DaosMultipartWriter::process(bufferlist&& data, uint64_t offset) {
  ldpp_dout(dpp, 20) << "DaosMultipartWriter::process(): enter part="
                     << part_num_str << " offset=" << offset << dendl;
  if (data.length() == 0) {
    return 0;
  }

  // ds3_part_write updates `size` to the byte count actually written.
  uint64_t size = data.length();
  int ret =
      ds3_part_write(data.c_str(), offset, &size, ds3p, store->ds3, nullptr);
  if (ret == 0) {
    // XXX: Combine multiple streams into one as motr does
    actual_part_size += size;
  } else {
    ldpp_dout(dpp, 0) << "ERROR: failed to write into part ("
                      << get_bucket_name() << ", " << upload_id << ", "
                      << part_num << "): ret=" << ret << dendl;
  }
  return ret;
}
2071 | ||
2072 | int DaosMultipartWriter::complete( | |
2073 | size_t accounted_size, const std::string& etag, ceph::real_time* mtime, | |
2074 | ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs, | |
2075 | ceph::real_time delete_at, const char* if_match, const char* if_nomatch, | |
2076 | const std::string* user_data, rgw_zone_set* zones_trace, bool* canceled, | |
2077 | optional_yield y) { | |
2078 | ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): enter part=" | |
2079 | << part_num_str << dendl; | |
2080 | ||
2081 | // Add an entry into part index | |
2082 | bufferlist bl; | |
2083 | RGWUploadPartInfo info; | |
2084 | info.num = part_num; | |
2085 | info.etag = etag; | |
2086 | info.size = actual_part_size; | |
2087 | info.accounted_size = accounted_size; | |
2088 | info.modified = real_clock::now(); | |
2089 | ||
2090 | bool compressed; | |
2091 | int ret = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info); | |
2092 | ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): compression ret=" | |
2093 | << ret << dendl; | |
2094 | if (ret != 0) { | |
2095 | ldpp_dout(dpp, 1) << "cannot get compression info" << dendl; | |
2096 | return ret; | |
2097 | } | |
2098 | encode(info, bl); | |
2099 | encode(attrs, bl); | |
2100 | ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): entry size" | |
2101 | << bl.length() << dendl; | |
2102 | ||
2103 | struct ds3_multipart_part_info part_info = {.part_num = part_num, | |
2104 | .encoded = bl.c_str(), | |
2105 | .encoded_length = bl.length()}; | |
2106 | ||
2107 | ret = ds3_part_set_info(&part_info, ds3p, store->ds3, nullptr); | |
2108 | ||
2109 | if (ret != 0) { | |
2110 | ldpp_dout(dpp, 0) << "ERROR: failed to set part info (" << get_bucket_name() | |
2111 | << ", " << upload_id << ", " << part_num | |
2112 | << "): ret=" << ret << dendl; | |
2113 | if (ret == ENOENT) { | |
2114 | ret = -ERR_NO_SUCH_UPLOAD; | |
2115 | } | |
2116 | } | |
2117 | ||
2118 | return ret; | |
2119 | } | |
2120 | ||
// IAM roles and OIDC providers are not supported by the DAOS backend.
// The factory overloads below return an empty unique_ptr and the listing
// calls report "not implemented".
std::unique_ptr<RGWRole> DaosStore::get_role(
    std::string name, std::string tenant, std::string path,
    std::string trust_policy, std::string max_session_duration_str,
    std::multimap<std::string, std::string> tags) {
  RGWRole* p = nullptr;
  return std::unique_ptr<RGWRole>(p);
}

std::unique_ptr<RGWRole> DaosStore::get_role(const RGWRoleInfo& info) {
  RGWRole* p = nullptr;
  return std::unique_ptr<RGWRole>(p);
}

std::unique_ptr<RGWRole> DaosStore::get_role(std::string id) {
  RGWRole* p = nullptr;
  return std::unique_ptr<RGWRole>(p);
}

int DaosStore::get_roles(const DoutPrefixProvider* dpp, optional_yield y,
                         const std::string& path_prefix,
                         const std::string& tenant,
                         vector<std::unique_ptr<RGWRole>>& roles) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

std::unique_ptr<RGWOIDCProvider> DaosStore::get_oidc_provider() {
  RGWOIDCProvider* p = nullptr;
  return std::unique_ptr<RGWOIDCProvider>(p);
}

int DaosStore::get_oidc_providers(
    const DoutPrefixProvider* dpp, const std::string& tenant,
    vector<std::unique_ptr<RGWOIDCProvider>>& providers) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
2156 | ||
// Factory for a multipart-upload handle scoped to this bucket. When
// `upload_id` is empty, DaosMultipartUpload is expected to generate one.
std::unique_ptr<MultipartUpload> DaosBucket::get_multipart_upload(
    const std::string& oid, std::optional<std::string> upload_id,
    ACLOwner owner, ceph::real_time mtime) {
  return std::make_unique<DaosMultipartUpload>(store, this, oid, upload_id,
                                               owner, mtime);
}

// Append writes are not supported by the DAOS backend.
std::unique_ptr<Writer> DaosStore::get_append_writer(
    const DoutPrefixProvider* dpp, optional_yield y,
    rgw::sal::Object* obj, const rgw_user& owner,
    const rgw_placement_rule* ptail_placement_rule,
    const std::string& unique_tag, uint64_t position,
    uint64_t* cur_accounted_size) {
  DAOS_NOT_IMPLEMENTED_LOG(dpp);
  return nullptr;
}

// Factory for the writer used for whole-object (atomic) PUTs.
std::unique_ptr<Writer> DaosStore::get_atomic_writer(
    const DoutPrefixProvider* dpp, optional_yield y,
    rgw::sal::Object* obj, const rgw_user& owner,
    const rgw_placement_rule* ptail_placement_rule, uint64_t olh_epoch,
    const std::string& unique_tag) {
  ldpp_dout(dpp, 20) << "get_atomic_writer" << dendl;
  return std::make_unique<DaosAtomicWriter>(dpp, y, obj, this,
                                            owner, ptail_placement_rule,
                                            olh_epoch, unique_tag);
}
2184 | ||
// Compression type and placement validation are delegated to the zone's
// placement parameters.
const std::string& DaosStore::get_compression_type(
    const rgw_placement_rule& rule) {
  return zone.zone_params->get_compression_type(rule);
}

bool DaosStore::valid_placement(const rgw_placement_rule& rule) {
  return zone.zone_params->valid_placement(rule);
}

// Wraps a user id in a DaosUser handle; no backend lookup is performed here.
std::unique_ptr<User> DaosStore::get_user(const rgw_user& u) {
  ldout(cctx, 20) << "DEBUG: bucket's user: " << u.to_str() << dendl;
  return std::make_unique<DaosUser>(this, u);
}
2198 | ||
2199 | int DaosStore::get_user_by_access_key(const DoutPrefixProvider* dpp, | |
2200 | const std::string& key, optional_yield y, | |
2201 | std::unique_ptr<User>* user) { | |
2202 | // Initialize ds3_user_info | |
2203 | bufferlist bl; | |
2204 | uint64_t size = DS3_MAX_ENCODED_LEN; | |
2205 | struct ds3_user_info user_info = {.encoded = bl.append_hole(size).c_str(), | |
2206 | .encoded_length = size}; | |
2207 | ||
2208 | int ret = ds3_user_get_by_key(key.c_str(), &user_info, ds3, nullptr); | |
2209 | ||
2210 | if (ret != 0) { | |
2211 | ldpp_dout(dpp, 0) << "Error: ds3_user_get_by_key failed, key=" << key | |
2212 | << " ret=" << ret << dendl; | |
2213 | return ret; | |
2214 | } | |
2215 | ||
2216 | // Decode | |
2217 | DaosUserInfo duinfo; | |
2218 | bufferlist& blr = bl; | |
2219 | auto iter = blr.cbegin(); | |
2220 | duinfo.decode(iter); | |
2221 | ||
2222 | User* u = new DaosUser(this, duinfo.info); | |
2223 | if (!u) { | |
2224 | return -ENOMEM; | |
2225 | } | |
2226 | ||
2227 | user->reset(u); | |
2228 | return 0; | |
2229 | } | |
2230 | ||
2231 | int DaosStore::get_user_by_email(const DoutPrefixProvider* dpp, | |
2232 | const std::string& email, optional_yield y, | |
2233 | std::unique_ptr<User>* user) { | |
2234 | // Initialize ds3_user_info | |
2235 | bufferlist bl; | |
2236 | uint64_t size = DS3_MAX_ENCODED_LEN; | |
2237 | struct ds3_user_info user_info = {.encoded = bl.append_hole(size).c_str(), | |
2238 | .encoded_length = size}; | |
2239 | ||
2240 | int ret = ds3_user_get_by_email(email.c_str(), &user_info, ds3, nullptr); | |
2241 | ||
2242 | if (ret != 0) { | |
2243 | ldpp_dout(dpp, 0) << "Error: ds3_user_get_by_email failed, email=" << email | |
2244 | << " ret=" << ret << dendl; | |
2245 | return ret; | |
2246 | } | |
2247 | ||
2248 | // Decode | |
2249 | DaosUserInfo duinfo; | |
2250 | bufferlist& blr = bl; | |
2251 | auto iter = blr.cbegin(); | |
2252 | duinfo.decode(iter); | |
2253 | ||
2254 | User* u = new DaosUser(this, duinfo.info); | |
2255 | if (!u) { | |
2256 | return -ENOMEM; | |
2257 | } | |
2258 | ||
2259 | user->reset(u); | |
2260 | return 0; | |
2261 | } | |
2262 | ||
int DaosStore::get_user_by_swift(const DoutPrefixProvider* dpp,
                                 const std::string& user_str, optional_yield y,
                                 std::unique_ptr<User>* user) {
  /* Swift keys and subusers are not supported for now */
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

// Wraps an object key in a DaosObject handle; no backend lookup here.
std::unique_ptr<Object> DaosStore::get_object(const rgw_obj_key& k) {
  return std::make_unique<DaosObject>(this, k);
}
2273 | ||
2274 | inline std::ostream& operator<<(std::ostream& out, const rgw_user* u) { | |
2275 | std::string s; | |
2276 | if (u != nullptr) | |
2277 | u->to_str(s); | |
2278 | else | |
2279 | s = "(nullptr)"; | |
2280 | return out << s; | |
2281 | } | |
2282 | ||
2283 | int DaosStore::get_bucket(const DoutPrefixProvider* dpp, User* u, | |
2284 | const rgw_bucket& b, std::unique_ptr<Bucket>* bucket, | |
2285 | optional_yield y) { | |
2286 | ldpp_dout(dpp, 20) << "DEBUG: get_bucket1: User: " << u << dendl; | |
2287 | int ret; | |
2288 | Bucket* bp; | |
2289 | ||
2290 | bp = new DaosBucket(this, b, u); | |
2291 | ret = bp->load_bucket(dpp, y); | |
2292 | if (ret != 0) { | |
2293 | delete bp; | |
2294 | return ret; | |
2295 | } | |
2296 | ||
2297 | bucket->reset(bp); | |
2298 | return 0; | |
2299 | } | |
2300 | ||
2301 | int DaosStore::get_bucket(User* u, const RGWBucketInfo& i, | |
2302 | std::unique_ptr<Bucket>* bucket) { | |
2303 | DaosBucket* bp; | |
2304 | ||
2305 | bp = new DaosBucket(this, i, u); | |
2306 | /* Don't need to fetch the bucket info, use the provided one */ | |
2307 | ||
2308 | bucket->reset(bp); | |
2309 | return 0; | |
2310 | } | |
2311 | ||
// Convenience overload: builds an rgw_bucket from tenant + name and
// delegates to the loading get_bucket overload above.
int DaosStore::get_bucket(const DoutPrefixProvider* dpp, User* u,
                          const std::string& tenant, const std::string& name,
                          std::unique_ptr<Bucket>* bucket, optional_yield y) {
  ldpp_dout(dpp, 20) << "get_bucket" << dendl;
  rgw_bucket b;

  b.tenant = tenant;
  b.name = name;

  return get_bucket(dpp, u, b, bucket, y);
}
2323 | ||
// Single-zone backend: this instance is always the metadata master, so
// nothing is ever forwarded and unique ids are empty.
bool DaosStore::is_meta_master() { return true; }

int DaosStore::forward_request_to_master(const DoutPrefixProvider* dpp,
                                         User* user, obj_version* objv,
                                         bufferlist& in_data, JSONParser* jp,
                                         req_info& info, optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

int DaosStore::forward_iam_request_to_master(const DoutPrefixProvider* dpp,
                                             const RGWAccessKey& key,
                                             obj_version* objv,
                                             bufferlist& in_data,
                                             RGWXMLDecoder::XMLParser* parser,
                                             req_info& info, optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

std::string DaosStore::zone_unique_id(uint64_t unique_num) { return ""; }

std::string DaosStore::zone_unique_trans_id(const uint64_t unique_num) {
  return "";
}
2347 | ||
// Cluster statistics, lifecycle processing and async completions are not
// implemented for the DAOS backend; the factories return an empty pointer.
int DaosStore::cluster_stat(RGWClusterStat& stats) {
  return DAOS_NOT_IMPLEMENTED_LOG(nullptr);
}

std::unique_ptr<Lifecycle> DaosStore::get_lifecycle(void) {
  DAOS_NOT_IMPLEMENTED_LOG(nullptr);
  return 0;
}

std::unique_ptr<Completions> DaosStore::get_completions(void) {
  DAOS_NOT_IMPLEMENTED_LOG(nullptr);
  return 0;
}
2361 | ||
// Notification factories. Both overloads only capture obj/src_obj/event
// type; the extra request/bucket/user context is ignored by this backend.
std::unique_ptr<Notification> DaosStore::get_notification(
    rgw::sal::Object* obj, rgw::sal::Object* src_obj, struct req_state* s,
    rgw::notify::EventType event_type, const std::string* object_name) {
  return std::make_unique<DaosNotification>(obj, src_obj, event_type);
}

std::unique_ptr<Notification> DaosStore::get_notification(
    const DoutPrefixProvider* dpp, Object* obj, Object* src_obj,
    rgw::notify::EventType event_type, rgw::sal::Bucket* _bucket,
    std::string& _user_id, std::string& _user_tenant, std::string& _req_id,
    optional_yield y) {
  ldpp_dout(dpp, 20) << "get_notification" << dendl;
  return std::make_unique<DaosNotification>(obj, src_obj, event_type);
}
2376 | ||
// ---------------------------------------------------------------------------
// The store hooks below are unimplemented for the DAOS backend. Each logs
// via DAOS_NOT_IMPLEMENTED_LOG and either propagates its "not implemented"
// return value or deliberately no-ops.
// ---------------------------------------------------------------------------

// Usage logging: returns 0 so callers treat the missing feature as a
// successful no-op rather than an error.
int DaosStore::log_usage(const DoutPrefixProvider* dpp,
                         map<rgw_user_bucket, RGWUsageBatch>& usage_info) {
  DAOS_NOT_IMPLEMENTED_LOG(dpp);
  return 0;
}

int DaosStore::log_op(const DoutPrefixProvider* dpp, string& oid,
                      bufferlist& bl) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

int DaosStore::register_to_service_map(const DoutPrefixProvider* dpp,
                                       const string& daemon_type,
                                       const map<string, string>& meta) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

// Quotas are not populated; output left untouched.
void DaosStore::get_quota(RGWQuota& quota) {
  // XXX: Not handled for the first pass
  return;
}

// Rate limits are not populated; outputs left untouched.
void DaosStore::get_ratelimit(RGWRateLimitInfo& bucket_ratelimit,
                              RGWRateLimitInfo& user_ratelimit,
                              RGWRateLimitInfo& anon_ratelimit) {
  return;
}

int DaosStore::set_buckets_enabled(const DoutPrefixProvider* dpp,
                                   std::vector<rgw_bucket>& buckets,
                                   bool enabled) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

int DaosStore::get_sync_policy_handler(const DoutPrefixProvider* dpp,
                                       std::optional<rgw_zone_id> zone,
                                       std::optional<rgw_bucket> bucket,
                                       RGWBucketSyncPolicyHandlerRef* phandler,
                                       optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

RGWDataSyncStatusManager* DaosStore::get_data_sync_manager(
    const rgw_zone_id& source_zone) {
  DAOS_NOT_IMPLEMENTED_LOG(nullptr);
  return 0;
}

int DaosStore::read_all_usage(
    const DoutPrefixProvider* dpp, uint64_t start_epoch, uint64_t end_epoch,
    uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter,
    map<rgw_user_bucket, rgw_usage_log_entry>& usage) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

int DaosStore::trim_all_usage(const DoutPrefixProvider* dpp,
                              uint64_t start_epoch, uint64_t end_epoch) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

int DaosStore::get_config_key_val(string name, bufferlist* bl) {
  return DAOS_NOT_IMPLEMENTED_LOG(nullptr);
}

// Metadata key listing API: init/next/complete/marker are all stubs.
int DaosStore::meta_list_keys_init(const DoutPrefixProvider* dpp,
                                   const string& section, const string& marker,
                                   void** phandle) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

int DaosStore::meta_list_keys_next(const DoutPrefixProvider* dpp, void* handle,
                                   int max, list<string>& keys,
                                   bool* truncated) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

void DaosStore::meta_list_keys_complete(void* handle) { return; }

std::string DaosStore::meta_get_marker(void* handle) { return ""; }

int DaosStore::meta_remove(const DoutPrefixProvider* dpp, string& metadata_key,
                           optional_yield y) {
  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}

std::string DaosStore::get_cluster_id(const DoutPrefixProvider* dpp,
                                      optional_yield y) {
  DAOS_NOT_IMPLEMENTED_LOG(dpp);
  return "";
}
2467 | ||
2468 | } // namespace rgw::sal | |
2469 | ||
extern "C" {

// C-linkage factory used by RGW's SAL driver loader to instantiate the DAOS
// store. Ownership of the returned DaosStore passes to the caller.
void* newDaosStore(CephContext* cct) {
  return new rgw::sal::DaosStore(cct);
}
}