ceph.git / ceph / src / rgw / rgw_op.cc  (ceph 15.2.14)
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab ft=cpp
3
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <system_error>
7 #include <unistd.h>
8
9 #include <sstream>
10
11 #include <boost/algorithm/string/predicate.hpp>
12 #include <boost/bind.hpp>
13 #include <boost/optional.hpp>
14 #include <boost/utility/in_place_factory.hpp>
15 #include <boost/utility/string_view.hpp>
16
17 #include "include/scope_guard.h"
18 #include "common/Clock.h"
19 #include "common/armor.h"
20 #include "common/errno.h"
21 #include "common/mime.h"
22 #include "common/utf8.h"
23 #include "common/ceph_json.h"
24 #include "common/static_ptr.h"
25
26 #include "rgw_rados.h"
27 #include "rgw_zone.h"
28 #include "rgw_op.h"
29 #include "rgw_rest.h"
30 #include "rgw_acl.h"
31 #include "rgw_acl_s3.h"
32 #include "rgw_acl_swift.h"
33 #include "rgw_aio_throttle.h"
34 #include "rgw_user.h"
35 #include "rgw_bucket.h"
36 #include "rgw_log.h"
37 #include "rgw_multi.h"
38 #include "rgw_multi_del.h"
39 #include "rgw_cors.h"
40 #include "rgw_cors_s3.h"
41 #include "rgw_rest_conn.h"
42 #include "rgw_rest_s3.h"
43 #include "rgw_tar.h"
44 #include "rgw_client_io.h"
45 #include "rgw_compression.h"
46 #include "rgw_role.h"
47 #include "rgw_tag_s3.h"
48 #include "rgw_putobj_processor.h"
49 #include "rgw_crypt.h"
50 #include "rgw_perf_counters.h"
51 #include "rgw_notify.h"
52 #include "rgw_notify_event_type.h"
53
54 #include "services/svc_zone.h"
55 #include "services/svc_quota.h"
56 #include "services/svc_sys_obj.h"
57
58 #include "cls/lock/cls_lock_client.h"
59 #include "cls/rgw/cls_rgw_client.h"
60
61
62 #include "include/ceph_assert.h"
63
64 #include "compressor/Compressor.h"
65
66 #ifdef WITH_LTTNG
67 #define TRACEPOINT_DEFINE
68 #define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
69 #include "tracing/rgw_op.h"
70 #undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
71 #undef TRACEPOINT_DEFINE
72 #else
73 #define tracepoint(...)
74 #endif
75
76 #define dout_context g_ceph_context
77 #define dout_subsys ceph_subsys_rgw
78
79 using namespace librados;
80 using ceph::crypto::MD5;
81 using boost::optional;
82 using boost::none;
83
84 using rgw::ARN;
85 using rgw::IAM::Effect;
86 using rgw::IAM::Policy;
87
88 static string mp_ns = RGW_OBJ_NS_MULTIPART;
89 static string shadow_ns = RGW_OBJ_NS_SHADOW;
90
91 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
92
93 static MultipartMetaFilter mp_filter;
94
    95 // this probably should belong in rgw_iam_policy_keywords; I'll get to it
96 // at some point
97 static constexpr auto S3_EXISTING_OBJTAG = "s3:ExistingObjectTag";
98
99 int RGWGetObj::parse_range(void)
100 {
101 int r = -ERANGE;
102 string rs(range_str);
103 string ofs_str;
104 string end_str;
105
106 ignore_invalid_range = s->cct->_conf->rgw_ignore_get_invalid_range;
107 partial_content = false;
108
109 size_t pos = rs.find("bytes=");
110 if (pos == string::npos) {
111 pos = 0;
112 while (isspace(rs[pos]))
113 pos++;
114 int end = pos;
115 while (isalpha(rs[end]))
116 end++;
117 if (strncasecmp(rs.c_str(), "bytes", end - pos) != 0)
118 return 0;
119 while (isspace(rs[end]))
120 end++;
121 if (rs[end] != '=')
122 return 0;
123 rs = rs.substr(end + 1);
124 } else {
125 rs = rs.substr(pos + 6); /* size of("bytes=") */
126 }
127 pos = rs.find('-');
128 if (pos == string::npos)
129 goto done;
130
131 partial_content = true;
132
133 ofs_str = rs.substr(0, pos);
134 end_str = rs.substr(pos + 1);
135 if (end_str.length()) {
136 end = atoll(end_str.c_str());
137 if (end < 0)
138 goto done;
139 }
140
141 if (ofs_str.length()) {
142 ofs = atoll(ofs_str.c_str());
143 } else { // RFC2616 suffix-byte-range-spec
144 ofs = -end;
145 end = -1;
146 }
147
148 if (end >= 0 && end < ofs)
149 goto done;
150
151 range_parsed = true;
152 return 0;
153
154 done:
155 if (ignore_invalid_range) {
156 partial_content = false;
157 ofs = 0;
158 end = -1;
159 range_parsed = false; // allow retry
160 r = 0;
161 }
162
163 return r;
164 }
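
// A rough illustration of how the common Range header forms map onto the
// (ofs, end) members set above, assuming end starts out at -1 (as the
// RGWGetObj constructor initializes it):
//   "bytes=0-499"  -> ofs = 0,    end = 499   (the first 500 bytes)
//   "bytes=500-"   -> ofs = 500,  end = -1    (from offset 500 to the end)
//   "bytes=-500"   -> ofs = -500, end = -1    (RFC 2616 suffix form: the last 500 bytes)
// A malformed byte-range (e.g. no '-' or end < ofs) fails with -ERANGE unless
// rgw_ignore_get_invalid_range is set, in which case it degrades to a
// full-object read with partial_content = false.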
165
166 static int decode_policy(CephContext *cct,
167 bufferlist& bl,
168 RGWAccessControlPolicy *policy)
169 {
170 auto iter = bl.cbegin();
171 try {
172 policy->decode(iter);
173 } catch (buffer::error& err) {
174 ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
175 return -EIO;
176 }
177 if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
178 ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
179 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
180 s3policy->to_xml(*_dout);
181 *_dout << dendl;
182 }
183 return 0;
184 }
185
186
187 static int get_user_policy_from_attr(CephContext * const cct,
188 rgw::sal::RGWRadosStore * const store,
189 map<string, bufferlist>& attrs,
190 RGWAccessControlPolicy& policy /* out */)
191 {
192 auto aiter = attrs.find(RGW_ATTR_ACL);
193 if (aiter != attrs.end()) {
194 int ret = decode_policy(cct, aiter->second, &policy);
195 if (ret < 0) {
196 return ret;
197 }
198 } else {
199 return -ENOENT;
200 }
201
202 return 0;
203 }
204
205 /**
   206  * Get the AccessControlPolicy for a bucket from its stored attributes.
   207  * policy: must point to a valid RGWAccessControlPolicy, and will be filled upon return.
   208  * bucket_info: bucket metadata, used to generate a default ACL owned by the bucket owner if none is stored.
   209  * bucket_attrs: the bucket's attributes, searched for RGW_ATTR_ACL.
210 * Returns: 0 on success, -ERR# otherwise.
211 */
212 int rgw_op_get_bucket_policy_from_attr(CephContext *cct,
213 rgw::sal::RGWRadosStore *store,
214 RGWBucketInfo& bucket_info,
215 map<string, bufferlist>& bucket_attrs,
216 RGWAccessControlPolicy *policy)
217 {
218 map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
219
220 if (aiter != bucket_attrs.end()) {
221 int ret = decode_policy(cct, aiter->second, policy);
222 if (ret < 0)
223 return ret;
224 } else {
225 ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
226 rgw::sal::RGWRadosUser user(store);
227 /* object exists, but policy is broken */
228 int r = user.get_by_id(bucket_info.owner, null_yield);
229 if (r < 0)
230 return r;
231
232 policy->create_default(bucket_info.owner, user.get_display_name());
233 }
234 return 0;
235 }
236
237 static int get_obj_policy_from_attr(CephContext *cct,
238 rgw::sal::RGWRadosStore *store,
239 RGWObjectCtx& obj_ctx,
240 RGWBucketInfo& bucket_info,
241 map<string, bufferlist>& bucket_attrs,
242 RGWAccessControlPolicy *policy,
243 string *storage_class,
244 rgw_obj& obj,
245 optional_yield y)
246 {
247 bufferlist bl;
248 int ret = 0;
249
250 RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
251 RGWRados::Object::Read rop(&op_target);
252
253 ret = rop.get_attr(RGW_ATTR_ACL, bl, y);
254 if (ret >= 0) {
255 ret = decode_policy(cct, bl, policy);
256 if (ret < 0)
257 return ret;
258 } else if (ret == -ENODATA) {
259 /* object exists, but policy is broken */
260 ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
261 rgw::sal::RGWRadosUser user(store);
262 ret = user.get_by_id(bucket_info.owner, y);
263 if (ret < 0)
264 return ret;
265
266 policy->create_default(bucket_info.owner, user.get_display_name());
267 }
268
269 if (storage_class) {
270 bufferlist scbl;
271 int r = rop.get_attr(RGW_ATTR_STORAGE_CLASS, scbl, y);
272 if (r >= 0) {
273 *storage_class = scbl.to_str();
274 } else {
275 storage_class->clear();
276 }
277 }
278
279 return ret;
280 }
281
282
283 static boost::optional<Policy> get_iam_policy_from_attr(CephContext* cct,
284 rgw::sal::RGWRadosStore* store,
285 map<string, bufferlist>& attrs,
286 const string& tenant) {
287 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
288 if (i != attrs.end()) {
289 return Policy(cct, tenant, i->second);
290 } else {
291 return none;
292 }
293 }
294
295 static boost::optional<PublicAccessBlockConfiguration>
296 get_public_access_conf_from_attr(const map<string, bufferlist>& attrs)
297 {
298 if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS);
299 aiter != attrs.end()) {
300 bufferlist::const_iterator iter{&aiter->second};
301 PublicAccessBlockConfiguration access_conf;
302 try {
303 access_conf.decode(iter);
304 } catch (const buffer::error& e) {
305 return boost::none;
306 }
307 return access_conf;
308 }
309 return boost::none;
310 }
311
312 vector<Policy> get_iam_user_policy_from_attr(CephContext* cct,
313 rgw::sal::RGWRadosStore* store,
314 map<string, bufferlist>& attrs,
315 const string& tenant) {
316 vector<Policy> policies;
317 if (auto it = attrs.find(RGW_ATTR_USER_POLICY); it != attrs.end()) {
318 bufferlist out_bl = attrs[RGW_ATTR_USER_POLICY];
319 map<string, string> policy_map;
320 decode(policy_map, out_bl);
321 for (auto& it : policy_map) {
322 bufferlist bl = bufferlist::static_from_string(it.second);
323 Policy p(cct, tenant, bl);
324 policies.push_back(std::move(p));
325 }
326 }
327 return policies;
328 }
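
// RGW_ATTR_USER_POLICY holds an encoded map<string, string> of
// policy-name -> policy document; each document is parsed into an
// rgw::IAM::Policy scoped to the requester's tenant.  Parsing can throw,
// which is why callers such as rgw_build_bucket_policies() below wrap the
// call in try/catch.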
329
330 static int get_obj_attrs(rgw::sal::RGWRadosStore *store, struct req_state *s, const rgw_obj& obj, map<string, bufferlist>& attrs, rgw_obj *target_obj = nullptr)
331 {
332 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
333 RGWRados::Object::Read read_op(&op_target);
334
335 read_op.params.attrs = &attrs;
336 read_op.params.target_obj = target_obj;
337
338 return read_op.prepare(s->yield);
339 }
340
341 static int get_obj_head(rgw::sal::RGWRadosStore *store, struct req_state *s,
342 const rgw_obj& obj,
343 map<string, bufferlist> *attrs,
344 bufferlist *pbl)
345 {
346 store->getRados()->set_prefetch_data(s->obj_ctx, obj);
347
348 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
349 RGWRados::Object::Read read_op(&op_target);
350
351 read_op.params.attrs = attrs;
352
353 int ret = read_op.prepare(s->yield);
354 if (ret < 0) {
355 return ret;
356 }
357
358 if (!pbl) {
359 return 0;
360 }
361
362 ret = read_op.read(0, s->cct->_conf->rgw_max_chunk_size, *pbl, s->yield);
363
   364   return (ret < 0) ? ret : 0;  // propagate read errors to the caller
365 }
366
367 struct multipart_upload_info
368 {
369 rgw_placement_rule dest_placement;
370
371 void encode(bufferlist& bl) const {
372 ENCODE_START(1, 1, bl);
373 encode(dest_placement, bl);
374 ENCODE_FINISH(bl);
375 }
376
377 void decode(bufferlist::const_iterator& bl) {
378 DECODE_START(1, bl);
379 decode(dest_placement, bl);
380 DECODE_FINISH(bl);
381 }
382 };
383 WRITE_CLASS_ENCODER(multipart_upload_info)
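
// A minimal sketch of the round-trip the generated encoder supports (the
// placement value here is illustrative only):
//
//   multipart_upload_info info;
//   info.dest_placement = s->dest_placement;
//   bufferlist bl;
//   encode(info, bl);                 // versioned ENCODE_START/FINISH framing
//
//   multipart_upload_info out;
//   auto p = bl.cbegin();
//   decode(out, p);                   // throws buffer::error on malformed input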
384
385 static int get_multipart_info(rgw::sal::RGWRadosStore *store, struct req_state *s,
386 const rgw_obj& obj,
387 RGWAccessControlPolicy *policy,
388 map<string, bufferlist> *attrs,
389 multipart_upload_info *upload_info)
390 {
391 bufferlist header;
392
393 bufferlist headbl;
394 bufferlist *pheadbl = (upload_info ? &headbl : nullptr);
395
396 int op_ret = get_obj_head(store, s, obj, attrs, pheadbl);
397 if (op_ret < 0) {
398 if (op_ret == -ENOENT) {
399 return -ERR_NO_SUCH_UPLOAD;
400 }
401 return op_ret;
402 }
403
404 if (upload_info && headbl.length() > 0) {
405 auto hiter = headbl.cbegin();
406 try {
407 decode(*upload_info, hiter);
408 } catch (buffer::error& err) {
409 ldpp_dout(s, 0) << "ERROR: failed to decode multipart upload info" << dendl;
410 return -EIO;
411 }
412 }
413
414 if (policy && attrs) {
415 for (auto& iter : *attrs) {
416 string name = iter.first;
417 if (name.compare(RGW_ATTR_ACL) == 0) {
418 bufferlist& bl = iter.second;
419 auto bli = bl.cbegin();
420 try {
421 decode(*policy, bli);
422 } catch (buffer::error& err) {
423 ldpp_dout(s, 0) << "ERROR: could not decode policy" << dendl;
424 return -EIO;
425 }
426 break;
427 }
428 }
429 }
430
431 return 0;
432 }
433
434 static int get_multipart_info(rgw::sal::RGWRadosStore *store, struct req_state *s,
435 const string& meta_oid,
436 RGWAccessControlPolicy *policy,
437 map<string, bufferlist> *attrs,
438 multipart_upload_info *upload_info)
439 {
440 map<string, bufferlist>::iterator iter;
441 bufferlist header;
442
443 rgw_obj meta_obj;
444 meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
445 meta_obj.set_in_extra_data(true);
446
447 return get_multipart_info(store, s, meta_obj, policy, attrs, upload_info);
448 }
449
450 static int modify_obj_attr(rgw::sal::RGWRadosStore *store, struct req_state *s, const rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
451 {
452 map<string, bufferlist> attrs;
453 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
454 RGWRados::Object::Read read_op(&op_target);
455
456 read_op.params.attrs = &attrs;
457
458 int r = read_op.prepare(s->yield);
459 if (r < 0) {
460 return r;
461 }
462 store->getRados()->set_atomic(s->obj_ctx, read_op.state.obj);
463 attrs[attr_name] = attr_val;
464 return store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL, s->yield);
465 }
466
467 static int read_bucket_policy(rgw::sal::RGWRadosStore *store,
468 struct req_state *s,
469 RGWBucketInfo& bucket_info,
470 map<string, bufferlist>& bucket_attrs,
471 RGWAccessControlPolicy *policy,
472 rgw_bucket& bucket)
473 {
474 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
475 ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
476 << " is suspended" << dendl;
477 return -ERR_USER_SUSPENDED;
478 }
479
480 if (bucket.name.empty()) {
481 return 0;
482 }
483
484 int ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
485 if (ret == -ENOENT) {
486 ret = -ERR_NO_SUCH_BUCKET;
487 }
488
489 return ret;
490 }
491
492 static int read_obj_policy(rgw::sal::RGWRadosStore *store,
493 struct req_state *s,
494 RGWBucketInfo& bucket_info,
495 map<string, bufferlist>& bucket_attrs,
496 RGWAccessControlPolicy* acl,
497 string *storage_class,
498 boost::optional<Policy>& policy,
499 rgw_bucket& bucket,
500 rgw_obj_key& object)
501 {
502 string upload_id;
503 upload_id = s->info.args.get("uploadId");
504 rgw_obj obj;
505
506 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
507 ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
508 << " is suspended" << dendl;
509 return -ERR_USER_SUSPENDED;
510 }
511
512 if (!upload_id.empty()) {
513 /* multipart upload */
514 RGWMPObj mp(object.name, upload_id);
515 string oid = mp.get_meta();
516 obj.init_ns(bucket, oid, mp_ns);
517 obj.set_in_extra_data(true);
518 } else {
519 obj = rgw_obj(bucket, object);
520 }
521 policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);
522
523 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
524 int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
525 bucket_info, bucket_attrs, acl, storage_class, obj, s->yield);
526 if (ret == -ENOENT) {
   527     /* object does not exist; check the bucket's ACL to make sure
528 that we send a proper error code */
529 RGWAccessControlPolicy bucket_policy(s->cct);
530 ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
531 if (ret < 0) {
532 return ret;
533 }
534 const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
535 if (bucket_owner.compare(s->user->get_id()) != 0 &&
536 ! s->auth.identity->is_admin_of(bucket_owner)) {
537 auto r = eval_user_policies(s->iam_user_policies, s->env,
538 *s->auth.identity, rgw::IAM::s3ListBucket,
539 ARN(bucket));
540 if (r == Effect::Allow)
541 return -ENOENT;
542 if (r == Effect::Deny)
543 return -EACCES;
544 if (policy) {
545 r = policy->eval(s->env, *s->auth.identity, rgw::IAM::s3ListBucket, ARN(bucket));
546 if (r == Effect::Allow)
547 return -ENOENT;
548 if (r == Effect::Deny)
549 return -EACCES;
550 }
551 if (! bucket_policy.verify_permission(s, *s->auth.identity, s->perm_mask, RGW_PERM_READ))
552 ret = -EACCES;
553 else
554 ret = -ENOENT;
555 } else {
556 ret = -ENOENT;
557 }
558 }
559
560 return ret;
561 }
562
563 /**
   564  * Get the AccessControlPolicy for the user and bucket of a request off of disk.
565 * s: The req_state to draw information from.
   566  * Also loads the bucket's IAM policy, the user's IAM policies and the public access block configuration.
567 * Returns: 0 on success, -ERR# otherwise.
568 */
569 int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s)
570 {
571 int ret = 0;
572 rgw_obj_key obj;
573 auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
574
575 string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
576 if (!bi.empty()) {
577 string bucket_name;
578 ret = rgw_bucket_parse_bucket_instance(bi, &bucket_name, &s->bucket_instance_id, &s->bucket_instance_shard_id);
579 if (ret < 0) {
580 return ret;
581 }
582 }
583
584 if(s->dialect.compare("s3") == 0) {
585 s->bucket_acl = std::make_unique<RGWAccessControlPolicy_S3>(s->cct);
586 } else if(s->dialect.compare("swift") == 0) {
587 /* We aren't allocating the account policy for those operations using
588 * the Swift's infrastructure that don't really need req_state::user.
589 * Typical example here is the implementation of /info. */
590 if (!s->user->get_id().empty()) {
591 s->user_acl = std::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
592 }
593 s->bucket_acl = std::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
594 } else {
595 s->bucket_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
596 }
597
598 /* check if copy source is within the current domain */
599 if (!s->src_bucket_name.empty()) {
600 RGWBucketInfo source_info;
601
602 if (s->bucket_instance_id.empty()) {
603 ret = store->getRados()->get_bucket_info(store->svc(), s->src_tenant_name, s->src_bucket_name, source_info, NULL, s->yield);
604 } else {
605 ret = store->getRados()->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL, s->yield);
606 }
607 if (ret == 0) {
608 string& zonegroup = source_info.zonegroup;
609 s->local_source = store->svc()->zone->get_zonegroup().equals(zonegroup);
610 }
611 }
612
613 struct {
614 rgw_user uid;
615 std::string display_name;
616 } acct_acl_user = {
617 s->user->get_id(),
618 s->user->get_display_name(),
619 };
620
621 if (!s->bucket_name.empty()) {
622 s->bucket_exists = true;
623
624 auto b = rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id));
625
626 RGWObjVersionTracker ep_ot;
627 ret = store->ctl()->bucket->read_bucket_info(b, &s->bucket_info,
628 s->yield,
629 RGWBucketCtl::BucketInstance::GetParams()
630 .set_mtime(&s->bucket_mtime)
631 .set_attrs(&s->bucket_attrs),
632 &ep_ot);
633 if (ret < 0) {
634 if (ret != -ENOENT) {
635 string bucket_log;
636 bucket_log = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name);
637 ldpp_dout(s, 0) << "NOTICE: couldn't get bucket from bucket_name (name="
638 << bucket_log << ")" << dendl;
639 return ret;
640 }
641 s->bucket_exists = false;
642 }
643 s->bucket_ep_objv = ep_ot.read_version;
644 s->bucket = s->bucket_info.bucket;
645
646 if (s->bucket_exists) {
647 ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs,
648 s->bucket_acl.get(), s->bucket);
649 acct_acl_user = {
650 s->bucket_info.owner,
651 s->bucket_acl->get_owner().get_display_name(),
652 };
653 } else {
654 return -ERR_NO_SUCH_BUCKET;
655 }
656
657 s->bucket_owner = s->bucket_acl->get_owner();
658
659 RGWZoneGroup zonegroup;
660 int r = store->svc()->zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
661 if (!r) {
662 if (!zonegroup.endpoints.empty()) {
663 s->zonegroup_endpoint = zonegroup.endpoints.front();
664 } else {
665 // use zonegroup's master zone endpoints
666 auto z = zonegroup.zones.find(zonegroup.master_zone);
667 if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
668 s->zonegroup_endpoint = z->second.endpoints.front();
669 }
670 }
671 s->zonegroup_name = zonegroup.get_name();
672 }
673 if (r < 0 && ret == 0) {
674 ret = r;
675 }
676
677 if (s->bucket_exists && !store->svc()->zone->get_zonegroup().equals(s->bucket_info.zonegroup)) {
678 ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup ("
679 << s->bucket_info.zonegroup << " != "
680 << store->svc()->zone->get_zonegroup().get_id() << ")" << dendl;
   681     /* we now need to make sure that the operation actually requires a copy source, that is,
682 * it's a copy operation
683 */
684 if (store->svc()->zone->get_zonegroup().is_master_zonegroup() && s->system_request) {
685 /*If this is the master, don't redirect*/
686 } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
687 /* If op is get bucket location, don't redirect */
688 } else if (!s->local_source ||
689 (s->op != OP_PUT && s->op != OP_COPY) ||
690 s->object.empty()) {
691 return -ERR_PERMANENT_REDIRECT;
692 }
693 }
694
695 /* init dest placement -- only if bucket exists, otherwise request is either not relevant, or
696 * it's a create_bucket request, in which case the op will deal with the placement later */
697 if (s->bucket_exists) {
698 s->dest_placement.storage_class = s->info.storage_class;
699 s->dest_placement.inherit_from(s->bucket_info.placement_rule);
700
701 if (!store->svc()->zone->get_zone_params().valid_placement(s->dest_placement)) {
702 ldpp_dout(s, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl;
703 return -EINVAL;
704 }
705 }
706
707 if(s->bucket_exists) {
708 s->bucket_access_conf = get_public_access_conf_from_attr(s->bucket_attrs);
709 }
710 }
711
712 /* handle user ACL only for those APIs which support it */
713 if (s->user_acl) {
714 map<string, bufferlist> uattrs;
715 ret = store->ctl()->user->get_attrs_by_uid(acct_acl_user.uid, &uattrs, s->yield);
716 if (!ret) {
717 ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
718 }
719 if (-ENOENT == ret) {
   720       /* In already existing clusters users won't have an ACL. In that case,
   721        * assuming that only the account owner has the rights seems
   722        * reasonable, and it allows us to keep a single verification logic.
   723        * NOTE: there is a small compatibility kludge for the global, empty tenant:
   724        *  1. if we try to reach an existing bucket, its owner is considered
   725        *     the account owner.
   726        *  2. otherwise the account owner is the identity stored in s->user->user_id. */
727 s->user_acl->create_default(acct_acl_user.uid,
728 acct_acl_user.display_name);
729 ret = 0;
730 } else if (ret < 0) {
731 ldpp_dout(s, 0) << "NOTICE: couldn't get user attrs for handling ACL "
732 "(user_id=" << s->user->get_id() << ", ret=" << ret << ")" << dendl;
733 return ret;
734 }
735 }
   736 // We don't need user policies in the case of an STS token returned by AssumeRole,
737 // hence the check for user type
738 if (! s->user->get_id().empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
739 try {
740 map<string, bufferlist> uattrs;
741 if (ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &uattrs, s->yield); ! ret) {
742 if (s->iam_user_policies.empty()) {
743 s->iam_user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->get_tenant());
744 } else {
   745         // This scenario can happen when an STS token has a policy; in that case we need to append the
   746         // other user policies to the existing ones (e.g. a token returned by GetSessionToken).
747 auto user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->get_tenant());
748 s->iam_user_policies.insert(s->iam_user_policies.end(), user_policies.begin(), user_policies.end());
749 }
750 } else {
751 if (ret == -ENOENT)
752 ret = 0;
753 else ret = -EACCES;
754 }
755 } catch (const std::exception& e) {
756 lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl;
757 ret = -EACCES;
758 }
759 }
760
761 try {
762 s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
763 s->bucket_tenant);
764 } catch (const std::exception& e) {
765 // Really this is a can't happen condition. We parse the policy
766 // when it's given to us, so perhaps we should abort or otherwise
767 // raise bloody murder.
768 ldpp_dout(s, 0) << "Error reading IAM Policy: " << e.what() << dendl;
769 ret = -EACCES;
770 }
771
772 bool success = store->svc()->zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
773 if (success) {
774 ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
775 }
776
777 return ret;
778 }
779
780 /**
   781  * Get the AccessControlPolicy for the object of a request off of disk.
   782  * s: The req_state to draw information from.
   783  * prefetch_data: If true, also set up prefetching of the object's data.
784 * Returns: 0 on success, -ERR# otherwise.
785 */
786 int rgw_build_object_policies(rgw::sal::RGWRadosStore *store, struct req_state *s,
787 bool prefetch_data)
788 {
789 int ret = 0;
790
791 if (!s->object.empty()) {
792 if (!s->bucket_exists) {
793 return -ERR_NO_SUCH_BUCKET;
794 }
795 s->object_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
796 rgw_obj obj(s->bucket, s->object);
797
798 store->getRados()->set_atomic(s->obj_ctx, obj);
799 if (prefetch_data) {
800 store->getRados()->set_prefetch_data(s->obj_ctx, obj);
801 }
802 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
803 s->object_acl.get(), nullptr, s->iam_policy, s->bucket,
804 s->object);
805 }
806
807 return ret;
808 }
809
810 void rgw_add_to_iam_environment(rgw::IAM::Environment& e, std::string_view key, std::string_view val){
   811   // This variant only adds entries with a non-empty key to the IAM env; values may be empty
   812   // in certain cases, like tagging
813 if (!key.empty())
814 e.emplace(key,val);
815 }
816
817 static int rgw_iam_add_tags_from_bl(struct req_state* s, bufferlist& bl){
818 RGWObjTags& tagset = s->tagset;
819 try {
820 auto bliter = bl.cbegin();
821 tagset.decode(bliter);
822 } catch (buffer::error& err) {
823 ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
824 return -EIO;
825 }
826
827 for (const auto& tag: tagset.get_tags()){
828 rgw_add_to_iam_environment(s->env, "s3:ExistingObjectTag/" + tag.first, tag.second);
829 }
830 return 0;
831 }
832
833 static int rgw_iam_add_existing_objtags(rgw::sal::RGWRadosStore* store, struct req_state* s, rgw_obj& obj, std::uint64_t action){
834 map <string, bufferlist> attrs;
835 store->getRados()->set_atomic(s->obj_ctx, obj);
836 int op_ret = get_obj_attrs(store, s, obj, attrs);
837 if (op_ret < 0)
838 return op_ret;
839 auto tags = attrs.find(RGW_ATTR_TAGS);
840 if (tags != attrs.end()){
841 return rgw_iam_add_tags_from_bl(s, tags->second);
842 }
843 return 0;
844 }
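
// Net effect: for an object carrying the tags {"project": "alpha", "env": "dev"}
// (illustrative values), the IAM environment gains
//   s3:ExistingObjectTag/project = "alpha"
//   s3:ExistingObjectTag/env     = "dev"
// which is what policy conditions keyed on S3_EXISTING_OBJTAG are matched against.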
845
846 static void rgw_add_grant_to_iam_environment(rgw::IAM::Environment& e, struct req_state *s){
847
848 using header_pair_t = std::pair <const char*, const char*>;
849 static const std::initializer_list <header_pair_t> acl_header_conditionals {
850 {"HTTP_X_AMZ_GRANT_READ", "s3:x-amz-grant-read"},
851 {"HTTP_X_AMZ_GRANT_WRITE", "s3:x-amz-grant-write"},
852 {"HTTP_X_AMZ_GRANT_READ_ACP", "s3:x-amz-grant-read-acp"},
853 {"HTTP_X_AMZ_GRANT_WRITE_ACP", "s3:x-amz-grant-write-acp"},
854 {"HTTP_X_AMZ_GRANT_FULL_CONTROL", "s3:x-amz-grant-full-control"}
855 };
856
857 if (s->has_acl_header){
858 for (const auto& c: acl_header_conditionals){
859 auto hdr = s->info.env->get(c.first);
860 if(hdr) {
861 e[c.second] = hdr;
862 }
863 }
864 }
865 }
866
867 void rgw_build_iam_environment(rgw::sal::RGWRadosStore* store,
868 struct req_state* s)
869 {
870 const auto& m = s->info.env->get_map();
871 auto t = ceph::real_clock::now();
872 s->env.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t)));
873 s->env.emplace("aws:EpochTime", ceph::to_iso_8601(t));
874 // TODO: This is fine for now, but once we have STS we'll need to
875 // look and see. Also this won't work with the IdentityApplier
876 // model, since we need to know the actual credential.
877 s->env.emplace("aws:PrincipalType", "User");
878
879 auto i = m.find("HTTP_REFERER");
880 if (i != m.end()) {
881 s->env.emplace("aws:Referer", i->second);
882 }
883
884 if (rgw_transport_is_secure(s->cct, *s->info.env)) {
885 s->env.emplace("aws:SecureTransport", "true");
886 }
887
888 const auto remote_addr_param = s->cct->_conf->rgw_remote_addr_param;
889 if (remote_addr_param.length()) {
890 i = m.find(remote_addr_param);
891 } else {
892 i = m.find("REMOTE_ADDR");
893 }
894 if (i != m.end()) {
895 const string* ip = &(i->second);
896 string temp;
897 if (remote_addr_param == "HTTP_X_FORWARDED_FOR") {
898 const auto comma = ip->find(',');
899 if (comma != string::npos) {
900 temp.assign(*ip, 0, comma);
901 ip = &temp;
902 }
903 }
904 s->env.emplace("aws:SourceIp", *ip);
905 }
906
   907   i = m.find("HTTP_USER_AGENT");
   908   if (i != m.end()) {
   909     s->env.emplace("aws:UserAgent", i->second);
   910   }
911
912 if (s->user) {
   913     // What to do about aws:userid? One can have multiple access
914 // keys so that isn't really suitable. Do we have a durable
915 // identifier that can persist through name changes?
916 s->env.emplace("aws:username", s->user->get_id().id);
917 }
918
919 i = m.find("HTTP_X_AMZ_SECURITY_TOKEN");
920 if (i != m.end()) {
921 s->env.emplace("sts:authentication", "true");
922 } else {
923 s->env.emplace("sts:authentication", "false");
924 }
925 }
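
// For a typical authenticated request the environment built above contains,
// roughly (example values, not taken from a real request):
//   aws:CurrentTime / aws:EpochTime -> the request time
//   aws:PrincipalType               -> "User"
//   aws:SecureTransport             -> "true" when the transport is TLS
//   aws:SourceIp                    -> client address, honoring rgw_remote_addr_param
//   aws:Referer, aws:UserAgent      -> copied from the matching HTTP headers, if present
//   aws:username                    -> s->user->get_id().id
//   sts:authentication              -> "true" iff an x-amz-security-token header was sent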
926
927 void rgw_bucket_object_pre_exec(struct req_state *s)
928 {
929 if (s->expect_cont)
930 dump_continue(s);
931
932 dump_bucket_from_state(s);
933 }
934
935 // So! Now and then when we try to update bucket information, the
936 // bucket has changed during the course of the operation. (Or we have
937 // a cache consistency problem that Watch/Notify isn't ruling out
938 // completely.)
939 //
940 // When this happens, we need to update the bucket info and try
941 // again. We have, however, to try the right *part* again. We can't
942 // simply re-send, since that will obliterate the previous update.
943 //
944 // Thus, callers of this function should include everything that
945 // merges information to be changed into the bucket information as
946 // well as the call to set it.
947 //
948 // The called function must return an integer, negative on error. In
949 // general, they should just return op_ret.
950 namespace {
951 template<typename F>
952 int retry_raced_bucket_write(RGWRados* g, req_state* s, const F& f) {
953 auto r = f();
954 for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) {
955 r = g->try_refresh_bucket_info(s->bucket_info, nullptr,
956 &s->bucket_attrs);
957 if (r >= 0) {
958 r = f();
959 }
960 }
961 return r;
962 }
963 }
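
// Typical call pattern (this mirrors the bucket-tagging ops further down):
// the lambda merges the caller's change into the current bucket state and
// writes it, while the wrapper refreshes s->bucket_info / s->bucket_attrs and
// retries on -ECANCELED races.  A minimal sketch:
//
//   op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
//     map<string, bufferlist> attrs = s->bucket_attrs;
//     attrs[RGW_ATTR_TAGS] = tags_bl;   // the change being applied
//     return store->ctl()->bucket->set_bucket_instance_attrs(
//         s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
//   });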
964
965 int RGWGetObj::verify_permission()
966 {
967 obj = rgw_obj(s->bucket, s->object);
968 store->getRados()->set_atomic(s->obj_ctx, obj);
969 if (get_data) {
970 store->getRados()->set_prefetch_data(s->obj_ctx, obj);
971 }
972
973 if (torrent.get_flag()) {
974 if (obj.key.instance.empty()) {
975 action = rgw::IAM::s3GetObjectTorrent;
976 } else {
977 action = rgw::IAM::s3GetObjectVersionTorrent;
978 }
979 } else {
980 if (obj.key.instance.empty()) {
981 action = rgw::IAM::s3GetObject;
982 } else {
983 action = rgw::IAM::s3GetObjectVersion;
984 }
985 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG))
986 rgw_iam_add_existing_objtags(store, s, obj, action);
987 if (! s->iam_user_policies.empty()) {
988 for (auto& user_policy : s->iam_user_policies) {
989 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG))
990 rgw_iam_add_existing_objtags(store, s, obj, action);
991 }
992 }
993 }
994
995 if (!verify_object_permission(this, s, action)) {
996 return -EACCES;
997 }
998
999 if (s->bucket_info.obj_lock_enabled()) {
1000 get_retention = verify_object_permission(this, s, rgw::IAM::s3GetObjectRetention);
1001 get_legal_hold = verify_object_permission(this, s, rgw::IAM::s3GetObjectLegalHold);
1002 }
1003
1004 return 0;
1005 }
1006
  1007 // cache the object's tags in the request
  1008 // call inside try/catch, as "decode()" may throw
1009 void populate_tags_in_request(req_state* s, const std::map<std::string, bufferlist>& attrs) {
1010 const auto attr_iter = attrs.find(RGW_ATTR_TAGS);
1011 if (attr_iter != attrs.end()) {
1012 auto bliter = attr_iter->second.cbegin();
1013 decode(s->tagset, bliter);
1014 }
1015 }
1016
  1017 // cache the object's metadata in the request
1018 void populate_metadata_in_request(req_state* s, std::map<std::string, bufferlist>& attrs) {
1019 for (auto& attr : attrs) {
1020 if (boost::algorithm::starts_with(attr.first, RGW_ATTR_META_PREFIX)) {
1021 std::string_view key(attr.first);
1022 key.remove_prefix(sizeof(RGW_ATTR_PREFIX)-1);
1023 s->info.x_meta_map.emplace(key, attr.second.c_str());
1024 }
1025 }
1026 }
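
// Example (assuming the usual "user.rgw." xattr prefix): an attribute named
// "user.rgw.x-amz-meta-color" lands in s->info.x_meta_map under the key
// "x-amz-meta-color"; only the rados prefix is stripped, the x-amz-meta-
// part of the name is kept.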
1027
1028 int RGWOp::verify_op_mask()
1029 {
1030 uint32_t required_mask = op_mask();
1031
1032 ldpp_dout(this, 20) << "required_mask= " << required_mask
1033 << " user.op_mask=" << s->user->get_info().op_mask << dendl;
1034
1035 if ((s->user->get_info().op_mask & required_mask) != required_mask) {
1036 return -EPERM;
1037 }
1038
1039 if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->svc()->zone->zone_is_writeable()) {
1040 ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
1041 "non-system user, permission denied" << dendl;
1042 return -EPERM;
1043 }
1044
1045 return 0;
1046 }
1047
1048 int RGWGetObjTags::verify_permission()
1049 {
1050 auto iam_action = s->object.instance.empty()?
1051 rgw::IAM::s3GetObjectTagging:
1052 rgw::IAM::s3GetObjectVersionTagging;
  1053   // TODO: since we are parsing the bl now anyway, we could probably change
  1054   // the send_response function to accept RGWObjTags instead of a bl
1055 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1056 rgw_obj obj = rgw_obj(s->bucket, s->object);
1057 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1058 }
1059 if (! s->iam_user_policies.empty()) {
1060 for (auto& user_policy : s->iam_user_policies) {
1061 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1062 rgw_obj obj = rgw_obj(s->bucket, s->object);
1063 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1064 }
1065 }
1066 }
1067 if (!verify_object_permission(this, s,iam_action))
1068 return -EACCES;
1069
1070 return 0;
1071 }
1072
1073 void RGWGetObjTags::pre_exec()
1074 {
1075 rgw_bucket_object_pre_exec(s);
1076 }
1077
1078 void RGWGetObjTags::execute()
1079 {
1080 rgw_obj obj;
1081 map<string,bufferlist> attrs;
1082
1083 obj = rgw_obj(s->bucket, s->object);
1084
1085 store->getRados()->set_atomic(s->obj_ctx, obj);
1086
1087 op_ret = get_obj_attrs(store, s, obj, attrs);
1088 if (op_ret < 0) {
1089 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj
1090 << " ret=" << op_ret << dendl;
1091 return;
1092 }
1093
1094 auto tags = attrs.find(RGW_ATTR_TAGS);
1095 if(tags != attrs.end()){
1096 has_tags = true;
1097 tags_bl.append(tags->second);
1098 }
1099 send_response_data(tags_bl);
1100 }
1101
1102 int RGWPutObjTags::verify_permission()
1103 {
1104 auto iam_action = s->object.instance.empty() ?
1105 rgw::IAM::s3PutObjectTagging:
1106 rgw::IAM::s3PutObjectVersionTagging;
1107
1108 if(s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1109 auto obj = rgw_obj(s->bucket, s->object);
1110 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1111 }
1112 if (! s->iam_user_policies.empty()) {
1113 for (auto& user_policy : s->iam_user_policies) {
1114 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1115 rgw_obj obj = rgw_obj(s->bucket, s->object);
1116 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1117 }
1118 }
1119 }
1120 if (!verify_object_permission(this, s,iam_action))
1121 return -EACCES;
1122 return 0;
1123 }
1124
1125 void RGWPutObjTags::execute()
1126 {
1127 op_ret = get_params();
1128 if (op_ret < 0)
1129 return;
1130
1131 if (s->object.empty()){
1132 op_ret= -EINVAL; // we only support tagging on existing objects
1133 return;
1134 }
1135
1136 rgw_obj obj;
1137 obj = rgw_obj(s->bucket, s->object);
1138 store->getRados()->set_atomic(s->obj_ctx, obj);
1139 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
1140 if (op_ret == -ECANCELED){
1141 op_ret = -ERR_TAG_CONFLICT;
1142 }
1143 }
1144
1145 void RGWDeleteObjTags::pre_exec()
1146 {
1147 rgw_bucket_object_pre_exec(s);
1148 }
1149
1150
1151 int RGWDeleteObjTags::verify_permission()
1152 {
1153 if (!s->object.empty()) {
1154 auto iam_action = s->object.instance.empty() ?
1155 rgw::IAM::s3DeleteObjectTagging:
1156 rgw::IAM::s3DeleteObjectVersionTagging;
1157
1158 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1159 auto obj = rgw_obj(s->bucket, s->object);
1160 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1161 }
1162 if (! s->iam_user_policies.empty()) {
1163 for (auto& user_policy : s->iam_user_policies) {
1164 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1165 auto obj = rgw_obj(s->bucket, s->object);
1166 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1167 }
1168 }
1169 }
1170 if (!verify_object_permission(this, s, iam_action))
1171 return -EACCES;
1172 }
1173 return 0;
1174 }
1175
1176 void RGWDeleteObjTags::execute()
1177 {
1178 if (s->object.empty())
1179 return;
1180
1181 rgw_obj obj;
1182 obj = rgw_obj(s->bucket, s->object);
1183 store->getRados()->set_atomic(s->obj_ctx, obj);
1184 map <string, bufferlist> attrs;
1185 map <string, bufferlist> rmattr;
1186 bufferlist bl;
1187 rmattr[RGW_ATTR_TAGS] = bl;
1188 op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr, s->yield);
1189 }
1190
1191 int RGWGetBucketTags::verify_permission()
1192 {
1193
1194 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketTagging)) {
1195 return -EACCES;
1196 }
1197
1198 return 0;
1199 }
1200
1201 void RGWGetBucketTags::pre_exec()
1202 {
1203 rgw_bucket_object_pre_exec(s);
1204 }
1205
1206 void RGWGetBucketTags::execute()
1207 {
1208 auto iter = s->bucket_attrs.find(RGW_ATTR_TAGS);
1209 if (iter != s->bucket_attrs.end()) {
1210 has_tags = true;
1211 tags_bl.append(iter->second);
1212 } else {
1213 op_ret = -ERR_NO_SUCH_TAG_SET;
1214 }
1215 send_response_data(tags_bl);
1216 }
1217
1218 int RGWPutBucketTags::verify_permission() {
1219 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketTagging);
1220 }
1221
1222 void RGWPutBucketTags::execute() {
1223
1224 op_ret = get_params();
1225 if (op_ret < 0)
1226 return;
1227
1228 if (!store->svc()->zone->is_meta_master()) {
1229 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
1230 if (op_ret < 0) {
1231 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
1232 }
1233 }
1234
1235 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
1236 map<string, bufferlist> attrs = s->bucket_attrs;
1237 attrs[RGW_ATTR_TAGS] = tags_bl;
1238 return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
1239 });
1240
1241 }
1242
1243 void RGWDeleteBucketTags::pre_exec()
1244 {
1245 rgw_bucket_object_pre_exec(s);
1246 }
1247
1248 int RGWDeleteBucketTags::verify_permission()
1249 {
1250 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketTagging);
1251 }
1252
1253 void RGWDeleteBucketTags::execute()
1254 {
1255 if (!store->svc()->zone->is_meta_master()) {
1256 bufferlist in_data;
1257 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
1258 if (op_ret < 0) {
1259 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
1260 return;
1261 }
1262 }
1263
1264 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
1265 map<string, bufferlist> attrs = s->bucket_attrs;
1266 attrs.erase(RGW_ATTR_TAGS);
1267 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
1268 if (op_ret < 0) {
1269 ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket="
1270 << s->bucket.name
1271 << " returned err= " << op_ret << dendl;
1272 }
1273 return op_ret;
1274 });
1275 }
1276
1277 int RGWGetBucketReplication::verify_permission()
1278 {
1279 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetReplicationConfiguration)) {
1280 return -EACCES;
1281 }
1282
1283 return 0;
1284 }
1285
1286 void RGWGetBucketReplication::pre_exec()
1287 {
1288 rgw_bucket_object_pre_exec(s);
1289 }
1290
1291 void RGWGetBucketReplication::execute()
1292 {
1293 send_response_data();
1294 }
1295
1296 int RGWPutBucketReplication::verify_permission() {
1297 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutReplicationConfiguration);
1298 }
1299
1300 void RGWPutBucketReplication::execute() {
1301
1302 op_ret = get_params();
1303 if (op_ret < 0)
1304 return;
1305
1306 if (!store->svc()->zone->is_meta_master()) {
1307 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
1308 if (op_ret < 0) {
1309 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
1310 return;
1311 }
1312 }
1313
1314 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
1315 auto sync_policy = (s->bucket_info.sync_policy ? *s->bucket_info.sync_policy : rgw_sync_policy_info());
1316
1317 for (auto& group : sync_policy_groups) {
1318 sync_policy.groups[group.id] = group;
1319 }
1320
1321 s->bucket_info.set_sync_policy(std::move(sync_policy));
1322
1323 int ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
1324 &s->bucket_attrs);
1325 if (ret < 0) {
1326 ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket_info.bucket.get_key() << ") returned ret=" << ret << dendl;
1327 return ret;
1328 }
1329
1330 return 0;
1331 });
1332 }
1333
1334 void RGWDeleteBucketReplication::pre_exec()
1335 {
1336 rgw_bucket_object_pre_exec(s);
1337 }
1338
1339 int RGWDeleteBucketReplication::verify_permission()
1340 {
1341 return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteReplicationConfiguration);
1342 }
1343
1344 void RGWDeleteBucketReplication::execute()
1345 {
1346 if (!store->svc()->zone->is_meta_master()) {
1347 bufferlist in_data;
1348 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
1349 if (op_ret < 0) {
1350 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
1351 return;
1352 }
1353 }
1354
1355 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
1356 if (!s->bucket_info.sync_policy) {
1357 return 0;
1358 }
1359
1360 rgw_sync_policy_info sync_policy = *s->bucket_info.sync_policy;
1361
1362 update_sync_policy(&sync_policy);
1363
1364 s->bucket_info.set_sync_policy(std::move(sync_policy));
1365
1366 int ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
1367 &s->bucket_attrs);
1368 if (ret < 0) {
1369 ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket_info.bucket.get_key() << ") returned ret=" << ret << dendl;
1370 return ret;
1371 }
1372
1373 return 0;
1374 });
1375 }
1376
1377 int RGWOp::do_aws4_auth_completion()
1378 {
1379 ldpp_dout(this, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
1380 if (s->auth.completer) {
1381 if (!s->auth.completer->complete()) {
1382 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
1383 } else {
1384 ldpp_dout(this, 10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
1385 }
1386
1387 /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
1388 * call passes, so we disable second one. This is old behaviour, sorry!
1389 * Plan for tomorrow: seek and destroy. */
1390 s->auth.completer = nullptr;
1391 }
1392
1393 return 0;
1394 }
1395
1396 int RGWOp::init_quota()
1397 {
1398 /* no quota enforcement for system requests */
1399 if (s->system_request)
1400 return 0;
1401
1402 /* init quota related stuff */
1403 if (!(s->user->get_info().op_mask & RGW_OP_TYPE_MODIFY)) {
1404 return 0;
1405 }
1406
1407 /* only interested in object related ops */
1408 if (s->object.empty()) {
1409 return 0;
1410 }
1411
1412 rgw::sal::RGWRadosUser owner_user(store);
1413 rgw::sal::RGWUser *user;
1414
1415 if (s->user->get_id() == s->bucket_owner.get_id()) {
1416 user = s->user;
1417 } else {
1418 int r = owner_user.get_by_id(s->bucket_info.owner, s->yield);
1419 if (r < 0)
1420 return r;
1421 user = &owner_user;
1422 }
1423
1424 if (s->bucket_info.quota.enabled) {
1425 bucket_quota = s->bucket_info.quota;
1426 } else if (user->get_info().bucket_quota.enabled) {
1427 bucket_quota = user->get_info().bucket_quota;
1428 } else {
1429 bucket_quota = store->svc()->quota->get_bucket_quota();
1430 }
1431
1432 if (user->get_info().user_quota.enabled) {
1433 user_quota = user->get_info().user_quota;
1434 } else {
1435 user_quota = store->svc()->quota->get_user_quota();
1436 }
1437
1438 return 0;
1439 }
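
// Quota precedence as implemented above: an explicitly enabled per-bucket
// quota wins, then the bucket owner's default bucket quota, then the zone's
// default bucket quota; the user quota likewise comes from the owner's
// settings or, failing that, the zone default.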
1440
1441 static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
1442 uint8_t flags = 0;
1443
1444 if (!req_meth) {
1445 dout(5) << "req_meth is null" << dendl;
1446 return false;
1447 }
1448
1449 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
1450 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
1451 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
1452 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
1453 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
1454
1455 if (rule->get_allowed_methods() & flags) {
1456 dout(10) << "Method " << req_meth << " is supported" << dendl;
1457 } else {
1458 dout(5) << "Method " << req_meth << " is not supported" << dendl;
1459 return false;
1460 }
1461
1462 return true;
1463 }
1464
1465 static bool validate_cors_rule_header(RGWCORSRule *rule, const char *req_hdrs) {
1466 if (req_hdrs) {
1467 vector<string> hdrs;
1468 get_str_vec(req_hdrs, hdrs);
1469 for (const auto& hdr : hdrs) {
1470 if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) {
1471 dout(5) << "Header " << hdr << " is not registered in this rule" << dendl;
1472 return false;
1473 }
1474 }
1475 }
1476 return true;
1477 }
1478
1479 int RGWOp::read_bucket_cors()
1480 {
1481 bufferlist bl;
1482
1483 map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
1484 if (aiter == s->bucket_attrs.end()) {
1485 ldpp_dout(this, 20) << "no CORS configuration attr found" << dendl;
1486 cors_exist = false;
1487 return 0; /* no CORS configuration found */
1488 }
1489
1490 cors_exist = true;
1491
1492 bl = aiter->second;
1493
1494 auto iter = bl.cbegin();
1495 try {
1496 bucket_cors.decode(iter);
1497 } catch (buffer::error& err) {
1498 ldpp_dout(this, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
1499 return -EIO;
1500 }
1501 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
1502 RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
1503 ldpp_dout(this, 15) << "Read RGWCORSConfiguration";
1504 s3cors->to_xml(*_dout);
1505 *_dout << dendl;
1506 }
1507 return 0;
1508 }
1509
1510 /** CORS 6.2.6.
  1511  * If any of the header field-names is not an ASCII case-insensitive match for
  1512  * any of the values in the list of headers, do not set any additional headers and
1513 * terminate this set of steps.
1514 * */
1515 static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
1516 if (req_hdrs) {
1517 list<string> hl;
1518 get_str_list(req_hdrs, hl);
1519 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
1520 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
1521 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
1522 } else {
1523 if (hdrs.length() > 0) hdrs.append(",");
1524 hdrs.append((*it));
1525 }
1526 }
1527 }
1528 rule->format_exp_headers(exp_hdrs);
1529 *max_age = rule->get_max_age();
1530 }
1531
1532 /**
1533 * Generate the CORS header response
1534 *
1535 * This is described in the CORS standard, section 6.2.
1536 */
1537 bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
1538 {
1539 /* CORS 6.2.1. */
1540 const char *orig = s->info.env->get("HTTP_ORIGIN");
1541 if (!orig) {
1542 return false;
1543 }
1544
1545 /* Custom: */
1546 origin = orig;
1547 int temp_op_ret = read_bucket_cors();
1548 if (temp_op_ret < 0) {
1549 op_ret = temp_op_ret;
1550 return false;
1551 }
1552
1553 if (!cors_exist) {
1554 ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
1555 return false;
1556 }
1557
1558 /* CORS 6.2.2. */
1559 RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
1560 if (!rule)
1561 return false;
1562
1563 /*
  1564    * Set the Allowed-Origin header to an asterisk if this is allowed in the rule
  1565    * and no Authorization was sent by the client
1566 *
1567 * The origin parameter specifies a URI that may access the resource. The browser must enforce this.
1568 * For requests without credentials, the server may specify "*" as a wildcard,
1569 * thereby allowing any origin to access the resource.
1570 */
1571 const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
1572 if (!authorization && rule->has_wildcard_origin())
1573 origin = "*";
1574
1575 /* CORS 6.2.3. */
1576 const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
1577 if (!req_meth) {
1578 req_meth = s->info.method;
1579 }
1580
1581 if (req_meth) {
1582 method = req_meth;
1583 /* CORS 6.2.5. */
1584 if (!validate_cors_rule_method(rule, req_meth)) {
1585 return false;
1586 }
1587 }
1588
1589 /* CORS 6.2.4. */
1590 const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
1591
1592 /* CORS 6.2.6. */
1593 get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);
1594
1595 return true;
1596 }
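
// Example preflight handled by the logic above (header values are illustrative):
//   Origin: https://app.example.com
//   Access-Control-Request-Method: PUT
//   Access-Control-Request-Headers: content-type
// If a CORS rule on the bucket matches that origin, allows PUT and allows the
// content-type header, the caller gets back the origin ("*" when the rule has
// a wildcard origin and no Authorization header was sent), the method, the
// echoed allowed headers, any expose-headers, and the rule's max-age.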
1597
1598 int RGWGetObj::read_user_manifest_part(RGWBucketInfo& bucket_info,
1599 const rgw_bucket_dir_entry& ent,
1600 RGWAccessControlPolicy * const bucket_acl,
1601 const boost::optional<Policy>& bucket_policy,
1602 const off_t start_ofs,
1603 const off_t end_ofs,
1604 bool swift_slo)
1605 {
1606 ldpp_dout(this, 20) << "user manifest obj=" << ent.key.name
1607 << "[" << ent.key.instance << "]" << dendl;
1608 RGWGetObj_CB cb(this);
1609 RGWGetObj_Filter* filter = &cb;
1610 boost::optional<RGWGetObj_Decompress> decompress;
1611
1612 int64_t cur_ofs = start_ofs;
1613 int64_t cur_end = end_ofs;
1614
1615 rgw_bucket& bucket = bucket_info.bucket;
1616
1617 rgw_obj part(bucket, ent.key);
1618
1619 map<string, bufferlist> attrs;
1620
1621 uint64_t obj_size;
1622 RGWObjectCtx obj_ctx(store);
1623 RGWAccessControlPolicy obj_policy(s->cct);
1624
1625 ldpp_dout(this, 20) << "reading obj=" << part << " ofs=" << cur_ofs
1626 << " end=" << cur_end << dendl;
1627
1628 obj_ctx.set_atomic(part);
1629 store->getRados()->set_prefetch_data(&obj_ctx, part);
1630
1631 RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, part);
1632 RGWRados::Object::Read read_op(&op_target);
1633
1634 if (!swift_slo) {
1635 /* SLO etag is optional */
1636 read_op.conds.if_match = ent.meta.etag.c_str();
1637 }
1638 read_op.params.attrs = &attrs;
1639 read_op.params.obj_size = &obj_size;
1640
1641 op_ret = read_op.prepare(s->yield);
1642 if (op_ret < 0)
1643 return op_ret;
1644 op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
1645 if (op_ret < 0)
1646 return op_ret;
1647 bool need_decompress;
1648 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
1649 if (op_ret < 0) {
1650 ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl;
1651 return -EIO;
1652 }
1653
1654 if (need_decompress)
1655 {
1656 if (cs_info.orig_size != ent.meta.accounted_size) {
1657 // hmm.. something wrong, object not as expected, abort!
1658 ldpp_dout(this, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size
1659 << ", actual read size=" << ent.meta.size << dendl;
1660 return -EIO;
1661 }
1662 decompress.emplace(s->cct, &cs_info, partial_content, filter);
1663 filter = &*decompress;
1664 }
1665 else
1666 {
1667 if (obj_size != ent.meta.size) {
1668 // hmm.. something wrong, object not as expected, abort!
1669 ldpp_dout(this, 0) << "ERROR: expected obj_size=" << obj_size
1670 << ", actual read size=" << ent.meta.size << dendl;
1671 return -EIO;
1672 }
1673 }
1674
1675 op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
1676 if (op_ret < 0)
1677 return op_ret;
1678
1679 /* We can use global user_acl because LOs cannot have segments
1680 * stored inside different accounts. */
1681 if (s->system_request) {
1682 ldpp_dout(this, 2) << "overriding permissions due to system operation" << dendl;
1683 } else if (s->auth.identity->is_admin_of(s->user->get_id())) {
1684 ldpp_dout(this, 2) << "overriding permissions due to admin operation" << dendl;
1685 } else if (!verify_object_permission(this, s, part, s->user_acl.get(), bucket_acl,
1686 &obj_policy, bucket_policy, s->iam_user_policies, action)) {
1687 return -EPERM;
1688 }
1689 if (ent.meta.size == 0) {
1690 return 0;
1691 }
1692
1693 perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
1694 filter->fixup_range(cur_ofs, cur_end);
1695 op_ret = read_op.iterate(cur_ofs, cur_end, filter, s->yield);
1696 if (op_ret >= 0)
1697 op_ret = filter->flush();
1698 return op_ret;
1699 }
1700
1701 static int iterate_user_manifest_parts(CephContext * const cct,
1702 rgw::sal::RGWRadosStore * const store,
1703 const off_t ofs,
1704 const off_t end,
1705 RGWBucketInfo *pbucket_info,
1706 const string& obj_prefix,
1707 RGWAccessControlPolicy * const bucket_acl,
1708 const boost::optional<Policy>& bucket_policy,
1709 uint64_t * const ptotal_len,
1710 uint64_t * const pobj_size,
1711 string * const pobj_sum,
1712 int (*cb)(RGWBucketInfo& bucket_info,
1713 const rgw_bucket_dir_entry& ent,
1714 RGWAccessControlPolicy * const bucket_acl,
1715 const boost::optional<Policy>& bucket_policy,
1716 off_t start_ofs,
1717 off_t end_ofs,
1718 void *param,
1719 bool swift_slo),
1720 void * const cb_param)
1721 {
1722 rgw_bucket& bucket = pbucket_info->bucket;
1723 uint64_t obj_ofs = 0, len_count = 0;
1724 bool found_start = false, found_end = false, handled_end = false;
1725 string delim;
1726 bool is_truncated;
1727 vector<rgw_bucket_dir_entry> objs;
1728
1729 utime_t start_time = ceph_clock_now();
1730
1731 RGWRados::Bucket target(store->getRados(), *pbucket_info);
1732 RGWRados::Bucket::List list_op(&target);
1733
1734 list_op.params.prefix = obj_prefix;
1735 list_op.params.delim = delim;
1736
1737 MD5 etag_sum;
1738 do {
1739 #define MAX_LIST_OBJS 100
1740 int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated, null_yield);
1741 if (r < 0) {
1742 return r;
1743 }
1744
1745 for (rgw_bucket_dir_entry& ent : objs) {
1746 const uint64_t cur_total_len = obj_ofs;
1747 const uint64_t obj_size = ent.meta.accounted_size;
1748 uint64_t start_ofs = 0, end_ofs = obj_size;
1749
1750 if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
1751 start_ofs = ofs - obj_ofs;
1752 found_start = true;
1753 }
1754
1755 obj_ofs += obj_size;
1756 if (pobj_sum) {
1757 etag_sum.Update((const unsigned char *)ent.meta.etag.c_str(),
1758 ent.meta.etag.length());
1759 }
1760
1761 if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
1762 end_ofs = end - cur_total_len + 1;
1763 found_end = true;
1764 }
1765
1766 perfcounter->tinc(l_rgw_get_lat,
1767 (ceph_clock_now() - start_time));
1768
1769 if (found_start && !handled_end) {
1770 len_count += end_ofs - start_ofs;
1771
1772 if (cb) {
1773 r = cb(*pbucket_info, ent, bucket_acl, bucket_policy, start_ofs, end_ofs,
1774 cb_param, false /* swift_slo */);
1775 if (r < 0) {
1776 return r;
1777 }
1778 }
1779 }
1780
1781 handled_end = found_end;
1782 start_time = ceph_clock_now();
1783 }
1784 } while (is_truncated);
1785
1786 if (ptotal_len) {
1787 *ptotal_len = len_count;
1788 }
1789 if (pobj_size) {
1790 *pobj_size = obj_ofs;
1791 }
1792 if (pobj_sum) {
1793 complete_etag(etag_sum, pobj_sum);
1794 }
1795
1796 return 0;
1797 }
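
// Worked example of the offset bookkeeping above (sizes are illustrative):
// with three listed parts of 4 MiB each and a request whose range starts at
// ofs = 5 MiB and covers 4 MiB, the first part is skipped entirely, the second
// is handed to the callback with start_ofs = 1 MiB up to its end, and the
// third is read from 0 for the remaining 1 MiB; len_count adds up to the
// 4 MiB actually returned.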
1798
1799 struct rgw_slo_part {
1800 RGWAccessControlPolicy *bucket_acl = nullptr;
1801 Policy* bucket_policy = nullptr;
1802 RGWBucketInfo *pbucket_info = nullptr;
1803 string obj_name;
1804 uint64_t size = 0;
1805 string etag;
1806 };
1807
1808 static int iterate_slo_parts(CephContext *cct,
1809 rgw::sal::RGWRadosStore *store,
1810 off_t ofs,
1811 off_t end,
1812 map<uint64_t, rgw_slo_part>& slo_parts,
1813 int (*cb)(RGWBucketInfo& bucket_info,
1814 const rgw_bucket_dir_entry& ent,
1815 RGWAccessControlPolicy *bucket_acl,
1816 const boost::optional<Policy>& bucket_policy,
1817 off_t start_ofs,
1818 off_t end_ofs,
1819 void *param,
1820 bool swift_slo),
1821 void *cb_param)
1822 {
1823 bool found_start = false, found_end = false;
1824
1825 if (slo_parts.empty()) {
1826 return 0;
1827 }
1828
1829 utime_t start_time = ceph_clock_now();
1830
1831 map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
1832 if (iter != slo_parts.begin()) {
1833 --iter;
1834 }
1835
1836 uint64_t obj_ofs = iter->first;
1837
1838 for (; iter != slo_parts.end() && !found_end; ++iter) {
1839 rgw_slo_part& part = iter->second;
1840 rgw_bucket_dir_entry ent;
1841
1842 ent.key.name = part.obj_name;
1843 ent.meta.accounted_size = ent.meta.size = part.size;
1844 ent.meta.etag = part.etag;
1845
1846 uint64_t cur_total_len = obj_ofs;
1847 uint64_t start_ofs = 0, end_ofs = ent.meta.size - 1;
1848
1849 if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
1850 start_ofs = ofs - obj_ofs;
1851 found_start = true;
1852 }
1853
1854 obj_ofs += ent.meta.size;
1855
1856 if (!found_end && obj_ofs > (uint64_t)end) {
1857 end_ofs = end - cur_total_len;
1858 found_end = true;
1859 }
1860
1861 perfcounter->tinc(l_rgw_get_lat,
1862 (ceph_clock_now() - start_time));
1863
1864 if (found_start) {
1865 if (cb) {
1866 dout(20) << "iterate_slo_parts()"
1867 << " obj=" << part.obj_name
1868 << " start_ofs=" << start_ofs
1869 << " end_ofs=" << end_ofs
1870 << dendl;
1871
1872 // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
1873 int r = cb(*(part.pbucket_info), ent, part.bucket_acl,
1874 (part.bucket_policy ?
1875 boost::optional<Policy>(*part.bucket_policy) : none),
1876 start_ofs, end_ofs, cb_param, true /* swift_slo */);
1877 if (r < 0)
1878 return r;
1879 }
1880 }
1881
1882 start_time = ceph_clock_now();
1883 }
1884
1885 return 0;
1886 }
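/* Reader's note (added commentary, not part of the original source): slo_parts
 * is keyed by the cumulative byte offset at which each segment starts, so
 * upper_bound(ofs) followed by the --iter step above lands on the segment that
 * contains the first requested byte. For example, segments of sizes 100, 200
 * and 50 bytes are stored under keys 0, 100 and 300; a ranged read with
 * ofs=150 starts iterating at key 100. */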
1887
1888 static int get_obj_user_manifest_iterate_cb(RGWBucketInfo& bucket_info,
1889 const rgw_bucket_dir_entry& ent,
1890 RGWAccessControlPolicy * const bucket_acl,
1891 const boost::optional<Policy>& bucket_policy,
1892 const off_t start_ofs,
1893 const off_t end_ofs,
1894 void * const param,
1895 bool swift_slo = false)
1896 {
1897 RGWGetObj *op = static_cast<RGWGetObj *>(param);
1898 return op->read_user_manifest_part(
1899 bucket_info, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, swift_slo);
1900 }
1901
1902 int RGWGetObj::handle_user_manifest(const char *prefix)
1903 {
1904 const boost::string_view prefix_view(prefix);
1905 ldpp_dout(this, 2) << "RGWGetObj::handle_user_manifest() prefix="
1906 << prefix_view << dendl;
1907
1908 const size_t pos = prefix_view.find('/');
1909 if (pos == string::npos) {
1910 return -EINVAL;
1911 }
1912
1913 const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
1914 const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));
1915
1916 rgw_bucket bucket;
1917
1918 RGWAccessControlPolicy _bucket_acl(s->cct);
1919 RGWAccessControlPolicy *bucket_acl;
1920 boost::optional<Policy> _bucket_policy;
1921 boost::optional<Policy>* bucket_policy;
1922 RGWBucketInfo bucket_info;
1923 RGWBucketInfo *pbucket_info;
1924
1925 if (bucket_name.compare(s->bucket.name) != 0) {
1926 map<string, bufferlist> bucket_attrs;
1927 auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
1928 int r = store->getRados()->get_bucket_info(store->svc(), s->user->get_tenant(),
1929 bucket_name, bucket_info, NULL,
1930 s->yield, &bucket_attrs);
1931 if (r < 0) {
1932 ldpp_dout(this, 0) << "could not get bucket info for bucket="
1933 << bucket_name << dendl;
1934 return r;
1935 }
1936 bucket = bucket_info.bucket;
1937 pbucket_info = &bucket_info;
1938 bucket_acl = &_bucket_acl;
1939 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
1940 if (r < 0) {
1941 ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
1942 return r;
1943 }
1944 _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
1945 bucket_info.bucket.tenant);
1946 bucket_policy = &_bucket_policy;
1947 } else {
1948 bucket = s->bucket;
1949 pbucket_info = &s->bucket_info;
1950 bucket_acl = s->bucket_acl.get();
1951 bucket_policy = &s->iam_policy;
1952 }
1953
1954 /* dry run to find out:
1955 * - total length (of the parts we are going to send to client),
1956 * - overall DLO's content size,
1957 * - md5 sum of overall DLO's content (for etag of Swift API). */
1958 int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
1959 pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
1960 nullptr, &s->obj_size, &lo_etag,
1961 nullptr /* cb */, nullptr /* cb arg */);
1962 if (r < 0) {
1963 return r;
1964 }
1965
1966 r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
1967 if (r < 0) {
1968 return r;
1969 }
1970
1971 r = iterate_user_manifest_parts(s->cct, store, ofs, end,
1972 pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
1973 &total_len, nullptr, nullptr,
1974 nullptr, nullptr);
1975 if (r < 0) {
1976 return r;
1977 }
1978
1979 if (!get_data) {
1980 bufferlist bl;
1981 send_response_data(bl, 0, 0);
1982 return 0;
1983 }
1984
1985 r = iterate_user_manifest_parts(s->cct, store, ofs, end,
1986 pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
1987 nullptr, nullptr, nullptr,
1988 get_obj_user_manifest_iterate_cb, (void *)this);
1989 if (r < 0) {
1990 return r;
1991 }
1992
1993 if (!total_len) {
1994 bufferlist bl;
1995 send_response_data(bl, 0, 0);
1996 }
1997
1998 return 0;
1999 }
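/* Reader's note (added commentary, not part of the original source): the three
 * iterate_user_manifest_parts() calls above serve different purposes: (1) a dry
 * run that learns the DLO's overall size and Swift etag, (2) after
 * range_to_ofs() has clamped ofs/end against that size, a pass that computes
 * total_len for the (possibly ranged) response, and (3) the data pass that
 * streams each part through read_user_manifest_part() via the callback. The
 * third pass is skipped when get_data is false (HEAD-style requests). */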
2000
2001 int RGWGetObj::handle_slo_manifest(bufferlist& bl)
2002 {
2003 RGWSLOInfo slo_info;
2004 auto bliter = bl.cbegin();
2005 try {
2006 decode(slo_info, bliter);
2007 } catch (buffer::error& err) {
2008 ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
2009 return -EIO;
2010 }
2011 ldpp_dout(this, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
2012
2013 vector<RGWAccessControlPolicy> allocated_acls;
2014 map<string, pair<RGWAccessControlPolicy *, boost::optional<Policy>>> policies;
2015 map<string, RGWBucketInfo> bucket_infos;
2016
2017 map<uint64_t, rgw_slo_part> slo_parts;
2018
2019 MD5 etag_sum;
2020 total_len = 0;
2021
2022 for (const auto& entry : slo_info.entries) {
2023 const string& path = entry.path;
2024
2025 /* If the path starts with slashes, strip them all. */
2026 const size_t pos_init = path.find_first_not_of('/');
2027 /* According to the documentation of std::string::find, the following check
2028 * should not be necessary: std::string::npos would simply propagate here.
2029 * We keep it anyway to guard against implementation bugs.
2030 * See the following question on SO:
2031 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
2032 */
2033 if (pos_init == string::npos) {
2034 return -EINVAL;
2035 }
2036
2037 const size_t pos_sep = path.find('/', pos_init);
2038 if (pos_sep == string::npos) {
2039 return -EINVAL;
2040 }
2041
2042 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
2043 string obj_name = path.substr(pos_sep + 1);
2044
2045 rgw_bucket bucket;
2046 RGWBucketInfo *pbucket_info;
2047 RGWAccessControlPolicy *bucket_acl;
2048 Policy* bucket_policy;
2049
2050 if (bucket_name.compare(s->bucket.name) != 0) {
2051 const auto& piter = policies.find(bucket_name);
2052 if (piter != policies.end()) {
2053 bucket_acl = piter->second.first;
2054 bucket_policy = piter->second.second.get_ptr();
2055 pbucket_info = &bucket_infos[bucket_name];
2056 } else {
2057 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
2058 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
2059
2060 RGWBucketInfo bucket_info;
2061 map<string, bufferlist> bucket_attrs;
2062 auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
2063 int r = store->getRados()->get_bucket_info(store->svc(), s->user->get_tenant(),
2064 bucket_name, bucket_info, nullptr,
2065 s->yield, &bucket_attrs);
2066 if (r < 0) {
2067 ldpp_dout(this, 0) << "could not get bucket info for bucket="
2068 << bucket_name << dendl;
2069 return r;
2070 }
2071 bucket = bucket_info.bucket;
2072 bucket_acl = &_bucket_acl;
2073 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
2074 bucket);
2075 if (r < 0) {
2076 ldpp_dout(this, 0) << "failed to read bucket ACL for bucket "
2077 << bucket << dendl;
2078 return r;
2079 }
2080 auto _bucket_policy = get_iam_policy_from_attr(
2081 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
2082 bucket_policy = _bucket_policy.get_ptr();
2083 bucket_infos.emplace(bucket_name, std::move(bucket_info));
2084 pbucket_info = &bucket_infos[bucket_name];
2085 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
2086 }
2087 } else {
2088 pbucket_info = &s->bucket_info;
2089 bucket_acl = s->bucket_acl.get();
2090 bucket_policy = s->iam_policy.get_ptr();
2091 }
2092
2093 rgw_slo_part part;
2094 part.bucket_acl = bucket_acl;
2095 part.bucket_policy = bucket_policy;
2096 part.pbucket_info = pbucket_info;
2097 part.obj_name = obj_name;
2098 part.size = entry.size_bytes;
2099 part.etag = entry.etag;
2100 ldpp_dout(this, 20) << "slo_part: bucket=" << part.pbucket_info->bucket
2101 << " obj=" << part.obj_name
2102 << " size=" << part.size
2103 << " etag=" << part.etag
2104 << dendl;
2105
2106 etag_sum.Update((const unsigned char *)entry.etag.c_str(),
2107 entry.etag.length());
2108
2109 slo_parts[total_len] = part;
2110 total_len += part.size;
2111 } /* foreach entry */
2112
2113 complete_etag(etag_sum, &lo_etag);
2114
2115 s->obj_size = slo_info.total_size;
2116 ldpp_dout(this, 20) << "s->obj_size=" << s->obj_size << dendl;
2117
2118 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
2119 if (r < 0) {
2120 return r;
2121 }
2122
2123 total_len = end - ofs + 1;
2124 ldpp_dout(this, 20) << "Requested: ofs=" << ofs
2125 << " end=" << end
2126 << " total=" << total_len
2127 << dendl;
2128
2129 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
2130 get_obj_user_manifest_iterate_cb, (void *)this);
2131 if (r < 0) {
2132 return r;
2133 }
2134
2135 return 0;
2136 }
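/* Reader's note (added commentary, not part of the original source): unlike the
 * DLO path above, an SLO manifest already records each segment's size and etag,
 * so total_len, the Swift etag and the per-segment offsets are all derived from
 * the decoded RGWSLOInfo without listing any bucket; the segment objects are
 * only touched by the final iterate_slo_parts() call, via the same
 * get_obj_user_manifest_iterate_cb callback. */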
2137
2138 int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
2139 {
2140 /* garbage collection related handling:
2141 * defer_gc disabled for https://tracker.ceph.com/issues/47866 */
2142 return send_response_data(bl, bl_ofs, bl_len);
2143 }
2144
2145 bool RGWGetObj::prefetch_data()
2146 {
2147 /* HEAD request, stop prefetch */
2148 if (!get_data || s->info.env->exists("HTTP_X_RGW_AUTH")) {
2149 return false;
2150 }
2151
2152 range_str = s->info.env->get("HTTP_RANGE");
2153 // TODO: add range prefetch
2154 if (range_str) {
2155 parse_range();
2156 return false;
2157 }
2158
2159 return get_data;
2160 }
2161
2162 void RGWGetObj::pre_exec()
2163 {
2164 rgw_bucket_object_pre_exec(s);
2165 }
2166
2167 static bool object_is_expired(map<string, bufferlist>& attrs) {
2168 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
2169 if (iter != attrs.end()) {
2170 utime_t delete_at;
2171 try {
2172 decode(delete_at, iter->second);
2173 } catch (buffer::error& err) {
2174 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
2175 return false;
2176 }
2177
2178 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
2179 return true;
2180 }
2181 }
2182
2183 return false;
2184 }
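/* Reader's note (added commentary, not part of the original source):
 * RGW_ATTR_DELETE_AT holds a utime_t expiration timestamp (set, for example,
 * through Swift's X-Delete-At / X-Delete-After support); a zero timestamp means
 * "no expiration", which is why the is_zero() check above is needed in addition
 * to the <= now comparison. */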
2185
2186 static inline void rgw_cond_decode_objtags(
2187 struct req_state *s,
2188 const std::map<std::string, buffer::list> &attrs)
2189 {
2190 const auto& tags = attrs.find(RGW_ATTR_TAGS);
2191 if (tags != attrs.end()) {
2192 try {
2193 bufferlist::const_iterator iter{&tags->second};
2194 s->tagset.decode(iter);
2195 } catch (buffer::error& err) {
2196 ldout(s->cct, 0)
2197 << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
2198 }
2199 }
2200 }
2201
2202 void RGWGetObj::execute()
2203 {
2204 bufferlist bl;
2205 gc_invalidate_time = ceph_clock_now();
2206 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
2207
2208 bool need_decompress;
2209 int64_t ofs_x, end_x;
2210
2211 RGWGetObj_CB cb(this);
2212 RGWGetObj_Filter* filter = (RGWGetObj_Filter *)&cb;
2213 boost::optional<RGWGetObj_Decompress> decompress;
2214 std::unique_ptr<RGWGetObj_Filter> decrypt;
2215 map<string, bufferlist>::iterator attr_iter;
2216
2217 perfcounter->inc(l_rgw_get);
2218
2219 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
2220 RGWRados::Object::Read read_op(&op_target);
2221
2222 op_ret = get_params();
2223 if (op_ret < 0)
2224 goto done_err;
2225
2226 op_ret = init_common();
2227 if (op_ret < 0)
2228 goto done_err;
2229
2230 read_op.conds.mod_ptr = mod_ptr;
2231 read_op.conds.unmod_ptr = unmod_ptr;
2232 read_op.conds.high_precision_time = s->system_request; /* system requests need to use high precision time */
2233 read_op.conds.mod_zone_id = mod_zone_id;
2234 read_op.conds.mod_pg_ver = mod_pg_ver;
2235 read_op.conds.if_match = if_match;
2236 read_op.conds.if_nomatch = if_nomatch;
2237 read_op.params.attrs = &attrs;
2238 read_op.params.lastmod = &lastmod;
2239 read_op.params.obj_size = &s->obj_size;
2240
2241 op_ret = read_op.prepare(s->yield);
2242 if (op_ret < 0)
2243 goto done_err;
2244 version_id = read_op.state.obj.key.instance;
2245
2246 /* STAT ops don't need data, and do no i/o */
2247 if (get_type() == RGW_OP_STAT_OBJ) {
2248 return;
2249 }
2250 if (s->info.env->exists("HTTP_X_RGW_AUTH")) {
2251 op_ret = 0;
2252 goto done_err;
2253 }
2254 /* start gettorrent */
2255 if (torrent.get_flag())
2256 {
2257 attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
2258 if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
2259 ldpp_dout(this, 0) << "ERROR: torrents are not supported for objects "
2260 "encrypted with SSE-C" << dendl;
2261 op_ret = -EINVAL;
2262 goto done_err;
2263 }
2264 torrent.init(s, store);
2265 op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj);
2266 if (op_ret < 0)
2267 {
2268 ldpp_dout(this, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
2269 << dendl;
2270 goto done_err;
2271 }
2272 op_ret = send_response_data(bl, 0, total_len);
2273 if (op_ret < 0)
2274 {
2275 ldpp_dout(this, 0) << "ERROR: failed to send_response_data ret= " << op_ret << dendl;
2276 goto done_err;
2277 }
2278 return;
2279 }
2280 /* end gettorrent */
2281
2282 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
2283 if (op_ret < 0) {
2284 ldpp_dout(s, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
2285 goto done_err;
2286 }
2287 if (need_decompress) {
2288 s->obj_size = cs_info.orig_size;
2289 decompress.emplace(s->cct, &cs_info, partial_content, filter);
2290 filter = &*decompress;
2291 }
2292
2293 attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
2294 if (attr_iter != attrs.end() && !skip_manifest) {
2295 op_ret = handle_user_manifest(attr_iter->second.c_str());
2296 if (op_ret < 0) {
2297 ldpp_dout(this, 0) << "ERROR: failed to handle user manifest ret="
2298 << op_ret << dendl;
2299 goto done_err;
2300 }
2301 return;
2302 }
2303
2304 attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
2305 if (attr_iter != attrs.end() && !skip_manifest) {
2306 is_slo = true;
2307 op_ret = handle_slo_manifest(attr_iter->second);
2308 if (op_ret < 0) {
2309 ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
2310 << dendl;
2311 goto done_err;
2312 }
2313 return;
2314 }
2315
2316 // for range requests with obj size 0
2317 if (range_str && !(s->obj_size)) {
2318 total_len = 0;
2319 op_ret = -ERANGE;
2320 goto done_err;
2321 }
2322
2323 op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
2324 if (op_ret < 0)
2325 goto done_err;
2326 total_len = (ofs <= end ? end + 1 - ofs : 0);
2327
2328 /* Check whether the object has expired. The Swift API documentation
2329 * states that we should return 404 Not Found in such a case. */
2330 if (need_object_expiration() && object_is_expired(attrs)) {
2331 op_ret = -ENOENT;
2332 goto done_err;
2333 }
2334
2335 /* Decode S3 objtags, if any */
2336 rgw_cond_decode_objtags(s, attrs);
2337
2338 start = ofs;
2339
2340 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
2341 op_ret = this->get_decrypt_filter(&decrypt, filter,
2342 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
2343 if (decrypt != nullptr) {
2344 filter = decrypt.get();
2345 }
2346 if (op_ret < 0) {
2347 goto done_err;
2348 }
2349
2350 if (!get_data || ofs > end) {
2351 send_response_data(bl, 0, 0);
2352 return;
2353 }
2354
2355 perfcounter->inc(l_rgw_get_b, end - ofs);
2356
2357 ofs_x = ofs;
2358 end_x = end;
2359 filter->fixup_range(ofs_x, end_x);
2360 op_ret = read_op.iterate(ofs_x, end_x, filter, s->yield);
2361
2362 if (op_ret >= 0)
2363 op_ret = filter->flush();
2364
2365 perfcounter->tinc(l_rgw_get_lat, s->time_elapsed());
2366 if (op_ret < 0) {
2367 goto done_err;
2368 }
2369
2370 op_ret = send_response_data(bl, 0, 0);
2371 if (op_ret < 0) {
2372 goto done_err;
2373 }
2374 return;
2375
2376 done_err:
2377 send_response_data_error();
2378 }
2379
2380 int RGWGetObj::init_common()
2381 {
2382 if (range_str) {
2383 /* range parsing failed during prefetch */
2384 if (!range_parsed) {
2385 int r = parse_range();
2386 if (r < 0)
2387 return r;
2388 }
2389 }
2390 if (if_mod) {
2391 if (parse_time(if_mod, &mod_time) < 0)
2392 return -EINVAL;
2393 mod_ptr = &mod_time;
2394 }
2395
2396 if (if_unmod) {
2397 if (parse_time(if_unmod, &unmod_time) < 0)
2398 return -EINVAL;
2399 unmod_ptr = &unmod_time;
2400 }
2401
2402 return 0;
2403 }
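/* Reader's note (added commentary, not part of the original source): if_mod and
 * if_unmod carry the raw conditional-GET header values (If-Modified-Since /
 * If-Unmodified-Since, as collected by get_params() in the protocol frontend);
 * init_common() parses them once so that execute() can hand the resulting
 * timestamps to read_op.conds and have the checks applied during
 * read_op.prepare(). */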
2404
2405 int RGWListBuckets::verify_permission()
2406 {
2407 rgw::Partition partition = rgw::Partition::aws;
2408 rgw::Service service = rgw::Service::s3;
2409
2410 if (!verify_user_permission(this, s, ARN(partition, service, "", s->user->get_tenant(), "*"), rgw::IAM::s3ListAllMyBuckets)) {
2411 return -EACCES;
2412 }
2413
2414 return 0;
2415 }
2416
2417 int RGWGetUsage::verify_permission()
2418 {
2419 if (s->auth.identity->is_anonymous()) {
2420 return -EACCES;
2421 }
2422
2423 return 0;
2424 }
2425
2426 void RGWListBuckets::execute()
2427 {
2428 bool done;
2429 bool started = false;
2430 uint64_t total_count = 0;
2431
2432 const uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
2433
2434 op_ret = get_params();
2435 if (op_ret < 0) {
2436 goto send_end;
2437 }
2438
2439 if (supports_account_metadata()) {
2440 op_ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &attrs, s->yield);
2441 if (op_ret < 0) {
2442 goto send_end;
2443 }
2444 }
2445
2446 is_truncated = false;
2447 do {
2448 rgw::sal::RGWBucketList buckets;
2449 uint64_t read_count;
2450 if (limit >= 0) {
2451 read_count = min(limit - total_count, max_buckets);
2452 } else {
2453 read_count = max_buckets;
2454 }
2455
2456 rgw::sal::RGWRadosUser user(store, s->user->get_id());
2457
2458 op_ret = user.list_buckets(marker, end_marker, read_count, should_get_stats(), buckets);
2459
2460 if (op_ret < 0) {
2461 /* hmm.. something wrong here.. the user was authenticated, so it
2462 should exist */
2463 ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
2464 << s->user->get_id() << dendl;
2465 break;
2466 }
2467
2468 /* We need to have stats for all our policies - even if a given policy
2469 * isn't actually used in a given account. In such a situation its usage
2470 * stats would simply be full of zeros. */
2471 for (const auto& policy : store->svc()->zone->get_zonegroup().placement_targets) {
2472 policies_stats.emplace(policy.second.name,
2473 decltype(policies_stats)::mapped_type());
2474 }
2475
2476 std::map<std::string, rgw::sal::RGWBucket*>& m = buckets.get_buckets();
2477 for (const auto& kv : m) {
2478 const auto& bucket = kv.second;
2479
2480 global_stats.bytes_used += bucket->get_size();
2481 global_stats.bytes_used_rounded += bucket->get_size_rounded();
2482 global_stats.objects_count += bucket->get_count();
2483
2484 /* operator[] can still create a new entry for a storage policy seen
2485 * for the first time. */
2486 auto& policy_stats = policies_stats[bucket->get_placement_rule().to_str()];
2487 policy_stats.bytes_used += bucket->get_size();
2488 policy_stats.bytes_used_rounded += bucket->get_size_rounded();
2489 policy_stats.buckets_count++;
2490 policy_stats.objects_count += bucket->get_count();
2491 }
2492 global_stats.buckets_count += m.size();
2493 total_count += m.size();
2494
2495 done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));
2496
2497 if (!started) {
2498 send_response_begin(buckets.count() > 0);
2499 started = true;
2500 }
2501
2502 if (read_count > 0 &&
2503 !m.empty()) {
2504 map<string, rgw::sal::RGWBucket*>::reverse_iterator riter = m.rbegin();
2505 marker = riter->first;
2506
2507 handle_listing_chunk(std::move(buckets));
2508 }
2509 } while (is_truncated && !done);
2510
2511 send_end:
2512 if (!started) {
2513 send_response_begin(false);
2514 }
2515 send_response_end();
2516 }
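/* Reader's note (added commentary, not part of the original source): the listing
 * above is chunked by rgw_list_buckets_max_chunk: send_response_begin() is
 * emitted once, each chunk is handed to handle_listing_chunk(), and the marker
 * is advanced to the last bucket name of the chunk so the next
 * user.list_buckets() call resumes after it. */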
2517
2518 void RGWGetUsage::execute()
2519 {
2520 uint64_t start_epoch = 0;
2521 uint64_t end_epoch = (uint64_t)-1;
2522 op_ret = get_params();
2523 if (op_ret < 0)
2524 return;
2525
2526 if (!start_date.empty()) {
2527 op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
2528 if (op_ret < 0) {
2529 ldpp_dout(this, 0) << "ERROR: failed to parse start date" << dendl;
2530 return;
2531 }
2532 }
2533
2534 if (!end_date.empty()) {
2535 op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
2536 if (op_ret < 0) {
2537 ldpp_dout(this, 0) << "ERROR: failed to parse end date" << dendl;
2538 return;
2539 }
2540 }
2541
2542 uint32_t max_entries = 1000;
2543
2544 bool is_truncated = true;
2545
2546 RGWUsageIter usage_iter;
2547
2548 while (is_truncated) {
2549 op_ret = store->getRados()->read_usage(s->user->get_id(), s->bucket_name, start_epoch, end_epoch, max_entries,
2550 &is_truncated, usage_iter, usage);
2551 if (op_ret == -ENOENT) {
2552 op_ret = 0;
2553 is_truncated = false;
2554 }
2555
2556 if (op_ret < 0) {
2557 return;
2558 }
2559 }
2560
2561 op_ret = rgw_user_sync_all_stats(store, s->user->get_id());
2562 if (op_ret < 0) {
2563 ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl;
2564 return;
2565 }
2566
2567 op_ret = rgw_user_get_all_buckets_stats(store, s->user->get_id(), buckets_usage);
2568 if (op_ret < 0) {
2569 ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl;
2570 return;
2571 }
2572
2573 op_ret = store->ctl()->user->read_stats(s->user->get_id(), &stats);
2574 if (op_ret < 0) {
2575 ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl;
2576 return;
2577 }
2578
2579 return;
2580 }
2581
2582 int RGWStatAccount::verify_permission()
2583 {
2584 if (!verify_user_permission_no_policy(this, s, RGW_PERM_READ)) {
2585 return -EACCES;
2586 }
2587
2588 return 0;
2589 }
2590
2591 void RGWStatAccount::execute()
2592 {
2593 string marker;
2594 rgw::sal::RGWBucketList buckets;
2595 uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
2596 const string *lastmarker;
2597
2598 do {
2599
2600 lastmarker = nullptr;
2601 op_ret = rgw_read_user_buckets(store, s->user->get_id(), buckets, marker,
2602 string(), max_buckets, true);
2603 if (op_ret < 0) {
2604 /* hmm.. something wrong here.. the user was authenticated, so it
2605 should exist */
2606 ldpp_dout(this, 10) << "WARNING: failed on rgw_read_user_buckets uid="
2607 << s->user->get_id() << " ret=" << op_ret << dendl;
2608 break;
2609 } else {
2610 /* We need to have stats for all our policies - even if a given policy
2611 * isn't actually used in a given account. In such a situation its usage
2612 * stats would simply be full of zeros. */
2613 for (const auto& policy : store->svc()->zone->get_zonegroup().placement_targets) {
2614 policies_stats.emplace(policy.second.name,
2615 decltype(policies_stats)::mapped_type());
2616 }
2617
2618 std::map<std::string, rgw::sal::RGWBucket*>& m = buckets.get_buckets();
2619 for (const auto& kv : m) {
2620 const auto& bucket = kv.second;
2621 lastmarker = &kv.first;
2622
2623 global_stats.bytes_used += bucket->get_size();
2624 global_stats.bytes_used_rounded += bucket->get_size_rounded();
2625 global_stats.objects_count += bucket->get_count();
2626
2627 /* operator[] can still create a new entry for a storage policy seen
2628 * for the first time. */
2629 auto& policy_stats = policies_stats[bucket->get_placement_rule().to_str()];
2630 policy_stats.bytes_used += bucket->get_size();
2631 policy_stats.bytes_used_rounded += bucket->get_size_rounded();
2632 policy_stats.buckets_count++;
2633 policy_stats.objects_count += bucket->get_count();
2634 }
2635 global_stats.buckets_count += m.size();
2636
2637 }
2638 if (!lastmarker) {
2639 lderr(s->cct) << "ERROR: rgw_read_user_buckets, stasis at marker="
2640 << marker << " uid=" << s->user->get_id() << dendl;
2641 break;
2642 }
2643 marker = *lastmarker;
2644 } while (buckets.is_truncated());
2645 }
2646
2647 int RGWGetBucketVersioning::verify_permission()
2648 {
2649 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
2650 }
2651
2652 void RGWGetBucketVersioning::pre_exec()
2653 {
2654 rgw_bucket_object_pre_exec(s);
2655 }
2656
2657 void RGWGetBucketVersioning::execute()
2658 {
2659 versioned = s->bucket_info.versioned();
2660 versioning_enabled = s->bucket_info.versioning_enabled();
2661 mfa_enabled = s->bucket_info.mfa_enabled();
2662 }
2663
2664 int RGWSetBucketVersioning::verify_permission()
2665 {
2666 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
2667 }
2668
2669 void RGWSetBucketVersioning::pre_exec()
2670 {
2671 rgw_bucket_object_pre_exec(s);
2672 }
2673
2674 void RGWSetBucketVersioning::execute()
2675 {
2676 op_ret = get_params();
2677 if (op_ret < 0)
2678 return;
2679
2680 if (s->bucket_info.obj_lock_enabled() && versioning_status != VersioningEnabled) {
2681 s->err.message = "bucket versioning cannot be disabled on buckets with object lock enabled";
2682 ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
2683 op_ret = -ERR_INVALID_BUCKET_STATE;
2684 return;
2685 }
2686
2687 bool cur_mfa_status = (s->bucket_info.flags & BUCKET_MFA_ENABLED) != 0;
2688
2689 mfa_set_status &= (mfa_status != cur_mfa_status);
2690
2691 if (mfa_set_status &&
2692 !s->mfa_verified) {
2693 op_ret = -ERR_MFA_REQUIRED;
2694 return;
2695 }
2696 //if mfa is enabled for bucket, make sure mfa code is validated in case versioned status gets changed
2697 if (cur_mfa_status) {
2698 bool req_versioning_status = false;
2699 //if the requested versioning status differs from the one currently set on the bucket, require MFA verification
2700 if (versioning_status == VersioningEnabled) {
2701 req_versioning_status = (s->bucket_info.flags & BUCKET_VERSIONS_SUSPENDED) != 0;
2702 } else if (versioning_status == VersioningSuspended) {
2703 req_versioning_status = (s->bucket_info.flags & BUCKET_VERSIONS_SUSPENDED) == 0;
2704 }
2705 if (req_versioning_status && !s->mfa_verified) {
2706 op_ret = -ERR_MFA_REQUIRED;
2707 return;
2708 }
2709 }
2710
2711 if (!store->svc()->zone->is_meta_master()) {
2712 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2713 if (op_ret < 0) {
2714 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
2715 return;
2716 }
2717 }
2718
2719 bool modified = mfa_set_status;
2720
2721 op_ret = retry_raced_bucket_write(store->getRados(), s, [&] {
2722 if (mfa_set_status) {
2723 if (mfa_status) {
2724 s->bucket_info.flags |= BUCKET_MFA_ENABLED;
2725 } else {
2726 s->bucket_info.flags &= ~BUCKET_MFA_ENABLED;
2727 }
2728 }
2729
2730 if (versioning_status == VersioningEnabled) {
2731 s->bucket_info.flags |= BUCKET_VERSIONED;
2732 s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
2733 modified = true;
2734 } else if (versioning_status == VersioningSuspended) {
2735 s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
2736 modified = true;
2737 } else {
2738 return op_ret;
2739 }
2740 return store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
2741 &s->bucket_attrs);
2742 });
2743
2744 if (!modified) {
2745 return;
2746 }
2747
2748 if (op_ret < 0) {
2749 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2750 << " returned err=" << op_ret << dendl;
2751 return;
2752 }
2753 }
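/* Reader's note (added commentary, not part of the original source):
 * retry_raced_bucket_write() re-runs the supplied lambda when
 * put_bucket_instance_info() loses a race against a concurrent writer
 * (-ECANCELED), refreshing the cached bucket info between attempts, so the
 * versioning/MFA flag changes above are re-applied on top of the latest
 * bucket instance. */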
2754
2755 int RGWGetBucketWebsite::verify_permission()
2756 {
2757 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
2758 }
2759
2760 void RGWGetBucketWebsite::pre_exec()
2761 {
2762 rgw_bucket_object_pre_exec(s);
2763 }
2764
2765 void RGWGetBucketWebsite::execute()
2766 {
2767 if (!s->bucket_info.has_website) {
2768 op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION;
2769 }
2770 }
2771
2772 int RGWSetBucketWebsite::verify_permission()
2773 {
2774 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
2775 }
2776
2777 void RGWSetBucketWebsite::pre_exec()
2778 {
2779 rgw_bucket_object_pre_exec(s);
2780 }
2781
2782 void RGWSetBucketWebsite::execute()
2783 {
2784 op_ret = get_params();
2785
2786 if (op_ret < 0)
2787 return;
2788
2789 if (!store->svc()->zone->is_meta_master()) {
2790 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2791 if (op_ret < 0) {
2792 ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
2793 return;
2794 }
2795 }
2796
2797 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
2798 s->bucket_info.has_website = true;
2799 s->bucket_info.website_conf = website_conf;
2800 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
2801 real_time(), &s->bucket_attrs);
2802 return op_ret;
2803 });
2804
2805 if (op_ret < 0) {
2806 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2807 << " returned err=" << op_ret << dendl;
2808 return;
2809 }
2810 }
2811
2812 int RGWDeleteBucketWebsite::verify_permission()
2813 {
2814 return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
2815 }
2816
2817 void RGWDeleteBucketWebsite::pre_exec()
2818 {
2819 rgw_bucket_object_pre_exec(s);
2820 }
2821
2822 void RGWDeleteBucketWebsite::execute()
2823 {
2824
2825 if (!store->svc()->zone->is_meta_master()) {
2826 bufferlist in_data;
2827 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
2828 if (op_ret < 0) {
2829 ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket.name
2830 << " returned err=" << op_ret << dendl;
2831 return;
2832 }
2833 }
2834 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
2835 s->bucket_info.has_website = false;
2836 s->bucket_info.website_conf = RGWBucketWebsiteConf();
2837 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
2838 real_time(), &s->bucket_attrs);
2839 return op_ret;
2840 });
2841 if (op_ret < 0) {
2842 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2843 << " returned err=" << op_ret << dendl;
2844 return;
2845 }
2846 }
2847
2848 int RGWStatBucket::verify_permission()
2849 {
2850 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2851 if (!verify_bucket_permission(this, s, rgw::IAM::s3ListBucket)) {
2852 return -EACCES;
2853 }
2854
2855 return 0;
2856 }
2857
2858 void RGWStatBucket::pre_exec()
2859 {
2860 rgw_bucket_object_pre_exec(s);
2861 }
2862
2863 void RGWStatBucket::execute()
2864 {
2865 if (!s->bucket_exists) {
2866 op_ret = -ERR_NO_SUCH_BUCKET;
2867 return;
2868 }
2869
2870 rgw::sal::RGWRadosUser user(store, s->user->get_id());
2871 bucket = new rgw::sal::RGWRadosBucket(store, user, s->bucket);
2872 op_ret = bucket->update_container_stats();
2873 }
2874
2875 int RGWListBucket::verify_permission()
2876 {
2877 op_ret = get_params();
2878 if (op_ret < 0) {
2879 return op_ret;
2880 }
2881 if (!prefix.empty())
2882 s->env.emplace("s3:prefix", prefix);
2883
2884 if (!delimiter.empty())
2885 s->env.emplace("s3:delimiter", delimiter);
2886
2887 s->env.emplace("s3:max-keys", std::to_string(max));
2888
2889 if (!verify_bucket_permission(this,
2890 s,
2891 list_versions ?
2892 rgw::IAM::s3ListBucketVersions :
2893 rgw::IAM::s3ListBucket)) {
2894 return -EACCES;
2895 }
2896
2897 return 0;
2898 }
2899
2900 int RGWListBucket::parse_max_keys()
2901 {
2902 // Bound max value of max-keys to configured value for security
2903 // Bound min value of max-keys to '0'
2904 // Some S3 clients explicitly send max-keys=0 to detect if the bucket is
2905 // empty without listing any items.
2906 return parse_value_and_bound(max_keys, max, 0,
2907 g_conf().get_val<uint64_t>("rgw_max_listing_results"),
2908 default_max);
2909 }
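/* Reader's note (added commentary, not part of the original source): a sketch of
 * the bounding behaviour described above, assuming rgw_max_listing_results is
 * left at its usual value of 1000 and default_max is 1000: a missing max-keys
 * yields max = 1000; max-keys=0 yields max = 0 (a legal way to probe whether a
 * bucket is empty); max-keys=50000 is clamped down to 1000; and a non-numeric
 * value is rejected by parse_value_and_bound(). */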
2910
2911 void RGWListBucket::pre_exec()
2912 {
2913 rgw_bucket_object_pre_exec(s);
2914 }
2915
2916 void RGWListBucket::execute()
2917 {
2918 if (!s->bucket_exists) {
2919 op_ret = -ERR_NO_SUCH_BUCKET;
2920 return;
2921 }
2922
2923 if (allow_unordered && !delimiter.empty()) {
2924 ldpp_dout(this, 0) <<
2925 "ERROR: unordered bucket listing requested with a delimiter" << dendl;
2926 op_ret = -EINVAL;
2927 return;
2928 }
2929
2930 if (need_container_stats()) {
2931 op_ret = bucket->update_container_stats();
2932 }
2933
2934 RGWRados::Bucket target(store->getRados(), s->bucket_info);
2935 if (shard_id >= 0) {
2936 target.set_shard_id(shard_id);
2937 }
2938 RGWRados::Bucket::List list_op(&target);
2939
2940 list_op.params.prefix = prefix;
2941 list_op.params.delim = delimiter;
2942 list_op.params.marker = marker;
2943 list_op.params.end_marker = end_marker;
2944 list_op.params.list_versions = list_versions;
2945 list_op.params.allow_unordered = allow_unordered;
2946
2947 op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated, s->yield);
2948 if (op_ret >= 0) {
2949 next_marker = list_op.get_next_marker();
2950 }
2951 }
2952
2953 int RGWGetBucketLogging::verify_permission()
2954 {
2955 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
2956 }
2957
2958 int RGWGetBucketLocation::verify_permission()
2959 {
2960 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
2961 }
2962
2963 int RGWCreateBucket::verify_permission()
2964 {
2965 /* This check is mostly needed for S3, which doesn't support account ACLs.
2966 * Swift doesn't allow delegating any permission to an anonymous user,
2967 * so this becomes an early exit in such a case. */
2968 if (s->auth.identity->is_anonymous()) {
2969 return -EACCES;
2970 }
2971
2972 rgw_bucket bucket;
2973 bucket.name = s->bucket_name;
2974 bucket.tenant = s->bucket_tenant;
2975 ARN arn = ARN(bucket);
2976 if (!verify_user_permission(this, s, arn, rgw::IAM::s3CreateBucket)) {
2977 return -EACCES;
2978 }
2979
2980 if (s->user->get_tenant() != s->bucket_tenant) {
2981 ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
2982 << " (user_id.tenant=" << s->user->get_tenant()
2983 << " requested=" << s->bucket_tenant << ")"
2984 << dendl;
2985 return -EACCES;
2986 }
2987 if (s->user->get_max_buckets() < 0) {
2988 return -EPERM;
2989 }
2990
2991 if (s->user->get_max_buckets()) {
2992 rgw::sal::RGWBucketList buckets;
2993 string marker;
2994 op_ret = rgw_read_user_buckets(store, s->user->get_id(), buckets,
2995 marker, string(), s->user->get_max_buckets(),
2996 false);
2997 if (op_ret < 0) {
2998 return op_ret;
2999 }
3000
3001 if ((int)buckets.count() >= s->user->get_max_buckets()) {
3002 return -ERR_TOO_MANY_BUCKETS;
3003 }
3004 }
3005
3006 return 0;
3007 }
3008
3009 int forward_request_to_master(struct req_state *s, obj_version *objv,
3010 rgw::sal::RGWRadosStore *store, bufferlist& in_data,
3011 JSONParser *jp, req_info *forward_info)
3012 {
3013 if (!store->svc()->zone->get_master_conn()) {
3014 ldpp_dout(s, 0) << "rest connection is invalid" << dendl;
3015 return -EINVAL;
3016 }
3017 ldpp_dout(s, 0) << "sending request to master zonegroup" << dendl;
3018 bufferlist response;
3019 string uid_str = s->user->get_id().to_str();
3020 #define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
3021 int ret = store->svc()->zone->get_master_conn()->forward(rgw_user(uid_str), (forward_info ? *forward_info : s->info),
3022 objv, MAX_REST_RESPONSE, &in_data, &response);
3023 if (ret < 0)
3024 return ret;
3025
3026 ldpp_dout(s, 20) << "response: " << response.c_str() << dendl;
3027 if (jp && !jp->parse(response.c_str(), response.length())) {
3028 ldpp_dout(s, 0) << "failed parsing response from master zonegroup" << dendl;
3029 return -EINVAL;
3030 }
3031
3032 return 0;
3033 }
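/* Reader's note (added commentary, not part of the original source): in a
 * multisite deployment, every metadata-changing request (bucket create/delete,
 * versioning, website configuration, ...) is funnelled through this helper when
 * the local zone is not the metadata master. The master's response is capped at
 * MAX_REST_RESPONSE and, when a JSONParser is supplied, decoded by the caller;
 * see RGWCreateBucket::execute() below, which extracts bucket_info and object
 * versions from it. */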
3034
3035 void RGWCreateBucket::pre_exec()
3036 {
3037 rgw_bucket_object_pre_exec(s);
3038 }
3039
3040 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
3041 map<string, bufferlist>& out_attrs,
3042 map<string, bufferlist>& out_rmattrs)
3043 {
3044 for (const auto& kv : orig_attrs) {
3045 const string& name = kv.first;
3046
3047 /* Check if the attr is user-defined metadata item. */
3048 if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
3049 RGW_ATTR_META_PREFIX) == 0) {
3050 /* For the objects all existing meta attrs have to be removed. */
3051 out_rmattrs[name] = kv.second;
3052 } else if (out_attrs.find(name) == std::end(out_attrs)) {
3053 out_attrs[name] = kv.second;
3054 }
3055 }
3056 }
3057
3058 /* Fuse resource metadata based on the original attributes in @orig_attrs, the
3059 * set of _custom_ attribute names to remove in @rmattr_names and the attributes
3060 * in @out_attrs. Place the results in @out_attrs.
3061 *
3062 * NOTE: it's assumed that all special attrs already present in @out_attrs
3063 * will be preserved without any change. Special attributes are those whose
3064 * names do NOT start with RGW_ATTR_META_PREFIX; the custom ones (which do)
3065 * carry X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta-* and so on. */
3066 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
3067 const set<string>& rmattr_names,
3068 map<string, bufferlist>& out_attrs)
3069 {
3070 for (const auto& kv : orig_attrs) {
3071 const string& name = kv.first;
3072
3073 /* Check if the attr is user-defined metadata item. */
3074 if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
3075 RGW_ATTR_META_PREFIX) == 0) {
3076 /* For the buckets all existing meta attrs are preserved,
3077 except those that are listed in rmattr_names. */
3078 if (rmattr_names.find(name) != std::end(rmattr_names)) {
3079 const auto aiter = out_attrs.find(name);
3080
3081 if (aiter != std::end(out_attrs)) {
3082 out_attrs.erase(aiter);
3083 }
3084 } else {
3085 /* emplace() won't alter the map if the key is already present.
3086 * This behaviour is fully intentional here. */
3087 out_attrs.emplace(kv);
3088 }
3089 } else if (out_attrs.find(name) == std::end(out_attrs)) {
3090 out_attrs[name] = kv.second;
3091 }
3092 }
3093 }
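/* Reader's note (added commentary, not part of the original source): the two
 * prepare_add_del_attrs() overloads implement opposite policies for user
 * metadata. The object variant further above schedules every pre-existing
 * RGW_ATTR_META_PREFIX attr for removal (object metadata is replaced
 * wholesale), while this bucket variant keeps existing meta attrs unless they
 * are explicitly listed in rmattr_names. */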
3094
3095
3096 static void populate_with_generic_attrs(const req_state * const s,
3097 map<string, bufferlist>& out_attrs)
3098 {
3099 for (const auto& kv : s->generic_attrs) {
3100 bufferlist& attrbl = out_attrs[kv.first];
3101 const string& val = kv.second;
3102 attrbl.clear();
3103 attrbl.append(val.c_str(), val.size() + 1);
3104 }
3105 }
3106
3107
3108 static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
3109 const std::set<std::string>& rmattr_names,
3110 RGWQuotaInfo& quota,
3111 bool * quota_extracted = nullptr)
3112 {
3113 bool extracted = false;
3114
3115 /* Put new limit on max objects. */
3116 auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
3117 std::string err;
3118 if (std::end(add_attrs) != iter) {
3119 quota.max_objects =
3120 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
3121 if (!err.empty()) {
3122 return -EINVAL;
3123 }
3124 add_attrs.erase(iter);
3125 extracted = true;
3126 }
3127
3128 /* Put new limit on bucket (container) size. */
3129 iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
3130 if (iter != add_attrs.end()) {
3131 quota.max_size =
3132 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
3133 if (!err.empty()) {
3134 return -EINVAL;
3135 }
3136 add_attrs.erase(iter);
3137 extracted = true;
3138 }
3139
3140 for (const auto& name : rmattr_names) {
3141 /* Remove limit on max objects. */
3142 if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
3143 quota.max_objects = -1;
3144 extracted = true;
3145 }
3146
3147 /* Remove limit on max bucket size. */
3148 if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
3149 quota.max_size = -1;
3150 extracted = true;
3151 }
3152 }
3153
3154 /* Swift requires checking on raw usage instead of the 4 KiB rounded one. */
3155 quota.check_on_raw = true;
3156 quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
3157
3158 if (quota_extracted) {
3159 *quota_extracted = extracted;
3160 }
3161
3162 return 0;
3163 }
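/* Reader's note (added commentary, not part of the original source):
 * RGW_ATTR_QUOTA_NOBJS and RGW_ATTR_QUOTA_MSIZE arrive here as ordinary
 * metadata attrs (they correspond to Swift's container quota headers, e.g.
 * X-Container-Meta-Quota-Count and X-Container-Meta-Quota-Bytes); the function
 * moves them into RGWQuotaInfo, removes them from the attr map so they are not
 * persisted as plain user metadata, and enables the quota only when at least
 * one positive limit remains. */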
3164
3165
3166 static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
3167 const std::set<std::string>& rmattr_names,
3168 RGWBucketWebsiteConf& ws_conf)
3169 {
3170 std::string lstval;
3171
3172 /* Let's define a mapping between each custom attribute and the memory where
3173 * attribute's value should be stored. The memory location is expressed by
3174 * a non-const reference. */
3175 const auto mapping = {
3176 std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
3177 std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
3178 std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
3179 std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
3180 std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
3181 };
3182
3183 for (const auto& kv : mapping) {
3184 const char * const key = kv.first;
3185 auto& target = kv.second;
3186
3187 auto iter = add_attrs.find(key);
3188
3189 if (std::end(add_attrs) != iter) {
3190 /* The "target" is a reference to ws_conf. */
3191 target = iter->second.c_str();
3192 add_attrs.erase(iter);
3193 }
3194
3195 if (rmattr_names.count(key)) {
3196 target = std::string();
3197 }
3198 }
3199
3200 if (! lstval.empty()) {
3201 ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
3202 }
3203 }
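/* Reader's note (added commentary, not part of the original source): the attrs
 * consumed here are the Swift static-website metadata items (index document,
 * error document, listing flag and CSS, subdirectory marker); like the quota
 * attrs above they are stripped from the attr map and folded into
 * RGWBucketWebsiteConf instead of being stored as plain user metadata. */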
3204
3205
3206 void RGWCreateBucket::execute()
3207 {
3208 RGWAccessControlPolicy old_policy(s->cct);
3209 buffer::list aclbl;
3210 buffer::list corsbl;
3211 bool existed;
3212 string bucket_name = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name);
3213 rgw_raw_obj obj(store->svc()->zone->get_zone_params().domain_root, bucket_name);
3214 obj_version objv, *pobjv = NULL;
3215
3216 op_ret = get_params();
3217 if (op_ret < 0)
3218 return;
3219
3220 if (!relaxed_region_enforcement &&
3221 !location_constraint.empty() &&
3222 !store->svc()->zone->has_zonegroup_api(location_constraint)) {
3223 ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
3224 << " can't be found." << dendl;
3225 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
3226 s->err.message = "The specified location-constraint is not valid";
3227 return;
3228 }
3229
3230 if (!relaxed_region_enforcement && !store->svc()->zone->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
3231 store->svc()->zone->get_zonegroup().api_name != location_constraint) {
3232 ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
3233 << " doesn't match zonegroup" << " (" << store->svc()->zone->get_zonegroup().api_name << ")"
3234 << dendl;
3235 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
3236 s->err.message = "The specified location-constraint is not valid";
3237 return;
3238 }
3239
3240 const auto& zonegroup = store->svc()->zone->get_zonegroup();
3241 if (!placement_rule.name.empty() &&
3242 !zonegroup.placement_targets.count(placement_rule.name)) {
3243 ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")"
3244 << " doesn't exist in the placement targets of zonegroup"
3245 << " (" << store->svc()->zone->get_zonegroup().api_name << ")" << dendl;
3246 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
3247 s->err.message = "The specified placement target does not exist";
3248 return;
3249 }
3250
3251 /* we need to make sure we read the bucket info; it hasn't been read earlier
3252 * for this specific request */
3253 s->bucket.tenant = s->bucket_tenant;
3254 s->bucket.name = s->bucket_name;
3255 rgw::sal::RGWBucket* bucket = NULL;
3256 op_ret = store->get_bucket(*s->user, s->bucket, &bucket);
3257 if (op_ret < 0 && op_ret != -ENOENT)
3258 return;
3259 s->bucket_exists = (op_ret != -ENOENT);
3260
3261 s->bucket_owner.set_id(s->user->get_id());
3262 s->bucket_owner.set_name(s->user->get_display_name());
3263 if (s->bucket_exists) {
3264 s->bucket_info = bucket->get_info();
3265 s->bucket_attrs = bucket->get_attrs();
3266 delete bucket;
3267 int r = rgw_op_get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
3268 s->bucket_attrs, &old_policy);
3269 if (r >= 0) {
3270 if (old_policy.get_owner().get_id().compare(s->user->get_id()) != 0) {
3271 op_ret = -EEXIST;
3272 return;
3273 }
3274 }
3275 }
3276
3277 RGWBucketInfo master_info;
3278 rgw_bucket *pmaster_bucket;
3279 uint32_t *pmaster_num_shards;
3280 real_time creation_time;
3281
3282 if (!store->svc()->zone->is_meta_master()) {
3283 JSONParser jp;
3284 op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
3285 if (op_ret < 0) {
3286 return;
3287 }
3288
3289 JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
3290 JSONDecoder::decode_json("object_ver", objv, &jp);
3291 JSONDecoder::decode_json("bucket_info", master_info, &jp);
3292 ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
3293 ldpp_dout(this, 20) << "got creation time: " << master_info.creation_time << dendl;
3294 pmaster_bucket= &master_info.bucket;
3295 creation_time = master_info.creation_time;
3296 pmaster_num_shards = &master_info.num_shards;
3297 pobjv = &objv;
3298 obj_lock_enabled = master_info.obj_lock_enabled();
3299 } else {
3300 pmaster_bucket = NULL;
3301 pmaster_num_shards = NULL;
3302 }
3303
3304 string zonegroup_id;
3305
3306 if (s->system_request) {
3307 zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
3308 if (zonegroup_id.empty()) {
3309 zonegroup_id = store->svc()->zone->get_zonegroup().get_id();
3310 }
3311 } else {
3312 zonegroup_id = store->svc()->zone->get_zonegroup().get_id();
3313 }
3314
3315 if (s->bucket_exists) {
3316 rgw_placement_rule selected_placement_rule;
3317 rgw_bucket bucket;
3318 bucket.tenant = s->bucket_tenant;
3319 bucket.name = s->bucket_name;
3320 op_ret = store->svc()->zone->select_bucket_placement(s->user->get_info(),
3321 zonegroup_id,
3322 placement_rule,
3323 &selected_placement_rule, nullptr);
3324 if (selected_placement_rule != s->bucket_info.placement_rule) {
3325 op_ret = -EEXIST;
3326 return;
3327 }
3328 }
3329
3330 /* Encode special metadata first as we're using std::map::emplace under
3331 * the hood. This method will add the new items only if the map doesn't
3332 * contain such keys yet. */
3333 policy.encode(aclbl);
3334 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3335
3336 if (has_cors) {
3337 cors_config.encode(corsbl);
3338 emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
3339 }
3340
3341 RGWQuotaInfo quota_info;
3342 const RGWQuotaInfo * pquota_info = nullptr;
3343 if (need_metadata_upload()) {
3344 /* It's assumed that the following functions WILL NOT change any special
3345 * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
3346 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
3347 if (op_ret < 0) {
3348 return;
3349 }
3350 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
3351 populate_with_generic_attrs(s, attrs);
3352
3353 op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
3354 if (op_ret < 0) {
3355 return;
3356 } else {
3357 pquota_info = &quota_info;
3358 }
3359
3360 /* Web site of Swift API. */
3361 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
3362 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
3363 }
3364
3365 s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
3366 s->bucket.name = s->bucket_name;
3367
3368 /* Handle updates of the metadata for Swift's object versioning. */
3369 if (swift_ver_location) {
3370 s->bucket_info.swift_ver_location = *swift_ver_location;
3371 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
3372 }
3373 if (obj_lock_enabled) {
3374 info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
3375 }
3376
3377
3378 op_ret = store->getRados()->create_bucket(s->user->get_info(), s->bucket, zonegroup_id,
3379 placement_rule, s->bucket_info.swift_ver_location,
3380 pquota_info, attrs,
3381 info, pobjv, &ep_objv, creation_time,
3382 pmaster_bucket, pmaster_num_shards, true);
3383 /* continue on -EEXIST; this way we can recover from a partial create by
3384 * retrying it. */
3385 ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;
3386
3387 if (op_ret && op_ret != -EEXIST)
3388 return;
3389
3390 existed = (op_ret == -EEXIST);
3391
3392 if (existed) {
3393 /* bucket already existed, might have raced with another bucket creation, or
3394 * might be partial bucket creation that never completed. Read existing bucket
3395 * info, verify that the reported bucket owner is the current user.
3396 * If all is ok then update the user's list of buckets.
3397 * Otherwise inform client about a name conflict.
3398 */
3399 if (info.owner.compare(s->user->get_id()) != 0) {
3400 op_ret = -EEXIST;
3401 return;
3402 }
3403 s->bucket = info.bucket;
3404 }
3405
3406 op_ret = store->ctl()->bucket->link_bucket(s->user->get_id(), s->bucket,
3407 info.creation_time, s->yield, false);
3408 if (op_ret && !existed && op_ret != -EEXIST) {
3409 /* if it exists (or previously existed), don't remove it! */
3410 op_ret = store->ctl()->bucket->unlink_bucket(s->user->get_id(), s->bucket, s->yield);
3411 if (op_ret < 0) {
3412 ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
3413 << dendl;
3414 }
3415 } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
3416 op_ret = -ERR_BUCKET_EXISTS;
3417 }
3418
3419 if (need_metadata_upload() && existed) {
3420 /* OK, it looks like we lost a race with another request. As we're required to
3421 * handle metadata fusion and upload, the whole operation becomes very
3422 * similar in nature to PutMetadataBucket. However, as the attrs may have
3423 * changed in the meantime, we have to refresh. */
3424 short tries = 0;
3425 do {
3426 RGWBucketInfo binfo;
3427 map<string, bufferlist> battrs;
3428
3429 op_ret = store->getRados()->get_bucket_info(store->svc(), s->bucket_tenant, s->bucket_name,
3430 binfo, nullptr, s->yield, &battrs);
3431 if (op_ret < 0) {
3432 return;
3433 } else if (binfo.owner.compare(s->user->get_id()) != 0) {
3434 /* New bucket doesn't belong to the account we're operating on. */
3435 op_ret = -EEXIST;
3436 return;
3437 } else {
3438 s->bucket_info = binfo;
3439 s->bucket_attrs = battrs;
3440 }
3441
3442 attrs.clear();
3443
3444 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
3445 if (op_ret < 0) {
3446 return;
3447 }
3448 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
3449 populate_with_generic_attrs(s, attrs);
3450 op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
3451 if (op_ret < 0) {
3452 return;
3453 }
3454
3455 /* Handle updates of the metadata for Swift's object versioning. */
3456 if (swift_ver_location) {
3457 s->bucket_info.swift_ver_location = *swift_ver_location;
3458 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
3459 }
3460
3461 /* Web site of Swift API. */
3462 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
3463 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
3464
3465 /* This will also set the quota on the bucket. */
3466 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
3467 &s->bucket_info.objv_tracker,
3468 s->yield);
3469 } while (op_ret == -ECANCELED && tries++ < 20);
3470
3471 /* Restore the proper return code. */
3472 if (op_ret >= 0) {
3473 op_ret = -ERR_BUCKET_EXISTS;
3474 }
3475 }
3476 }
3477
3478 int RGWDeleteBucket::verify_permission()
3479 {
3480 if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucket)) {
3481 return -EACCES;
3482 }
3483
3484 return 0;
3485 }
3486
3487 void RGWDeleteBucket::pre_exec()
3488 {
3489 rgw_bucket_object_pre_exec(s);
3490 }
3491
3492 void RGWDeleteBucket::execute()
3493 {
3494 if (s->bucket_name.empty()) {
3495 op_ret = -EINVAL;
3496 return;
3497 }
3498
3499 if (!s->bucket_exists) {
3500 ldpp_dout(this, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
3501 op_ret = -ERR_NO_SUCH_BUCKET;
3502 return;
3503 }
3504 RGWObjVersionTracker ot;
3505 ot.read_version = s->bucket_ep_objv;
3506
3507 if (s->system_request) {
3508 string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
3509 string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
3510 if (!tag.empty()) {
3511 ot.read_version.tag = tag;
3512 uint64_t ver;
3513 string err;
3514 ver = strict_strtol(ver_str.c_str(), 10, &err);
3515 if (!err.empty()) {
3516 ldpp_dout(this, 0) << "failed to parse ver param" << dendl;
3517 op_ret = -EINVAL;
3518 return;
3519 }
3520 ot.read_version.ver = ver;
3521 }
3522 }
3523
3524 op_ret = store->ctl()->bucket->sync_user_stats(s->user->get_id(), s->bucket_info);
3525 if ( op_ret < 0) {
3526 ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
3527 }
3528
3529 op_ret = store->getRados()->check_bucket_empty(s->bucket_info, s->yield);
3530 if (op_ret < 0) {
3531 return;
3532 }
3533
3534 if (!store->svc()->zone->is_meta_master()) {
3535 bufferlist in_data;
3536 op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
3537 NULL);
3538 if (op_ret < 0) {
3539 if (op_ret == -ENOENT) {
3540 /* adjust error, we want to return with NoSuchBucket and not
3541 * NoSuchKey */
3542 op_ret = -ERR_NO_SUCH_BUCKET;
3543 }
3544 return;
3545 }
3546 }
3547
3548 string prefix, delimiter;
3549
3550 if (s->prot_flags & RGW_REST_SWIFT) {
3551 string path_args;
3552 path_args = s->info.args.get("path");
3553 if (!path_args.empty()) {
3554 if (!delimiter.empty() || !prefix.empty()) {
3555 op_ret = -EINVAL;
3556 return;
3557 }
3558 prefix = path_args;
3559 delimiter="/";
3560 }
3561 }
3562
3563 op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);
3564
3565 if (op_ret < 0) {
3566 return;
3567 }
3568
3569 op_ret = store->getRados()->delete_bucket(s->bucket_info, ot, s->yield, false);
3570
3571 if (op_ret == -ECANCELED) {
3572 // lost a race, either with mdlog sync or another delete bucket operation.
3573 // in either case, we've already called ctl.bucket->unlink_bucket()
3574 op_ret = 0;
3575 return;
3576 }
3577
3578 if (op_ret == 0) {
3579 op_ret = store->ctl()->bucket->unlink_bucket(s->bucket_info.owner,
3580 s->bucket, s->yield, false);
3581 if (op_ret < 0) {
3582 ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
3583 << dendl;
3584 }
3585 }
3586 }
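/* Reader's note (added commentary, not part of the original source): the delete
 * path above is deliberately ordered: user stats are flushed, the bucket is
 * verified empty, the request is forwarded to the metadata master (multisite),
 * any in-flight multipart uploads are aborted, and only then is the bucket
 * removed and unlinked from its owner. -ECANCELED from delete_bucket() is
 * treated as a lost race (mdlog sync or a concurrent delete) and mapped to
 * success. */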
3587
3588 int RGWPutObj::verify_permission()
3589 {
3590 if (! copy_source.empty()) {
3591
3592 RGWAccessControlPolicy cs_acl(s->cct);
3593 boost::optional<Policy> policy;
3594 map<string, bufferlist> cs_attrs;
3595 rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
3596 rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);
3597
3598 rgw_obj obj(cs_bucket, cs_object);
3599 store->getRados()->set_atomic(s->obj_ctx, obj);
3600 store->getRados()->set_prefetch_data(s->obj_ctx, obj);
3601
3602 /* check source object permissions */
3603 if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr,
3604 policy, cs_bucket, cs_object) < 0) {
3605 return -EACCES;
3606 }
3607
3608 /* admin request overrides permission checks */
3609 if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
3610 if (policy || ! s->iam_user_policies.empty()) {
3611 auto usr_policy_res = Effect::Pass;
3612 for (auto& user_policy : s->iam_user_policies) {
3613 if (usr_policy_res = user_policy.eval(s->env, *s->auth.identity,
3614 cs_object.instance.empty() ?
3615 rgw::IAM::s3GetObject :
3616 rgw::IAM::s3GetObjectVersion,
3617 rgw::ARN(obj)); usr_policy_res == Effect::Deny)
3618 return -EACCES;
3619 else if (usr_policy_res == Effect::Allow)
3620 break;
3621 }
3622 rgw::IAM::Effect e = Effect::Pass;
3623 if (policy) {
3624 e = policy->eval(s->env, *s->auth.identity,
3625 cs_object.instance.empty() ?
3626 rgw::IAM::s3GetObject :
3627 rgw::IAM::s3GetObjectVersion,
3628 rgw::ARN(obj));
3629 }
3630 if (e == Effect::Deny) {
3631 return -EACCES;
3632 } else if (usr_policy_res == Effect::Pass && e == Effect::Pass &&
3633 !cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
3634 RGW_PERM_READ)) {
3635 return -EACCES;
3636 }
3637 } else if (!cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
3638 RGW_PERM_READ)) {
3639 return -EACCES;
3640 }
3641 }
3642 }
3643
3644 if (s->bucket_access_conf && s->bucket_access_conf->block_public_acls()) {
3645 if (s->canned_acl.compare("public-read") ||
3646 s->canned_acl.compare("public-read-write") ||
3647 s->canned_acl.compare("authenticated-read"))
3648 return -EACCES;
3649 }
3650
3651 auto op_ret = get_params();
3652 if (op_ret < 0) {
3653 ldpp_dout(this, 20) << "get_params() returned ret=" << op_ret << dendl;
3654 return op_ret;
3655 }
3656
3657 if (s->iam_policy || ! s->iam_user_policies.empty()) {
3658 rgw_add_grant_to_iam_environment(s->env, s);
3659
3660 rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);
3661
3662 if (obj_tags != nullptr && obj_tags->count() > 0){
3663 auto tags = obj_tags->get_tags();
3664 for (const auto& kv: tags){
3665 rgw_add_to_iam_environment(s->env, "s3:RequestObjectTag/"+kv.first, kv.second);
3666 }
3667 }
3668
3669 constexpr auto encrypt_attr = "x-amz-server-side-encryption";
3670 constexpr auto s3_encrypt_attr = "s3:x-amz-server-side-encryption";
3671 auto enc_header = s->info.x_meta_map.find(encrypt_attr);
3672 if (enc_header != s->info.x_meta_map.end()){
3673 rgw_add_to_iam_environment(s->env, s3_encrypt_attr, enc_header->second);
3674 }
3675
3676 constexpr auto kms_attr = "x-amz-server-side-encryption-aws-kms-key-id";
3677 constexpr auto s3_kms_attr = "s3:x-amz-server-side-encryption-aws-kms-key-id";
3678 auto kms_header = s->info.x_meta_map.find(kms_attr);
3679 if (kms_header != s->info.x_meta_map.end()){
3680 rgw_add_to_iam_environment(s->env, s3_kms_attr, kms_header->second);
3681 }
3682
3683 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
3684 boost::none,
3685 rgw::IAM::s3PutObject,
3686 rgw_obj(s->bucket, s->object));
3687 if (usr_policy_res == Effect::Deny)
3688 return -EACCES;
3689
3690 rgw::IAM::Effect e = Effect::Pass;
3691 if (s->iam_policy) {
3692 e = s->iam_policy->eval(s->env, *s->auth.identity,
3693 rgw::IAM::s3PutObject,
3694 rgw_obj(s->bucket, s->object));
3695 }
3696 if (e == Effect::Allow) {
3697 return 0;
3698 } else if (e == Effect::Deny) {
3699 return -EACCES;
3700 } else if (usr_policy_res == Effect::Allow) {
3701 return 0;
3702 }
3703 }
3704
3705 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
3706 return -EACCES;
3707 }
3708
3709 return 0;
3710 }
3711
3712
3713 void RGWPutObj::pre_exec()
3714 {
3715 rgw_bucket_object_pre_exec(s);
3716 }
3717
3718 class RGWPutObj_CB : public RGWGetObj_Filter
3719 {
3720 RGWPutObj *op;
3721 public:
3722 explicit RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
3723 ~RGWPutObj_CB() override {}
3724
3725 int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
3726 return op->get_data_cb(bl, bl_ofs, bl_len);
3727 }
3728 };
3729
3730 int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3731 {
3732 bufferlist bl_tmp;
3733 bl.begin(bl_ofs).copy(bl_len, bl_tmp);
3734
3735 bl_aux.append(bl_tmp);
3736
3737 return bl_len;
3738 }
3739
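// Reads the byte range [fst, lst] of the copy-source object into bl, passing
// the data through an optional decompression filter (when the source was
// stored compressed) and an optional decryption filter before the
// RGWPutObj_CB callback above appends it to bl_aux.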
3740 int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3741 {
3742 RGWPutObj_CB cb(this);
3743 RGWGetObj_Filter* filter = &cb;
3744 boost::optional<RGWGetObj_Decompress> decompress;
3745 std::unique_ptr<RGWGetObj_Filter> decrypt;
3746 RGWCompressionInfo cs_info;
3747 map<string, bufferlist> attrs;
3748 map<string, bufferlist>::iterator attr_iter;
3749 int ret = 0;
3750
3751 uint64_t obj_size;
3752 int64_t new_ofs, new_end;
3753
3754 new_ofs = fst;
3755 new_end = lst;
3756
3757 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3758 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3759
3760 RGWRados::Object op_target(store->getRados(), copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3761 RGWRados::Object::Read read_op(&op_target);
3762 read_op.params.obj_size = &obj_size;
3763 read_op.params.attrs = &attrs;
3764
3765 ret = read_op.prepare(s->yield);
3766 if (ret < 0)
3767 return ret;
3768
3769 bool need_decompress;
3770 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3771 if (op_ret < 0) {
3772 ldpp_dout(s, 0) << "ERROR: failed to decode compression info" << dendl;
3773 return -EIO;
3774 }
3775
3776 bool partial_content = true;
3777 if (need_decompress)
3778 {
3779 obj_size = cs_info.orig_size;
3780 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3781 filter = &*decompress;
3782 }
3783
3784 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3785 op_ret = this->get_decrypt_filter(&decrypt,
3786 filter,
3787 attrs,
3788 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3789 if (decrypt != nullptr) {
3790 filter = decrypt.get();
3791 }
3792 if (op_ret < 0) {
3793 return op_ret;
3794 }
3795
3796 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3797 if (ret < 0)
3798 return ret;
3799
3800 filter->fixup_range(new_ofs, new_end);
3801 ret = read_op.iterate(new_ofs, new_end, filter, s->yield);
3802
3803 if (ret >= 0)
3804 ret = filter->flush();
3805
3806 bl.claim_append(bl_aux);
3807
3808 return ret;
3809 }
3810
3811 // special handling for compression type = "random" with multipart uploads
3812 static CompressorRef get_compressor_plugin(const req_state *s,
3813 const std::string& compression_type)
3814 {
3815 if (compression_type != "random") {
3816 return Compressor::create(s->cct, compression_type);
3817 }
3818
3819 bool is_multipart{false};
3820 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3821
3822 if (!is_multipart) {
3823 return Compressor::create(s->cct, compression_type);
3824 }
3825
3826 // use a hash of the multipart upload id so all parts use the same plugin
3827 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3828 if (alg == Compressor::COMP_ALG_NONE) {
3829 return nullptr;
3830 }
3831 return Compressor::create(s->cct, alg);
3832 }
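// Illustrative example (hypothetical values): for uploadId "2~J5n9x...", every
// part of that multipart upload hashes the same string to the same algorithm
// index, so all parts are compressed with one plugin and the completed object
// can be decompressed consistently.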
3833
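// Outline of the write path below: resolve versioning parameters, verify any
// client-supplied MD5/ETag, pre-check quota when the length is known up front,
// pick a processor (multipart part, append, or atomic full object), stream the
// request body (or the copy-source range) through optional encryption or
// compression filters, re-check quota against the actual size, assemble object
// attributes (ACL, ETag, tags, retention, ...), complete the write, and
// finally publish a bucket notification.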
3834 void RGWPutObj::execute()
3835 {
3836 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3837 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3838 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3839 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3840 MD5 hash;
3841 bufferlist bl, aclbl, bs;
3842 int len;
3843
3844 off_t fst;
3845 off_t lst;
3846
3847 bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
3848 perfcounter->inc(l_rgw_put);
3849 // report latency on return
3850 auto put_lat = make_scope_guard([&] {
3851 perfcounter->tinc(l_rgw_put_lat, s->time_elapsed());
3852 });
3853
3854 op_ret = -EINVAL;
3855 if (s->object.empty()) {
3856 return;
3857 }
3858
3859 if (!s->bucket_exists) {
3860 op_ret = -ERR_NO_SUCH_BUCKET;
3861 return;
3862 }
3863
3864
3865 op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
3866 if (op_ret < 0) {
3867 ldpp_dout(this, 20) << "get_system_versioning_params() returned ret="
3868 << op_ret << dendl;
3869 return;
3870 }
3871
3872 if (supplied_md5_b64) {
3873 need_calc_md5 = true;
3874
3875 ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3876 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3877 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3878     ldpp_dout(this, 15) << "ceph_unarmor ret=" << op_ret << dendl;
3879 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3880 op_ret = -ERR_INVALID_DIGEST;
3881 return;
3882 }
3883
3884 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
3885 ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
3886 }
3887
3888   if (!chunked_upload) { /* with chunked upload we don't know how big the upload is.
3889                             we also check sizes at the end anyway */
3890 op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
3891 user_quota, bucket_quota, s->content_length);
3892 if (op_ret < 0) {
3893 ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl;
3894 return;
3895 }
3896 }
3897
3898 if (supplied_etag) {
3899 strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
3900 supplied_md5[sizeof(supplied_md5) - 1] = '\0';
3901 }
3902
3903 const bool multipart = !multipart_upload_id.empty();
3904 auto& obj_ctx = *static_cast<RGWObjectCtx*>(s->obj_ctx);
3905 rgw_obj obj{s->bucket, s->object};
3906
3907 /* Handle object versioning of Swift API. */
3908 if (! multipart) {
3909 op_ret = store->getRados()->swift_versioning_copy(obj_ctx,
3910 s->bucket_owner.get_id(),
3911 s->bucket_info,
3912 obj,
3913 this,
3914 s->yield);
3915 if (op_ret < 0) {
3916 return;
3917 }
3918 }
3919
3920 // create the object processor
3921 auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
3922 s->yield);
3923 using namespace rgw::putobj;
3924 constexpr auto max_processor_size = std::max({sizeof(MultipartObjectProcessor),
3925 sizeof(AtomicObjectProcessor),
3926 sizeof(AppendObjectProcessor)});
3927 ceph::static_ptr<ObjectProcessor, max_processor_size> processor;
3928
3929 rgw_placement_rule *pdest_placement;
3930
3931 multipart_upload_info upload_info;
3932 if (multipart) {
3933 RGWMPObj mp(s->object.name, multipart_upload_id);
3934
3935 op_ret = get_multipart_info(store, s, mp.get_meta(), nullptr, nullptr, &upload_info);
3936 if (op_ret < 0) {
3937 if (op_ret != -ENOENT) {
3938 ldpp_dout(this, 0) << "ERROR: get_multipart_info returned " << op_ret << ": " << cpp_strerror(-op_ret) << dendl;
3939 } else {// -ENOENT: raced with upload complete/cancel, no need to spam log
3940 ldpp_dout(this, 20) << "failed to get multipart info (returned " << op_ret << ": " << cpp_strerror(-op_ret) << "): probably raced with upload complete / cancel" << dendl;
3941 }
3942 return;
3943 }
3944 pdest_placement = &upload_info.dest_placement;
3945 ldpp_dout(this, 20) << "dest_placement for part=" << upload_info.dest_placement << dendl;
3946 processor.emplace<MultipartObjectProcessor>(
3947 &*aio, store, s->bucket_info, pdest_placement,
3948 s->owner.get_id(), obj_ctx, obj,
3949 multipart_upload_id, multipart_part_num, multipart_part_str,
3950 this, s->yield);
3951 } else if(append) {
3952 if (s->bucket_info.versioned()) {
3953 op_ret = -ERR_INVALID_BUCKET_STATE;
3954 return;
3955 }
3956 pdest_placement = &s->dest_placement;
3957 processor.emplace<AppendObjectProcessor>(
3958 &*aio, store, s->bucket_info, pdest_placement, s->bucket_owner.get_id(),obj_ctx, obj,
3959 s->req_id, position, &cur_accounted_size, this, s->yield);
3960 } else {
3961 if (s->bucket_info.versioning_enabled()) {
3962 if (!version_id.empty()) {
3963 obj.key.set_instance(version_id);
3964 } else {
3965 store->getRados()->gen_rand_obj_instance_name(&obj);
3966 version_id = obj.key.instance;
3967 }
3968 }
3969 pdest_placement = &s->dest_placement;
3970 processor.emplace<AtomicObjectProcessor>(
3971 &*aio, store, s->bucket_info, pdest_placement,
3972 s->bucket_owner.get_id(), obj_ctx, obj, olh_epoch,
3973 s->req_id, this, s->yield);
3974 }
3975
3976 op_ret = processor->prepare(s->yield);
3977 if (op_ret < 0) {
3978 ldpp_dout(this, 20) << "processor->prepare() returned ret=" << op_ret
3979 << dendl;
3980 return;
3981 }
3982
3983 if ((! copy_source.empty()) && !copy_source_range) {
3984 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3985 rgw_obj obj(copy_source_bucket_info.bucket, obj_key.name);
3986
3987 RGWObjState *astate;
3988 op_ret = store->getRados()->get_obj_state(&obj_ctx, copy_source_bucket_info, obj,
3989 &astate, true, s->yield, false);
3990 if (op_ret < 0) {
3991       ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error " << op_ret << dendl;
3992 return;
3993 }
3994 if (!astate->exists){
3995 op_ret = -ENOENT;
3996 return;
3997 }
3998 lst = astate->accounted_size - 1;
3999 } else {
4000 lst = copy_source_range_lst;
4001 }
4002
4003 fst = copy_source_range_fst;
4004
4005 // no filters by default
4006 DataProcessor *filter = processor.get();
4007
4008 const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(*pdest_placement);
4009 CompressorRef plugin;
4010 boost::optional<RGWPutObj_Compress> compressor;
4011
4012 std::unique_ptr<DataProcessor> encrypt;
4013
4014 if (!append) { // compression and encryption only apply to full object uploads
4015 op_ret = get_encrypt_filter(&encrypt, filter);
4016 if (op_ret < 0) {
4017 return;
4018 }
4019 if (encrypt != nullptr) {
4020 filter = &*encrypt;
4021 } else if (compression_type != "none") {
4022 plugin = get_compressor_plugin(s, compression_type);
4023 if (!plugin) {
4024 ldpp_dout(this, 1) << "Cannot load plugin for compression type "
4025 << compression_type << dendl;
4026 } else {
4027 compressor.emplace(s->cct, plugin, filter);
4028 filter = &*compressor;
4029 }
4030 }
4031 }
4032 tracepoint(rgw_op, before_data_transfer, s->req_id.c_str());
4033 do {
4034 bufferlist data;
4035 if (fst > lst)
4036 break;
4037 if (copy_source.empty()) {
4038 len = get_data(data);
4039 } else {
4040 uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
4041 op_ret = get_data(fst, cur_lst, data);
4042 if (op_ret < 0)
4043 return;
4044 len = data.length();
4045 s->content_length += len;
4046 fst += len;
4047 }
4048 if (len < 0) {
4049 op_ret = len;
4050 ldpp_dout(this, 20) << "get_data() returned ret=" << op_ret << dendl;
4051 return;
4052 } else if (len == 0) {
4053 break;
4054 }
4055
4056 if (need_calc_md5) {
4057 hash.Update((const unsigned char *)data.c_str(), data.length());
4058 }
4059
4060     /* update torrent */
4061 torrent.update(data);
4062
4063 op_ret = filter->process(std::move(data), ofs);
4064 if (op_ret < 0) {
4065 ldpp_dout(this, 20) << "processor->process() returned ret="
4066 << op_ret << dendl;
4067 return;
4068 }
4069
4070 ofs += len;
4071 } while (len > 0);
4072 tracepoint(rgw_op, after_data_transfer, s->req_id.c_str(), ofs);
4073
4074 // flush any data in filters
4075 op_ret = filter->process({}, ofs);
4076 if (op_ret < 0) {
4077 return;
4078 }
4079
4080 if (!chunked_upload && ofs != s->content_length) {
4081 op_ret = -ERR_REQUEST_TIMEOUT;
4082 return;
4083 }
4084 s->obj_size = ofs;
4085
4086 perfcounter->inc(l_rgw_put_b, s->obj_size);
4087
4088 op_ret = do_aws4_auth_completion();
4089 if (op_ret < 0) {
4090 return;
4091 }
4092
4093 op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
4094 user_quota, bucket_quota, s->obj_size);
4095 if (op_ret < 0) {
4096 ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
4097 return;
4098 }
4099
4100 hash.Final(m);
4101
4102 if (compressor && compressor->is_compressed()) {
4103 bufferlist tmp;
4104 RGWCompressionInfo cs_info;
4105 cs_info.compression_type = plugin->get_type_name();
4106 cs_info.orig_size = s->obj_size;
4107 cs_info.blocks = move(compressor->get_compression_blocks());
4108 encode(cs_info, tmp);
4109 attrs[RGW_ATTR_COMPRESSION] = tmp;
4110 ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION
4111 << " with type=" << cs_info.compression_type
4112 << ", orig_size=" << cs_info.orig_size
4113 << ", blocks=" << cs_info.blocks.size() << dendl;
4114 }
4115
4116 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
4117
4118 etag = calc_md5;
4119
4120 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
4121 op_ret = -ERR_BAD_DIGEST;
4122 return;
4123 }
4124
4125 policy.encode(aclbl);
4126 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
4127
4128 if (dlo_manifest) {
4129 op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
4130 if (op_ret < 0) {
4131 ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
4132 return;
4133 }
4134 }
4135
4136 if (slo_info) {
4137 bufferlist manifest_bl;
4138 encode(*slo_info, manifest_bl);
4139 emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));
4140 }
4141
4142 if (supplied_etag && etag.compare(supplied_etag) != 0) {
4143 op_ret = -ERR_UNPROCESSABLE_ENTITY;
4144 return;
4145 }
4146 bl.append(etag.c_str(), etag.size());
4147 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
4148
4149 populate_with_generic_attrs(s, attrs);
4150 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
4151 if (op_ret < 0) {
4152 return;
4153 }
4154 encode_delete_at_attr(delete_at, attrs);
4155 encode_obj_tags_attr(obj_tags.get(), attrs);
4156 rgw_cond_decode_objtags(s, attrs);
4157
4158   /* Add custom metadata to expose whether an object is an SLO or not.
4159    * Appending the attribute must be performed AFTER processing any
4160    * input from the user in order to prevent overwriting. */
4161 if (slo_info) {
4162 bufferlist slo_userindicator_bl;
4163 slo_userindicator_bl.append("True", 4);
4164 emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
4165 }
4166 if (obj_legal_hold) {
4167 bufferlist obj_legal_hold_bl;
4168 obj_legal_hold->encode(obj_legal_hold_bl);
4169 emplace_attr(RGW_ATTR_OBJECT_LEGAL_HOLD, std::move(obj_legal_hold_bl));
4170 }
4171 if (obj_retention) {
4172 bufferlist obj_retention_bl;
4173 obj_retention->encode(obj_retention_bl);
4174 emplace_attr(RGW_ATTR_OBJECT_RETENTION, std::move(obj_retention_bl));
4175 }
4176
4177 tracepoint(rgw_op, processor_complete_enter, s->req_id.c_str());
4178 op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
4179 (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
4180 (user_data.empty() ? nullptr : &user_data), nullptr, nullptr,
4181 s->yield);
4182 tracepoint(rgw_op, processor_complete_exit, s->req_id.c_str());
4183
4184 /* produce torrent */
4185 if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
4186 {
4187 torrent.init(s, store);
4188 torrent.set_create_date(mtime);
4189 op_ret = torrent.complete();
4190 if (0 != op_ret)
4191 {
4192       ldpp_dout(this, 0) << "ERROR: torrent.complete() returned " << op_ret << dendl;
4193 return;
4194 }
4195 }
4196
4197 // send request to notification manager
4198 const auto ret = rgw::notify::publish(s, obj.key, s->obj_size, mtime, etag, rgw::notify::ObjectCreatedPut, store);
4199 if (ret < 0) {
4200 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
4201     // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error if sending failed
4202     // this should be a global conf (probably returning a different handler)
4203     // so we don't need to read the configured values before we perform it
4204 }
4205 }
4206
4207 int RGWPostObj::verify_permission()
4208 {
4209 return 0;
4210 }
4211
4212 void RGWPostObj::pre_exec()
4213 {
4214 rgw_bucket_object_pre_exec(s);
4215 }
4216
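// Swift FormPost and S3 browser POST uploads are handled here. Permissions are
// evaluated in execute() rather than verify_permission() because the POST form
// has to be parsed first (get_params/verify_params); after that each embedded
// file is written in the do/while loop below, since a single form may carry
// several files.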
4217 void RGWPostObj::execute()
4218 {
4219 boost::optional<RGWPutObj_Compress> compressor;
4220 CompressorRef plugin;
4221 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
4222
4223 /* Read in the data from the POST form. */
4224 op_ret = get_params();
4225 if (op_ret < 0) {
4226 return;
4227 }
4228
4229 op_ret = verify_params();
4230 if (op_ret < 0) {
4231 return;
4232 }
4233
4234 if (s->iam_policy || ! s->iam_user_policies.empty()) {
4235 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
4236 boost::none,
4237 rgw::IAM::s3PutObject,
4238 rgw_obj(s->bucket, s->object));
4239 if (usr_policy_res == Effect::Deny) {
4240 op_ret = -EACCES;
4241 return;
4242 }
4243
4244 rgw::IAM::Effect e = Effect::Pass;
4245 if (s->iam_policy) {
4246 e = s->iam_policy->eval(s->env, *s->auth.identity,
4247 rgw::IAM::s3PutObject,
4248 rgw_obj(s->bucket, s->object));
4249 }
4250 if (e == Effect::Deny) {
4251 op_ret = -EACCES;
4252 return;
4253 } else if (usr_policy_res == Effect::Pass && e == Effect::Pass && !verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4254 op_ret = -EACCES;
4255 return;
4256 }
4257 } else if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4258 op_ret = -EACCES;
4259 return;
4260 }
4261
4262 /* Start iteration over data fields. It's necessary as Swift's FormPost
4263    * is capable of handling multiple files in a single form. */
4264 do {
4265 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
4266 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
4267 MD5 hash;
4268 ceph::buffer::list bl, aclbl;
4269 int len = 0;
4270
4271 op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(),
4272 s->bucket,
4273 user_quota,
4274 bucket_quota,
4275 s->content_length);
4276 if (op_ret < 0) {
4277 return;
4278 }
4279
4280 if (supplied_md5_b64) {
4281 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
4282 ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
4283 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
4284 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
4285       ldpp_dout(this, 15) << "ceph_unarmor ret=" << op_ret << dendl;
4286 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
4287 op_ret = -ERR_INVALID_DIGEST;
4288 return;
4289 }
4290
4291 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
4292 ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
4293 }
4294
4295 rgw_obj obj(s->bucket, get_current_filename());
4296 if (s->bucket_info.versioning_enabled()) {
4297 store->getRados()->gen_rand_obj_instance_name(&obj);
4298 }
4299
4300 auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
4301 s->yield);
4302
4303 using namespace rgw::putobj;
4304 AtomicObjectProcessor processor(&*aio, store, s->bucket_info,
4305 &s->dest_placement,
4306 s->bucket_owner.get_id(),
4307 *static_cast<RGWObjectCtx*>(s->obj_ctx),
4308 obj, 0, s->req_id, this, s->yield);
4309 op_ret = processor.prepare(s->yield);
4310 if (op_ret < 0) {
4311 return;
4312 }
4313
4314 /* No filters by default. */
4315 DataProcessor *filter = &processor;
4316
4317 std::unique_ptr<DataProcessor> encrypt;
4318 op_ret = get_encrypt_filter(&encrypt, filter);
4319 if (op_ret < 0) {
4320 return;
4321 }
4322 if (encrypt != nullptr) {
4323 filter = encrypt.get();
4324 } else {
4325 const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(
4326 s->dest_placement);
4327 if (compression_type != "none") {
4328 plugin = Compressor::create(s->cct, compression_type);
4329 if (!plugin) {
4330 ldpp_dout(this, 1) << "Cannot load plugin for compression type "
4331 << compression_type << dendl;
4332 } else {
4333 compressor.emplace(s->cct, plugin, filter);
4334 filter = &*compressor;
4335 }
4336 }
4337 }
4338
4339 bool again;
4340 do {
4341 ceph::bufferlist data;
4342 len = get_data(data, again);
4343
4344 if (len < 0) {
4345 op_ret = len;
4346 return;
4347 }
4348
4349 if (!len) {
4350 break;
4351 }
4352
4353 hash.Update((const unsigned char *)data.c_str(), data.length());
4354 op_ret = filter->process(std::move(data), ofs);
4355
4356 ofs += len;
4357
4358 if (ofs > max_len) {
4359 op_ret = -ERR_TOO_LARGE;
4360 return;
4361 }
4362 } while (again);
4363
4364 // flush
4365 op_ret = filter->process({}, ofs);
4366 if (op_ret < 0) {
4367 return;
4368 }
4369
4370 if (len < min_len) {
4371 op_ret = -ERR_TOO_SMALL;
4372 return;
4373 }
4374
4375 s->obj_size = ofs;
4376
4377
4378 op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
4379 user_quota, bucket_quota, s->obj_size);
4380 if (op_ret < 0) {
4381 return;
4382 }
4383
4384 hash.Final(m);
4385 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
4386
4387 etag = calc_md5;
4388
4389 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
4390 op_ret = -ERR_BAD_DIGEST;
4391 return;
4392 }
4393
4394 bl.append(etag.c_str(), etag.size());
4395 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
4396
4397 policy.encode(aclbl);
4398 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
4399
4400 const std::string content_type = get_current_content_type();
4401 if (! content_type.empty()) {
4402 ceph::bufferlist ct_bl;
4403 ct_bl.append(content_type.c_str(), content_type.size() + 1);
4404 emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
4405 }
4406
4407 if (compressor && compressor->is_compressed()) {
4408 ceph::bufferlist tmp;
4409 RGWCompressionInfo cs_info;
4410 cs_info.compression_type = plugin->get_type_name();
4411 cs_info.orig_size = s->obj_size;
4412 cs_info.blocks = move(compressor->get_compression_blocks());
4413 encode(cs_info, tmp);
4414 emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
4415 }
4416
4417 op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(), attrs,
4418 (delete_at ? *delete_at : real_time()),
4419 nullptr, nullptr, nullptr, nullptr, nullptr,
4420 s->yield);
4421 if (op_ret < 0) {
4422 return;
4423 }
4424 } while (is_next_file_to_upload());
4425
4426 const auto ret = rgw::notify::publish(s, s->object, ofs, ceph::real_clock::now(), etag, rgw::notify::ObjectCreatedPost, store);
4427 if (ret < 0) {
4428 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
4429     // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error if sending failed
4430     // this should be a global conf (probably returning a different handler)
4431     // so we don't need to read the configured values before we perform it
4432 }
4433 }
4434
4435
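// Moves the Swift account TempURL keys out of the regular attribute map:
// RGW_ATTR_TEMPURL_KEY1 -> temp_url_keys[0] and RGW_ATTR_TEMPURL_KEY2 ->
// temp_url_keys[1]. A key listed in rmattr_names is recorded as an empty
// string, which later resets it in the stored user info.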
4436 void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
4437 const set<string>& rmattr_names,
4438 map<int, string>& temp_url_keys)
4439 {
4440 map<string, bufferlist>::iterator iter;
4441
4442 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
4443 if (iter != add_attrs.end()) {
4444 temp_url_keys[0] = iter->second.c_str();
4445 add_attrs.erase(iter);
4446 }
4447
4448 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
4449 if (iter != add_attrs.end()) {
4450 temp_url_keys[1] = iter->second.c_str();
4451 add_attrs.erase(iter);
4452 }
4453
4454 for (const string& name : rmattr_names) {
4455 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
4456 temp_url_keys[0] = string();
4457 }
4458 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
4459 temp_url_keys[1] = string();
4460 }
4461 }
4462 }
4463
4464 int RGWPutMetadataAccount::init_processing()
4465 {
4466 /* First, go to the base class. At the time of writing the method was
4467 * responsible only for initializing the quota. This isn't necessary
4468    * here as we are touching metadata only. The call is kept only for
4469    * future-proofing. */
4470 op_ret = RGWOp::init_processing();
4471 if (op_ret < 0) {
4472 return op_ret;
4473 }
4474
4475 op_ret = get_params();
4476 if (op_ret < 0) {
4477 return op_ret;
4478 }
4479
4480 op_ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &orig_attrs,
4481 s->yield,
4482 &acct_op_tracker);
4483 if (op_ret < 0) {
4484 return op_ret;
4485 }
4486
4487 if (has_policy) {
4488 bufferlist acl_bl;
4489 policy.encode(acl_bl);
4490 attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
4491 }
4492
4493 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
4494 if (op_ret < 0) {
4495 return op_ret;
4496 }
4497 prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
4498 populate_with_generic_attrs(s, attrs);
4499
4500   /* Try to extract the TempURL-related stuff now to allow verify_permission
4501    * to evaluate whether we need FULL_CONTROL or not. */
4502 filter_out_temp_url(attrs, rmattr_names, temp_url_keys);
4503
4504   /* The same applies to quota, except that the client needs to be a reseller admin. */
4505 op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
4506 &new_quota_extracted);
4507 if (op_ret < 0) {
4508 return op_ret;
4509 }
4510
4511 return 0;
4512 }
4513
4514 int RGWPutMetadataAccount::verify_permission()
4515 {
4516 if (s->auth.identity->is_anonymous()) {
4517 return -EACCES;
4518 }
4519
4520 if (!verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4521 return -EACCES;
4522 }
4523
4524 /* Altering TempURL keys requires FULL_CONTROL. */
4525 if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
4526 return -EPERM;
4527 }
4528
4529   /* We are failing this intentionally to allow a system user/reseller admin
4530    * override in rgw_process.cc. This is the way to specify that a given RGWOp
4531    * expects extra privileges. */
4532 if (new_quota_extracted) {
4533 return -EACCES;
4534 }
4535
4536 return 0;
4537 }
4538
4539 void RGWPutMetadataAccount::execute()
4540 {
4541 /* Params have been extracted earlier. See init_processing(). */
4542 RGWUserInfo new_uinfo;
4543 op_ret = store->ctl()->user->get_info_by_uid(s->user->get_id(), &new_uinfo, s->yield,
4544 RGWUserCtl::GetParams()
4545 .set_objv_tracker(&acct_op_tracker));
4546 if (op_ret < 0) {
4547 return;
4548 }
4549
4550 /* Handle the TempURL-related stuff. */
4551 if (!temp_url_keys.empty()) {
4552 for (auto& pair : temp_url_keys) {
4553 new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
4554 }
4555 }
4556
4557 /* Handle the quota extracted at the verify_permission step. */
4558 if (new_quota_extracted) {
4559 new_uinfo.user_quota = std::move(new_quota);
4560 }
4561
4562   /* We are passing the current (old) user info here to allow the function
4563    * to optimize out some operations. */
4564 op_ret = store->ctl()->user->store_info(new_uinfo, s->yield,
4565 RGWUserCtl::PutParams()
4566 .set_old_info(&s->user->get_info())
4567 .set_objv_tracker(&acct_op_tracker)
4568 .set_attrs(&attrs));
4569 }
4570
4571 int RGWPutMetadataBucket::verify_permission()
4572 {
4573 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4574 return -EACCES;
4575 }
4576
4577 return 0;
4578 }
4579
4580 void RGWPutMetadataBucket::pre_exec()
4581 {
4582 rgw_bucket_object_pre_exec(s);
4583 }
4584
4585 void RGWPutMetadataBucket::execute()
4586 {
4587 op_ret = get_params();
4588 if (op_ret < 0) {
4589 return;
4590 }
4591
4592 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
4593 if (op_ret < 0) {
4594 return;
4595 }
4596
4597 if (!placement_rule.empty() &&
4598 placement_rule != s->bucket_info.placement_rule) {
4599 op_ret = -EEXIST;
4600 return;
4601 }
4602
4603 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
4604 /* Encode special metadata first as we're using std::map::emplace under
4605 * the hood. This method will add the new items only if the map doesn't
4606 * contain such keys yet. */
4607 if (has_policy) {
4608 if (s->dialect.compare("swift") == 0) {
4609 auto old_policy = \
4610 static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
4611 auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
4612 new_policy->filter_merge(policy_rw_mask, old_policy);
4613 policy = *new_policy;
4614 }
4615 buffer::list bl;
4616 policy.encode(bl);
4617 emplace_attr(RGW_ATTR_ACL, std::move(bl));
4618 }
4619
4620 if (has_cors) {
4621 buffer::list bl;
4622 cors_config.encode(bl);
4623 emplace_attr(RGW_ATTR_CORS, std::move(bl));
4624 }
4625
4626     /* It is assumed that the following functions WILL NOT change any
4627 * special attributes (like RGW_ATTR_ACL) if they are already
4628 * present in attrs. */
4629 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
4630 populate_with_generic_attrs(s, attrs);
4631
4632     /* According to Swift's behaviour and its container_quota
4633 * WSGI middleware implementation: anyone with write permissions
4634 * is able to set the bucket quota. This stays in contrast to
4635 * account quotas that can be set only by clients holding
4636 * reseller admin privileges. */
4637 op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
4638 if (op_ret < 0) {
4639 return op_ret;
4640 }
4641
4642 if (swift_ver_location) {
4643 s->bucket_info.swift_ver_location = *swift_ver_location;
4644 s->bucket_info.swift_versioning = (!swift_ver_location->empty());
4645 }
4646
4647 /* Web site of Swift API. */
4648 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
4649 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
4650
4651 /* Setting attributes also stores the provided bucket info. Due
4652 * to this fact, the new quota settings can be serialized with
4653 * the same call. */
4654 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
4655 &s->bucket_info.objv_tracker,
4656 s->yield);
4657 return op_ret;
4658 });
4659 }
4660
4661 int RGWPutMetadataObject::verify_permission()
4662 {
4663 // This looks to be something specific to Swift. We could add
4664 // operations like swift:PutMetadataObject to the Policy Engine.
4665 if (!verify_object_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4666 return -EACCES;
4667 }
4668
4669 return 0;
4670 }
4671
4672 void RGWPutMetadataObject::pre_exec()
4673 {
4674 rgw_bucket_object_pre_exec(s);
4675 }
4676
4677 void RGWPutMetadataObject::execute()
4678 {
4679 rgw_obj obj(s->bucket, s->object);
4680 rgw_obj target_obj;
4681 map<string, bufferlist> attrs, orig_attrs, rmattrs;
4682
4683 store->getRados()->set_atomic(s->obj_ctx, obj);
4684
4685 op_ret = get_params();
4686 if (op_ret < 0) {
4687 return;
4688 }
4689
4690 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
4691 if (op_ret < 0) {
4692 return;
4693 }
4694
4695 /* check if obj exists, read orig attrs */
4696 op_ret = get_obj_attrs(store, s, obj, orig_attrs, &target_obj);
4697 if (op_ret < 0) {
4698 return;
4699 }
4700
4701 /* Check whether the object has expired. Swift API documentation
4702    * states that we should return 404 Not Found in such a case. */
4703 if (need_object_expiration() && object_is_expired(orig_attrs)) {
4704 op_ret = -ENOENT;
4705 return;
4706 }
4707
4708 /* Filter currently existing attributes. */
4709 prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
4710 populate_with_generic_attrs(s, attrs);
4711 encode_delete_at_attr(delete_at, attrs);
4712
4713 if (dlo_manifest) {
4714 op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
4715 if (op_ret < 0) {
4716 ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
4717 return;
4718 }
4719 }
4720
4721 op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, target_obj,
4722 attrs, &rmattrs, s->yield);
4723 }
4724
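// Decodes the SLO manifest and queues every segment for bulk deletion.
// Illustrative path (hypothetical names): an entry such as
// "/segments-container/video/part-001" splits at the second '/' into
// bucket_name "segments-container" and obj_key "video/part-001"; the manifest
// object itself is appended to the list so it is removed as well.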
4725 int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
4726 {
4727 RGWSLOInfo slo_info;
4728 auto bliter = bl.cbegin();
4729 try {
4730 decode(slo_info, bliter);
4731 } catch (buffer::error& err) {
4732 ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
4733 return -EIO;
4734 }
4735
4736 try {
4737 deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
4738 new RGWBulkDelete::Deleter(this, store, s));
4739 } catch (const std::bad_alloc&) {
4740 return -ENOMEM;
4741 }
4742
4743 list<RGWBulkDelete::acct_path_t> items;
4744 for (const auto& iter : slo_info.entries) {
4745 const string& path_str = iter.path;
4746
4747 const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
4748 if (boost::string_view::npos == sep_pos) {
4749 return -EINVAL;
4750 }
4751
4752 RGWBulkDelete::acct_path_t path;
4753
4754 path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
4755 path.obj_key = url_decode(path_str.substr(sep_pos + 1));
4756
4757 items.push_back(path);
4758 }
4759
4760 /* Request removal of the manifest object itself. */
4761 RGWBulkDelete::acct_path_t path;
4762 path.bucket_name = s->bucket_name;
4763 path.obj_key = s->object;
4764 items.push_back(path);
4765
4766 int ret = deleter->delete_chunk(items);
4767 if (ret < 0) {
4768 return ret;
4769 }
4770
4771 return 0;
4772 }
4773
4774 int RGWDeleteObj::verify_permission()
4775 {
4776 int op_ret = get_params();
4777 if (op_ret) {
4778 return op_ret;
4779 }
4780 if (s->iam_policy || ! s->iam_user_policies.empty()) {
4781 if (s->bucket_info.obj_lock_enabled() && bypass_governance_mode) {
4782 auto r = eval_user_policies(s->iam_user_policies, s->env, boost::none,
4783 rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket, s->object.name));
4784 if (r == Effect::Deny) {
4785 bypass_perm = false;
4786 } else if (r == Effect::Pass && s->iam_policy) {
4787 r = s->iam_policy->eval(s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention,
4788 ARN(s->bucket, s->object.name));
4789 if (r == Effect::Deny) {
4790 bypass_perm = false;
4791 }
4792 }
4793 }
4794 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
4795 boost::none,
4796 s->object.instance.empty() ?
4797 rgw::IAM::s3DeleteObject :
4798 rgw::IAM::s3DeleteObjectVersion,
4799 ARN(s->bucket, s->object.name));
4800 if (usr_policy_res == Effect::Deny) {
4801 return -EACCES;
4802 }
4803
4804 rgw::IAM::Effect r = Effect::Pass;
4805 if (s->iam_policy) {
4806 r = s->iam_policy->eval(s->env, *s->auth.identity,
4807 s->object.instance.empty() ?
4808 rgw::IAM::s3DeleteObject :
4809 rgw::IAM::s3DeleteObjectVersion,
4810 ARN(s->bucket, s->object.name));
4811 }
4812 if (r == Effect::Allow)
4813 return 0;
4814 else if (r == Effect::Deny)
4815 return -EACCES;
4816 else if (usr_policy_res == Effect::Allow)
4817 return 0;
4818 }
4819
4820 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4821 return -EACCES;
4822 }
4823
4824 if (s->bucket_info.mfa_enabled() &&
4825 !s->object.instance.empty() &&
4826 !s->mfa_verified) {
4827 ldpp_dout(this, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl;
4828 return -ERR_MFA_REQUIRED;
4829 }
4830
4831 return 0;
4832 }
4833
4834 void RGWDeleteObj::pre_exec()
4835 {
4836 rgw_bucket_object_pre_exec(s);
4837 }
4838
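// Delete path: object attributes are read up front (needed for the object
// lock, Swift expiration and SLO checks); multipart_delete removes SLO
// segments via handle_slo_manifest(); otherwise Swift versioning may restore a
// previous version, and if not, a regular delete (possibly creating a delete
// marker on versioned buckets) is performed. -ECANCELED from a lost race is
// treated as success, and a removal notification is published at the end.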
4839 void RGWDeleteObj::execute()
4840 {
4841 if (!s->bucket_exists) {
4842 op_ret = -ERR_NO_SUCH_BUCKET;
4843 return;
4844 }
4845
4846 rgw_obj obj(s->bucket, s->object);
4847 map<string, bufferlist> attrs;
4848
4849 bool check_obj_lock = obj.key.have_instance() && s->bucket_info.obj_lock_enabled();
4850
4851 if (!s->object.empty()) {
4852 op_ret = get_obj_attrs(store, s, obj, attrs);
4853
4854 if (need_object_expiration() || multipart_delete) {
4855 /* check if obj exists, read orig attrs */
4856 if (op_ret < 0) {
4857 return;
4858 }
4859 }
4860
4861 if (check_obj_lock) {
4862 /* check if obj exists, read orig attrs */
4863 if (op_ret < 0) {
4864 if (op_ret == -ENOENT) {
4865             /* object may be a delete marker; skip check_obj_lock */
4866 check_obj_lock = false;
4867 } else {
4868 return;
4869 }
4870 }
4871 }
4872
4873 // ignore return value from get_obj_attrs in all other cases
4874 op_ret = 0;
4875
4876 if (check_obj_lock) {
4877 int object_lock_response = verify_object_lock(this, attrs, bypass_perm, bypass_governance_mode);
4878 if (object_lock_response != 0) {
4879 op_ret = object_lock_response;
4880 return;
4881 }
4882 }
4883
4884 if (multipart_delete) {
4885 const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);
4886
4887 if (slo_attr != attrs.end()) {
4888 op_ret = handle_slo_manifest(slo_attr->second);
4889 if (op_ret < 0) {
4890 ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
4891 }
4892 } else {
4893 op_ret = -ERR_NOT_SLO_MANIFEST;
4894 }
4895
4896 return;
4897 }
4898
4899 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
4900 obj_ctx->set_atomic(obj);
4901
4902 bool ver_restored = false;
4903 op_ret = store->getRados()->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
4904 s->bucket_info, obj, ver_restored, this);
4905 if (op_ret < 0) {
4906 return;
4907 }
4908
4909 if (!ver_restored) {
4910 /* Swift's versioning mechanism hasn't found any previous version of
4911 * the object that could be restored. This means we should proceed
4912 * with the regular delete path. */
4913 RGWRados::Object del_target(store->getRados(), s->bucket_info, *obj_ctx, obj);
4914 RGWRados::Object::Delete del_op(&del_target);
4915
4916 op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
4917 &del_op.params.marker_version_id);
4918 if (op_ret < 0) {
4919 return;
4920 }
4921
4922 del_op.params.bucket_owner = s->bucket_owner.get_id();
4923 del_op.params.versioning_status = s->bucket_info.versioning_status();
4924 del_op.params.obj_owner = s->owner;
4925 del_op.params.unmod_since = unmod_since;
4926 del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */
4927
4928 op_ret = del_op.delete_obj(s->yield);
4929 if (op_ret >= 0) {
4930 delete_marker = del_op.result.delete_marker;
4931 version_id = del_op.result.version_id;
4932 }
4933
4934 /* Check whether the object has expired. Swift API documentation
4935      * states that we should return 404 Not Found in such a case. */
4936 if (need_object_expiration() && object_is_expired(attrs)) {
4937 op_ret = -ENOENT;
4938 return;
4939 }
4940 }
4941
4942 if (op_ret == -ECANCELED) {
4943 op_ret = 0;
4944 }
4945 if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
4946 op_ret = 0;
4947 }
4948
4949     // cache the object's tags and metadata in the request
4950     // so they can be used in the notification mechanism
4951 try {
4952 populate_tags_in_request(s, attrs);
4953 } catch (buffer::error& err) {
4954 ldpp_dout(this, 5) << "WARNING: failed to populate delete request with object tags: " << err.what() << dendl;
4955 }
4956 populate_metadata_in_request(s, attrs);
4957 const auto obj_state = obj_ctx->get_state(obj);
4958
4959 const auto ret = rgw::notify::publish(s, s->object, obj_state->size , obj_state->mtime, attrs[RGW_ATTR_ETAG].to_str(),
4960 delete_marker && s->object.instance.empty() ? rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete,
4961 store);
4962 if (ret < 0) {
4963 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
4964     // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error if sending failed
4965     // this should be a global conf (probably returning a different handler)
4966     // so we don't need to read the configured values before we perform it
4967 }
4968 } else {
4969 op_ret = -EINVAL;
4970 }
4971 }
4972
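// Parses an x-amz-copy-source style location of the form
// "[/]<bucket>/<key>[?versionId=<id>]". Illustrative example (hypothetical
// names): "srcbucket/photos/cat.jpg?versionId=v1" yields
// bucket_name = "srcbucket", key.name = "photos/cat.jpg", key.instance = "v1".
// Returns false when no object name can be extracted.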
4973 bool RGWCopyObj::parse_copy_location(const boost::string_view& url_src,
4974 string& bucket_name,
4975 rgw_obj_key& key)
4976 {
4977 boost::string_view name_str;
4978 boost::string_view params_str;
4979
4980 // search for ? before url-decoding so we don't accidentally match %3F
4981 size_t pos = url_src.find('?');
4982 if (pos == string::npos) {
4983 name_str = url_src;
4984 } else {
4985 name_str = url_src.substr(0, pos);
4986 params_str = url_src.substr(pos + 1);
4987 }
4988
4989 boost::string_view dec_src{name_str};
4990 if (dec_src[0] == '/')
4991 dec_src.remove_prefix(1);
4992
4993 pos = dec_src.find('/');
4994 if (pos == string::npos)
4995 return false;
4996
4997 bucket_name = url_decode(dec_src.substr(0, pos));
4998 key.name = url_decode(dec_src.substr(pos + 1));
4999
5000 if (key.name.empty()) {
5001 return false;
5002 }
5003
5004 if (! params_str.empty()) {
5005 RGWHTTPArgs args;
5006 args.set(params_str.to_string());
5007 args.parse();
5008
5009 key.instance = args.get("versionId", NULL);
5010 }
5011
5012 return true;
5013 }
5014
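// Copy permission flow: read access to the source object is checked against
// the source bucket policy (or the source ACL when the policy returns Pass),
// unless the requester is an admin of the source owner; write access to the
// destination is then checked against the destination bucket policy for
// s3:PutObject, falling back to the destination bucket ACL.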
5015 int RGWCopyObj::verify_permission()
5016 {
5017 RGWAccessControlPolicy src_acl(s->cct);
5018 boost::optional<Policy> src_policy;
5019 op_ret = get_params();
5020 if (op_ret < 0)
5021 return op_ret;
5022
5023 op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
5024 if (op_ret < 0) {
5025 return op_ret;
5026 }
5027 map<string, bufferlist> src_attrs;
5028
5029 if (s->bucket_instance_id.empty()) {
5030 op_ret = store->getRados()->get_bucket_info(store->svc(), src_tenant_name, src_bucket_name, src_bucket_info, NULL, s->yield, &src_attrs);
5031 } else {
5032     /* will only happen in intra-region sync where the source and dest bucket are the same */
5033 rgw_bucket b(rgw_bucket_key(src_tenant_name, src_bucket_name, s->bucket_instance_id));
5034 op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, src_bucket_info, NULL, &src_attrs, s->yield);
5035 }
5036 if (op_ret < 0) {
5037 if (op_ret == -ENOENT) {
5038 op_ret = -ERR_NO_SUCH_BUCKET;
5039 }
5040 return op_ret;
5041 }
5042
5043 src_bucket = src_bucket_info.bucket;
5044
5045 /* get buckets info (source and dest) */
5046 if (s->local_source && source_zone.empty()) {
5047 rgw_obj src_obj(src_bucket, src_object);
5048 store->getRados()->set_atomic(s->obj_ctx, src_obj);
5049 store->getRados()->set_prefetch_data(s->obj_ctx, src_obj);
5050
5051 rgw_placement_rule src_placement;
5052
5053 /* check source object permissions */
5054 op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl, &src_placement.storage_class,
5055 src_policy, src_bucket, src_object);
5056 if (op_ret < 0) {
5057 return op_ret;
5058 }
5059
5060 /* follow up on previous checks that required reading source object head */
5061 if (need_to_check_storage_class) {
5062 src_placement.inherit_from(src_bucket_info.placement_rule);
5063
5064 op_ret = check_storage_class(src_placement);
5065 if (op_ret < 0) {
5066 return op_ret;
5067 }
5068 }
5069
5070 /* admin request overrides permission checks */
5071 if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
5072 if (src_policy) {
5073 auto e = src_policy->eval(s->env, *s->auth.identity,
5074 src_object.instance.empty() ?
5075 rgw::IAM::s3GetObject :
5076 rgw::IAM::s3GetObjectVersion,
5077 ARN(src_obj));
5078 if (e == Effect::Deny) {
5079 return -EACCES;
5080 } else if (e == Effect::Pass &&
5081 !src_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
5082 RGW_PERM_READ)) {
5083 return -EACCES;
5084 }
5085 } else if (!src_acl.verify_permission(this, *s->auth.identity,
5086 s->perm_mask,
5087 RGW_PERM_READ)) {
5088 return -EACCES;
5089 }
5090 }
5091 }
5092
5093 RGWAccessControlPolicy dest_bucket_policy(s->cct);
5094 map<string, bufferlist> dest_attrs;
5095
5096 if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
5097 or intra region sync */
5098 dest_bucket_info = src_bucket_info;
5099 dest_attrs = src_attrs;
5100 } else {
5101 op_ret = store->getRados()->get_bucket_info(store->svc(), dest_tenant_name, dest_bucket_name,
5102 dest_bucket_info, nullptr, s->yield, &dest_attrs);
5103 if (op_ret < 0) {
5104 if (op_ret == -ENOENT) {
5105 op_ret = -ERR_NO_SUCH_BUCKET;
5106 }
5107 return op_ret;
5108 }
5109 }
5110
5111 dest_bucket = dest_bucket_info.bucket;
5112
5113 rgw_obj dest_obj(dest_bucket, dest_object);
5114 store->getRados()->set_atomic(s->obj_ctx, dest_obj);
5115
5116 /* check dest bucket permissions */
5117 op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
5118 &dest_bucket_policy, dest_bucket);
5119 if (op_ret < 0) {
5120 return op_ret;
5121 }
5122 auto dest_iam_policy = get_iam_policy_from_attr(s->cct, store, dest_attrs, dest_bucket.tenant);
5123 /* admin request overrides permission checks */
5124 if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id())){
5125 if (dest_iam_policy != boost::none) {
5126 rgw_add_to_iam_environment(s->env, "s3:x-amz-copy-source", copy_source);
5127 if (md_directive)
5128 rgw_add_to_iam_environment(s->env, "s3:x-amz-metadata-directive",
5129 *md_directive);
5130
5131 auto e = dest_iam_policy->eval(s->env, *s->auth.identity,
5132 rgw::IAM::s3PutObject,
5133 ARN(dest_obj));
5134 if (e == Effect::Deny) {
5135 return -EACCES;
5136 } else if (e == Effect::Pass &&
5137 ! dest_bucket_policy.verify_permission(this,
5138 *s->auth.identity,
5139 s->perm_mask,
5140 RGW_PERM_WRITE)){
5141 return -EACCES;
5142 }
5143 } else if (! dest_bucket_policy.verify_permission(this, *s->auth.identity, s->perm_mask,
5144 RGW_PERM_WRITE)) {
5145 return -EACCES;
5146 }
5147
5148 }
5149
5150 op_ret = init_dest_policy();
5151 if (op_ret < 0) {
5152 return op_ret;
5153 }
5154
5155 return 0;
5156 }
5157
5158
5159 int RGWCopyObj::init_common()
5160 {
5161 if (if_mod) {
5162 if (parse_time(if_mod, &mod_time) < 0) {
5163 op_ret = -EINVAL;
5164 return op_ret;
5165 }
5166 mod_ptr = &mod_time;
5167 }
5168
5169 if (if_unmod) {
5170 if (parse_time(if_unmod, &unmod_time) < 0) {
5171 op_ret = -EINVAL;
5172 return op_ret;
5173 }
5174 unmod_ptr = &unmod_time;
5175 }
5176
5177 bufferlist aclbl;
5178 dest_policy.encode(aclbl);
5179 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
5180
5181 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
5182 if (op_ret < 0) {
5183 return op_ret;
5184 }
5185 populate_with_generic_attrs(s, attrs);
5186
5187 return 0;
5188 }
5189
5190 static void copy_obj_progress_cb(off_t ofs, void *param)
5191 {
5192 RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
5193 op->progress_cb(ofs);
5194 }
5195
5196 void RGWCopyObj::progress_cb(off_t ofs)
5197 {
5198 if (!s->cct->_conf->rgw_copy_obj_progress)
5199 return;
5200
5201 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
5202 return;
5203
5204 send_partial_response(ofs);
5205
5206 last_ofs = ofs;
5207 }
5208
5209 void RGWCopyObj::pre_exec()
5210 {
5211 rgw_bucket_object_pre_exec(s);
5212 }
5213
5214 void RGWCopyObj::execute()
5215 {
5216 if (init_common() < 0)
5217 return;
5218
5219 rgw_obj src_obj(src_bucket, src_object);
5220 rgw_obj dst_obj(dest_bucket, dest_object);
5221
5222 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
5223 if ( ! version_id.empty()) {
5224 dst_obj.key.set_instance(version_id);
5225 } else if (dest_bucket_info.versioning_enabled()) {
5226 store->getRados()->gen_rand_obj_instance_name(&dst_obj);
5227 }
5228
5229 obj_ctx.set_atomic(src_obj);
5230 obj_ctx.set_atomic(dst_obj);
5231
5232 encode_delete_at_attr(delete_at, attrs);
5233
5234 if (!s->system_request) { // no quota enforcement for system requests
5235 // get src object size (cached in obj_ctx from verify_permission())
5236 RGWObjState* astate = nullptr;
5237 op_ret = store->getRados()->get_obj_state(s->obj_ctx, src_bucket_info, src_obj,
5238 &astate, true, s->yield, false);
5239 if (op_ret < 0) {
5240 return;
5241 }
5242 // enforce quota against the destination bucket owner
5243 op_ret = store->getRados()->check_quota(dest_bucket_info.owner,
5244 dest_bucket_info.bucket,
5245 user_quota, bucket_quota,
5246 astate->accounted_size);
5247 if (op_ret < 0) {
5248 return;
5249 }
5250 }
5251
5252 bool high_precision_time = (s->system_request);
5253
5254 /* Handle object versioning of Swift API. In case of copying to remote this
5255 * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
5256 op_ret = store->getRados()->swift_versioning_copy(obj_ctx,
5257 dest_bucket_info.owner,
5258 dest_bucket_info,
5259 dst_obj,
5260 this,
5261 s->yield);
5262 if (op_ret < 0) {
5263 return;
5264 }
5265
5266 op_ret = store->getRados()->copy_obj(obj_ctx,
5267 s->user->get_id(),
5268 &s->info,
5269 source_zone,
5270 dst_obj,
5271 src_obj,
5272 dest_bucket_info,
5273 src_bucket_info,
5274 s->dest_placement,
5275 &src_mtime,
5276 &mtime,
5277 mod_ptr,
5278 unmod_ptr,
5279 high_precision_time,
5280 if_match,
5281 if_nomatch,
5282 attrs_mod,
5283 copy_if_newer,
5284 attrs, RGWObjCategory::Main,
5285 olh_epoch,
5286 (delete_at ? *delete_at : real_time()),
5287 (version_id.empty() ? NULL : &version_id),
5288 &s->req_id, /* use req_id as tag */
5289 &etag,
5290 copy_obj_progress_cb, (void *)this,
5291 this,
5292 s->yield);
5293
5294 const auto ret = rgw::notify::publish(s, s->object, s->obj_size, mtime, etag, rgw::notify::ObjectCreatedCopy, store);
5295 if (ret < 0) {
5296 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
5297     // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error if sending failed
5298     // this should be a global conf (probably returning a different handler)
5299     // so we don't need to read the configured values before we perform it
5300 }
5301 }
5302
5303 int RGWGetACLs::verify_permission()
5304 {
5305 bool perm;
5306 if (!s->object.empty()) {
5307 auto iam_action = s->object.instance.empty() ?
5308 rgw::IAM::s3GetObjectAcl :
5309 rgw::IAM::s3GetObjectVersionAcl;
5310
5311 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
5312 rgw_obj obj = rgw_obj(s->bucket, s->object);
5313 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
5314 }
5315 if (! s->iam_user_policies.empty()) {
5316 for (auto& user_policy : s->iam_user_policies) {
5317 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
5318 rgw_obj obj = rgw_obj(s->bucket, s->object);
5319 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
5320 }
5321 }
5322 }
5323 perm = verify_object_permission(this, s, iam_action);
5324 } else {
5325 if (!s->bucket_exists) {
5326 return -ERR_NO_SUCH_BUCKET;
5327 }
5328 perm = verify_bucket_permission(this, s, rgw::IAM::s3GetBucketAcl);
5329 }
5330 if (!perm)
5331 return -EACCES;
5332
5333 return 0;
5334 }
5335
5336 void RGWGetACLs::pre_exec()
5337 {
5338 rgw_bucket_object_pre_exec(s);
5339 }
5340
5341 void RGWGetACLs::execute()
5342 {
5343 stringstream ss;
5344 RGWAccessControlPolicy* const acl = \
5345 (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get());
5346 RGWAccessControlPolicy_S3* const s3policy = \
5347 static_cast<RGWAccessControlPolicy_S3*>(acl);
5348 s3policy->to_xml(ss);
5349 acls = ss.str();
5350 }
5351
5352
5353
5354 int RGWPutACLs::verify_permission()
5355 {
5356 bool perm;
5357
5358 rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);
5359
5360 rgw_add_grant_to_iam_environment(s->env, s);
5361 if (!s->object.empty()) {
5362 auto iam_action = s->object.instance.empty() ? rgw::IAM::s3PutObjectAcl : rgw::IAM::s3PutObjectVersionAcl;
5363 auto obj = rgw_obj(s->bucket, s->object);
5364 op_ret = rgw_iam_add_existing_objtags(store, s, obj, iam_action);
5365 perm = verify_object_permission(this, s, iam_action);
5366 } else {
5367 perm = verify_bucket_permission(this, s, rgw::IAM::s3PutBucketAcl);
5368 }
5369 if (!perm)
5370 return -EACCES;
5371
5372 return 0;
5373 }
5374
5375 int RGWGetLC::verify_permission()
5376 {
5377 bool perm;
5378 perm = verify_bucket_permission(this, s, rgw::IAM::s3GetLifecycleConfiguration);
5379 if (!perm)
5380 return -EACCES;
5381
5382 return 0;
5383 }
5384
5385 int RGWPutLC::verify_permission()
5386 {
5387 bool perm;
5388 perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
5389 if (!perm)
5390 return -EACCES;
5391
5392 return 0;
5393 }
5394
5395 int RGWDeleteLC::verify_permission()
5396 {
5397 bool perm;
5398 perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
5399 if (!perm)
5400 return -EACCES;
5401
5402 return 0;
5403 }
5404
5405 void RGWPutACLs::pre_exec()
5406 {
5407 rgw_bucket_object_pre_exec(s);
5408 }
5409
5410 void RGWGetLC::pre_exec()
5411 {
5412 rgw_bucket_object_pre_exec(s);
5413 }
5414
5415 void RGWPutLC::pre_exec()
5416 {
5417 rgw_bucket_object_pre_exec(s);
5418 }
5419
5420 void RGWDeleteLC::pre_exec()
5421 {
5422 rgw_bucket_object_pre_exec(s);
5423 }
5424
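// PUT ACL flow: the ACL XML is taken from the request body, or generated from
// a canned ACL / grant headers; the number of grants is capped by
// rgw_acl_grants_max_num (falling back to 100 when the option is negative);
// bucket ACL updates are forwarded to the metadata master zone in multisite
// setups; the policy is rebuilt to resolve grantees, rejected when the new
// policy is public and the bucket's public-access configuration blocks public
// ACLs, and finally stored in the RGW_ATTR_ACL xattr.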
5425 void RGWPutACLs::execute()
5426 {
5427 bufferlist bl;
5428
5429 RGWAccessControlPolicy_S3 *policy = NULL;
5430 RGWACLXMLParser_S3 parser(s->cct);
5431 RGWAccessControlPolicy_S3 new_policy(s->cct);
5432 stringstream ss;
5433 rgw_obj obj;
5434
5435 op_ret = 0; /* XXX redundant? */
5436
5437 if (!parser.init()) {
5438 op_ret = -EINVAL;
5439 return;
5440 }
5441
5442
5443 RGWAccessControlPolicy* const existing_policy = \
5444 (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());
5445
5446 owner = existing_policy->get_owner();
5447
5448 op_ret = get_params();
5449 if (op_ret < 0) {
5450 if (op_ret == -ERANGE) {
5451       ldpp_dout(this, 4) << "The size of the request XML data is larger than the maximum allowed, data size = "
5452 << s->length << dendl;
5453 op_ret = -ERR_MALFORMED_XML;
5454 s->err.message = "The XML you provided was larger than the maximum " +
5455 std::to_string(s->cct->_conf->rgw_max_put_param_size) +
5456 " bytes allowed.";
5457 }
5458 return;
5459 }
5460
5461 char* buf = data.c_str();
5462 ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;
5463
5464 if (!s->canned_acl.empty() && data.length() > 0) {
5465 op_ret = -EINVAL;
5466 return;
5467 }
5468
5469 if (!s->canned_acl.empty() || s->has_acl_header) {
5470 op_ret = get_policy_from_state(store, s, ss);
5471 if (op_ret < 0)
5472 return;
5473
5474 data.clear();
5475 data.append(ss.str());
5476 }
5477
5478 if (!parser.parse(data.c_str(), data.length(), 1)) {
5479 op_ret = -EINVAL;
5480 return;
5481 }
5482 policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
5483 if (!policy) {
5484 op_ret = -EINVAL;
5485 return;
5486 }
5487
5488 const RGWAccessControlList& req_acl = policy->get_acl();
5489 const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
5490 #define ACL_GRANTS_MAX_NUM 100
5491 int max_num = s->cct->_conf->rgw_acl_grants_max_num;
5492 if (max_num < 0) {
5493 max_num = ACL_GRANTS_MAX_NUM;
5494 }
5495
5496 int grants_num = req_grant_map.size();
5497 if (grants_num > max_num) {
5498 ldpp_dout(this, 4) << "An acl can have up to " << max_num
5499 << " grants, request acl grants num: " << grants_num << dendl;
5500 op_ret = -ERR_LIMIT_EXCEEDED;
5501     s->err.message = "The request is rejected because the number of ACL grants requested is larger than the maximum of "
5502                      + std::to_string(max_num)
5503                      + " grants allowed in an ACL.";
5504 return;
5505 }
5506
5507 // forward bucket acl requests to meta master zone
5508 if (s->object.empty() && !store->svc()->zone->is_meta_master()) {
5509 bufferlist in_data;
5510 // include acl data unless it was generated from a canned_acl
5511 if (s->canned_acl.empty()) {
5512 in_data.append(data);
5513 }
5514 op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
5515 if (op_ret < 0) {
5516 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5517 return;
5518 }
5519 }
5520
5521 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
5522 ldpp_dout(this, 15) << "Old AccessControlPolicy:";
5523 policy->to_xml(*_dout);
5524 *_dout << dendl;
5525 }
5526
5527 op_ret = policy->rebuild(store->ctl()->user, &owner, new_policy, s->err.message);
5528 if (op_ret < 0)
5529 return;
5530
5531 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
5532 ldpp_dout(this, 15) << "New AccessControlPolicy:";
5533 new_policy.to_xml(*_dout);
5534 *_dout << dendl;
5535 }
5536
5537 if (s->bucket_access_conf &&
5538 s->bucket_access_conf->block_public_acls() &&
5539 new_policy.is_public()) {
5540 op_ret = -EACCES;
5541 return;
5542 }
5543 new_policy.encode(bl);
5544 if (!s->object.empty()) {
5545 obj = rgw_obj(s->bucket, s->object);
5546 store->getRados()->set_atomic(s->obj_ctx, obj);
5547 // if instance is empty, we should modify the latest object
5548 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
5549 } else {
5550 map<string,bufferlist> attrs = s->bucket_attrs;
5551 attrs[RGW_ATTR_ACL] = bl;
5552 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
5553 &s->bucket_info.objv_tracker,
5554 s->yield);
5555 }
5556 if (op_ret == -ECANCELED) {
5557 op_ret = 0; /* lost a race, but it's ok because acls are immutable */
5558 }
5559 }
5560
5561 void RGWPutLC::execute()
5562 {
5563 bufferlist bl;
5564
5565 RGWLifecycleConfiguration_S3 config(s->cct);
5566 RGWXMLParser parser;
5567 RGWLifecycleConfiguration_S3 new_config(s->cct);
5568
5569 content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
5570 if (content_md5 == nullptr) {
5571 op_ret = -ERR_INVALID_REQUEST;
5572 s->err.message = "Missing required header for this request: Content-MD5";
5573 ldpp_dout(this, 5) << s->err.message << dendl;
5574 return;
5575 }
5576
5577 std::string content_md5_bin;
5578 try {
5579 content_md5_bin = rgw::from_base64(boost::string_view(content_md5));
5580 } catch (...) {
5581 s->err.message = "Request header Content-MD5 contains character "
5582 "that is not base64 encoded.";
5583 ldpp_dout(this, 5) << s->err.message << dendl;
5584 op_ret = -ERR_BAD_DIGEST;
5585 return;
5586 }
5587
5588 if (!parser.init()) {
5589 op_ret = -EINVAL;
5590 return;
5591 }
5592
5593 op_ret = get_params();
5594 if (op_ret < 0)
5595 return;
5596
5597 char* buf = data.c_str();
5598 ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;
5599
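/* The S3 API requires a Content-MD5 header on lifecycle configuration PUTs
 * (enforced above); recompute the MD5 of the received body and compare it
 * with the decoded header value before trusting the payload. */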
5600 MD5 data_hash;
5601 unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
5602 data_hash.Update(reinterpret_cast<const unsigned char*>(buf), data.length());
5603 data_hash.Final(data_hash_res);
5604
5605 if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
5606 op_ret = -ERR_BAD_DIGEST;
5607 s->err.message = "The Content-MD5 you specified did not match what we received.";
5608 ldpp_dout(this, 5) << s->err.message
5609 << " Specified content md5: " << content_md5
5610 << ", calculated content md5: " << data_hash_res
5611 << dendl;
5612 return;
5613 }
5614
5615 if (!parser.parse(buf, data.length(), 1)) {
5616 op_ret = -ERR_MALFORMED_XML;
5617 return;
5618 }
5619
5620 try {
5621 RGWXMLDecoder::decode_xml("LifecycleConfiguration", config, &parser);
5622 } catch (RGWXMLDecoder::err& err) {
5623 ldpp_dout(this, 5) << "Bad lifecycle configuration: " << err << dendl;
5624 op_ret = -ERR_MALFORMED_XML;
5625 return;
5626 }
5627
5628 op_ret = config.rebuild(store->getRados(), new_config);
5629 if (op_ret < 0)
5630 return;
5631
5632 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
5633 XMLFormatter xf;
5634 new_config.dump_xml(&xf);
5635 stringstream ss;
5636 xf.flush(ss);
5637 ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
5638 }
5639
5640 if (!store->svc()->zone->is_meta_master()) {
5641 op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
5642 if (op_ret < 0) {
5643 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5644 return;
5645 }
5646 }
5647
5648 op_ret = store->getRados()->get_lc()->set_bucket_config(s->bucket_info, s->bucket_attrs, &new_config);
5649 if (op_ret < 0) {
5650 return;
5651 }
5652 return;
5653 }
5654
5655 void RGWDeleteLC::execute()
5656 {
5657 if (!store->svc()->zone->is_meta_master()) {
5658 bufferlist data;
5659 op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
5660 if (op_ret < 0) {
5661 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5662 return;
5663 }
5664 }
5665
5666 op_ret = store->getRados()->get_lc()->remove_bucket_config(s->bucket_info, s->bucket_attrs);
5667 if (op_ret < 0) {
5668 return;
5669 }
5670 return;
5671 }
5672
5673 int RGWGetCORS::verify_permission()
5674 {
5675 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
5676 }
5677
5678 void RGWGetCORS::execute()
5679 {
5680 op_ret = read_bucket_cors();
5681 if (op_ret < 0)
5682 return ;
5683
5684 if (!cors_exist) {
5685 ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
5686 op_ret = -ERR_NO_CORS_FOUND;
5687 return;
5688 }
5689 }
5690
5691 int RGWPutCORS::verify_permission()
5692 {
5693 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
5694 }
5695
5696 void RGWPutCORS::execute()
5697 {
5698 rgw_raw_obj obj;
5699
5700 op_ret = get_params();
5701 if (op_ret < 0)
5702 return;
5703
5704 if (!store->svc()->zone->is_meta_master()) {
5705 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
5706 if (op_ret < 0) {
5707 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5708 return;
5709 }
5710 }
5711
5712 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
5713 map<string, bufferlist> attrs = s->bucket_attrs;
5714 attrs[RGW_ATTR_CORS] = cors_bl;
5715 return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
5716 &s->bucket_info.objv_tracker,
5717 s->yield);
5718 });
5719 }
5720
5721 int RGWDeleteCORS::verify_permission()
5722 {
5723 // No separate delete permission
5724 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
5725 }
5726
5727 void RGWDeleteCORS::execute()
5728 {
5729 if (!store->svc()->zone->is_meta_master()) {
5730 bufferlist data;
5731 op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
5732 if (op_ret < 0) {
5733 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5734 return;
5735 }
5736 }
5737
5738 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
5739 op_ret = read_bucket_cors();
5740 if (op_ret < 0)
5741 return op_ret;
5742
5743 if (!cors_exist) {
5744 ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
5745 op_ret = -ENOENT;
5746 return op_ret;
5747 }
5748
5749 map<string, bufferlist> attrs = s->bucket_attrs;
5750 attrs.erase(RGW_ATTR_CORS);
5751 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
5752 &s->bucket_info.objv_tracker,
5753 s->yield);
5754 if (op_ret < 0) {
5755 ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket.name
5756 << " returned err=" << op_ret << dendl;
5757 }
5758 return op_ret;
5759 });
5760 }
5761
5762 void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
5763 get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
5764 }
5765
5766 int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
5767 rule = cc->host_name_rule(origin);
5768 if (!rule) {
5769 ldpp_dout(this, 10) << "There is no cors rule present for " << origin << dendl;
5770 return -ENOENT;
5771 }
5772
5773 if (!validate_cors_rule_method(rule, req_meth)) {
5774 return -ENOENT;
5775 }
5776
5777 if (!validate_cors_rule_header(rule, req_hdrs)) {
5778 return -ENOENT;
5779 }
5780
5781 return 0;
5782 }
5783
5784 void RGWOptionsCORS::execute()
5785 {
5786 op_ret = read_bucket_cors();
5787 if (op_ret < 0)
5788 return;
5789
5790 origin = s->info.env->get("HTTP_ORIGIN");
5791 if (!origin) {
5792 ldpp_dout(this, 0) << "Missing mandatory Origin header" << dendl;
5793 op_ret = -EINVAL;
5794 return;
5795 }
5796 req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
5797 if (!req_meth) {
5798 ldpp_dout(this, 0) << "Missing mandatory Access-control-request-method header" << dendl;
5799 op_ret = -EINVAL;
5800 return;
5801 }
5802 if (!cors_exist) {
5803 ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
5804 op_ret = -ENOENT;
5805 return;
5806 }
5807 req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
5808 op_ret = validate_cors_request(&bucket_cors);
5809 if (!rule) {
5810 origin = req_meth = NULL;
5811 return;
5812 }
5813 return;
5814 }
5815
5816 int RGWGetRequestPayment::verify_permission()
5817 {
5818 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
5819 }
5820
5821 void RGWGetRequestPayment::pre_exec()
5822 {
5823 rgw_bucket_object_pre_exec(s);
5824 }
5825
5826 void RGWGetRequestPayment::execute()
5827 {
5828 requester_pays = s->bucket_info.requester_pays;
5829 }
5830
5831 int RGWSetRequestPayment::verify_permission()
5832 {
5833 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
5834 }
5835
5836 void RGWSetRequestPayment::pre_exec()
5837 {
5838 rgw_bucket_object_pre_exec(s);
5839 }
5840
5841 void RGWSetRequestPayment::execute()
5842 {
5843
5844 if (!store->svc()->zone->is_meta_master()) {
5845 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
5846 if (op_ret < 0) {
5847 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5848 return;
5849 }
5850 }
5851
5852 op_ret = get_params();
5853
5854 if (op_ret < 0)
5855 return;
5856
5857 s->bucket_info.requester_pays = requester_pays;
5858 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
5859 &s->bucket_attrs);
5860 if (op_ret < 0) {
5861 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
5862 << " returned err=" << op_ret << dendl;
5863 return;
5864 }
5865 }
5866
5867 int RGWInitMultipart::verify_permission()
5868 {
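/* Evaluation order: user/identity policies first, then the bucket policy.
 * An explicit Deny from either rejects the request, an Allow from either
 * grants it, and if both pass we fall back to the plain ACL check below. */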
5869 if (s->iam_policy || ! s->iam_user_policies.empty()) {
5870 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
5871 boost::none,
5872 rgw::IAM::s3PutObject,
5873 rgw_obj(s->bucket, s->object));
5874 if (usr_policy_res == Effect::Deny) {
5875 return -EACCES;
5876 }
5877
5878 rgw::IAM::Effect e = Effect::Pass;
5879 if (s->iam_policy) {
5880 e = s->iam_policy->eval(s->env, *s->auth.identity,
5881 rgw::IAM::s3PutObject,
5882 rgw_obj(s->bucket, s->object));
5883 }
5884 if (e == Effect::Allow) {
5885 return 0;
5886 } else if (e == Effect::Deny) {
5887 return -EACCES;
5888 } else if (usr_policy_res == Effect::Allow) {
5889 return 0;
5890 }
5891 }
5892
5893 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
5894 return -EACCES;
5895 }
5896
5897 return 0;
5898 }
5899
5900 void RGWInitMultipart::pre_exec()
5901 {
5902 rgw_bucket_object_pre_exec(s);
5903 }
5904
5905 void RGWInitMultipart::execute()
5906 {
5907 bufferlist aclbl;
5908 map<string, bufferlist> attrs;
5909 rgw_obj obj;
5910
5911 if (get_params() < 0)
5912 return;
5913
5914 if (s->object.empty())
5915 return;
5916
5917 policy.encode(aclbl);
5918 attrs[RGW_ATTR_ACL] = aclbl;
5919
5920 populate_with_generic_attrs(s, attrs);
5921
5922 /* select encryption mode */
5923 op_ret = prepare_encryption(attrs);
5924 if (op_ret != 0)
5925 return;
5926
5927 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
5928 if (op_ret < 0) {
5929 return;
5930 }
5931
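/* Generate a random upload id and create the multipart meta object with
 * PUT_OBJ_CREATE_EXCL in the multipart namespace of the bucket's extra-data
 * pool; on the unlikely -EEXIST collision, loop and retry with a fresh id. */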
5932 do {
5933 char buf[33];
5934 gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
5935 upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
5936 upload_id.append(buf);
5937
5938 string tmp_obj_name;
5939 RGWMPObj mp(s->object.name, upload_id);
5940 tmp_obj_name = mp.get_meta();
5941
5942 obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
5943 // the meta object will be indexed with 0 size, we c
5944 obj.set_in_extra_data(true);
5945 obj.index_hash_source = s->object.name;
5946
5947 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
5948 op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
5949
5950 RGWRados::Object::Write obj_op(&op_target);
5951
5952 obj_op.meta.owner = s->owner.get_id();
5953 obj_op.meta.category = RGWObjCategory::MultiMeta;
5954 obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;
5955 obj_op.meta.mtime = &mtime;
5956
5957 multipart_upload_info upload_info;
5958 upload_info.dest_placement = s->dest_placement;
5959
5960 bufferlist bl;
5961 encode(upload_info, bl);
5962 obj_op.meta.data = &bl;
5963
5964 op_ret = obj_op.write_meta(bl.length(), 0, attrs, s->yield);
5965 } while (op_ret == -EEXIST);
5966
5967 const auto ret = rgw::notify::publish(s, s->object, s->obj_size, ceph::real_clock::now(), attrs[RGW_ATTR_ETAG].to_str(), rgw::notify::ObjectCreatedPost, store);
5968 if (ret < 0) {
5969 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
5970 // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error in case sending failed
5971 // this should be a global conf (probably returning a different handler)
5972 // so we don't need to read the configured values before we perform it
5973 }
5974 }
5975
5976 int RGWCompleteMultipart::verify_permission()
5977 {
5978 if (s->iam_policy || ! s->iam_user_policies.empty()) {
5979 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
5980 boost::none,
5981 rgw::IAM::s3PutObject,
5982 rgw_obj(s->bucket, s->object));
5983 if (usr_policy_res == Effect::Deny) {
5984 return -EACCES;
5985 }
5986
5987 rgw::IAM::Effect e = Effect::Pass;
5988 if (s->iam_policy) {
5989 e = s->iam_policy->eval(s->env, *s->auth.identity,
5990 rgw::IAM::s3PutObject,
5991 rgw_obj(s->bucket, s->object));
5992 }
5993 if (e == Effect::Allow) {
5994 return 0;
5995 } else if (e == Effect::Deny) {
5996 return -EACCES;
5997 } else if (usr_policy_res == Effect::Allow) {
5998 return 0;
5999 }
6000 }
6001
6002 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
6003 return -EACCES;
6004 }
6005
6006 return 0;
6007 }
6008
6009 void RGWCompleteMultipart::pre_exec()
6010 {
6011 rgw_bucket_object_pre_exec(s);
6012 }
6013
6014 void RGWCompleteMultipart::execute()
6015 {
6016 RGWMultiCompleteUpload *parts;
6017 map<int, string>::iterator iter;
6018 RGWMultiXMLParser parser;
6019 string meta_oid;
6020 map<uint32_t, RGWUploadPartInfo> obj_parts;
6021 map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
6022 map<string, bufferlist> attrs;
6023 off_t ofs = 0;
6024 MD5 hash;
6025 char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
6026 char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
6027 bufferlist etag_bl;
6028 rgw_obj meta_obj;
6029 rgw_obj target_obj;
6030 RGWMPObj mp;
6031 RGWObjManifest manifest;
6032 uint64_t olh_epoch = 0;
6033
6034 op_ret = get_params();
6035 if (op_ret < 0)
6036 return;
6037 op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
6038 if (op_ret < 0) {
6039 return;
6040 }
6041
6042 if (!data.length()) {
6043 op_ret = -ERR_MALFORMED_XML;
6044 return;
6045 }
6046
6047 if (!parser.init()) {
6048 op_ret = -EIO;
6049 return;
6050 }
6051
6052 if (!parser.parse(data.c_str(), data.length(), 1)) {
6053 op_ret = -ERR_MALFORMED_XML;
6054 return;
6055 }
6056
6057 parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
6058 if (!parts || parts->parts.empty()) {
6059 op_ret = -ERR_MALFORMED_XML;
6060 return;
6061 }
6062
6063 if ((int)parts->parts.size() >
6064 s->cct->_conf->rgw_multipart_part_upload_limit) {
6065 op_ret = -ERANGE;
6066 return;
6067 }
6068
6069 mp.init(s->object.name, upload_id);
6070 meta_oid = mp.get_meta();
6071
6072 int total_parts = 0;
6073 int handled_parts = 0;
6074 int max_parts = 1000;
6075 int marker = 0;
6076 bool truncated;
6077 RGWCompressionInfo cs_info;
6078 bool compressed = false;
6079 uint64_t accounted_size = 0;
6080
6081 uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;
6082
6083 list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */
6084
6085 bool versioned_object = s->bucket_info.versioning_enabled();
6086
6087 iter = parts->parts.begin();
6088
6089 meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
6090 meta_obj.set_in_extra_data(true);
6091 meta_obj.index_hash_source = s->object.name;
6092
6093 /* take a cls lock on meta_obj to prevent racing completions (or retries)
6094 * from deleting the parts */
6095 rgw_pool meta_pool;
6096 rgw_raw_obj raw_obj;
6097 int max_lock_secs_mp =
6098 s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
6099 utime_t dur(max_lock_secs_mp, 0);
6100
6101 store->getRados()->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
6102 store->getRados()->get_obj_data_pool((s->bucket_info).placement_rule,
6103 meta_obj,&meta_pool);
6104 store->getRados()->open_pool_ctx(meta_pool, serializer.ioctx, true);
6105
6106 op_ret = serializer.try_lock(raw_obj.oid, dur);
6107 if (op_ret < 0) {
6108 ldpp_dout(this, 0) << "failed to acquire lock" << dendl;
6109 op_ret = -ERR_INTERNAL_ERROR;
6110 s->err.message = "This multipart completion is already in progress";
6111 return;
6112 }
6113
6114 op_ret = get_obj_attrs(store, s, meta_obj, attrs);
6115
6116 if (op_ret < 0) {
6117 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
6118 << " ret=" << op_ret << dendl;
6119 return;
6120 }
6121
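/* List the stored parts in batches and walk them in lockstep with the
 * client-supplied part list: enforce the minimum size for every part but the
 * last, verify that part numbers and ETags match, fold each part's binary MD5
 * into the final ETag hash, append its manifest, and merge any per-part
 * compression metadata. */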
6122 do {
6123 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
6124 marker, obj_parts, &marker, &truncated);
6125 if (op_ret == -ENOENT) {
6126 op_ret = -ERR_NO_SUCH_UPLOAD;
6127 }
6128 if (op_ret < 0)
6129 return;
6130
6131 total_parts += obj_parts.size();
6132 if (!truncated && total_parts != (int)parts->parts.size()) {
6133 ldpp_dout(this, 0) << "NOTICE: total parts mismatch: have: " << total_parts
6134 << " expected: " << parts->parts.size() << dendl;
6135 op_ret = -ERR_INVALID_PART;
6136 return;
6137 }
6138
6139 for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
6140 uint64_t part_size = obj_iter->second.accounted_size;
6141 if (handled_parts < (int)parts->parts.size() - 1 &&
6142 part_size < min_part_size) {
6143 op_ret = -ERR_TOO_SMALL;
6144 return;
6145 }
6146
6147 char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
6148 if (iter->first != (int)obj_iter->first) {
6149 ldpp_dout(this, 0) << "NOTICE: parts num mismatch: next requested: "
6150 << iter->first << " next uploaded: "
6151 << obj_iter->first << dendl;
6152 op_ret = -ERR_INVALID_PART;
6153 return;
6154 }
6155 string part_etag = rgw_string_unquote(iter->second);
6156 if (part_etag.compare(obj_iter->second.etag) != 0) {
6157 ldpp_dout(this, 0) << "NOTICE: etag mismatch: part: " << iter->first
6158 << " etag: " << iter->second << dendl;
6159 op_ret = -ERR_INVALID_PART;
6160 return;
6161 }
6162
6163 hex_to_buf(obj_iter->second.etag.c_str(), petag,
6164 CEPH_CRYPTO_MD5_DIGESTSIZE);
6165 hash.Update((const unsigned char *)petag, sizeof(petag));
6166
6167 RGWUploadPartInfo& obj_part = obj_iter->second;
6168
6169 /* update manifest for part */
6170 string oid = mp.get_part(obj_iter->second.num);
6171 rgw_obj src_obj;
6172 src_obj.init_ns(s->bucket, oid, mp_ns);
6173
6174 if (obj_part.manifest.empty()) {
6175 ldpp_dout(this, 0) << "ERROR: empty manifest for object part: obj="
6176 << src_obj << dendl;
6177 op_ret = -ERR_INVALID_PART;
6178 return;
6179 } else {
6180 manifest.append(obj_part.manifest, store->svc()->zone);
6181 }
6182
6183 bool part_compressed = (obj_part.cs_info.compression_type != "none");
6184 if ((handled_parts > 0) &&
6185 ((part_compressed != compressed) ||
6186 (cs_info.compression_type != obj_part.cs_info.compression_type))) {
6187 ldpp_dout(this, 0) << "ERROR: compression type was changed during multipart upload ("
6188 << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
6189 op_ret = -ERR_INVALID_PART;
6190 return;
6191 }
6192
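/* Rebase this part's compression blocks onto the offsets accumulated so far,
 * so the completed object carries one contiguous compression map. */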
6193 if (part_compressed) {
6194 int64_t new_ofs; // offset in compression data for new part
6195 if (cs_info.blocks.size() > 0)
6196 new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
6197 else
6198 new_ofs = 0;
6199 for (const auto& block : obj_part.cs_info.blocks) {
6200 compression_block cb;
6201 cb.old_ofs = block.old_ofs + cs_info.orig_size;
6202 cb.new_ofs = new_ofs;
6203 cb.len = block.len;
6204 cs_info.blocks.push_back(cb);
6205 new_ofs = cb.new_ofs + cb.len;
6206 }
6207 if (!compressed)
6208 cs_info.compression_type = obj_part.cs_info.compression_type;
6209 cs_info.orig_size += obj_part.cs_info.orig_size;
6210 compressed = true;
6211 }
6212
6213 rgw_obj_index_key remove_key;
6214 src_obj.key.get_index_key(&remove_key);
6215
6216 remove_objs.push_back(remove_key);
6217
6218 ofs += obj_part.size;
6219 accounted_size += obj_part.accounted_size;
6220 }
6221 } while (truncated);
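/* The multipart ETag is the MD5 over the concatenated binary part MD5s,
 * suffixed with "-<number of parts>". */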
6222 hash.Final((unsigned char *)final_etag);
6223
6224 buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
6225 snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
6226 "-%lld", (long long)parts->parts.size());
6227 etag = final_etag_str;
6228 ldpp_dout(this, 10) << "calculated etag: " << final_etag_str << dendl;
6229
6230 etag_bl.append(final_etag_str, strlen(final_etag_str));
6231
6232 attrs[RGW_ATTR_ETAG] = etag_bl;
6233
6234 if (compressed) {
6235 // write compression attribute to full object
6236 bufferlist tmp;
6237 encode(cs_info, tmp);
6238 attrs[RGW_ATTR_COMPRESSION] = tmp;
6239 }
6240
6241 target_obj.init(s->bucket, s->object.name);
6242 if (versioned_object) {
6243 if (!version_id.empty()) {
6244 target_obj.key.set_instance(version_id);
6245 } else {
6246 store->getRados()->gen_rand_obj_instance_name(&target_obj);
6247 version_id = target_obj.key.get_instance();
6248 }
6249 }
6250
6251 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
6252
6253 obj_ctx.set_atomic(target_obj);
6254
6255 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
6256 RGWRados::Object::Write obj_op(&op_target);
6257
6258 obj_op.meta.manifest = &manifest;
6259 obj_op.meta.remove_objs = &remove_objs;
6260
6261 obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
6262 obj_op.meta.owner = s->owner.get_id();
6263 obj_op.meta.flags = PUT_OBJ_CREATE;
6264 obj_op.meta.modify_tail = true;
6265 obj_op.meta.completeMultipart = true;
6266 obj_op.meta.olh_epoch = olh_epoch;
6267 op_ret = obj_op.write_meta(ofs, accounted_size, attrs, s->yield);
6268 if (op_ret < 0)
6269 return;
6270
6271 // remove the upload obj
6272 int r = store->getRados()->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
6273 s->bucket_info, meta_obj, 0);
6274 if (r >= 0) {
6275 /* serializer's exclusive lock is released */
6276 serializer.clear_locked();
6277 } else {
6278 ldpp_dout(this, 0) << "WARNING: failed to remove object " << meta_obj << dendl;
6279 }
6280
6281 const auto ret = rgw::notify::publish(s, s->object, ofs, ceph::real_clock::now(), final_etag_str, rgw::notify::ObjectCreatedCompleteMultipartUpload, store);
6282
6283 if (ret < 0) {
6284 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
6285 // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error in case sending failed
6286 // this should be a global conf (probably returning a different handler)
6287 // so we don't need to read the configured values before we perform it
6288 }
6289 }
6290
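/* MPSerializer takes a non-blocking exclusive cls lock on the multipart meta
 * object; assert_exists() makes the attempt fail if the meta object has
 * already been removed (e.g. by a completion or abort that won the race). */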
6291 int RGWCompleteMultipart::MPSerializer::try_lock(
6292 const std::string& _oid,
6293 utime_t dur)
6294 {
6295 oid = _oid;
6296 op.assert_exists();
6297 lock.set_duration(dur);
6298 lock.lock_exclusive(&op);
6299 int ret = rgw_rados_operate(ioctx, oid, &op, null_yield);
6300 if (! ret) {
6301 locked = true;
6302 }
6303 return ret;
6304 }
6305
6306 void RGWCompleteMultipart::complete()
6307 {
6308 /* release the exclusive lock if not already released */
6309 if (unlikely(serializer.locked)) {
6310 int r = serializer.unlock();
6311 if (r < 0) {
6312 ldpp_dout(this, 0) << "WARNING: failed to unlock " << serializer.oid << dendl;
6313 }
6314 }
6315 send_response();
6316 }
6317
6318 int RGWAbortMultipart::verify_permission()
6319 {
6320 if (s->iam_policy || ! s->iam_user_policies.empty()) {
6321 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
6322 boost::none,
6323 rgw::IAM::s3AbortMultipartUpload,
6324 rgw_obj(s->bucket, s->object));
6325 if (usr_policy_res == Effect::Deny) {
6326 return -EACCES;
6327 }
6328
6329 rgw::IAM::Effect e = Effect::Pass;
6330 if (s->iam_policy) {
6331 e = s->iam_policy->eval(s->env, *s->auth.identity,
6332 rgw::IAM::s3AbortMultipartUpload,
6333 rgw_obj(s->bucket, s->object));
6334 }
6335 if (e == Effect::Allow) {
6336 return 0;
6337 } else if (e == Effect::Deny) {
6338 return -EACCES;
6339 } else if (usr_policy_res == Effect::Allow)
6340 return 0;
6341 }
6342
6343 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
6344 return -EACCES;
6345 }
6346
6347 return 0;
6348 }
6349
6350 void RGWAbortMultipart::pre_exec()
6351 {
6352 rgw_bucket_object_pre_exec(s);
6353 }
6354
6355 void RGWAbortMultipart::execute()
6356 {
6357 op_ret = -EINVAL;
6358 string upload_id;
6359 string meta_oid;
6360 upload_id = s->info.args.get("uploadId");
6361 rgw_obj meta_obj;
6362 RGWMPObj mp;
6363
6364 if (upload_id.empty() || s->object.empty())
6365 return;
6366
6367 mp.init(s->object.name, upload_id);
6368 meta_oid = mp.get_meta();
6369
6370 op_ret = get_multipart_info(store, s, meta_oid, nullptr, nullptr, nullptr);
6371 if (op_ret < 0)
6372 return;
6373
6374 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
6375 op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
6376 }
6377
6378 int RGWListMultipart::verify_permission()
6379 {
6380 if (!verify_object_permission(this, s, rgw::IAM::s3ListMultipartUploadParts))
6381 return -EACCES;
6382
6383 return 0;
6384 }
6385
6386 void RGWListMultipart::pre_exec()
6387 {
6388 rgw_bucket_object_pre_exec(s);
6389 }
6390
6391 void RGWListMultipart::execute()
6392 {
6393 string meta_oid;
6394 RGWMPObj mp;
6395
6396 op_ret = get_params();
6397 if (op_ret < 0)
6398 return;
6399
6400 mp.init(s->object.name, upload_id);
6401 meta_oid = mp.get_meta();
6402
6403 op_ret = get_multipart_info(store, s, meta_oid, &policy, nullptr, nullptr);
6404 if (op_ret < 0)
6405 return;
6406
6407 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
6408 marker, parts, NULL, &truncated);
6409 }
6410
6411 int RGWListBucketMultiparts::verify_permission()
6412 {
6413 if (!verify_bucket_permission(this,
6414 s,
6415 rgw::IAM::s3ListBucketMultipartUploads))
6416 return -EACCES;
6417
6418 return 0;
6419 }
6420
6421 void RGWListBucketMultiparts::pre_exec()
6422 {
6423 rgw_bucket_object_pre_exec(s);
6424 }
6425
6426 void RGWListBucketMultiparts::execute()
6427 {
6428 vector<rgw_bucket_dir_entry> objs;
6429 string marker_meta;
6430
6431 op_ret = get_params();
6432 if (op_ret < 0)
6433 return;
6434
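/* Swift may pass a "path" argument; it is translated into a prefix plus a "/"
 * delimiter and cannot be combined with explicit prefix/delimiter params. */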
6435 if (s->prot_flags & RGW_REST_SWIFT) {
6436 string path_args;
6437 path_args = s->info.args.get("path");
6438 if (!path_args.empty()) {
6439 if (!delimiter.empty() || !prefix.empty()) {
6440 op_ret = -EINVAL;
6441 return;
6442 }
6443 prefix = path_args;
6444 delimiter="/";
6445 }
6446 }
6447 marker_meta = marker.get_meta();
6448
6449 op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
6450 max_uploads, &objs, &common_prefixes, &is_truncated);
6451 if (op_ret < 0) {
6452 return;
6453 }
6454
6455 if (!objs.empty()) {
6456 vector<rgw_bucket_dir_entry>::iterator iter;
6457 RGWMultipartUploadEntry entry;
6458 for (iter = objs.begin(); iter != objs.end(); ++iter) {
6459 rgw_obj_key key(iter->key);
6460 if (!entry.mp.from_meta(key.name))
6461 continue;
6462 entry.obj = *iter;
6463 uploads.push_back(entry);
6464 }
6465 next_marker = entry;
6466 }
6467 }
6468
6469 void RGWGetHealthCheck::execute()
6470 {
6471 if (!g_conf()->rgw_healthcheck_disabling_path.empty() &&
6472 (::access(g_conf()->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
6473 /* Disabling path specified & existent in the filesystem. */
6474 op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
6475 } else {
6476 op_ret = 0; /* 200 OK */
6477 }
6478 }
6479
6480 int RGWDeleteMultiObj::verify_permission()
6481 {
6482 int op_ret = get_params();
6483 if (op_ret) {
6484 return op_ret;
6485 }
6486
6487 if (s->iam_policy || ! s->iam_user_policies.empty()) {
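/* When object lock is enabled and the caller asked to bypass governance
 * retention, check s3:BypassGovernanceRetention; an explicit Deny clears
 * bypass_perm so the per-object lock checks below will reject deletes of
 * objects still under retention. */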
6488 if (s->bucket_info.obj_lock_enabled() && bypass_governance_mode) {
6489 auto r = eval_user_policies(s->iam_user_policies, s->env, boost::none,
6490 rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket_info.bucket));
6491 if (r == Effect::Deny) {
6492 bypass_perm = false;
6493 } else if (r == Effect::Pass && s->iam_policy) {
6494 r = s->iam_policy->eval(s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention,
6495 ARN(s->bucket_info.bucket));
6496 if (r == Effect::Deny) {
6497 bypass_perm = false;
6498 }
6499 }
6500 }
6501
6502 bool not_versioned = s->object.empty() || s->object.instance.empty();
6503
6504 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
6505 boost::none,
6506 not_versioned ?
6507 rgw::IAM::s3DeleteObject :
6508 rgw::IAM::s3DeleteObjectVersion,
6509 ARN(s->bucket));
6510 if (usr_policy_res == Effect::Deny) {
6511 return -EACCES;
6512 }
6513
6514 rgw::IAM::Effect r = Effect::Pass;
6515 if (s->iam_policy) {
6516 r = s->iam_policy->eval(s->env, *s->auth.identity,
6517 not_versioned ?
6518 rgw::IAM::s3DeleteObject :
6519 rgw::IAM::s3DeleteObjectVersion,
6520 ARN(s->bucket));
6521 }
6522 if (r == Effect::Allow)
6523 return 0;
6524 else if (r == Effect::Deny)
6525 return -EACCES;
6526 else if (usr_policy_res == Effect::Allow)
6527 return 0;
6528 }
6529
6530 acl_allowed = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
6531 if (!acl_allowed)
6532 return -EACCES;
6533
6534 return 0;
6535 }
6536
6537 void RGWDeleteMultiObj::pre_exec()
6538 {
6539 rgw_bucket_object_pre_exec(s);
6540 }
6541
6542 void RGWDeleteMultiObj::execute()
6543 {
6544 RGWMultiDelDelete *multi_delete;
6545 vector<rgw_obj_key>::iterator iter;
6546 RGWMultiDelXMLParser parser;
6547 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
6548 char* buf;
6549
6550 buf = data.c_str();
6551 if (!buf) {
6552 op_ret = -EINVAL;
6553 goto error;
6554 }
6555
6556 if (!parser.init()) {
6557 op_ret = -EINVAL;
6558 goto error;
6559 }
6560
6561 if (!parser.parse(buf, data.length(), 1)) {
6562 op_ret = -EINVAL;
6563 goto error;
6564 }
6565
6566 multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
6567 if (!multi_delete) {
6568 op_ret = -EINVAL;
6569 goto error;
6570 } else {
6571 #define DELETE_MULTI_OBJ_MAX_NUM 1000
6572 int max_num = s->cct->_conf->rgw_delete_multi_obj_max_num;
6573 if (max_num < 0) {
6574 max_num = DELETE_MULTI_OBJ_MAX_NUM;
6575 }
6576 int multi_delete_object_num = multi_delete->objects.size();
6577 if (multi_delete_object_num > max_num) {
6578 op_ret = -ERR_MALFORMED_XML;
6579 goto error;
6580 }
6581 }
6582
6583 if (multi_delete->is_quiet())
6584 quiet = true;
6585
6586 if (s->bucket_info.mfa_enabled()) {
6587 bool has_versioned = false;
6588 for (auto i : multi_delete->objects) {
6589 if (!i.instance.empty()) {
6590 has_versioned = true;
6591 break;
6592 }
6593 }
6594 if (has_versioned && !s->mfa_verified) {
6595 ldpp_dout(this, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl;
6596 op_ret = -ERR_MFA_REQUIRED;
6597 goto error;
6598 }
6599 }
6600
6601 begin_response();
6602 if (multi_delete->objects.empty()) {
6603 goto done;
6604 }
6605
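/* Each requested key is authorized and deleted independently; failures are
 * reported per key via send_partial_response() rather than failing the whole
 * request. */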
6606 for (iter = multi_delete->objects.begin();
6607 iter != multi_delete->objects.end();
6608 ++iter) {
6609 rgw_obj obj(bucket, *iter);
6610 if (s->iam_policy || ! s->iam_user_policies.empty()) {
6611 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
6612 boost::none,
6613 iter->instance.empty() ?
6614 rgw::IAM::s3DeleteObject :
6615 rgw::IAM::s3DeleteObjectVersion,
6616 ARN(obj));
6617 if (usr_policy_res == Effect::Deny) {
6618 send_partial_response(*iter, false, "", -EACCES);
6619 continue;
6620 }
6621
6622 rgw::IAM::Effect e = Effect::Pass;
6623 if (s->iam_policy) {
6624 e = s->iam_policy->eval(s->env,
6625 *s->auth.identity,
6626 iter->instance.empty() ?
6627 rgw::IAM::s3DeleteObject :
6628 rgw::IAM::s3DeleteObjectVersion,
6629 ARN(obj));
6630 }
6631 if ((e == Effect::Deny) ||
6632 (usr_policy_res == Effect::Pass && e == Effect::Pass && !acl_allowed)) {
6633 send_partial_response(*iter, false, "", -EACCES);
6634 continue;
6635 }
6636 }
6637
6638 // verify_object_lock
6639 bool check_obj_lock = obj.key.have_instance() && s->bucket_info.obj_lock_enabled();
6640 map<string,bufferlist> attrs;
6641 if (check_obj_lock) {
6642 int get_attrs_response = get_obj_attrs(store, s, obj, attrs);
6643 if (get_attrs_response < 0) {
6644 if (get_attrs_response == -ENOENT) {
6645 // object may be a delete marker, skip check_obj_lock
6646 check_obj_lock = false;
6647 } else {
6648 // Something went wrong.
6649 send_partial_response(*iter, false, "", get_attrs_response);
6650 continue;
6651 }
6652 }
6653 }
6654
6655 if (check_obj_lock) {
6656 int object_lock_response = verify_object_lock(this, attrs, bypass_perm, bypass_governance_mode);
6657 if (object_lock_response != 0) {
6658 send_partial_response(*iter, false, "", object_lock_response);
6659 continue;
6660 }
6661 }
6662
6663 obj_ctx->set_atomic(obj);
6664
6665 RGWRados::Object del_target(store->getRados(), s->bucket_info, *obj_ctx, obj);
6666 RGWRados::Object::Delete del_op(&del_target);
6667
6668 del_op.params.bucket_owner = s->bucket_owner.get_id();
6669 del_op.params.versioning_status = s->bucket_info.versioning_status();
6670 del_op.params.obj_owner = s->owner;
6671
6672 op_ret = del_op.delete_obj(s->yield);
6673 if (op_ret == -ENOENT) {
6674 op_ret = 0;
6675 }
6676
6677 send_partial_response(*iter, del_op.result.delete_marker,
6678 del_op.result.version_id, op_ret);
6679
6680 const auto obj_state = obj_ctx->get_state(obj);
6681 bufferlist etag_bl;
6682 const auto etag = obj_state->get_attr(RGW_ATTR_ETAG, etag_bl) ? etag_bl.to_str() : "";
6683
6684 const auto ret = rgw::notify::publish(s, obj.key, obj_state->size, obj_state->mtime, etag,
6685 del_op.result.delete_marker && s->object.instance.empty() ? rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete,
6686 store);
6687 if (ret < 0) {
6688 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
6689 // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error in case sending failed
6690 // this should be a global conf (probably returning a different handler)
6691 // so we don't need to read the configured values before we perform it
6692 }
6693 }
6694
6695 /* set the return code to zero, errors at this point will be
6696 dumped to the response */
6697 op_ret = 0;
6698
6699 done:
6700 // will likely segfault if begin_response() has not been called
6701 end_response();
6702 return;
6703
6704 error:
6705 send_status();
6706 return;
6707
6708 }
6709
6710 bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
6711 map<string, bufferlist>& battrs,
6712 ACLOwner& bucket_owner /* out */)
6713 {
6714 RGWAccessControlPolicy bacl(store->ctx());
6715 int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
6716 if (ret < 0) {
6717 return false;
6718 }
6719
6720 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
6721
6722 bucket_owner = bacl.get_owner();
6723
6724 /* We can use global user_acl because each BulkDelete request is allowed
6725 * to work on entities from a single account only. */
6726 return verify_bucket_permission(dpp, s, binfo.bucket, s->user_acl.get(),
6727 &bacl, policy, s->iam_user_policies, rgw::IAM::s3DeleteBucket);
6728 }
6729
6730 bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
6731 {
6732 auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
6733
6734 RGWBucketInfo binfo;
6735 map<string, bufferlist> battrs;
6736 ACLOwner bowner;
6737 RGWObjVersionTracker ot;
6738
6739 rgw_bucket b(rgw_bucket_key(s->user->get_tenant(), path.bucket_name));
6740
6741 int ret = store->ctl()->bucket->read_bucket_info(b, &binfo, s->yield,
6742 RGWBucketCtl::BucketInstance::GetParams()
6743 .set_attrs(&battrs),
6744 &ot);
6745 if (ret < 0) {
6746 goto binfo_fail;
6747 }
6748
6749 if (!verify_permission(binfo, battrs, bowner)) {
6750 ret = -EACCES;
6751 goto auth_fail;
6752 }
6753
6754 if (!path.obj_key.empty()) {
6755 rgw_obj obj(binfo.bucket, path.obj_key);
6756 obj_ctx.set_atomic(obj);
6757
6758 RGWRados::Object del_target(store->getRados(), binfo, obj_ctx, obj);
6759 RGWRados::Object::Delete del_op(&del_target);
6760
6761 del_op.params.bucket_owner = binfo.owner;
6762 del_op.params.versioning_status = binfo.versioning_status();
6763 del_op.params.obj_owner = bowner;
6764
6765 ret = del_op.delete_obj(s->yield);
6766 if (ret < 0) {
6767 goto delop_fail;
6768 }
6769 } else {
6770 ret = store->getRados()->delete_bucket(binfo, ot, s->yield);
6771 if (0 == ret) {
6772 ret = store->ctl()->bucket->unlink_bucket(binfo.owner, binfo.bucket, s->yield, false);
6773 if (ret < 0) {
6774 ldpp_dout(s, 0) << "WARNING: failed to unlink bucket: ret=" << ret << dendl;
6775 }
6776 }
6777 if (ret < 0) {
6778 goto delop_fail;
6779 }
6780
6781 if (!store->svc()->zone->is_meta_master()) {
6782 bufferlist in_data;
6783 ret = forward_request_to_master(s, &ot.read_version, store, in_data,
6784 nullptr);
6785 if (ret < 0) {
6786 if (ret == -ENOENT) {
6787 /* adjust error, we want to return with NoSuchBucket and not
6788 * NoSuchKey */
6789 ret = -ERR_NO_SUCH_BUCKET;
6790 }
6791 goto delop_fail;
6792 }
6793 }
6794 }
6795
6796 num_deleted++;
6797 return true;
6798
6799
6800 binfo_fail:
6801 if (-ENOENT == ret) {
6802 ldpp_dout(s, 20) << "cannot find bucket = " << path.bucket_name << dendl;
6803 num_unfound++;
6804 } else {
6805 ldpp_dout(s, 20) << "cannot get bucket info, ret = " << ret << dendl;
6806
6807 fail_desc_t failed_item = {
6808 .err = ret,
6809 .path = path
6810 };
6811 failures.push_back(failed_item);
6812 }
6813 return false;
6814
6815 auth_fail:
6816 ldpp_dout(s, 20) << "wrong auth for " << path << dendl;
6817 {
6818 fail_desc_t failed_item = {
6819 .err = ret,
6820 .path = path
6821 };
6822 failures.push_back(failed_item);
6823 }
6824 return false;
6825
6826 delop_fail:
6827 if (-ENOENT == ret) {
6828 ldpp_dout(s, 20) << "cannot find entry " << path << dendl;
6829 num_unfound++;
6830 } else {
6831 fail_desc_t failed_item = {
6832 .err = ret,
6833 .path = path
6834 };
6835 failures.push_back(failed_item);
6836 }
6837 return false;
6838 }
6839
6840 bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
6841 {
6842 ldpp_dout(s, 20) << "in delete_chunk" << dendl;
6843 for (auto path : paths) {
6844 ldpp_dout(s, 20) << "bulk deleting path: " << path << dendl;
6845 delete_single(path);
6846 }
6847
6848 return true;
6849 }
6850
6851 int RGWBulkDelete::verify_permission()
6852 {
6853 return 0;
6854 }
6855
6856 void RGWBulkDelete::pre_exec()
6857 {
6858 rgw_bucket_object_pre_exec(s);
6859 }
6860
6861 void RGWBulkDelete::execute()
6862 {
6863 deleter = std::unique_ptr<Deleter>(new Deleter(this, store, s));
6864
6865 bool is_truncated = false;
6866 do {
6867 list<RGWBulkDelete::acct_path_t> items;
6868
6869 int ret = get_data(items, &is_truncated);
6870 if (ret < 0) {
6871 return;
6872 }
6873
6874 ret = deleter->delete_chunk(items);
6875 } while (!op_ret && is_truncated);
6876
6877 return;
6878 }
6879
6880
6881 constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
6882
6883 int RGWBulkUploadOp::verify_permission()
6884 {
6885 if (s->auth.identity->is_anonymous()) {
6886 return -EACCES;
6887 }
6888
6889 if (! verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
6890 return -EACCES;
6891 }
6892
6893 if (s->user->get_tenant() != s->bucket_tenant) {
6894 ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
6895 << " (user_id.tenant=" << s->user->get_tenant()
6896 << " requested=" << s->bucket_tenant << ")" << dendl;
6897 return -EACCES;
6898 }
6899
6900 if (s->user->get_max_buckets() < 0) {
6901 return -EPERM;
6902 }
6903
6904 return 0;
6905 }
6906
6907 void RGWBulkUploadOp::pre_exec()
6908 {
6909 rgw_bucket_object_pre_exec(s);
6910 }
6911
6912 boost::optional<std::pair<std::string, rgw_obj_key>>
6913 RGWBulkUploadOp::parse_path(const boost::string_ref& path)
6914 {
6915 /* We need to skip all slashes at the beginning in order to preserve
6916 * compliance with Swift. */
6917 const size_t start_pos = path.find_first_not_of('/');
6918
6919 if (boost::string_ref::npos != start_pos) {
6920 /* Separator is the first slash after the leading ones. */
6921 const size_t sep_pos = path.substr(start_pos).find('/');
6922
6923 if (boost::string_ref::npos != sep_pos) {
6924 const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
6925 const auto obj_name = path.substr(sep_pos + 1);
6926
6927 return std::make_pair(bucket_name.to_string(),
6928 rgw_obj_key(obj_name.to_string()));
6929 } else {
6930 /* It's guaranteed here that the bucket name is at least one character
6931 * long and is not a slash. */
6932 return std::make_pair(path.substr(start_pos).to_string(),
6933 rgw_obj_key());
6934 }
6935 }
6936
6937 return none;
6938 }
6939
6940 std::pair<std::string, std::string>
6941 RGWBulkUploadOp::handle_upload_path(struct req_state *s)
6942 {
6943 std::string bucket_path, file_prefix;
6944 if (! s->init_state.url_bucket.empty()) {
6945 file_prefix = bucket_path = s->init_state.url_bucket + "/";
6946 if (! s->object.empty()) {
6947 std::string& object_name = s->object.name;
6948
6949 /* As the rgw_obj_key::empty() check above already verified that s->object.name
6950 * is non-empty, we can safely examine its last element. */
6951 if (object_name.back() == '/') {
6952 file_prefix.append(object_name);
6953 } else {
6954 file_prefix.append(object_name).append("/");
6955 }
6956 }
6957 }
6958 return std::make_pair(bucket_path, file_prefix);
6959 }
6960
6961 int RGWBulkUploadOp::handle_dir_verify_permission()
6962 {
6963 if (s->user->get_max_buckets() > 0) {
6964 rgw::sal::RGWBucketList buckets;
6965 std::string marker;
6966 op_ret = rgw_read_user_buckets(store, s->user->get_user(), buckets,
6967 marker, std::string(), s->user->get_max_buckets(),
6968 false);
6969 if (op_ret < 0) {
6970 return op_ret;
6971 }
6972
6973 if (buckets.count() >= static_cast<size_t>(s->user->get_max_buckets())) {
6974 return -ERR_TOO_MANY_BUCKETS;
6975 }
6976 }
6977
6978 return 0;
6979 }
6980
6981 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
6982 {
6983 /* a container- or object-level request already contains the bucket name;
6984 * only an account-level request needs the bucket name appended */
6985 if (info.script_uri.find(bucket_name) != std::string::npos) {
6986 return;
6987 }
6988
6989 ldout(cct, 20) << "append the bucket: " << bucket_name << " to req_info" << dendl;
6990 info.script_uri.append("/").append(bucket_name);
6991 info.request_uri_aws4 = info.request_uri = info.script_uri;
6992 info.effective_uri = "/" + bucket_name;
6993 }
6994
6995 void RGWBulkUploadOp::init(rgw::sal::RGWRadosStore* const store,
6996 struct req_state* const s,
6997 RGWHandler* const h)
6998 {
6999 RGWOp::init(store, s, h);
7000 dir_ctx.emplace(store->svc()->sysobj->init_obj_ctx());
7001 }
7002
7003 int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
7004 {
7005 ldpp_dout(this, 20) << "got directory=" << path << dendl;
7006
7007 op_ret = handle_dir_verify_permission();
7008 if (op_ret < 0) {
7009 return op_ret;
7010 }
7011
7012 std::string bucket_name;
7013 rgw_obj_key object_junk;
7014 std::tie(bucket_name, object_junk) = *parse_path(path);
7015
7016 rgw_raw_obj obj(store->svc()->zone->get_zone_params().domain_root,
7017 rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));
7018
7019 /* we need to make sure we read the bucket info; it has not been read yet
7020 * for this specific request */
7021 RGWBucketInfo binfo;
7022 std::map<std::string, ceph::bufferlist> battrs;
7023 op_ret = store->getRados()->get_bucket_info(store->svc(), s->bucket_tenant, bucket_name,
7024 binfo, nullptr, s->yield, &battrs);
7025 if (op_ret < 0 && op_ret != -ENOENT) {
7026 return op_ret;
7027 }
7028 const bool bucket_exists = (op_ret != -ENOENT);
7029
7030 if (bucket_exists) {
7031 RGWAccessControlPolicy old_policy(s->cct);
7032 int r = rgw_op_get_bucket_policy_from_attr(s->cct, store, binfo,
7033 battrs, &old_policy);
7034 if (r >= 0) {
7035 if (old_policy.get_owner().get_id().compare(s->user->get_user()) != 0) {
7036 op_ret = -EEXIST;
7037 return op_ret;
7038 }
7039 }
7040 }
7041
7042 RGWBucketInfo master_info;
7043 rgw_bucket *pmaster_bucket = nullptr;
7044 uint32_t *pmaster_num_shards = nullptr;
7045 real_time creation_time;
7046 obj_version objv, ep_objv, *pobjv = nullptr;
7047
7048 if (! store->svc()->zone->is_meta_master()) {
7049 JSONParser jp;
7050 ceph::bufferlist in_data;
7051 req_info info = s->info;
7052 forward_req_info(s->cct, info, bucket_name);
7053 op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
7054 if (op_ret < 0) {
7055 return op_ret;
7056 }
7057
7058 JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
7059 JSONDecoder::decode_json("object_ver", objv, &jp);
7060 JSONDecoder::decode_json("bucket_info", master_info, &jp);
7061
7062 ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
7063 ldpp_dout(this, 20) << "got creation_time="<< master_info.creation_time << dendl;
7064
7065 pmaster_bucket= &master_info.bucket;
7066 creation_time = master_info.creation_time;
7067 pmaster_num_shards = &master_info.num_shards;
7068 pobjv = &objv;
7069 } else {
7070 pmaster_bucket = nullptr;
7071 pmaster_num_shards = nullptr;
7072 }
7073
7074 rgw_placement_rule placement_rule(binfo.placement_rule, s->info.storage_class);
7075
7076 if (bucket_exists) {
7077 rgw_placement_rule selected_placement_rule;
7078 rgw_bucket bucket;
7079 bucket.tenant = s->bucket_tenant;
7080 bucket.name = s->bucket_name;
7081 op_ret = store->svc()->zone->select_bucket_placement(s->user->get_info(),
7082 store->svc()->zone->get_zonegroup().get_id(),
7083 placement_rule,
7084 &selected_placement_rule,
7085 nullptr);
7086 if (selected_placement_rule != binfo.placement_rule) {
7087 op_ret = -EEXIST;
7088 ldpp_dout(this, 20) << "non-coherent placement rule" << dendl;
7089 return op_ret;
7090 }
7091 }
7092
7093 /* Create metadata: ACLs. */
7094 std::map<std::string, ceph::bufferlist> attrs;
7095 RGWAccessControlPolicy policy;
7096 policy.create_default(s->user->get_id(), s->user->get_display_name());
7097 ceph::bufferlist aclbl;
7098 policy.encode(aclbl);
7099 attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
7100
7101 RGWQuotaInfo quota_info;
7102 const RGWQuotaInfo * pquota_info = nullptr;
7103
7104 rgw_bucket bucket;
7105 bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
7106 bucket.name = bucket_name;
7107
7108
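/* Attempt the bucket creation unconditionally; -EEXIST is tolerated and
 * handled below, so a partially created bucket (or a race with another
 * creator) can be recovered by linking the existing bucket instead. */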
7109 RGWBucketInfo out_info;
7110 op_ret = store->getRados()->create_bucket(s->user->get_info(),
7111 bucket,
7112 store->svc()->zone->get_zonegroup().get_id(),
7113 placement_rule, binfo.swift_ver_location,
7114 pquota_info, attrs,
7115 out_info, pobjv, &ep_objv, creation_time,
7116 pmaster_bucket, pmaster_num_shards, true);
7117 /* continue on EEXIST: the bucket may already exist or a previous create may
7118 * have been partial; this way we can recover from a partial create by retrying it. */
7119 ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret
7120 << ", bucket=" << bucket << dendl;
7121
7122 if (op_ret && op_ret != -EEXIST) {
7123 return op_ret;
7124 }
7125
7126 const bool existed = (op_ret == -EEXIST);
7127 if (existed) {
7128 /* bucket already existed, might have raced with another bucket creation, or
7129 * might be partial bucket creation that never completed. Read existing bucket
7130 * info, verify that the reported bucket owner is the current user.
7131 * If all is ok then update the user's list of buckets.
7132 * Otherwise inform client about a name conflict.
7133 */
7134 if (out_info.owner.compare(s->user->get_id()) != 0) {
7135 op_ret = -EEXIST;
7136 ldpp_dout(this, 20) << "conflicting bucket name" << dendl;
7137 return op_ret;
7138 }
7139 bucket = out_info.bucket;
7140 }
7141
7142 op_ret = store->ctl()->bucket->link_bucket(s->user->get_id(), bucket,
7143 out_info.creation_time,
7144 s->yield, false);
7145 if (op_ret && !existed && op_ret != -EEXIST) {
7146 /* if it exists (or previously existed), don't remove it! */
7147 op_ret = store->ctl()->bucket->unlink_bucket(s->user->get_id(), bucket, s->yield);
7148 if (op_ret < 0) {
7149 ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl;
7150 }
7151 } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
7152 ldpp_dout(this, 20) << "container already exists" << dendl;
7153 op_ret = -ERR_BUCKET_EXISTS;
7154 }
7155
7156 return op_ret;
7157 }
7158
7159
7160 bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
7161 const rgw_obj& obj,
7162 std::map<std::string, ceph::bufferlist>& battrs,
7163 ACLOwner& bucket_owner /* out */)
7164 {
7165 RGWAccessControlPolicy bacl(store->ctx());
7166 op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
7167 if (op_ret < 0) {
7168 ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl;
7169 return false;
7170 }
7171
7172 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
7173
7174 bucket_owner = bacl.get_owner();
7175 if (policy || ! s->iam_user_policies.empty()) {
7176 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
7177 boost::none,
7178 rgw::IAM::s3PutObject, obj);
7179 if (usr_policy_res == Effect::Deny) {
7180 return false;
7181 }
7182 auto e = policy->eval(s->env, *s->auth.identity,
7183 rgw::IAM::s3PutObject, obj);
7184 if (e == Effect::Allow) {
7185 return true;
7186 } else if (e == Effect::Deny) {
7187 return false;
7188 } else if (usr_policy_res == Effect::Allow) {
7189 return true;
7190 }
7191 }
7192
7193 return verify_bucket_permission_no_policy(this, s, s->user_acl.get(),
7194 &bacl, RGW_PERM_WRITE);
7195 }
7196
7197 int RGWBulkUploadOp::handle_file(const boost::string_ref path,
7198 const size_t size,
7199 AlignedStreamGetter& body)
7200 {
7201
7202 ldpp_dout(this, 20) << "got file=" << path << ", size=" << size << dendl;
7203
7204 if (size > static_cast<size_t>(s->cct->_conf->rgw_max_put_size)) {
7205 op_ret = -ERR_TOO_LARGE;
7206 return op_ret;
7207 }
7208
7209 std::string bucket_name;
7210 rgw_obj_key object;
7211 std::tie(bucket_name, object) = *parse_path(path);
7212
7213 auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
7214 RGWBucketInfo binfo;
7215 std::map<std::string, ceph::bufferlist> battrs;
7216 ACLOwner bowner;
7217 op_ret = store->getRados()->get_bucket_info(store->svc(), s->user->get_tenant(),
7218 bucket_name, binfo, nullptr, s->yield, &battrs);
7219 if (op_ret == -ENOENT) {
7220 ldpp_dout(this, 20) << "non-existent directory=" << bucket_name << dendl;
7221 } else if (op_ret < 0) {
7222 return op_ret;
7223 }
7224
7225 if (! handle_file_verify_permission(binfo,
7226 rgw_obj(binfo.bucket, object),
7227 battrs, bowner)) {
7228 ldpp_dout(this, 20) << "object creation unauthorized" << dendl;
7229 op_ret = -EACCES;
7230 return op_ret;
7231 }
7232
7233 op_ret = store->getRados()->check_quota(bowner.get_id(), binfo.bucket,
7234 user_quota, bucket_quota, size);
7235 if (op_ret < 0) {
7236 return op_ret;
7237 }
7238
7239 rgw_obj obj(binfo.bucket, object);
7240 if (s->bucket_info.versioning_enabled()) {
7241 store->getRados()->gen_rand_obj_instance_name(&obj);
7242 }
7243
7244 rgw_placement_rule dest_placement = s->dest_placement;
7245 dest_placement.inherit_from(binfo.placement_rule);
7246
7247 auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
7248 s->yield);
7249
7250 using namespace rgw::putobj;
7251 AtomicObjectProcessor processor(&*aio, store, binfo, &s->dest_placement, bowner.get_id(),
7252 obj_ctx, obj, 0, s->req_id, this, s->yield);
7253
7254 op_ret = processor.prepare(s->yield);
7255 if (op_ret < 0) {
7256 ldpp_dout(this, 20) << "cannot prepare processor due to ret=" << op_ret << dendl;
7257 return op_ret;
7258 }
7259
7260 /* No filters by default. */
7261 DataProcessor *filter = &processor;
7262
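/* If the target placement has a compression type configured, insert a
 * compression filter in front of the processor; otherwise data is fed
 * straight to the AtomicObjectProcessor. */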
7263 const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(
7264 dest_placement);
7265 CompressorRef plugin;
7266 boost::optional<RGWPutObj_Compress> compressor;
7267 if (compression_type != "none") {
7268 plugin = Compressor::create(s->cct, compression_type);
7269 if (! plugin) {
7270 ldpp_dout(this, 1) << "Cannot load plugin for rgw_compression_type "
7271 << compression_type << dendl;
7272 } else {
7273 compressor.emplace(s->cct, plugin, filter);
7274 filter = &*compressor;
7275 }
7276 }
7277
7278 /* Upload file content. */
7279 ssize_t len = 0;
7280 size_t ofs = 0;
7281 MD5 hash;
7282 do {
7283 ceph::bufferlist data;
7284 len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);
7285
7286 ldpp_dout(this, 20) << "body=" << data.c_str() << dendl;
7287 if (len < 0) {
7288 op_ret = len;
7289 return op_ret;
7290 } else if (len > 0) {
7291 hash.Update((const unsigned char *)data.c_str(), data.length());
7292 op_ret = filter->process(std::move(data), ofs);
7293 if (op_ret < 0) {
7294 ldpp_dout(this, 20) << "filter->process() returned ret=" << op_ret << dendl;
7295 return op_ret;
7296 }
7297
7298 ofs += len;
7299 }
7300
7301 } while (len > 0);
7302
7303 // flush
7304 op_ret = filter->process({}, ofs);
7305 if (op_ret < 0) {
7306 return op_ret;
7307 }
7308
7309 if (ofs != size) {
7310 ldpp_dout(this, 10) << "real file size differs from the declared size" << dendl;
7311 op_ret = -EINVAL;
7312 return op_ret;
7313 }
7314
7315 op_ret = store->getRados()->check_quota(bowner.get_id(), binfo.bucket,
7316 user_quota, bucket_quota, size);
7317 if (op_ret < 0) {
7318 ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl;
7319 return op_ret;
7320 }
7321
7322 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
7323 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
7324 hash.Final(m);
7325 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
7326
7327 /* Create metadata: ETAG. */
7328 std::map<std::string, ceph::bufferlist> attrs;
7329 std::string etag = calc_md5;
7330 ceph::bufferlist etag_bl;
7331 etag_bl.append(etag.c_str(), etag.size() + 1);
7332 attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));
7333
7334 /* Create metadata: ACLs. */
7335 RGWAccessControlPolicy policy;
7336 policy.create_default(s->user->get_id(), s->user->get_display_name());
7337 ceph::bufferlist aclbl;
7338 policy.encode(aclbl);
7339 attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
7340
7341 /* Create metadata: compression info. */
7342 if (compressor && compressor->is_compressed()) {
7343 ceph::bufferlist tmp;
7344 RGWCompressionInfo cs_info;
7345 cs_info.compression_type = plugin->get_type_name();
7346 cs_info.orig_size = s->obj_size;
7347 cs_info.blocks = std::move(compressor->get_compression_blocks());
7348 encode(cs_info, tmp);
7349 attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
7350 }
7351
7352 /* Complete the transaction. */
7353 op_ret = processor.complete(size, etag, nullptr, ceph::real_time(),
7354 attrs, ceph::real_time() /* delete_at */,
7355 nullptr, nullptr, nullptr, nullptr, nullptr,
7356 s->yield);
7357 if (op_ret < 0) {
7358 ldpp_dout(this, 20) << "processor::complete returned op_ret=" << op_ret << dendl;
7359 }
7360
7361 return op_ret;
7362 }
7363
7364 void RGWBulkUploadOp::execute()
7365 {
7366 ceph::bufferlist buffer(64 * 1024);
7367
7368 ldpp_dout(this, 20) << "start" << dendl;
7369
7370 /* Create an instance of a stream-abstracting class. Having this indirection
7371 * allows for easy introduction of decompressors like gzip and bzip2. */
7372 auto stream = create_stream();
7373 if (! stream) {
7374 return;
7375 }
7376
7377 /* Handling the $UPLOAD_PATH according to Swift's Bulk middleware. See:
7378 * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
7379 std::string bucket_path, file_prefix;
7380 std::tie(bucket_path, file_prefix) = handle_upload_path(s);
7381
7382 auto status = rgw::tar::StatusIndicator::create();
7383 do {
7384 op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
7385 if (op_ret < 0) {
7386 ldpp_dout(this, 2) << "cannot read header" << dendl;
7387 return;
7388 }
7389
7390 /* We need to re-interpret the buffer as a TAR block. Exactly two consecutive
7391 * blocks must be tracked to detect the end-of-archive, which occurs when both
7392 * of them are empty (zeroed). Tracking this particular inter-block dependency
7393 * is the responsibility of the rgw::tar::StatusIndicator class. */
7394 boost::optional<rgw::tar::HeaderView> header;
7395 std::tie(status, header) = rgw::tar::interpret_block(status, buffer);
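/* A zeroed block yields no header here; status.eof() becomes true only after
 * two zeroed blocks in a row, which is what ends the surrounding do/while loop. */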
7396
7397 if (! status.empty() && header) {
7398 /* This specific block isn't empty (entirely zeroed), so we can parse
7399 * it as a TAR header and dispatch on it. At the moment we support only
7400 * regular files and directories. Everything else (symlinks, devices)
7401 * will be ignored but won't abort the whole upload. */
7402 switch (header->get_filetype()) {
7403 case rgw::tar::FileType::NORMAL_FILE: {
7404 ldpp_dout(this, 2) << "handling regular file" << dendl;
7405
7406 const std::string filename = bucket_path.empty() ? header->get_filename().to_string() :
7407 file_prefix + header->get_filename().to_string();
7408 auto body = AlignedStreamGetter(0, header->get_filesize(),
7409 rgw::tar::BLOCK_SIZE, *stream);
7410 op_ret = handle_file(filename,
7411 header->get_filesize(),
7412 body);
7413 if (! op_ret) {
7414 /* Only regular files count. */
7415 num_created++;
7416 } else {
7417 failures.emplace_back(op_ret, filename);
7418 }
7419 break;
7420 }
7421 case rgw::tar::FileType::DIRECTORY: {
7422 ldpp_dout(this, 2) << "handling regular directory" << dendl;
7423
7424 boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
7425 op_ret = handle_dir(dirname);
7426 if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
7427 failures.emplace_back(op_ret, dirname.to_string());
7428 }
7429 break;
7430 }
7431 default: {
7432 /* Not recognized. Skip. */
7433 op_ret = 0;
7434 break;
7435 }
7436 }
7437
7438 /* In case of any problem with sub-request authorization, Swift simply
7439 * terminates the whole upload immediately. */
7440 if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
7441 terminal_errors)) {
7442 ldpp_dout(this, 2) << "terminating due to ret=" << op_ret << dendl;
7443 break;
7444 }
7445 } else {
7446 ldpp_dout(this, 2) << "an empty block" << dendl;
7447 op_ret = 0;
7448 }
7449
7450 buffer.clear();
7451 } while (! status.eof());
7452
7453 return;
7454 }
7455
7456 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
7457 {
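/* TAR payloads are padded up to a multiple of the block size (512 bytes).
 * The destructor drains everything left up to that padded length, so the
 * next header read starts on a block boundary. For example, with
 * length = 600 and alignment = 512, aligned_length is 1024; if the caller
 * already consumed all 600 payload bytes, the remaining 424 padding bytes
 * are read into junk and discarded. */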
7458 const size_t aligned_length = length + (-length % alignment);
7459 ceph::bufferlist junk;
7460
7461 DecoratedStreamGetter::get_exactly(aligned_length - position, junk);
7462 }
7463
7464 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
7465 ceph::bufferlist& dst)
7466 {
7467 const size_t max_to_read = std::min(want, length - position);
7468 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
7469 if (len > 0) {
7470 position += len;
7471 }
7472 return len;
7473 }
7474
7475 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
7476 ceph::bufferlist& dst)
7477 {
7478 const auto len = DecoratedStreamGetter::get_exactly(want, dst);
7479 if (len > 0) {
7480 position += len;
7481 }
7482 return len;
7483 }
7484
7485 int RGWSetAttrs::verify_permission()
7486 {
7487 // This looks to be part of the RGW-NFS machinery and has no S3 or
7488 // Swift equivalent.
7489 bool perm;
7490 if (!s->object.empty()) {
7491 perm = verify_object_permission_no_policy(this, s, RGW_PERM_WRITE);
7492 } else {
7493 perm = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
7494 }
7495 if (!perm)
7496 return -EACCES;
7497
7498 return 0;
7499 }
7500
7501 void RGWSetAttrs::pre_exec()
7502 {
7503 rgw_bucket_object_pre_exec(s);
7504 }
7505
7506 void RGWSetAttrs::execute()
7507 {
7508 op_ret = get_params();
7509 if (op_ret < 0)
7510 return;
7511
7512 rgw_obj obj(s->bucket, s->object);
7513
7514 if (!s->object.empty()) {
7515 store->getRados()->set_atomic(s->obj_ctx, obj);
7516 op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr, s->yield);
7517 } else {
7518 for (auto& iter : attrs) {
7519 s->bucket_attrs[iter.first] = std::move(iter.second);
7520 }
7521 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
7522 &s->bucket_info.objv_tracker,
7523 s->yield);
7524 }
7525 }
7526
7527 void RGWGetObjLayout::pre_exec()
7528 {
7529 rgw_bucket_object_pre_exec(s);
7530 }
7531
7532 void RGWGetObjLayout::execute()
7533 {
7534 rgw_obj obj(s->bucket, s->object);
7535 RGWRados::Object target(store->getRados(),
7536 s->bucket_info,
7537 *static_cast<RGWObjectCtx *>(s->obj_ctx),
7538 rgw_obj(s->bucket, s->object));
7539 RGWRados::Object::Read stat_op(&target);
7540
7541 op_ret = stat_op.prepare(s->yield);
7542 if (op_ret < 0) {
7543 return;
7544 }
7545
7546 head_obj = stat_op.state.head_obj;
7547
7548 op_ret = target.get_manifest(&manifest, s->yield);
7549 }
7550
7551
7552 int RGWConfigBucketMetaSearch::verify_permission()
7553 {
7554 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7555 return -EACCES;
7556 }
7557
7558 return 0;
7559 }
7560
7561 void RGWConfigBucketMetaSearch::pre_exec()
7562 {
7563 rgw_bucket_object_pre_exec(s);
7564 }
7565
7566 void RGWConfigBucketMetaSearch::execute()
7567 {
7568 op_ret = get_params();
7569 if (op_ret < 0) {
7570 ldpp_dout(this, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
7571 return;
7572 }
7573
7574 s->bucket_info.mdsearch_config = mdsearch_config;
7575
7576 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
7577 if (op_ret < 0) {
7578 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
7579 << " returned err=" << op_ret << dendl;
7580 return;
7581 }
7582 }
7583
7584 int RGWGetBucketMetaSearch::verify_permission()
7585 {
7586 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7587 return -EACCES;
7588 }
7589
7590 return 0;
7591 }
7592
7593 void RGWGetBucketMetaSearch::pre_exec()
7594 {
7595 rgw_bucket_object_pre_exec(s);
7596 }
7597
7598 int RGWDelBucketMetaSearch::verify_permission()
7599 {
7600 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7601 return -EACCES;
7602 }
7603
7604 return 0;
7605 }
7606
7607 void RGWDelBucketMetaSearch::pre_exec()
7608 {
7609 rgw_bucket_object_pre_exec(s);
7610 }
7611
7612 void RGWDelBucketMetaSearch::execute()
7613 {
7614 s->bucket_info.mdsearch_config.clear();
7615
7616 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
7617 if (op_ret < 0) {
7618 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
7619 << " returned err=" << op_ret << dendl;
7620 return;
7621 }
7622 }
7623
7624
7625 RGWHandler::~RGWHandler()
7626 {
7627 }
7628
7629 int RGWHandler::init(rgw::sal::RGWRadosStore *_store,
7630 struct req_state *_s,
7631 rgw::io::BasicClient *cio)
7632 {
7633 store = _store;
7634 s = _s;
7635
7636 return 0;
7637 }
7638
7639 int RGWHandler::do_init_permissions()
7640 {
7641 int ret = rgw_build_bucket_policies(store, s);
7642 if (ret < 0) {
7643 ldpp_dout(s, 10) << "init_permissions on " << s->bucket
7644 << " failed, ret=" << ret << dendl;
7645 return ret == -ENODATA ? -EACCES : ret;
7646 }
7647
7648 rgw_build_iam_environment(store, s);
7649 return ret;
7650 }
7651
7652 int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
7653 {
7654 if (only_bucket) {
7655 /* already read bucket info */
7656 return 0;
7657 }
7658 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
7659
7660 if (ret < 0) {
7661 ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":"
7662 << s->object << " only_bucket=" << only_bucket
7663 << " ret=" << ret << dendl;
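/* Map a missing object ACL (-ENODATA) to access denied, and report access
 * denied to anonymous callers as -EPERM. */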
7664 if (ret == -ENODATA)
7665 ret = -EACCES;
7666 if (s->auth.identity->is_anonymous() && ret == -EACCES)
7667 ret = -EPERM;
7668 }
7669
7670 return ret;
7671 }
7672
7673 int RGWOp::error_handler(int err_no, string *error_content) {
7674 return dialect_handler->error_handler(err_no, error_content);
7675 }
7676
7677 int RGWHandler::error_handler(int err_no, string *error_content) {
7678 // This is the do-nothing error handler
7679 return err_no;
7680 }
7681
7682 std::ostream& RGWOp::gen_prefix(std::ostream& out) const
7683 {
7684 // append <dialect>:<op name> to the prefix
7685 return s->gen_prefix(out) << s->dialect << ':' << name() << ' ';
7686 }
7687
7688 void RGWDefaultResponseOp::send_response() {
7689 if (op_ret) {
7690 set_req_state_err(s, op_ret);
7691 }
7692 dump_errno(s);
7693 end_header(s);
7694 }
7695
7696 void RGWPutBucketPolicy::send_response()
7697 {
7698 if (!op_ret) {
7699 /* A successful PutBucketPolicy request returns 204 No Content. */
7700 op_ret = STATUS_NO_CONTENT;
7701 }
7702 if (op_ret) {
7703 set_req_state_err(s, op_ret);
7704 }
7705 dump_errno(s);
7706 end_header(s);
7707 }
7708
7709 int RGWPutBucketPolicy::verify_permission()
7710 {
7711 if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPolicy)) {
7712 return -EACCES;
7713 }
7714
7715 return 0;
7716 }
7717
7718 int RGWPutBucketPolicy::get_params()
7719 {
7720 const auto max_size = s->cct->_conf->rgw_max_put_param_size;
7721 // At some point when I have more time I want to make a version of
7722 // rgw_rest_read_all_input that doesn't use malloc and doesn't throw
7723 // exceptions.
7724 std::tie(op_ret, data) = rgw_rest_read_all_input(s, max_size, false);
7725
7726 return op_ret;
7727 }
7728
7729 void RGWPutBucketPolicy::execute()
7730 {
7731 op_ret = get_params();
7732 if (op_ret < 0) {
7733 return;
7734 }
7735
7736 if (!store->svc()->zone->is_meta_master()) {
7737 op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
7738 if (op_ret < 0) {
7739 ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
7740 return;
7741 }
7742 }
7743
7744 try {
7745 const Policy p(s->cct, s->bucket_tenant, data);
7746 auto attrs = s->bucket_attrs;
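/* Honor the bucket's PublicAccessBlock: if BlockPublicPolicy is enabled,
 * refuse to attach a policy that would grant public access. */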
7747 if (s->bucket_access_conf &&
7748 s->bucket_access_conf->block_public_policy() &&
7749 rgw::IAM::is_public(p)) {
7750 op_ret = -EACCES;
7751 return;
7752 }
7753
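/* retry_raced_bucket_write() re-runs the lambda after refreshing the bucket
 * info if the attrs write loses a race with a concurrent update to the
 * bucket instance. */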
7754 op_ret = retry_raced_bucket_write(store->getRados(), s, [&p, this, &attrs] {
7755 attrs[RGW_ATTR_IAM_POLICY].clear();
7756 attrs[RGW_ATTR_IAM_POLICY].append(p.text);
7757 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
7758 &s->bucket_info.objv_tracker,
7759 s->yield);
7760 return op_ret;
7761 });
7762 } catch (rgw::IAM::PolicyParseException& e) {
7763 ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl;
7764 op_ret = -EINVAL;
7765 }
7766 }
7767
7768 void RGWGetBucketPolicy::send_response()
7769 {
7770 if (op_ret) {
7771 set_req_state_err(s, op_ret);
7772 }
7773 dump_errno(s);
7774 end_header(s, this, "application/json");
7775 dump_body(s, policy);
7776 }
7777
7778 int RGWGetBucketPolicy::verify_permission()
7779 {
7780 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) {
7781 return -EACCES;
7782 }
7783
7784 return 0;
7785 }
7786
7787 void RGWGetBucketPolicy::execute()
7788 {
7789 auto attrs = s->bucket_attrs;
7790 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
7791 if (aiter == attrs.end()) {
7792 ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
7793 << s->bucket_name << dendl;
7794 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
7795 s->err.message = "The bucket policy does not exist";
7796 return;
7797 } else {
7798 policy = attrs[RGW_ATTR_IAM_POLICY];
7799
7800 if (policy.length() == 0) {
7801 ldpp_dout(this, 10) << "The bucket policy does not exist, bucket: "
7802 << s->bucket_name << dendl;
7803 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
7804 s->err.message = "The bucket policy does not exist";
7805 return;
7806 }
7807 }
7808 }
7809
7810 void RGWDeleteBucketPolicy::send_response()
7811 {
7812 if (op_ret) {
7813 set_req_state_err(s, op_ret);
7814 }
7815 dump_errno(s);
7816 end_header(s);
7817 }
7818
7819 int RGWDeleteBucketPolicy::verify_permission()
7820 {
7821 if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucketPolicy)) {
7822 return -EACCES;
7823 }
7824
7825 return 0;
7826 }
7827
7828 void RGWDeleteBucketPolicy::execute()
7829 {
7830 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
7831 auto attrs = s->bucket_attrs;
7832 attrs.erase(RGW_ATTR_IAM_POLICY);
7833 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
7834 &s->bucket_info.objv_tracker,
7835 s->yield);
7836 return op_ret;
7837 });
7838 }
7839
7840 void RGWPutBucketObjectLock::pre_exec()
7841 {
7842 rgw_bucket_object_pre_exec(s);
7843 }
7844
7845 int RGWPutBucketObjectLock::verify_permission()
7846 {
7847 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketObjectLockConfiguration);
7848 }
7849
7850 void RGWPutBucketObjectLock::execute()
7851 {
7852 if (!s->bucket_info.obj_lock_enabled()) {
7853 s->err.message = "object lock configuration can't be set if bucket object lock not enabled";
7854 ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
7855 op_ret = -ERR_INVALID_BUCKET_STATE;
7856 return;
7857 }
7858
7859 RGWXMLDecoder::XMLParser parser;
7860 if (!parser.init()) {
7861 ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
7862 op_ret = -EINVAL;
7863 return;
7864 }
7865 op_ret = get_params();
7866 if (op_ret < 0) {
7867 return;
7868 }
7869 if (!parser.parse(data.c_str(), data.length(), 1)) {
7870 op_ret = -ERR_MALFORMED_XML;
7871 return;
7872 }
7873
7874 try {
7875 RGWXMLDecoder::decode_xml("ObjectLockConfiguration", obj_lock, &parser, true);
7876 } catch (RGWXMLDecoder::err& err) {
7877 ldout(s->cct, 5) << "unexpected xml:" << err << dendl;
7878 op_ret = -ERR_MALFORMED_XML;
7879 return;
7880 }
7881 if (obj_lock.has_rule() && !obj_lock.retention_period_valid()) {
7882 s->err.message = "retention period must be a positive integer value";
7883 ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
7884 op_ret = -ERR_INVALID_RETENTION_PERIOD;
7885 return;
7886 }
7887
7888 if (!store->svc()->zone->is_meta_master()) {
7889 op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
7890 if (op_ret < 0) {
7891 ldout(s->cct, 20) << __func__ << ": forward_request_to_master returned ret=" << op_ret << dendl;
7892 return;
7893 }
7894 }
7895
7896 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
7897 s->bucket_info.obj_lock = obj_lock;
7898 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
7899 real_time(), &s->bucket_attrs);
7900 return op_ret;
7901 });
7902 return;
7903 }
7904
7905 void RGWGetBucketObjectLock::pre_exec()
7906 {
7907 rgw_bucket_object_pre_exec(s);
7908 }
7909
7910 int RGWGetBucketObjectLock::verify_permission()
7911 {
7912 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketObjectLockConfiguration);
7913 }
7914
7915 void RGWGetBucketObjectLock::execute()
7916 {
7917 if (!s->bucket_info.obj_lock_enabled()) {
7918 op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION;
7919 return;
7920 }
7921 }
7922
7923 int RGWPutObjRetention::verify_permission()
7924 {
7925 if (!verify_object_permission(this, s, rgw::IAM::s3PutObjectRetention)) {
7926 return -EACCES;
7927 }
7928 op_ret = get_params();
7929 if (op_ret) {
7930 return op_ret;
7931 }
7932 if (bypass_governance_mode) {
7933 bypass_perm = verify_object_permission(this, s, rgw::IAM::s3BypassGovernanceRetention);
7934 }
7935 return 0;
7936 }
7937
7938 void RGWPutObjRetention::pre_exec()
7939 {
7940 rgw_bucket_object_pre_exec(s);
7941 }
7942
7943 void RGWPutObjRetention::execute()
7944 {
7945 if (!s->bucket_info.obj_lock_enabled()) {
7946 s->err.message = "object retention can't be set if bucket object lock not configured";
7947 ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
7948 op_ret = -ERR_INVALID_REQUEST;
7949 return;
7950 }
7951
7952 RGWXMLDecoder::XMLParser parser;
7953 if (!parser.init()) {
7954 ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
7955 op_ret = -EINVAL;
7956 return;
7957 }
7958
7959 if (!parser.parse(data.c_str(), data.length(), 1)) {
7960 op_ret = -ERR_MALFORMED_XML;
7961 return;
7962 }
7963
7964 try {
7965 RGWXMLDecoder::decode_xml("Retention", obj_retention, &parser, true);
7966 } catch (RGWXMLDecoder::err& err) {
7967 ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
7968 op_ret = -ERR_MALFORMED_XML;
7969 return;
7970 }
7971
7972 if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < ceph_clock_now()) {
7973 s->err.message = "the retain-until date must be in the future";
7974 ldpp_dout(this, 0) << "ERROR: " << s->err.message << dendl;
7975 op_ret = -EINVAL;
7976 return;
7977 }
7978 bufferlist bl;
7979 obj_retention.encode(bl);
7980 rgw_obj obj(s->bucket, s->object);
7981
7982 //check old retention
7983 map<string, bufferlist> attrs;
7984 op_ret = get_obj_attrs(store, s, obj, attrs);
7985 if (op_ret < 0) {
7986 ldpp_dout(this, 0) << "ERROR: get obj attr error"<< dendl;
7987 return;
7988 }
7989 auto aiter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
7990 if (aiter != attrs.end()) {
7991 RGWObjectRetention old_obj_retention;
7992 try {
7993 decode(old_obj_retention, aiter->second);
7994 } catch (buffer::error& err) {
7995 ldpp_dout(this, 0) << "ERROR: failed to decode RGWObjectRetention" << dendl;
7996 op_ret = -EIO;
7997 return;
7998 }
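/* Shortening an existing retention period is only allowed when the current
 * mode is GOVERNANCE and the caller both requested governance bypass and
 * holds the s3:BypassGovernanceRetention permission. */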
7999 if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < ceph::real_clock::to_time_t(old_obj_retention.get_retain_until_date())) {
8000 if (old_obj_retention.get_mode().compare("GOVERNANCE") != 0 || !bypass_perm || !bypass_governance_mode) {
8001 s->err.message = "proposed retain-until date shortens an existing retention period and governance bypass check failed";
8002 op_ret = -EACCES;
8003 return;
8004 }
8005 }
8006 }
8007
8008 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_OBJECT_RETENTION, bl);
8009
8010 return;
8011 }
8012
8013 int RGWGetObjRetention::verify_permission()
8014 {
8015 if (!verify_object_permission(this, s, rgw::IAM::s3GetObjectRetention)) {
8016 return -EACCES;
8017 }
8018 return 0;
8019 }
8020
8021 void RGWGetObjRetention::pre_exec()
8022 {
8023 rgw_bucket_object_pre_exec(s);
8024 }
8025
8026 void RGWGetObjRetention::execute()
8027 {
8028 if (!s->bucket_info.obj_lock_enabled()) {
8029 s->err.message = "bucket object lock not configured";
8030 ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
8031 op_ret = -ERR_INVALID_REQUEST;
8032 return;
8033 }
8034 rgw_obj obj(s->bucket, s->object);
8035 map<string, bufferlist> attrs;
8036 op_ret = get_obj_attrs(store, s, obj, attrs);
8037 if (op_ret < 0) {
8038 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj
8039 << " ret=" << op_ret << dendl;
8040 return;
8041 }
8042 auto aiter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
8043 if (aiter == attrs.end()) {
8044 op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION;
8045 return;
8046 }
8047
8048 bufferlist::const_iterator iter{&aiter->second};
8049 try {
8050 obj_retention.decode(iter);
8051 } catch (const buffer::error& e) {
8052 ldout(s->cct, 0) << __func__ << ": failed to decode object retention config" << dendl;
8053 op_ret = -EIO;
8054 return;
8055 }
8056 return;
8057 }
8058
8059 int RGWPutObjLegalHold::verify_permission()
8060 {
8061 if (!verify_object_permission(this, s, rgw::IAM::s3PutObjectLegalHold)) {
8062 return -EACCES;
8063 }
8064 return 0;
8065 }
8066
8067 void RGWPutObjLegalHold::pre_exec()
8068 {
8069 rgw_bucket_object_pre_exec(s);
8070 }
8071
8072 void RGWPutObjLegalHold::execute() {
8073 if (!s->bucket_info.obj_lock_enabled()) {
8074 s->err.message = "object legal hold can't be set if bucket object lock not enabled";
8075 ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
8076 op_ret = -ERR_INVALID_REQUEST;
8077 return;
8078 }
8079
8080 RGWXMLDecoder::XMLParser parser;
8081 if (!parser.init()) {
8082 ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
8083 op_ret = -EINVAL;
8084 return;
8085 }
8086
8087 op_ret = get_params();
8088 if (op_ret < 0)
8089 return;
8090
8091 if (!parser.parse(data.c_str(), data.length(), 1)) {
8092 op_ret = -ERR_MALFORMED_XML;
8093 return;
8094 }
8095
8096 try {
8097 RGWXMLDecoder::decode_xml("LegalHold", obj_legal_hold, &parser, true);
8098 } catch (RGWXMLDecoder::err &err) {
8099 ldout(s->cct, 5) << "unexpected xml:" << err << dendl;
8100 op_ret = -ERR_MALFORMED_XML;
8101 return;
8102 }
8103 bufferlist bl;
8104 obj_legal_hold.encode(bl);
8105 rgw_obj obj(s->bucket, s->object);
8106 //if instance is empty, we should modify the latest object
8107 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_OBJECT_LEGAL_HOLD, bl);
8108 return;
8109 }
8110
8111 int RGWGetObjLegalHold::verify_permission()
8112 {
8113 if (!verify_object_permission(this, s, rgw::IAM::s3GetObjectLegalHold)) {
8114 return -EACCES;
8115 }
8116 return 0;
8117 }
8118
8119 void RGWGetObjLegalHold::pre_exec()
8120 {
8121 rgw_bucket_object_pre_exec(s);
8122 }
8123
8124 void RGWGetObjLegalHold::execute()
8125 {
8126 if (!s->bucket_info.obj_lock_enabled()) {
8127 s->err.message = "bucket object lock not configured";
8128 ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl;
8129 op_ret = -ERR_INVALID_REQUEST;
8130 return;
8131 }
8132 rgw_obj obj(s->bucket, s->object);
8133 map<string, bufferlist> attrs;
8134 op_ret = get_obj_attrs(store, s, obj, attrs);
8135 if (op_ret < 0) {
8136 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj
8137 << " ret=" << op_ret << dendl;
8138 return;
8139 }
8140 auto aiter = attrs.find(RGW_ATTR_OBJECT_LEGAL_HOLD);
8141 if (aiter == attrs.end()) {
8142 op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION;
8143 return;
8144 }
8145
8146 bufferlist::const_iterator iter{&aiter->second};
8147 try {
8148 obj_legal_hold.decode(iter);
8149 } catch (const buffer::error& e) {
8150 ldout(s->cct, 0) << __func__ << ": failed to decode object legal hold config" << dendl;
8151 op_ret = -EIO;
8152 return;
8153 }
8154 return;
8155 }
8156
8157 void RGWGetClusterStat::execute()
8158 {
8159 op_ret = this->store->getRados()->get_rados_handle()->cluster_stat(stats_op);
8160 }
8161
8162 int RGWGetBucketPolicyStatus::verify_permission()
8163 {
8164 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicyStatus)) {
8165 return -EACCES;
8166 }
8167
8168 return 0;
8169 }
8170
8171 void RGWGetBucketPolicyStatus::execute()
8172 {
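/* The bucket is reported public if either its IAM policy or its ACL grants
 * public access. */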
8173 isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public();
8174 }
8175
8176 int RGWPutBucketPublicAccessBlock::verify_permission()
8177 {
8178 if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPublicAccessBlock)) {
8179 return -EACCES;
8180 }
8181
8182 return 0;
8183 }
8184
8185 int RGWPutBucketPublicAccessBlock::get_params()
8186 {
8187 const auto max_size = s->cct->_conf->rgw_max_put_param_size;
8188 std::tie(op_ret, data) = rgw_rest_read_all_input(s, max_size, false);
8189 return op_ret;
8190 }
8191
8192 void RGWPutBucketPublicAccessBlock::execute()
8193 {
8194 RGWXMLDecoder::XMLParser parser;
8195 if (!parser.init()) {
8196 ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
8197 op_ret = -EINVAL;
8198 return;
8199 }
8200
8201 op_ret = get_params();
8202 if (op_ret < 0)
8203 return;
8204
8205 if (!parser.parse(data.c_str(), data.length(), 1)) {
8206 ldpp_dout(this, 0) << "ERROR: malformed XML" << dendl;
8207 op_ret = -ERR_MALFORMED_XML;
8208 return;
8209 }
8210
8211 try {
8212 RGWXMLDecoder::decode_xml("PublicAccessBlockConfiguration", access_conf, &parser, true);
8213 } catch (RGWXMLDecoder::err &err) {
8214 ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
8215 op_ret = -ERR_MALFORMED_XML;
8216 return;
8217 }
8218
8219 if (!store->svc()->zone->is_meta_master()) {
8220 op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
8221 if (op_ret < 0) {
8222 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
8223 return;
8224 }
8225 }
8226
8227 bufferlist bl;
8228 access_conf.encode(bl);
8229 op_ret = retry_raced_bucket_write(store->getRados(), s, [this, &bl] {
8230 map<string, bufferlist> attrs = s->bucket_attrs;
8231 attrs[RGW_ATTR_PUBLIC_ACCESS] = bl;
8232 return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
8233 });
8234
8235 }
8236
8237 int RGWGetBucketPublicAccessBlock::verify_permission()
8238 {
8239 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) {
8240 return -EACCES;
8241 }
8242
8243 return 0;
8244 }
8245
8246 void RGWGetBucketPublicAccessBlock::execute()
8247 {
8248 auto attrs = s->bucket_attrs;
8249 if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS);
8250 aiter == attrs.end()) {
8251 ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
8252 << s->bucket_name << dendl;
8253 // return the default;
8254 return;
8255 } else {
8256 bufferlist::const_iterator iter{&aiter->second};
8257 try {
8258 access_conf.decode(iter);
8259 } catch (const buffer::error& e) {
8260 ldpp_dout(this, 0) << __func__ << ": failed to decode access_conf" << dendl;
8261 op_ret = -EIO;
8262 return;
8263 }
8264 }
8265 }
8266
8267
8268 void RGWDeleteBucketPublicAccessBlock::send_response()
8269 {
8270 if (op_ret) {
8271 set_req_state_err(s, op_ret);
8272 }
8273 dump_errno(s);
8274 end_header(s);
8275 }
8276
8277 int RGWDeleteBucketPublicAccessBlock::verify_permission()
8278 {
8279 if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPublicAccessBlock)) {
8280 return -EACCES;
8281 }
8282
8283 return 0;
8284 }
8285
8286 void RGWDeleteBucketPublicAccessBlock::execute()
8287 {
8288 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
8289 auto attrs = s->bucket_attrs;
8290 attrs.erase(RGW_ATTR_PUBLIC_ACCESS);
8291 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
8292 &s->bucket_info.objv_tracker,
8293 s->yield);
8294 return op_ret;
8295 });
8296 }