1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab ft=cpp
3
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <system_error>
7 #include <unistd.h>
8
9 #include <sstream>
10
11 #include <boost/algorithm/string/predicate.hpp>
12 #include <boost/bind.hpp>
13 #include <boost/optional.hpp>
14 #include <boost/utility/in_place_factory.hpp>
15 #include <boost/utility/string_view.hpp>
16
17 #include "include/scope_guard.h"
18 #include "common/Clock.h"
19 #include "common/armor.h"
20 #include "common/errno.h"
21 #include "common/mime.h"
22 #include "common/utf8.h"
23 #include "common/ceph_json.h"
24 #include "common/static_ptr.h"
25
26 #include "rgw_rados.h"
27 #include "rgw_zone.h"
28 #include "rgw_op.h"
29 #include "rgw_rest.h"
30 #include "rgw_acl.h"
31 #include "rgw_acl_s3.h"
32 #include "rgw_acl_swift.h"
33 #include "rgw_aio_throttle.h"
34 #include "rgw_user.h"
35 #include "rgw_bucket.h"
36 #include "rgw_log.h"
37 #include "rgw_multi.h"
38 #include "rgw_multi_del.h"
39 #include "rgw_cors.h"
40 #include "rgw_cors_s3.h"
41 #include "rgw_rest_conn.h"
42 #include "rgw_rest_s3.h"
43 #include "rgw_tar.h"
44 #include "rgw_client_io.h"
45 #include "rgw_compression.h"
46 #include "rgw_role.h"
47 #include "rgw_tag_s3.h"
48 #include "rgw_putobj_processor.h"
49 #include "rgw_crypt.h"
50 #include "rgw_perf_counters.h"
51 #include "rgw_notify.h"
52 #include "rgw_notify_event_type.h"
53
54 #include "services/svc_zone.h"
55 #include "services/svc_quota.h"
56 #include "services/svc_sys_obj.h"
57
58 #include "cls/lock/cls_lock_client.h"
59 #include "cls/rgw/cls_rgw_client.h"
60
61
62 #include "include/ceph_assert.h"
63
64 #include "compressor/Compressor.h"
65
66 #ifdef WITH_LTTNG
67 #define TRACEPOINT_DEFINE
68 #define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
69 #include "tracing/rgw_op.h"
70 #undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
71 #undef TRACEPOINT_DEFINE
72 #else
73 #define tracepoint(...)
74 #endif
75
76 #define dout_context g_ceph_context
77 #define dout_subsys ceph_subsys_rgw
78
79 using namespace librados;
80 using ceph::crypto::MD5;
81 using boost::optional;
82 using boost::none;
83
84 using rgw::ARN;
85 using rgw::IAM::Effect;
86 using rgw::IAM::Policy;
87
88 static string mp_ns = RGW_OBJ_NS_MULTIPART;
89 static string shadow_ns = RGW_OBJ_NS_SHADOW;
90
91 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
92
93 static MultipartMetaFilter mp_filter;
94
95 // this probably should belong in rgw_iam_policy_keywords; I'll get to it
96 // at some point
97 static constexpr auto S3_EXISTING_OBJTAG = "s3:ExistingObjectTag";
98
99 int RGWGetObj::parse_range(void)
100 {
101 int r = -ERANGE;
102 string rs(range_str);
103 string ofs_str;
104 string end_str;
105
106 ignore_invalid_range = s->cct->_conf->rgw_ignore_get_invalid_range;
107 partial_content = false;
108
109 size_t pos = rs.find("bytes=");
110 if (pos == string::npos) {
111 pos = 0;
112 while (isspace(rs[pos]))
113 pos++;
114 int end = pos;
115 while (isalpha(rs[end]))
116 end++;
117 if (strncasecmp(rs.c_str(), "bytes", end - pos) != 0)
118 return 0;
119 while (isspace(rs[end]))
120 end++;
121 if (rs[end] != '=')
122 return 0;
123 rs = rs.substr(end + 1);
124 } else {
125 rs = rs.substr(pos + 6); /* size of("bytes=") */
126 }
127 pos = rs.find('-');
128 if (pos == string::npos)
129 goto done;
130
131 partial_content = true;
132
133 ofs_str = rs.substr(0, pos);
134 end_str = rs.substr(pos + 1);
135 if (end_str.length()) {
136 end = atoll(end_str.c_str());
137 if (end < 0)
138 goto done;
139 }
140
141 if (ofs_str.length()) {
142 ofs = atoll(ofs_str.c_str());
143 } else { // RFC2616 suffix-byte-range-spec
144 ofs = -end;
145 end = -1;
146 }
147
148 if (end >= 0 && end < ofs)
149 goto done;
150
151 range_parsed = true;
152 return 0;
153
154 done:
155 if (ignore_invalid_range) {
156 partial_content = false;
157 ofs = 0;
158 end = -1;
159 range_parsed = false; // allow retry
160 r = 0;
161 }
162
163 return r;
164 }
165
166 static int decode_policy(CephContext *cct,
167 bufferlist& bl,
168 RGWAccessControlPolicy *policy)
169 {
170 auto iter = bl.cbegin();
171 try {
172 policy->decode(iter);
173 } catch (buffer::error& err) {
174 ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
175 return -EIO;
176 }
177 if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
178 ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
179 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
180 s3policy->to_xml(*_dout);
181 *_dout << dendl;
182 }
183 return 0;
184 }
185
186
187 static int get_user_policy_from_attr(CephContext * const cct,
188 rgw::sal::RGWRadosStore * const store,
189 map<string, bufferlist>& attrs,
190 RGWAccessControlPolicy& policy /* out */)
191 {
192 auto aiter = attrs.find(RGW_ATTR_ACL);
193 if (aiter != attrs.end()) {
194 int ret = decode_policy(cct, aiter->second, &policy);
195 if (ret < 0) {
196 return ret;
197 }
198 } else {
199 return -ENOENT;
200 }
201
202 return 0;
203 }
204
205 /**
206  * Get the AccessControlPolicy for a bucket off of disk.
207  * policy: must point to a valid RGWAccessControlPolicy, and will be filled upon return.
208  * bucket_info: information about the bucket, including its owner.
209  * bucket_attrs: the bucket's xattrs, from which RGW_ATTR_ACL is read.
210 * Returns: 0 on success, -ERR# otherwise.
211 */
212 int rgw_op_get_bucket_policy_from_attr(CephContext *cct,
213 rgw::sal::RGWRadosStore *store,
214 RGWBucketInfo& bucket_info,
215 map<string, bufferlist>& bucket_attrs,
216 RGWAccessControlPolicy *policy)
217 {
218 map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
219
220 if (aiter != bucket_attrs.end()) {
221 int ret = decode_policy(cct, aiter->second, policy);
222 if (ret < 0)
223 return ret;
224 } else {
225 ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
226 rgw::sal::RGWRadosUser user(store);
227 /* object exists, but policy is broken */
228 int r = user.get_by_id(bucket_info.owner, null_yield);
229 if (r < 0)
230 return r;
231
232 policy->create_default(bucket_info.owner, user.get_display_name());
233 }
234 return 0;
235 }
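//
// Illustrative only (not part of the original source): a minimal sketch of how a
// caller holding a req_state 's' might fetch a bucket ACL with the helper above,
// mirroring read_bucket_policy() further down in this file. The surrounding
// variable names are assumptions made for the example.
//
//   RGWAccessControlPolicy policy(s->cct);
//   int r = rgw_op_get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
//                                              s->bucket_attrs, &policy);
//   if (r == -ENOENT) {
//     r = -ERR_NO_SUCH_BUCKET;  // no ACL attr and no bucket owner to fall back on
//   }
//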
236
237 static int get_obj_policy_from_attr(CephContext *cct,
238 rgw::sal::RGWRadosStore *store,
239 RGWObjectCtx& obj_ctx,
240 RGWBucketInfo& bucket_info,
241 map<string, bufferlist>& bucket_attrs,
242 RGWAccessControlPolicy *policy,
243 string *storage_class,
244 rgw_obj& obj,
245 optional_yield y)
246 {
247 bufferlist bl;
248 int ret = 0;
249
250 RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
251 RGWRados::Object::Read rop(&op_target);
252
253 ret = rop.get_attr(RGW_ATTR_ACL, bl, y);
254 if (ret >= 0) {
255 ret = decode_policy(cct, bl, policy);
256 if (ret < 0)
257 return ret;
258 } else if (ret == -ENODATA) {
259 /* object exists, but policy is broken */
260 ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
261 rgw::sal::RGWRadosUser user(store);
262 ret = user.get_by_id(bucket_info.owner, y);
263 if (ret < 0)
264 return ret;
265
266 policy->create_default(bucket_info.owner, user.get_display_name());
267 }
268
269 if (storage_class) {
270 bufferlist scbl;
271 int r = rop.get_attr(RGW_ATTR_STORAGE_CLASS, scbl, y);
272 if (r >= 0) {
273 *storage_class = scbl.to_str();
274 } else {
275 storage_class->clear();
276 }
277 }
278
279 return ret;
280 }
281
282
283 static boost::optional<Policy> get_iam_policy_from_attr(CephContext* cct,
284 rgw::sal::RGWRadosStore* store,
285 map<string, bufferlist>& attrs,
286 const string& tenant) {
287 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
288 if (i != attrs.end()) {
289 return Policy(cct, tenant, i->second);
290 } else {
291 return none;
292 }
293 }
294
295 static boost::optional<PublicAccessBlockConfiguration>
296 get_public_access_conf_from_attr(const map<string, bufferlist>& attrs)
297 {
298 if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS);
299 aiter != attrs.end()) {
300 bufferlist::const_iterator iter{&aiter->second};
301 PublicAccessBlockConfiguration access_conf;
302 try {
303 access_conf.decode(iter);
304 } catch (const buffer::error& e) {
305 return boost::none;
306 }
307 return access_conf;
308 }
309 return boost::none;
310 }
311
312 vector<Policy> get_iam_user_policy_from_attr(CephContext* cct,
313 rgw::sal::RGWRadosStore* store,
314 map<string, bufferlist>& attrs,
315 const string& tenant) {
316 vector<Policy> policies;
317 if (auto it = attrs.find(RGW_ATTR_USER_POLICY); it != attrs.end()) {
318 bufferlist out_bl = attrs[RGW_ATTR_USER_POLICY];
319 map<string, string> policy_map;
320 decode(policy_map, out_bl);
321 for (auto& it : policy_map) {
322 bufferlist bl = bufferlist::static_from_string(it.second);
323 Policy p(cct, tenant, bl);
324 policies.push_back(std::move(p));
325 }
326 }
327 return policies;
328 }
329
330 static int get_obj_attrs(rgw::sal::RGWRadosStore *store, struct req_state *s, const rgw_obj& obj, map<string, bufferlist>& attrs, rgw_obj *target_obj = nullptr)
331 {
332 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
333 RGWRados::Object::Read read_op(&op_target);
334
335 read_op.params.attrs = &attrs;
336 read_op.params.target_obj = target_obj;
337
338 return read_op.prepare(s->yield);
339 }
340
341 static int get_obj_head(rgw::sal::RGWRadosStore *store, struct req_state *s,
342 const rgw_obj& obj,
343 map<string, bufferlist> *attrs,
344 bufferlist *pbl)
345 {
346 store->getRados()->set_prefetch_data(s->obj_ctx, obj);
347
348 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
349 RGWRados::Object::Read read_op(&op_target);
350
351 read_op.params.attrs = attrs;
352
353 int ret = read_op.prepare(s->yield);
354 if (ret < 0) {
355 return ret;
356 }
357
358 if (!pbl) {
359 return 0;
360 }
361
362 ret = read_op.read(0, s->cct->_conf->rgw_max_chunk_size, *pbl, s->yield);
363
364 return 0;
365 }
366
367 struct multipart_upload_info
368 {
369 rgw_placement_rule dest_placement;
370
371 void encode(bufferlist& bl) const {
372 ENCODE_START(1, 1, bl);
373 encode(dest_placement, bl);
374 ENCODE_FINISH(bl);
375 }
376
377 void decode(bufferlist::const_iterator& bl) {
378 DECODE_START(1, bl);
379 decode(dest_placement, bl);
380 DECODE_FINISH(bl);
381 }
382 };
383 WRITE_CLASS_ENCODER(multipart_upload_info)
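//
// Illustrative only: WRITE_CLASS_ENCODER() generates free encode()/decode()
// overloads for multipart_upload_info, so the struct round-trips through a
// bufferlist like any other Ceph-encodable type. A minimal sketch (the source
// of the placement rule is an assumption for the example):
//
//   multipart_upload_info info;
//   info.dest_placement = s->dest_placement;   // example placement rule
//   bufferlist bl;
//   encode(info, bl);                          // versioned ENCODE_START/FINISH framing
//
//   multipart_upload_info out;
//   auto p = bl.cbegin();
//   decode(out, p);                            // may throw buffer::error on bad input
//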
384
385 static int get_multipart_info(rgw::sal::RGWRadosStore *store, struct req_state *s,
386 const rgw_obj& obj,
387 RGWAccessControlPolicy *policy,
388 map<string, bufferlist> *attrs,
389 multipart_upload_info *upload_info)
390 {
391 bufferlist header;
392
393 bufferlist headbl;
394 bufferlist *pheadbl = (upload_info ? &headbl : nullptr);
395
396 int op_ret = get_obj_head(store, s, obj, attrs, pheadbl);
397 if (op_ret < 0) {
398 if (op_ret == -ENOENT) {
399 return -ERR_NO_SUCH_UPLOAD;
400 }
401 return op_ret;
402 }
403
404 if (upload_info && headbl.length() > 0) {
405 auto hiter = headbl.cbegin();
406 try {
407 decode(*upload_info, hiter);
408 } catch (buffer::error& err) {
409 ldpp_dout(s, 0) << "ERROR: failed to decode multipart upload info" << dendl;
410 return -EIO;
411 }
412 }
413
414 if (policy && attrs) {
415 for (auto& iter : *attrs) {
416 string name = iter.first;
417 if (name.compare(RGW_ATTR_ACL) == 0) {
418 bufferlist& bl = iter.second;
419 auto bli = bl.cbegin();
420 try {
421 decode(*policy, bli);
422 } catch (buffer::error& err) {
423 ldpp_dout(s, 0) << "ERROR: could not decode policy" << dendl;
424 return -EIO;
425 }
426 break;
427 }
428 }
429 }
430
431 return 0;
432 }
433
434 static int get_multipart_info(rgw::sal::RGWRadosStore *store, struct req_state *s,
435 const string& meta_oid,
436 RGWAccessControlPolicy *policy,
437 map<string, bufferlist> *attrs,
438 multipart_upload_info *upload_info)
439 {
440 map<string, bufferlist>::iterator iter;
441 bufferlist header;
442
443 rgw_obj meta_obj;
444 meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
445 meta_obj.set_in_extra_data(true);
446
447 return get_multipart_info(store, s, meta_obj, policy, attrs, upload_info);
448 }
449
450 static int modify_obj_attr(rgw::sal::RGWRadosStore *store, struct req_state *s, const rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
451 {
452 map<string, bufferlist> attrs;
453 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
454 RGWRados::Object::Read read_op(&op_target);
455
456 read_op.params.attrs = &attrs;
457
458 int r = read_op.prepare(s->yield);
459 if (r < 0) {
460 return r;
461 }
462 store->getRados()->set_atomic(s->obj_ctx, read_op.state.obj);
463 attrs[attr_name] = attr_val;
464 return store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL, s->yield);
465 }
466
467 static int read_bucket_policy(rgw::sal::RGWRadosStore *store,
468 struct req_state *s,
469 RGWBucketInfo& bucket_info,
470 map<string, bufferlist>& bucket_attrs,
471 RGWAccessControlPolicy *policy,
472 rgw_bucket& bucket)
473 {
474 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
475 ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
476 << " is suspended" << dendl;
477 return -ERR_USER_SUSPENDED;
478 }
479
480 if (bucket.name.empty()) {
481 return 0;
482 }
483
484 int ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
485 if (ret == -ENOENT) {
486 ret = -ERR_NO_SUCH_BUCKET;
487 }
488
489 return ret;
490 }
491
492 static int read_obj_policy(rgw::sal::RGWRadosStore *store,
493 struct req_state *s,
494 RGWBucketInfo& bucket_info,
495 map<string, bufferlist>& bucket_attrs,
496 RGWAccessControlPolicy* acl,
497 string *storage_class,
498 boost::optional<Policy>& policy,
499 rgw_bucket& bucket,
500 rgw_obj_key& object)
501 {
502 string upload_id;
503 upload_id = s->info.args.get("uploadId");
504 rgw_obj obj;
505
506 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
507 ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
508 << " is suspended" << dendl;
509 return -ERR_USER_SUSPENDED;
510 }
511
512 if (!upload_id.empty()) {
513 /* multipart upload */
514 RGWMPObj mp(object.name, upload_id);
515 string oid = mp.get_meta();
516 obj.init_ns(bucket, oid, mp_ns);
517 obj.set_in_extra_data(true);
518 } else {
519 obj = rgw_obj(bucket, object);
520 }
521 policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);
522
523 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
524 int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
525 bucket_info, bucket_attrs, acl, storage_class, obj, s->yield);
526 if (ret == -ENOENT) {
527     /* object does not exist; check the bucket's ACL to make sure
528        that we send a proper error code */
529 RGWAccessControlPolicy bucket_policy(s->cct);
530 ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
531 if (ret < 0) {
532 return ret;
533 }
534 const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
535 if (bucket_owner.compare(s->user->get_id()) != 0 &&
536 ! s->auth.identity->is_admin_of(bucket_owner)) {
537 if (policy) {
538 auto r = policy->eval(s->env, *s->auth.identity, rgw::IAM::s3ListBucket, ARN(bucket));
539 if (r == Effect::Allow)
540 return -ENOENT;
541 if (r == Effect::Deny)
542 return -EACCES;
543 }
544 if (! bucket_policy.verify_permission(s, *s->auth.identity, s->perm_mask, RGW_PERM_READ))
545 ret = -EACCES;
546 else
547 ret = -ENOENT;
548 } else {
549 ret = -ENOENT;
550 }
551 }
552
553 return ret;
554 }
555
556 /**
557  * Build the user and bucket ACLs for this request off of disk, along with the
558  * bucket's IAM policy and the user's IAM policies, when present.
559  * s: The req_state to draw information from.
560 * Returns: 0 on success, -ERR# otherwise.
561 */
562 int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s)
563 {
564 int ret = 0;
565 rgw_obj_key obj;
566 auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
567
568 string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
569 if (!bi.empty()) {
570 string bucket_name;
571 ret = rgw_bucket_parse_bucket_instance(bi, &bucket_name, &s->bucket_instance_id, &s->bucket_instance_shard_id);
572 if (ret < 0) {
573 return ret;
574 }
575 }
576
577 if(s->dialect.compare("s3") == 0) {
578 s->bucket_acl = std::make_unique<RGWAccessControlPolicy_S3>(s->cct);
579 } else if(s->dialect.compare("swift") == 0) {
580 /* We aren't allocating the account policy for those operations using
581      * Swift's infrastructure that don't really need req_state::user.
582      * A typical example here is the implementation of /info. */
583 if (!s->user->get_id().empty()) {
584 s->user_acl = std::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
585 }
586 s->bucket_acl = std::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
587 } else {
588 s->bucket_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
589 }
590
591 /* check if copy source is within the current domain */
592 if (!s->src_bucket_name.empty()) {
593 RGWBucketInfo source_info;
594
595 if (s->bucket_instance_id.empty()) {
596 ret = store->getRados()->get_bucket_info(store->svc(), s->src_tenant_name, s->src_bucket_name, source_info, NULL, s->yield);
597 } else {
598 ret = store->getRados()->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL, s->yield);
599 }
600 if (ret == 0) {
601 string& zonegroup = source_info.zonegroup;
602 s->local_source = store->svc()->zone->get_zonegroup().equals(zonegroup);
603 }
604 }
605
606 struct {
607 rgw_user uid;
608 std::string display_name;
609 } acct_acl_user = {
610 s->user->get_id(),
611 s->user->get_display_name(),
612 };
613
614 if (!s->bucket_name.empty()) {
615 s->bucket_exists = true;
616
617 auto b = rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id));
618
619 RGWObjVersionTracker ep_ot;
620 ret = store->ctl()->bucket->read_bucket_info(b, &s->bucket_info,
621 s->yield,
622 RGWBucketCtl::BucketInstance::GetParams()
623 .set_mtime(&s->bucket_mtime)
624 .set_attrs(&s->bucket_attrs),
625 &ep_ot);
626 if (ret < 0) {
627 if (ret != -ENOENT) {
628 string bucket_log;
629 bucket_log = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name);
630 ldpp_dout(s, 0) << "NOTICE: couldn't get bucket from bucket_name (name="
631 << bucket_log << ")" << dendl;
632 return ret;
633 }
634 s->bucket_exists = false;
635 }
636 s->bucket_ep_objv = ep_ot.read_version;
637 s->bucket = s->bucket_info.bucket;
638
639 if (s->bucket_exists) {
640 ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs,
641 s->bucket_acl.get(), s->bucket);
642 acct_acl_user = {
643 s->bucket_info.owner,
644 s->bucket_acl->get_owner().get_display_name(),
645 };
646 } else {
647 return -ERR_NO_SUCH_BUCKET;
648 }
649
650 s->bucket_owner = s->bucket_acl->get_owner();
651
652 RGWZoneGroup zonegroup;
653 int r = store->svc()->zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
654 if (!r) {
655 if (!zonegroup.endpoints.empty()) {
656 s->zonegroup_endpoint = zonegroup.endpoints.front();
657 } else {
658 // use zonegroup's master zone endpoints
659 auto z = zonegroup.zones.find(zonegroup.master_zone);
660 if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
661 s->zonegroup_endpoint = z->second.endpoints.front();
662 }
663 }
664 s->zonegroup_name = zonegroup.get_name();
665 }
666 if (r < 0 && ret == 0) {
667 ret = r;
668 }
669
670 if (s->bucket_exists && !store->svc()->zone->get_zonegroup().equals(s->bucket_info.zonegroup)) {
671 ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup ("
672 << s->bucket_info.zonegroup << " != "
673 << store->svc()->zone->get_zonegroup().get_id() << ")" << dendl;
674 /* we now need to make sure that the operation actually requires copy source, that is
675 * it's a copy operation
676 */
677 if (store->svc()->zone->get_zonegroup().is_master_zonegroup() && s->system_request) {
678 /*If this is the master, don't redirect*/
679 } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
680 /* If op is get bucket location, don't redirect */
681 } else if (!s->local_source ||
682 (s->op != OP_PUT && s->op != OP_COPY) ||
683 s->object.empty()) {
684 return -ERR_PERMANENT_REDIRECT;
685 }
686 }
687
688 /* init dest placement -- only if bucket exists, otherwise request is either not relevant, or
689 * it's a create_bucket request, in which case the op will deal with the placement later */
690 if (s->bucket_exists) {
691 s->dest_placement.storage_class = s->info.storage_class;
692 s->dest_placement.inherit_from(s->bucket_info.placement_rule);
693
694 if (!store->svc()->zone->get_zone_params().valid_placement(s->dest_placement)) {
695 ldpp_dout(s, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl;
696 return -EINVAL;
697 }
698 }
699
700 if(s->bucket_exists) {
701 s->bucket_access_conf = get_public_access_conf_from_attr(s->bucket_attrs);
702 }
703 }
704
705 /* handle user ACL only for those APIs which support it */
706 if (s->user_acl) {
707 map<string, bufferlist> uattrs;
708 ret = store->ctl()->user->get_attrs_by_uid(acct_acl_user.uid, &uattrs, s->yield);
709 if (!ret) {
710 ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
711 }
712 if (-ENOENT == ret) {
713       /* In already existing clusters users won't have an ACL. In that case,
714        * assuming that only the account owner has the rights seems
715        * reasonable, and it lets us keep a single verification path.
716        * NOTE: there is a small compatibility kludge for the global, empty tenant:
717        *  1. if we try to reach an existing bucket, its owner is considered
718        *     the account owner.
719        *  2. otherwise the account owner is the identity stored in s->user->user_id. */
720 s->user_acl->create_default(acct_acl_user.uid,
721 acct_acl_user.display_name);
722 ret = 0;
723 } else if (ret < 0) {
724 ldpp_dout(s, 0) << "NOTICE: couldn't get user attrs for handling ACL "
725 "(user_id=" << s->user->get_id() << ", ret=" << ret << ")" << dendl;
726 return ret;
727 }
728 }
729   // We don't need user policies in the case of an STS token returned by AssumeRole,
730   // hence the check on the identity type
731 if (! s->user->get_id().empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
732 try {
733 map<string, bufferlist> uattrs;
734 if (ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &uattrs, s->yield); ! ret) {
735 if (s->iam_user_policies.empty()) {
736 s->iam_user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->get_tenant());
737 } else {
738         // This scenario can happen when an STS token has a policy; then we need to append the
739         // other user policies to the existing ones (e.g. a token returned by GetSessionToken).
740 auto user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->get_tenant());
741 s->iam_user_policies.insert(s->iam_user_policies.end(), user_policies.begin(), user_policies.end());
742 }
743 } else {
744 if (ret == -ENOENT)
745 ret = 0;
746 else ret = -EACCES;
747 }
748 } catch (const std::exception& e) {
749 lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl;
750 ret = -EACCES;
751 }
752 }
753
754 try {
755 s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
756 s->bucket_tenant);
757 } catch (const std::exception& e) {
758 // Really this is a can't happen condition. We parse the policy
759 // when it's given to us, so perhaps we should abort or otherwise
760 // raise bloody murder.
761 ldpp_dout(s, 0) << "Error reading IAM Policy: " << e.what() << dendl;
762 ret = -EACCES;
763 }
764
765 bool success = store->svc()->zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
766 if (success) {
767 ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
768 }
769
770 return ret;
771 }
772
773 /**
774  * Get the AccessControlPolicy for the request's object off of disk.
775  * s: The req_state to draw information from.
776  * prefetch_data: If true, also set up prefetching of the object data.
777 * Returns: 0 on success, -ERR# otherwise.
778 */
779 int rgw_build_object_policies(rgw::sal::RGWRadosStore *store, struct req_state *s,
780 bool prefetch_data)
781 {
782 int ret = 0;
783
784 if (!s->object.empty()) {
785 if (!s->bucket_exists) {
786 return -ERR_NO_SUCH_BUCKET;
787 }
788 s->object_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
789 rgw_obj obj(s->bucket, s->object);
790
791 store->getRados()->set_atomic(s->obj_ctx, obj);
792 if (prefetch_data) {
793 store->getRados()->set_prefetch_data(s->obj_ctx, obj);
794 }
795 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
796 s->object_acl.get(), nullptr, s->iam_policy, s->bucket,
797 s->object);
798 }
799
800 return ret;
801 }
802
803 void rgw_add_to_iam_environment(rgw::IAM::Environment& e, std::string_view key, std::string_view val){
804   // This variant only adds entries with a non-empty key to the IAM env.; values can
805   // be empty in certain cases, like tagging
806 if (!key.empty())
807 e.emplace(key,val);
808 }
809
810 static int rgw_iam_add_tags_from_bl(struct req_state* s, bufferlist& bl){
811 RGWObjTags& tagset = s->tagset;
812 try {
813 auto bliter = bl.cbegin();
814 tagset.decode(bliter);
815 } catch (buffer::error& err) {
816 ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
817 return -EIO;
818 }
819
820 for (const auto& tag: tagset.get_tags()){
821 rgw_add_to_iam_environment(s->env, "s3:ExistingObjectTag/" + tag.first, tag.second);
822 }
823 return 0;
824 }
825
826 static int rgw_iam_add_existing_objtags(rgw::sal::RGWRadosStore* store, struct req_state* s, rgw_obj& obj, std::uint64_t action){
827 map <string, bufferlist> attrs;
828 store->getRados()->set_atomic(s->obj_ctx, obj);
829 int op_ret = get_obj_attrs(store, s, obj, attrs);
830 if (op_ret < 0)
831 return op_ret;
832 auto tags = attrs.find(RGW_ATTR_TAGS);
833 if (tags != attrs.end()){
834 return rgw_iam_add_tags_from_bl(s, tags->second);
835 }
836 return 0;
837 }
838
839 static void rgw_add_grant_to_iam_environment(rgw::IAM::Environment& e, struct req_state *s){
840
841 using header_pair_t = std::pair <const char*, const char*>;
842 static const std::initializer_list <header_pair_t> acl_header_conditionals {
843 {"HTTP_X_AMZ_GRANT_READ", "s3:x-amz-grant-read"},
844 {"HTTP_X_AMZ_GRANT_WRITE", "s3:x-amz-grant-write"},
845 {"HTTP_X_AMZ_GRANT_READ_ACP", "s3:x-amz-grant-read-acp"},
846 {"HTTP_X_AMZ_GRANT_WRITE_ACP", "s3:x-amz-grant-write-acp"},
847 {"HTTP_X_AMZ_GRANT_FULL_CONTROL", "s3:x-amz-grant-full-control"}
848 };
849
850 if (s->has_acl_header){
851 for (const auto& c: acl_header_conditionals){
852 auto hdr = s->info.env->get(c.first);
853 if(hdr) {
854 e[c.second] = hdr;
855 }
856 }
857 }
858 }
859
860 void rgw_build_iam_environment(rgw::sal::RGWRadosStore* store,
861 struct req_state* s)
862 {
863 const auto& m = s->info.env->get_map();
864 auto t = ceph::real_clock::now();
865 s->env.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t)));
866 s->env.emplace("aws:EpochTime", ceph::to_iso_8601(t));
867 // TODO: This is fine for now, but once we have STS we'll need to
868 // look and see. Also this won't work with the IdentityApplier
869 // model, since we need to know the actual credential.
870 s->env.emplace("aws:PrincipalType", "User");
871
872 auto i = m.find("HTTP_REFERER");
873 if (i != m.end()) {
874 s->env.emplace("aws:Referer", i->second);
875 }
876
877 if (rgw_transport_is_secure(s->cct, *s->info.env)) {
878 s->env.emplace("aws:SecureTransport", "true");
879 }
880
881 const auto remote_addr_param = s->cct->_conf->rgw_remote_addr_param;
882 if (remote_addr_param.length()) {
883 i = m.find(remote_addr_param);
884 } else {
885 i = m.find("REMOTE_ADDR");
886 }
887 if (i != m.end()) {
888 const string* ip = &(i->second);
889 string temp;
890 if (remote_addr_param == "HTTP_X_FORWARDED_FOR") {
891 const auto comma = ip->find(',');
892 if (comma != string::npos) {
893 temp.assign(*ip, 0, comma);
894 ip = &temp;
895 }
896 }
897 s->env.emplace("aws:SourceIp", *ip);
898 }
899
900 i = m.find("HTTP_USER_AGENT"); {
901 if (i != m.end())
902 s->env.emplace("aws:UserAgent", i->second);
903 }
904
905 if (s->user) {
906 // What to do about aws::userid? One can have multiple access
907 // keys so that isn't really suitable. Do we have a durable
908 // identifier that can persist through name changes?
909 s->env.emplace("aws:username", s->user->get_id().id);
910 }
911
912 i = m.find("HTTP_X_AMZ_SECURITY_TOKEN");
913 if (i != m.end()) {
914 s->env.emplace("sts:authentication", "true");
915 } else {
916 s->env.emplace("sts:authentication", "false");
917 }
918 }
919
920 void rgw_bucket_object_pre_exec(struct req_state *s)
921 {
922 if (s->expect_cont)
923 dump_continue(s);
924
925 dump_bucket_from_state(s);
926 }
927
928 // So! Now and then when we try to update bucket information, the
929 // bucket has changed during the course of the operation. (Or we have
930 // a cache consistency problem that Watch/Notify isn't ruling out
931 // completely.)
932 //
933 // When this happens, we need to update the bucket info and try
934 // again. We have, however, to try the right *part* again. We can't
935 // simply re-send, since that will obliterate the previous update.
936 //
937 // Thus, callers of this function should include everything that
938 // merges information to be changed into the bucket information as
939 // well as the call to set it.
940 //
941 // The called function must return an integer, negative on error. In
942 // general, they should just return op_ret.
943 namespace {
944 template<typename F>
945 int retry_raced_bucket_write(RGWRados* g, req_state* s, const F& f) {
946 auto r = f();
947 for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) {
948 r = g->try_refresh_bucket_info(s->bucket_info, nullptr,
949 &s->bucket_attrs);
950 if (r >= 0) {
951 r = f();
952 }
953 }
954 return r;
955 }
956 }
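//
// Illustrative only: the typical call pattern for retry_raced_bucket_write(),
// as used by the bucket-metadata ops later in this file (this sketch mirrors
// RGWPutBucketTags::execute()). The lambda both merges the change into the
// (possibly refreshed) bucket info/attrs and writes it back, so a -ECANCELED
// race can simply be retried:
//
//   op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
//     map<string, bufferlist> attrs = s->bucket_attrs;
//     attrs[RGW_ATTR_TAGS] = tags_bl;
//     return store->ctl()->bucket->set_bucket_instance_attrs(
//         s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
//   });
//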
957
958
959 int RGWGetObj::verify_permission()
960 {
961 obj = rgw_obj(s->bucket, s->object);
962 store->getRados()->set_atomic(s->obj_ctx, obj);
963 if (get_data) {
964 store->getRados()->set_prefetch_data(s->obj_ctx, obj);
965 }
966
967 if (torrent.get_flag()) {
968 if (obj.key.instance.empty()) {
969 action = rgw::IAM::s3GetObjectTorrent;
970 } else {
971 action = rgw::IAM::s3GetObjectVersionTorrent;
972 }
973 } else {
974 if (obj.key.instance.empty()) {
975 action = rgw::IAM::s3GetObject;
976 } else {
977 action = rgw::IAM::s3GetObjectVersion;
978 }
979 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG))
980 rgw_iam_add_existing_objtags(store, s, obj, action);
981 if (! s->iam_user_policies.empty()) {
982 for (auto& user_policy : s->iam_user_policies) {
983 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG))
984 rgw_iam_add_existing_objtags(store, s, obj, action);
985 }
986 }
987 }
988
989 if (!verify_object_permission(this, s, action)) {
990 return -EACCES;
991 }
992
993 if (s->bucket_info.obj_lock_enabled()) {
994 get_retention = verify_object_permission(this, s, rgw::IAM::s3GetObjectRetention);
995 get_legal_hold = verify_object_permission(this, s, rgw::IAM::s3GetObjectLegalHold);
996 }
997
998 return 0;
999 }
1000
1001 // cache the object's tags in the request
1002 // use inside try/catch, as decode() may throw
1003 void populate_tags_in_request(req_state* s, const std::map<std::string, bufferlist>& attrs) {
1004 const auto attr_iter = attrs.find(RGW_ATTR_TAGS);
1005 if (attr_iter != attrs.end()) {
1006 auto bliter = attr_iter->second.cbegin();
1007 decode(s->tagset, bliter);
1008 }
1009 }
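//
// Illustrative only: as the comment above notes, decode() may throw, so a
// caller is expected to wrap the call in try/catch, e.g.:
//
//   try {
//     populate_tags_in_request(s, attrs);
//   } catch (buffer::error& err) {
//     ldpp_dout(s, 0) << "ERROR: failed to decode RGW_ATTR_TAGS" << dendl;
//   }
//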
1010
1011 // cache the object's metadata in the request
1012 void populate_metadata_in_request(req_state* s, std::map<std::string, bufferlist>& attrs) {
1013 for (auto& attr : attrs) {
1014 if (boost::algorithm::starts_with(attr.first, RGW_ATTR_META_PREFIX)) {
1015 std::string_view key(attr.first);
1016 key.remove_prefix(sizeof(RGW_ATTR_PREFIX)-1);
1017 s->info.x_meta_map.emplace(key, attr.second.c_str());
1018 }
1019 }
1020 }
1021
1022 int RGWOp::verify_op_mask()
1023 {
1024 uint32_t required_mask = op_mask();
1025
1026 ldpp_dout(this, 20) << "required_mask= " << required_mask
1027 << " user.op_mask=" << s->user->get_info().op_mask << dendl;
1028
1029 if ((s->user->get_info().op_mask & required_mask) != required_mask) {
1030 return -EPERM;
1031 }
1032
1033 if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->svc()->zone->zone_is_writeable()) {
1034 ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
1035 "non-system user, permission denied" << dendl;
1036 return -EPERM;
1037 }
1038
1039 return 0;
1040 }
1041
1042 int RGWGetObjTags::verify_permission()
1043 {
1044 auto iam_action = s->object.instance.empty()?
1045 rgw::IAM::s3GetObjectTagging:
1046 rgw::IAM::s3GetObjectVersionTagging;
1047   // TODO: since we are parsing the bl now anyway, we should probably change
1048 // the send_response function to accept RGWObjTag instead of a bl
1049 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1050 rgw_obj obj = rgw_obj(s->bucket, s->object);
1051 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1052 }
1053 if (! s->iam_user_policies.empty()) {
1054 for (auto& user_policy : s->iam_user_policies) {
1055 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1056 rgw_obj obj = rgw_obj(s->bucket, s->object);
1057 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1058 }
1059 }
1060 }
1061 if (!verify_object_permission(this, s,iam_action))
1062 return -EACCES;
1063
1064 return 0;
1065 }
1066
1067 void RGWGetObjTags::pre_exec()
1068 {
1069 rgw_bucket_object_pre_exec(s);
1070 }
1071
1072 void RGWGetObjTags::execute()
1073 {
1074 rgw_obj obj;
1075 map<string,bufferlist> attrs;
1076
1077 obj = rgw_obj(s->bucket, s->object);
1078
1079 store->getRados()->set_atomic(s->obj_ctx, obj);
1080
1081 op_ret = get_obj_attrs(store, s, obj, attrs);
1082 if (op_ret < 0) {
1083 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj
1084 << " ret=" << op_ret << dendl;
1085 return;
1086 }
1087
1088 auto tags = attrs.find(RGW_ATTR_TAGS);
1089 if(tags != attrs.end()){
1090 has_tags = true;
1091 tags_bl.append(tags->second);
1092 }
1093 send_response_data(tags_bl);
1094 }
1095
1096 int RGWPutObjTags::verify_permission()
1097 {
1098 auto iam_action = s->object.instance.empty() ?
1099 rgw::IAM::s3PutObjectTagging:
1100 rgw::IAM::s3PutObjectVersionTagging;
1101
1102 if(s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1103 auto obj = rgw_obj(s->bucket, s->object);
1104 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1105 }
1106 if (! s->iam_user_policies.empty()) {
1107 for (auto& user_policy : s->iam_user_policies) {
1108 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1109 rgw_obj obj = rgw_obj(s->bucket, s->object);
1110 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1111 }
1112 }
1113 }
1114 if (!verify_object_permission(this, s,iam_action))
1115 return -EACCES;
1116 return 0;
1117 }
1118
1119 void RGWPutObjTags::execute()
1120 {
1121 op_ret = get_params();
1122 if (op_ret < 0)
1123 return;
1124
1125 if (s->object.empty()){
1126 op_ret= -EINVAL; // we only support tagging on existing objects
1127 return;
1128 }
1129
1130 rgw_obj obj;
1131 obj = rgw_obj(s->bucket, s->object);
1132 store->getRados()->set_atomic(s->obj_ctx, obj);
1133 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
1134 if (op_ret == -ECANCELED){
1135 op_ret = -ERR_TAG_CONFLICT;
1136 }
1137 }
1138
1139 void RGWDeleteObjTags::pre_exec()
1140 {
1141 rgw_bucket_object_pre_exec(s);
1142 }
1143
1144
1145 int RGWDeleteObjTags::verify_permission()
1146 {
1147 if (!s->object.empty()) {
1148 auto iam_action = s->object.instance.empty() ?
1149 rgw::IAM::s3DeleteObjectTagging:
1150 rgw::IAM::s3DeleteObjectVersionTagging;
1151
1152 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1153 auto obj = rgw_obj(s->bucket, s->object);
1154 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1155 }
1156 if (! s->iam_user_policies.empty()) {
1157 for (auto& user_policy : s->iam_user_policies) {
1158 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1159 auto obj = rgw_obj(s->bucket, s->object);
1160 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1161 }
1162 }
1163 }
1164 if (!verify_object_permission(this, s, iam_action))
1165 return -EACCES;
1166 }
1167 return 0;
1168 }
1169
1170 void RGWDeleteObjTags::execute()
1171 {
1172 if (s->object.empty())
1173 return;
1174
1175 rgw_obj obj;
1176 obj = rgw_obj(s->bucket, s->object);
1177 store->getRados()->set_atomic(s->obj_ctx, obj);
1178 map <string, bufferlist> attrs;
1179 map <string, bufferlist> rmattr;
1180 bufferlist bl;
1181 rmattr[RGW_ATTR_TAGS] = bl;
1182 op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr, s->yield);
1183 }
1184
1185 int RGWGetBucketTags::verify_permission()
1186 {
1187
1188 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketTagging)) {
1189 return -EACCES;
1190 }
1191
1192 return 0;
1193 }
1194
1195 void RGWGetBucketTags::pre_exec()
1196 {
1197 rgw_bucket_object_pre_exec(s);
1198 }
1199
1200 void RGWGetBucketTags::execute()
1201 {
1202 auto iter = s->bucket_attrs.find(RGW_ATTR_TAGS);
1203 if (iter != s->bucket_attrs.end()) {
1204 has_tags = true;
1205 tags_bl.append(iter->second);
1206 } else {
1207 op_ret = -ERR_NO_SUCH_TAG_SET;
1208 }
1209 send_response_data(tags_bl);
1210 }
1211
1212 int RGWPutBucketTags::verify_permission() {
1213 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketTagging);
1214 }
1215
1216 void RGWPutBucketTags::execute() {
1217
1218 op_ret = get_params();
1219 if (op_ret < 0)
1220 return;
1221
1222 if (!store->svc()->zone->is_meta_master()) {
1223 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
1224 if (op_ret < 0) {
1225 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
1226 }
1227 }
1228
1229 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
1230 map<string, bufferlist> attrs = s->bucket_attrs;
1231 attrs[RGW_ATTR_TAGS] = tags_bl;
1232 return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
1233 });
1234
1235 }
1236
1237 void RGWDeleteBucketTags::pre_exec()
1238 {
1239 rgw_bucket_object_pre_exec(s);
1240 }
1241
1242 int RGWDeleteBucketTags::verify_permission()
1243 {
1244 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketTagging);
1245 }
1246
1247 void RGWDeleteBucketTags::execute()
1248 {
1249 if (!store->svc()->zone->is_meta_master()) {
1250 bufferlist in_data;
1251 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
1252 if (op_ret < 0) {
1253 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
1254 return;
1255 }
1256 }
1257
1258 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
1259 map<string, bufferlist> attrs = s->bucket_attrs;
1260 attrs.erase(RGW_ATTR_TAGS);
1261 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
1262 if (op_ret < 0) {
1263 ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket="
1264 << s->bucket.name
1265 << " returned err= " << op_ret << dendl;
1266 }
1267 return op_ret;
1268 });
1269 }
1270
1271 int RGWGetBucketReplication::verify_permission()
1272 {
1273 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetReplicationConfiguration)) {
1274 return -EACCES;
1275 }
1276
1277 return 0;
1278 }
1279
1280 void RGWGetBucketReplication::pre_exec()
1281 {
1282 rgw_bucket_object_pre_exec(s);
1283 }
1284
1285 void RGWGetBucketReplication::execute()
1286 {
1287 send_response_data();
1288 }
1289
1290 int RGWPutBucketReplication::verify_permission() {
1291 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutReplicationConfiguration);
1292 }
1293
1294 void RGWPutBucketReplication::execute() {
1295
1296 op_ret = get_params();
1297 if (op_ret < 0)
1298 return;
1299
1300 if (!store->svc()->zone->is_meta_master()) {
1301 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
1302 if (op_ret < 0) {
1303 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
1304 return;
1305 }
1306 }
1307
1308 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
1309 auto sync_policy = (s->bucket_info.sync_policy ? *s->bucket_info.sync_policy : rgw_sync_policy_info());
1310
1311 for (auto& group : sync_policy_groups) {
1312 sync_policy.groups[group.id] = group;
1313 }
1314
1315 s->bucket_info.set_sync_policy(std::move(sync_policy));
1316
1317 int ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
1318 &s->bucket_attrs);
1319 if (ret < 0) {
1320 ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket_info.bucket.get_key() << ") returned ret=" << ret << dendl;
1321 return ret;
1322 }
1323
1324 return 0;
1325 });
1326 }
1327
1328 void RGWDeleteBucketReplication::pre_exec()
1329 {
1330 rgw_bucket_object_pre_exec(s);
1331 }
1332
1333 int RGWDeleteBucketReplication::verify_permission()
1334 {
1335 return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteReplicationConfiguration);
1336 }
1337
1338 void RGWDeleteBucketReplication::execute()
1339 {
1340 if (!store->svc()->zone->is_meta_master()) {
1341 bufferlist in_data;
1342 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
1343 if (op_ret < 0) {
1344 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
1345 return;
1346 }
1347 }
1348
1349 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
1350 if (!s->bucket_info.sync_policy) {
1351 return 0;
1352 }
1353
1354 rgw_sync_policy_info sync_policy = *s->bucket_info.sync_policy;
1355
1356 update_sync_policy(&sync_policy);
1357
1358 s->bucket_info.set_sync_policy(std::move(sync_policy));
1359
1360 int ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
1361 &s->bucket_attrs);
1362 if (ret < 0) {
1363 ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket_info.bucket.get_key() << ") returned ret=" << ret << dendl;
1364 return ret;
1365 }
1366
1367 return 0;
1368 });
1369 }
1370
1371 int RGWOp::do_aws4_auth_completion()
1372 {
1373 ldpp_dout(this, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
1374 if (s->auth.completer) {
1375 if (!s->auth.completer->complete()) {
1376 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
1377 } else {
1378 ldpp_dout(this, 10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
1379 }
1380
1381     /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only the first
1382      * call passes, so we disable the second one. This is old behaviour, sorry!
1383 * Plan for tomorrow: seek and destroy. */
1384 s->auth.completer = nullptr;
1385 }
1386
1387 return 0;
1388 }
1389
1390 int RGWOp::init_quota()
1391 {
1392 /* no quota enforcement for system requests */
1393 if (s->system_request)
1394 return 0;
1395
1396 /* init quota related stuff */
1397 if (!(s->user->get_info().op_mask & RGW_OP_TYPE_MODIFY)) {
1398 return 0;
1399 }
1400
1401 /* only interested in object related ops */
1402 if (s->object.empty()) {
1403 return 0;
1404 }
1405
1406 rgw::sal::RGWRadosUser owner_user(store);
1407 rgw::sal::RGWUser *user;
1408
1409 if (s->user->get_id() == s->bucket_owner.get_id()) {
1410 user = s->user;
1411 } else {
1412 int r = owner_user.get_by_id(s->bucket_info.owner, s->yield);
1413 if (r < 0)
1414 return r;
1415 user = &owner_user;
1416 }
1417
1418 if (s->bucket_info.quota.enabled) {
1419 bucket_quota = s->bucket_info.quota;
1420 } else if (user->get_info().bucket_quota.enabled) {
1421 bucket_quota = user->get_info().bucket_quota;
1422 } else {
1423 bucket_quota = store->svc()->quota->get_bucket_quota();
1424 }
1425
1426 if (user->get_info().user_quota.enabled) {
1427 user_quota = user->get_info().user_quota;
1428 } else {
1429 user_quota = store->svc()->quota->get_user_quota();
1430 }
1431
1432 return 0;
1433 }
1434
1435 static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
1436 uint8_t flags = 0;
1437
1438 if (!req_meth) {
1439 dout(5) << "req_meth is null" << dendl;
1440 return false;
1441 }
1442
1443 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
1444 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
1445 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
1446 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
1447 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
1448
1449 if (rule->get_allowed_methods() & flags) {
1450 dout(10) << "Method " << req_meth << " is supported" << dendl;
1451 } else {
1452 dout(5) << "Method " << req_meth << " is not supported" << dendl;
1453 return false;
1454 }
1455
1456 return true;
1457 }
1458
1459 static bool validate_cors_rule_header(RGWCORSRule *rule, const char *req_hdrs) {
1460 if (req_hdrs) {
1461 vector<string> hdrs;
1462 get_str_vec(req_hdrs, hdrs);
1463 for (const auto& hdr : hdrs) {
1464 if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) {
1465 dout(5) << "Header " << hdr << " is not registered in this rule" << dendl;
1466 return false;
1467 }
1468 }
1469 }
1470 return true;
1471 }
1472
1473 int RGWOp::read_bucket_cors()
1474 {
1475 bufferlist bl;
1476
1477 map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
1478 if (aiter == s->bucket_attrs.end()) {
1479 ldpp_dout(this, 20) << "no CORS configuration attr found" << dendl;
1480 cors_exist = false;
1481 return 0; /* no CORS configuration found */
1482 }
1483
1484 cors_exist = true;
1485
1486 bl = aiter->second;
1487
1488 auto iter = bl.cbegin();
1489 try {
1490 bucket_cors.decode(iter);
1491 } catch (buffer::error& err) {
1492 ldpp_dout(this, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
1493 return -EIO;
1494 }
1495 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
1496 RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
1497 ldpp_dout(this, 15) << "Read RGWCORSConfiguration";
1498 s3cors->to_xml(*_dout);
1499 *_dout << dendl;
1500 }
1501 return 0;
1502 }
1503
1504 /** CORS 6.2.6.
1505  * If any of the header field-names is not an ASCII case-insensitive match for
1506  * any of the values in the list of headers, do not set any additional headers and
1507 * terminate this set of steps.
1508 * */
1509 static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
1510 if (req_hdrs) {
1511 list<string> hl;
1512 get_str_list(req_hdrs, hl);
1513 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
1514 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
1515 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
1516 } else {
1517 if (hdrs.length() > 0) hdrs.append(",");
1518 hdrs.append((*it));
1519 }
1520 }
1521 }
1522 rule->format_exp_headers(exp_hdrs);
1523 *max_age = rule->get_max_age();
1524 }
1525
1526 /**
1527 * Generate the CORS header response
1528 *
1529 * This is described in the CORS standard, section 6.2.
1530 */
1531 bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
1532 {
1533 /* CORS 6.2.1. */
1534 const char *orig = s->info.env->get("HTTP_ORIGIN");
1535 if (!orig) {
1536 return false;
1537 }
1538
1539 /* Custom: */
1540 origin = orig;
1541 int temp_op_ret = read_bucket_cors();
1542 if (temp_op_ret < 0) {
1543 op_ret = temp_op_ret;
1544 return false;
1545 }
1546
1547 if (!cors_exist) {
1548 ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
1549 return false;
1550 }
1551
1552 /* CORS 6.2.2. */
1553 RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
1554 if (!rule)
1555 return false;
1556
1557 /*
1558    * Set the Allowed-Origin header to an asterisk if this is allowed in the rule
1559    * and no Authorization header was sent by the client.
1560 *
1561 * The origin parameter specifies a URI that may access the resource. The browser must enforce this.
1562 * For requests without credentials, the server may specify "*" as a wildcard,
1563 * thereby allowing any origin to access the resource.
1564 */
1565 const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
1566 if (!authorization && rule->has_wildcard_origin())
1567 origin = "*";
1568
1569 /* CORS 6.2.3. */
1570 const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
1571 if (!req_meth) {
1572 req_meth = s->info.method;
1573 }
1574
1575 if (req_meth) {
1576 method = req_meth;
1577 /* CORS 6.2.5. */
1578 if (!validate_cors_rule_method(rule, req_meth)) {
1579 return false;
1580 }
1581 }
1582
1583 /* CORS 6.2.4. */
1584 const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
1585
1586 /* CORS 6.2.6. */
1587 get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);
1588
1589 return true;
1590 }
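//
// Illustrative only: a REST handler would typically call generate_cors_headers()
// while building its response and, on success, emit the returned values as the
// Access-Control-* response headers (allowed origin, allowed method,
// allowed/exposed headers, and max age). The local variable names are
// assumptions made for this sketch:
//
//   string origin, method, headers, exp_headers;
//   unsigned max_age = 0;
//   if (generate_cors_headers(origin, method, headers, exp_headers, &max_age)) {
//     // dump origin/method/headers/exp_headers/max_age into the HTTP response
//   }
//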
1591
1592 int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
1593 const rgw_bucket_dir_entry& ent,
1594 RGWAccessControlPolicy * const bucket_acl,
1595 const boost::optional<Policy>& bucket_policy,
1596 const off_t start_ofs,
1597 const off_t end_ofs,
1598 bool swift_slo)
1599 {
1600 ldpp_dout(this, 20) << "user manifest obj=" << ent.key.name
1601 << "[" << ent.key.instance << "]" << dendl;
1602 RGWGetObj_CB cb(this);
1603 RGWGetObj_Filter* filter = &cb;
1604 boost::optional<RGWGetObj_Decompress> decompress;
1605
1606 int64_t cur_ofs = start_ofs;
1607 int64_t cur_end = end_ofs;
1608
1609 rgw_obj part(bucket, ent.key);
1610
1611 map<string, bufferlist> attrs;
1612
1613 uint64_t obj_size;
1614 RGWObjectCtx obj_ctx(store);
1615 RGWAccessControlPolicy obj_policy(s->cct);
1616
1617 ldpp_dout(this, 20) << "reading obj=" << part << " ofs=" << cur_ofs
1618 << " end=" << cur_end << dendl;
1619
1620 obj_ctx.set_atomic(part);
1621 store->getRados()->set_prefetch_data(&obj_ctx, part);
1622
1623 RGWRados::Object op_target(store->getRados(), s->bucket_info, obj_ctx, part);
1624 RGWRados::Object::Read read_op(&op_target);
1625
1626 if (!swift_slo) {
1627 /* SLO etag is optional */
1628 read_op.conds.if_match = ent.meta.etag.c_str();
1629 }
1630 read_op.params.attrs = &attrs;
1631 read_op.params.obj_size = &obj_size;
1632
1633 op_ret = read_op.prepare(s->yield);
1634 if (op_ret < 0)
1635 return op_ret;
1636 op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
1637 if (op_ret < 0)
1638 return op_ret;
1639 bool need_decompress;
1640 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
1641 if (op_ret < 0) {
1642 ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl;
1643 return -EIO;
1644 }
1645
1646 if (need_decompress)
1647 {
1648 if (cs_info.orig_size != ent.meta.accounted_size) {
1649 // hmm.. something wrong, object not as expected, abort!
1650 ldpp_dout(this, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size
1651 << ", actual read size=" << ent.meta.size << dendl;
1652 return -EIO;
1653 }
1654 decompress.emplace(s->cct, &cs_info, partial_content, filter);
1655 filter = &*decompress;
1656 }
1657 else
1658 {
1659 if (obj_size != ent.meta.size) {
1660 // hmm.. something wrong, object not as expected, abort!
1661 ldpp_dout(this, 0) << "ERROR: expected obj_size=" << obj_size
1662 << ", actual read size=" << ent.meta.size << dendl;
1663 return -EIO;
1664 }
1665 }
1666
1667 op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
1668 if (op_ret < 0)
1669 return op_ret;
1670
1671   /* We can use the global user_acl because large objects (SLO/DLO) cannot
1672    * have segments stored in different accounts. */
1673 if (s->system_request) {
1674 ldpp_dout(this, 2) << "overriding permissions due to system operation" << dendl;
1675 } else if (s->auth.identity->is_admin_of(s->user->get_id())) {
1676 ldpp_dout(this, 2) << "overriding permissions due to admin operation" << dendl;
1677 } else if (!verify_object_permission(this, s, part, s->user_acl.get(), bucket_acl,
1678 &obj_policy, bucket_policy, s->iam_user_policies, action)) {
1679 return -EPERM;
1680 }
1681 if (ent.meta.size == 0) {
1682 return 0;
1683 }
1684
1685 perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
1686 filter->fixup_range(cur_ofs, cur_end);
1687 op_ret = read_op.iterate(cur_ofs, cur_end, filter, s->yield);
1688 if (op_ret >= 0)
1689 op_ret = filter->flush();
1690 return op_ret;
1691 }
1692
1693 static int iterate_user_manifest_parts(CephContext * const cct,
1694 rgw::sal::RGWRadosStore * const store,
1695 const off_t ofs,
1696 const off_t end,
1697 RGWBucketInfo *pbucket_info,
1698 const string& obj_prefix,
1699 RGWAccessControlPolicy * const bucket_acl,
1700 const boost::optional<Policy>& bucket_policy,
1701 uint64_t * const ptotal_len,
1702 uint64_t * const pobj_size,
1703 string * const pobj_sum,
1704 int (*cb)(rgw_bucket& bucket,
1705 const rgw_bucket_dir_entry& ent,
1706 RGWAccessControlPolicy * const bucket_acl,
1707 const boost::optional<Policy>& bucket_policy,
1708 off_t start_ofs,
1709 off_t end_ofs,
1710 void *param,
1711 bool swift_slo),
1712 void * const cb_param)
1713 {
1714 rgw_bucket& bucket = pbucket_info->bucket;
1715 uint64_t obj_ofs = 0, len_count = 0;
1716 bool found_start = false, found_end = false, handled_end = false;
1717 string delim;
1718 bool is_truncated;
1719 vector<rgw_bucket_dir_entry> objs;
1720
1721 utime_t start_time = ceph_clock_now();
1722
1723 RGWRados::Bucket target(store->getRados(), *pbucket_info);
1724 RGWRados::Bucket::List list_op(&target);
1725
1726 list_op.params.prefix = obj_prefix;
1727 list_op.params.delim = delim;
1728
1729 MD5 etag_sum;
1730 do {
1731 #define MAX_LIST_OBJS 100
1732 int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated, null_yield);
1733 if (r < 0) {
1734 return r;
1735 }
1736
1737 for (rgw_bucket_dir_entry& ent : objs) {
1738 const uint64_t cur_total_len = obj_ofs;
1739 const uint64_t obj_size = ent.meta.accounted_size;
1740 uint64_t start_ofs = 0, end_ofs = obj_size;
1741
1742 if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
1743 start_ofs = ofs - obj_ofs;
1744 found_start = true;
1745 }
1746
1747 obj_ofs += obj_size;
1748 if (pobj_sum) {
1749 etag_sum.Update((const unsigned char *)ent.meta.etag.c_str(),
1750 ent.meta.etag.length());
1751 }
1752
1753 if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
1754 end_ofs = end - cur_total_len + 1;
1755 found_end = true;
1756 }
1757
1758 perfcounter->tinc(l_rgw_get_lat,
1759 (ceph_clock_now() - start_time));
1760
1761 if (found_start && !handled_end) {
1762 len_count += end_ofs - start_ofs;
1763
1764 if (cb) {
1765 r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs,
1766 cb_param, false /* swift_slo */);
1767 if (r < 0) {
1768 return r;
1769 }
1770 }
1771 }
1772
1773 handled_end = found_end;
1774 start_time = ceph_clock_now();
1775 }
1776 } while (is_truncated);
1777
1778 if (ptotal_len) {
1779 *ptotal_len = len_count;
1780 }
1781 if (pobj_size) {
1782 *pobj_size = obj_ofs;
1783 }
1784 if (pobj_sum) {
1785 complete_etag(etag_sum, pobj_sum);
1786 }
1787
1788 return 0;
1789 }
1790
1791 struct rgw_slo_part {
1792 RGWAccessControlPolicy *bucket_acl = nullptr;
1793 Policy* bucket_policy = nullptr;
1794 rgw_bucket bucket;
1795 string obj_name;
1796 uint64_t size = 0;
1797 string etag;
1798 };
1799
1800 static int iterate_slo_parts(CephContext *cct,
1801 rgw::sal::RGWRadosStore *store,
1802 off_t ofs,
1803 off_t end,
1804 map<uint64_t, rgw_slo_part>& slo_parts,
1805 int (*cb)(rgw_bucket& bucket,
1806 const rgw_bucket_dir_entry& ent,
1807 RGWAccessControlPolicy *bucket_acl,
1808 const boost::optional<Policy>& bucket_policy,
1809 off_t start_ofs,
1810 off_t end_ofs,
1811 void *param,
1812 bool swift_slo),
1813 void *cb_param)
1814 {
1815 bool found_start = false, found_end = false;
1816
1817 if (slo_parts.empty()) {
1818 return 0;
1819 }
1820
1821 utime_t start_time = ceph_clock_now();
1822
1823 map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
1824 if (iter != slo_parts.begin()) {
1825 --iter;
1826 }
1827
1828 uint64_t obj_ofs = iter->first;
1829
1830 for (; iter != slo_parts.end() && !found_end; ++iter) {
1831 rgw_slo_part& part = iter->second;
1832 rgw_bucket_dir_entry ent;
1833
1834 ent.key.name = part.obj_name;
1835 ent.meta.accounted_size = ent.meta.size = part.size;
1836 ent.meta.etag = part.etag;
1837
1838 uint64_t cur_total_len = obj_ofs;
1839 uint64_t start_ofs = 0, end_ofs = ent.meta.size - 1;
1840
1841 if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
1842 start_ofs = ofs - obj_ofs;
1843 found_start = true;
1844 }
1845
1846 obj_ofs += ent.meta.size;
1847
1848 if (!found_end && obj_ofs > (uint64_t)end) {
1849 end_ofs = end - cur_total_len;
1850 found_end = true;
1851 }
1852
1853 perfcounter->tinc(l_rgw_get_lat,
1854 (ceph_clock_now() - start_time));
1855
1856 if (found_start) {
1857 if (cb) {
1858 dout(20) << "iterate_slo_parts()"
1859 << " obj=" << part.obj_name
1860 << " start_ofs=" << start_ofs
1861 << " end_ofs=" << end_ofs
1862 << dendl;
1863
1864 // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
1865 int r = cb(part.bucket, ent, part.bucket_acl,
1866 (part.bucket_policy ?
1867 boost::optional<Policy>(*part.bucket_policy) : none),
1868 start_ofs, end_ofs, cb_param, true /* swift_slo */);
1869 if (r < 0)
1870 return r;
1871 }
1872 }
1873
1874 start_time = ceph_clock_now();
1875 }
1876
1877 return 0;
1878 }
1879
1880 static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
1881 const rgw_bucket_dir_entry& ent,
1882 RGWAccessControlPolicy * const bucket_acl,
1883 const boost::optional<Policy>& bucket_policy,
1884 const off_t start_ofs,
1885 const off_t end_ofs,
1886 void * const param,
1887 bool swift_slo = false)
1888 {
1889 RGWGetObj *op = static_cast<RGWGetObj *>(param);
1890 return op->read_user_manifest_part(
1891 bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, swift_slo);
1892 }
1893
1894 int RGWGetObj::handle_user_manifest(const char *prefix)
1895 {
1896 const boost::string_view prefix_view(prefix);
1897 ldpp_dout(this, 2) << "RGWGetObj::handle_user_manifest() prefix="
1898 << prefix_view << dendl;
1899
1900 const size_t pos = prefix_view.find('/');
1901 if (pos == string::npos) {
1902 return -EINVAL;
1903 }
1904
1905 const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
1906 const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));
1907
1908 rgw_bucket bucket;
1909
1910 RGWAccessControlPolicy _bucket_acl(s->cct);
1911 RGWAccessControlPolicy *bucket_acl;
1912 boost::optional<Policy> _bucket_policy;
1913 boost::optional<Policy>* bucket_policy;
1914 RGWBucketInfo bucket_info;
1915 RGWBucketInfo *pbucket_info;
1916
1917 if (bucket_name.compare(s->bucket.name) != 0) {
1918 map<string, bufferlist> bucket_attrs;
1919 auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
1920 int r = store->getRados()->get_bucket_info(store->svc(), s->user->get_tenant(),
1921 bucket_name, bucket_info, NULL,
1922 s->yield, &bucket_attrs);
1923 if (r < 0) {
1924 ldpp_dout(this, 0) << "could not get bucket info for bucket="
1925 << bucket_name << dendl;
1926 return r;
1927 }
1928 bucket = bucket_info.bucket;
1929 pbucket_info = &bucket_info;
1930 bucket_acl = &_bucket_acl;
1931 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
1932 if (r < 0) {
1933 ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
1934 return r;
1935 }
1936 _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
1937 bucket_info.bucket.tenant);
1938 bucket_policy = &_bucket_policy;
1939 } else {
1940 bucket = s->bucket;
1941 pbucket_info = &s->bucket_info;
1942 bucket_acl = s->bucket_acl.get();
1943 bucket_policy = &s->iam_policy;
1944 }
1945
1946 /* dry run to find out:
1947 * - the total length (of the parts we are going to send to the client),
1948 * - the overall DLO content size,
1949 * - the md5 sum of the overall DLO content (for the ETag of the Swift API). */
1950 int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
1951 pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
1952 nullptr, &s->obj_size, &lo_etag,
1953 nullptr /* cb */, nullptr /* cb arg */);
1954 if (r < 0) {
1955 return r;
1956 }
1957
1958 r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
1959 if (r < 0) {
1960 return r;
1961 }
1962
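  // second pass: with ofs/end now clamped to the overall DLO size, compute the
  // total number of bytes that will actually be returned to the client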
1963 r = iterate_user_manifest_parts(s->cct, store, ofs, end,
1964 pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
1965 &total_len, nullptr, nullptr,
1966 nullptr, nullptr);
1967 if (r < 0) {
1968 return r;
1969 }
1970
1971 if (!get_data) {
1972 bufferlist bl;
1973 send_response_data(bl, 0, 0);
1974 return 0;
1975 }
1976
1977 r = iterate_user_manifest_parts(s->cct, store, ofs, end,
1978 pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
1979 nullptr, nullptr, nullptr,
1980 get_obj_user_manifest_iterate_cb, (void *)this);
1981 if (r < 0) {
1982 return r;
1983 }
1984
1985 if (!total_len) {
1986 bufferlist bl;
1987 send_response_data(bl, 0, 0);
1988 }
1989
1990 return 0;
1991 }
1992
1993 int RGWGetObj::handle_slo_manifest(bufferlist& bl)
1994 {
1995 RGWSLOInfo slo_info;
1996 auto bliter = bl.cbegin();
1997 try {
1998 decode(slo_info, bliter);
1999 } catch (buffer::error& err) {
2000 ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
2001 return -EIO;
2002 }
2003 ldpp_dout(this, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
2004
2005 vector<RGWAccessControlPolicy> allocated_acls;
2006 map<string, pair<RGWAccessControlPolicy *, boost::optional<Policy>>> policies;
2007 map<string, rgw_bucket> buckets;
2008
2009 map<uint64_t, rgw_slo_part> slo_parts;
2010
2011 MD5 etag_sum;
2012 total_len = 0;
2013
2014 for (const auto& entry : slo_info.entries) {
2015 const string& path = entry.path;
2016
2017 /* If the path starts with slashes, strip them all. */
2018 const size_t pos_init = path.find_first_not_of('/');
2019 /* According to the documentation of std::string::find, the following check
2020 * is not necessary, as std::string::npos should propagate here. That holds,
2021 * barring implementation bugs.
2022 * See the following question on SO:
2023 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
2024 */
2025 if (pos_init == string::npos) {
2026 return -EINVAL;
2027 }
2028
2029 const size_t pos_sep = path.find('/', pos_init);
2030 if (pos_sep == string::npos) {
2031 return -EINVAL;
2032 }
2033
2034 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
2035 string obj_name = path.substr(pos_sep + 1);
2036
2037 rgw_bucket bucket;
2038 RGWAccessControlPolicy *bucket_acl;
2039 Policy* bucket_policy;
2040
2041 if (bucket_name.compare(s->bucket.name) != 0) {
2042 const auto& piter = policies.find(bucket_name);
2043 if (piter != policies.end()) {
2044 bucket_acl = piter->second.first;
2045 bucket_policy = piter->second.second.get_ptr();
2046 bucket = buckets[bucket_name];
2047 } else {
2048 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
2049 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
2050
2051 RGWBucketInfo bucket_info;
2052 map<string, bufferlist> bucket_attrs;
2053 auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
2054 int r = store->getRados()->get_bucket_info(store->svc(), s->user->get_tenant(),
2055 bucket_name, bucket_info, nullptr,
2056 s->yield, &bucket_attrs);
2057 if (r < 0) {
2058 ldpp_dout(this, 0) << "could not get bucket info for bucket="
2059 << bucket_name << dendl;
2060 return r;
2061 }
2062 bucket = bucket_info.bucket;
2063 bucket_acl = &_bucket_acl;
2064 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
2065 bucket);
2066 if (r < 0) {
2067 ldpp_dout(this, 0) << "failed to read bucket ACL for bucket "
2068 << bucket << dendl;
2069 return r;
2070 }
2071 auto _bucket_policy = get_iam_policy_from_attr(
2072 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
2073 bucket_policy = _bucket_policy.get_ptr();
2074 buckets[bucket_name] = bucket;
2075 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
2076 }
2077 } else {
2078 bucket = s->bucket;
2079 bucket_acl = s->bucket_acl.get();
2080 bucket_policy = s->iam_policy.get_ptr();
2081 }
2082
2083 rgw_slo_part part;
2084 part.bucket_acl = bucket_acl;
2085 part.bucket_policy = bucket_policy;
2086 part.bucket = bucket;
2087 part.obj_name = obj_name;
2088 part.size = entry.size_bytes;
2089 part.etag = entry.etag;
2090 ldpp_dout(this, 20) << "slo_part: bucket=" << part.bucket
2091 << " obj=" << part.obj_name
2092 << " size=" << part.size
2093 << " etag=" << part.etag
2094 << dendl;
2095
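    // the Swift large-object ETag is the MD5 of the concatenated part ETags;
    // each part is indexed by its starting byte offset within the logical
    // object so iterate_slo_parts() can seek straight to the requested range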
2096 etag_sum.Update((const unsigned char *)entry.etag.c_str(),
2097 entry.etag.length());
2098
2099 slo_parts[total_len] = part;
2100 total_len += part.size;
2101 } /* foreach entry */
2102
2103 complete_etag(etag_sum, &lo_etag);
2104
2105 s->obj_size = slo_info.total_size;
2106 ldpp_dout(this, 20) << "s->obj_size=" << s->obj_size << dendl;
2107
2108 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
2109 if (r < 0) {
2110 return r;
2111 }
2112
2113 total_len = end - ofs + 1;
2114 ldpp_dout(this, 20) << "Requested: ofs=" << ofs
2115 << " end=" << end
2116 << " total=" << total_len
2117 << dendl;
2118
2119 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
2120 get_obj_user_manifest_iterate_cb, (void *)this);
2121 if (r < 0) {
2122 return r;
2123 }
2124
2125 return 0;
2126 }
2127
2128 int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
2129 {
2130 /* garbage collection related handling */
2131 utime_t start_time = ceph_clock_now();
2132 if (start_time > gc_invalidate_time) {
2133 int r = store->getRados()->defer_gc(s->obj_ctx, s->bucket_info, obj, s->yield);
2134 if (r < 0) {
2135 ldpp_dout(this, 0) << "WARNING: could not defer gc entry for obj" << dendl;
2136 }
2137 gc_invalidate_time = start_time;
2138 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
2139 }
2140 return send_response_data(bl, bl_ofs, bl_len);
2141 }
2142
2143 bool RGWGetObj::prefetch_data()
2144 {
2145 /* HEAD request, stop prefetch */
2146 if (!get_data || s->info.env->exists("HTTP_X_RGW_AUTH")) {
2147 return false;
2148 }
2149
2150 range_str = s->info.env->get("HTTP_RANGE");
2151 // TODO: add range prefetch
2152 if (range_str) {
2153 parse_range();
2154 return false;
2155 }
2156
2157 return get_data;
2158 }
2159
2160 void RGWGetObj::pre_exec()
2161 {
2162 rgw_bucket_object_pre_exec(s);
2163 }
2164
2165 static bool object_is_expired(map<string, bufferlist>& attrs) {
2166 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
2167 if (iter != attrs.end()) {
2168 utime_t delete_at;
2169 try {
2170 decode(delete_at, iter->second);
2171 } catch (buffer::error& err) {
2172 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
2173 return false;
2174 }
2175
2176 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
2177 return true;
2178 }
2179 }
2180
2181 return false;
2182 }
2183
2184 static inline void rgw_cond_decode_objtags(
2185 struct req_state *s,
2186 const std::map<std::string, buffer::list> &attrs)
2187 {
2188 const auto& tags = attrs.find(RGW_ATTR_TAGS);
2189 if (tags != attrs.end()) {
2190 try {
2191 bufferlist::const_iterator iter{&tags->second};
2192 s->tagset.decode(iter);
2193 } catch (buffer::error& err) {
2194 ldout(s->cct, 0)
2195 << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
2196 }
2197 }
2198 }
2199
2200 void RGWGetObj::execute()
2201 {
2202 bufferlist bl;
2203 gc_invalidate_time = ceph_clock_now();
2204 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
2205
2206 bool need_decompress;
2207 int64_t ofs_x, end_x;
2208
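  // the response filter chain is built inside-out: the client callback may be
  // wrapped by a decompression filter, which in turn may be wrapped by a
  // decryption filter, so each chunk read from RADOS is decrypted and then
  // decompressed before send_response_data() sees it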
2209 RGWGetObj_CB cb(this);
2210 RGWGetObj_Filter* filter = (RGWGetObj_Filter *)&cb;
2211 boost::optional<RGWGetObj_Decompress> decompress;
2212 std::unique_ptr<RGWGetObj_Filter> decrypt;
2213 map<string, bufferlist>::iterator attr_iter;
2214
2215 perfcounter->inc(l_rgw_get);
2216
2217 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
2218 RGWRados::Object::Read read_op(&op_target);
2219
2220 op_ret = get_params();
2221 if (op_ret < 0)
2222 goto done_err;
2223
2224 op_ret = init_common();
2225 if (op_ret < 0)
2226 goto done_err;
2227
2228 read_op.conds.mod_ptr = mod_ptr;
2229 read_op.conds.unmod_ptr = unmod_ptr;
2230 read_op.conds.high_precision_time = s->system_request; /* system requests need to use high-precision time */
2231 read_op.conds.mod_zone_id = mod_zone_id;
2232 read_op.conds.mod_pg_ver = mod_pg_ver;
2233 read_op.conds.if_match = if_match;
2234 read_op.conds.if_nomatch = if_nomatch;
2235 read_op.params.attrs = &attrs;
2236 read_op.params.lastmod = &lastmod;
2237 read_op.params.obj_size = &s->obj_size;
2238
2239 op_ret = read_op.prepare(s->yield);
2240 if (op_ret < 0)
2241 goto done_err;
2242 version_id = read_op.state.obj.key.instance;
2243
2244 /* STAT ops don't need data, and do no i/o */
2245 if (get_type() == RGW_OP_STAT_OBJ) {
2246 return;
2247 }
2248 if (s->info.env->exists("HTTP_X_RGW_AUTH")) {
2249 op_ret = 0;
2250 goto done_err;
2251 }
2252 /* start gettorrent */
2253 if (torrent.get_flag())
2254 {
2255 attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
2256 if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
2257 ldpp_dout(this, 0) << "ERROR: torrents are not supported for objects "
2258 "encrypted with SSE-C" << dendl;
2259 op_ret = -EINVAL;
2260 goto done_err;
2261 }
2262 torrent.init(s, store);
2263 op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj);
2264 if (op_ret < 0)
2265 {
2266 ldpp_dout(this, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
2267 << dendl;
2268 goto done_err;
2269 }
2270 op_ret = send_response_data(bl, 0, total_len);
2271 if (op_ret < 0)
2272 {
2273 ldpp_dout(this, 0) << "ERROR: failed to send_response_data ret= " << op_ret << dendl;
2274 goto done_err;
2275 }
2276 return;
2277 }
2278 /* end gettorrent */
2279
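  // if the object was stored compressed, report the original (uncompressed)
  // size and splice a decompression filter in front of the client callback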
2280 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
2281 if (op_ret < 0) {
2282 ldpp_dout(s, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
2283 goto done_err;
2284 }
2285 if (need_decompress) {
2286 s->obj_size = cs_info.orig_size;
2287 decompress.emplace(s->cct, &cs_info, partial_content, filter);
2288 filter = &*decompress;
2289 }
2290
2291 attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
2292 if (attr_iter != attrs.end() && !skip_manifest) {
2293 op_ret = handle_user_manifest(attr_iter->second.c_str());
2294 if (op_ret < 0) {
2295 ldpp_dout(this, 0) << "ERROR: failed to handle user manifest ret="
2296 << op_ret << dendl;
2297 goto done_err;
2298 }
2299 return;
2300 }
2301
2302 attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
2303 if (attr_iter != attrs.end() && !skip_manifest) {
2304 is_slo = true;
2305 op_ret = handle_slo_manifest(attr_iter->second);
2306 if (op_ret < 0) {
2307 ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
2308 << dendl;
2309 goto done_err;
2310 }
2311 return;
2312 }
2313
2314 // for range requests with obj size 0
2315 if (range_str && !(s->obj_size)) {
2316 total_len = 0;
2317 op_ret = -ERANGE;
2318 goto done_err;
2319 }
2320
2321 op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
2322 if (op_ret < 0)
2323 goto done_err;
2324 total_len = (ofs <= end ? end + 1 - ofs : 0);
2325
2326 /* Check whether the object has expired. The Swift API documentation
2327 * states that we should return 404 Not Found in such a case. */
2328 if (need_object_expiration() && object_is_expired(attrs)) {
2329 op_ret = -ENOENT;
2330 goto done_err;
2331 }
2332
2333 /* Decode S3 objtags, if any */
2334 rgw_cond_decode_objtags(s, attrs);
2335
2336 start = ofs;
2337
2338 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
2339 op_ret = this->get_decrypt_filter(&decrypt, filter,
2340 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
2341 if (decrypt != nullptr) {
2342 filter = decrypt.get();
2343 }
2344 if (op_ret < 0) {
2345 goto done_err;
2346 }
2347
2348 if (!get_data || ofs > end) {
2349 send_response_data(bl, 0, 0);
2350 return;
2351 }
2352
2353 perfcounter->inc(l_rgw_get_b, end - ofs);
2354
2355 ofs_x = ofs;
2356 end_x = end;
2357 filter->fixup_range(ofs_x, end_x);
2358 op_ret = read_op.iterate(ofs_x, end_x, filter, s->yield);
2359
2360 if (op_ret >= 0)
2361 op_ret = filter->flush();
2362
2363 perfcounter->tinc(l_rgw_get_lat, s->time_elapsed());
2364 if (op_ret < 0) {
2365 goto done_err;
2366 }
2367
2368 op_ret = send_response_data(bl, 0, 0);
2369 if (op_ret < 0) {
2370 goto done_err;
2371 }
2372 return;
2373
2374 done_err:
2375 send_response_data_error();
2376 }
2377
2378 int RGWGetObj::init_common()
2379 {
2380 if (range_str) {
2381 /* the range was not parsed (or failed to parse) during prefetch */
2382 if (!range_parsed) {
2383 int r = parse_range();
2384 if (r < 0)
2385 return r;
2386 }
2387 }
2388 if (if_mod) {
2389 if (parse_time(if_mod, &mod_time) < 0)
2390 return -EINVAL;
2391 mod_ptr = &mod_time;
2392 }
2393
2394 if (if_unmod) {
2395 if (parse_time(if_unmod, &unmod_time) < 0)
2396 return -EINVAL;
2397 unmod_ptr = &unmod_time;
2398 }
2399
2400 return 0;
2401 }
2402
2403 int RGWListBuckets::verify_permission()
2404 {
2405 rgw::Partition partition = rgw::Partition::aws;
2406 rgw::Service service = rgw::Service::s3;
2407
2408 if (!verify_user_permission(this, s, ARN(partition, service, "", s->user->get_tenant(), "*"), rgw::IAM::s3ListAllMyBuckets)) {
2409 return -EACCES;
2410 }
2411
2412 return 0;
2413 }
2414
2415 int RGWGetUsage::verify_permission()
2416 {
2417 if (s->auth.identity->is_anonymous()) {
2418 return -EACCES;
2419 }
2420
2421 return 0;
2422 }
2423
2424 void RGWListBuckets::execute()
2425 {
2426 bool done;
2427 bool started = false;
2428 uint64_t total_count = 0;
2429
2430 const uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
2431
2432 op_ret = get_params();
2433 if (op_ret < 0) {
2434 goto send_end;
2435 }
2436
2437 if (supports_account_metadata()) {
2438 op_ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &attrs, s->yield);
2439 if (op_ret < 0) {
2440 goto send_end;
2441 }
2442 }
2443
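  // list the user's buckets in chunks of at most rgw_list_buckets_max_chunk,
  // streaming each chunk to the client and stopping early once the requested
  // limit (if any) has been satisfied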
2444 is_truncated = false;
2445 do {
2446 rgw::sal::RGWBucketList buckets;
2447 uint64_t read_count;
2448 if (limit >= 0) {
2449 read_count = min(limit - total_count, max_buckets);
2450 } else {
2451 read_count = max_buckets;
2452 }
2453
2454 rgw::sal::RGWRadosUser user(store, s->user->get_id());
2455
2456 op_ret = user.list_buckets(marker, end_marker, read_count, should_get_stats(), buckets);
2457
2458 if (op_ret < 0) {
2459 /* hmm.. something wrong here.. the user was authenticated, so it
2460 should exist */
2461 ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
2462 << s->user->get_id() << dendl;
2463 break;
2464 }
2465
2466 /* We need to have stats for all our policies - even if a given policy
2467 * isn't actually used in a given account. In such a situation its usage
2468 * stats would simply be full of zeros. */
2469 for (const auto& policy : store->svc()->zone->get_zonegroup().placement_targets) {
2470 policies_stats.emplace(policy.second.name,
2471 decltype(policies_stats)::mapped_type());
2472 }
2473
2474 std::map<std::string, rgw::sal::RGWBucket*>& m = buckets.get_buckets();
2475 for (const auto& kv : m) {
2476 const auto& bucket = kv.second;
2477
2478 global_stats.bytes_used += bucket->get_size();
2479 global_stats.bytes_used_rounded += bucket->get_size_rounded();
2480 global_stats.objects_count += bucket->get_count();
2481
2482 /* operator[] can still create a new entry for a storage policy seen
2483 * for the first time. */
2484 auto& policy_stats = policies_stats[bucket->get_placement_rule().to_str()];
2485 policy_stats.bytes_used += bucket->get_size();
2486 policy_stats.bytes_used_rounded += bucket->get_size_rounded();
2487 policy_stats.buckets_count++;
2488 policy_stats.objects_count += bucket->get_count();
2489 }
2490 global_stats.buckets_count += m.size();
2491 total_count += m.size();
2492
2493 done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));
2494
2495 if (!started) {
2496 send_response_begin(buckets.count() > 0);
2497 started = true;
2498 }
2499
2500 if (read_count > 0 &&
2501 !m.empty()) {
2502 map<string, rgw::sal::RGWBucket*>::reverse_iterator riter = m.rbegin();
2503 marker = riter->first;
2504
2505 handle_listing_chunk(std::move(buckets));
2506 }
2507 } while (is_truncated && !done);
2508
2509 send_end:
2510 if (!started) {
2511 send_response_begin(false);
2512 }
2513 send_response_end();
2514 }
2515
2516 void RGWGetUsage::execute()
2517 {
2518 uint64_t start_epoch = 0;
2519 uint64_t end_epoch = (uint64_t)-1;
2520 op_ret = get_params();
2521 if (op_ret < 0)
2522 return;
2523
2524 if (!start_date.empty()) {
2525 op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
2526 if (op_ret < 0) {
2527 ldpp_dout(this, 0) << "ERROR: failed to parse start date" << dendl;
2528 return;
2529 }
2530 }
2531
2532 if (!end_date.empty()) {
2533 op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
2534 if (op_ret < 0) {
2535 ldpp_dout(this, 0) << "ERROR: failed to parse end date" << dendl;
2536 return;
2537 }
2538 }
2539
2540 uint32_t max_entries = 1000;
2541
2542 bool is_truncated = true;
2543
2544 RGWUsageIter usage_iter;
2545
2546 while (is_truncated) {
2547 op_ret = store->getRados()->read_usage(s->user->get_id(), s->bucket_name, start_epoch, end_epoch, max_entries,
2548 &is_truncated, usage_iter, usage);
2549
2550 if (op_ret == -ENOENT) {
2551 op_ret = 0;
2552 is_truncated = false;
2553 }
2554
2555 if (op_ret < 0) {
2556 return;
2557 }
2558 }
2559
2560 op_ret = rgw_user_sync_all_stats(store, s->user->get_id());
2561 if (op_ret < 0) {
2562 ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl;
2563 return;
2564 }
2565
2566 op_ret = rgw_user_get_all_buckets_stats(store, s->user->get_id(), buckets_usage);
2567 if (op_ret < 0) {
2568 ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl;
2569 return;
2570 }
2571
2572 op_ret = store->ctl()->user->read_stats(s->user->get_id(), &stats);
2573 if (op_ret < 0) {
2574 ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl;
2575 return;
2576 }
2577
2578 return;
2579 }
2580
2581 int RGWStatAccount::verify_permission()
2582 {
2583 if (!verify_user_permission_no_policy(this, s, RGW_PERM_READ)) {
2584 return -EACCES;
2585 }
2586
2587 return 0;
2588 }
2589
2590 void RGWStatAccount::execute()
2591 {
2592 string marker;
2593 rgw::sal::RGWBucketList buckets;
2594 uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
2595 const string *lastmarker;
2596
2597 do {
2598
2599 lastmarker = nullptr;
2600 op_ret = rgw_read_user_buckets(store, s->user->get_id(), buckets, marker,
2601 string(), max_buckets, true);
2602 if (op_ret < 0) {
2603 /* hmm.. something wrong here.. the user was authenticated, so it
2604 should exist */
2605 ldpp_dout(this, 10) << "WARNING: failed on rgw_read_user_buckets uid="
2606 << s->user->get_id() << " ret=" << op_ret << dendl;
2607 break;
2608 } else {
2609 /* We need to have stats for all our policies - even if a given policy
2610 * isn't actually used in a given account. In such a situation its usage
2611 * stats would simply be full of zeros. */
2612 for (const auto& policy : store->svc()->zone->get_zonegroup().placement_targets) {
2613 policies_stats.emplace(policy.second.name,
2614 decltype(policies_stats)::mapped_type());
2615 }
2616
2617 std::map<std::string, rgw::sal::RGWBucket*>& m = buckets.get_buckets();
2618 for (const auto& kv : m) {
2619 const auto& bucket = kv.second;
2620 lastmarker = &kv.first;
2621
2622 global_stats.bytes_used += bucket->get_size();
2623 global_stats.bytes_used_rounded += bucket->get_size_rounded();
2624 global_stats.objects_count += bucket->get_count();
2625
2626 /* operator[] can still create a new entry for a storage policy seen
2627 * for the first time. */
2628 auto& policy_stats = policies_stats[bucket->get_placement_rule().to_str()];
2629 policy_stats.bytes_used += bucket->get_size();
2630 policy_stats.bytes_used_rounded += bucket->get_size_rounded();
2631 policy_stats.buckets_count++;
2632 policy_stats.objects_count += bucket->get_count();
2633 }
2634 global_stats.buckets_count += m.size();
2635
2636 }
2637 if (!lastmarker) {
2638 lderr(s->cct) << "ERROR: rgw_read_user_buckets, stasis at marker="
2639 << marker << " uid=" << s->user->get_id() << dendl;
2640 break;
2641 }
2642 marker = *lastmarker;
2643 } while (buckets.is_truncated());
2644 }
2645
2646 int RGWGetBucketVersioning::verify_permission()
2647 {
2648 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
2649 }
2650
2651 void RGWGetBucketVersioning::pre_exec()
2652 {
2653 rgw_bucket_object_pre_exec(s);
2654 }
2655
2656 void RGWGetBucketVersioning::execute()
2657 {
2658 versioned = s->bucket_info.versioned();
2659 versioning_enabled = s->bucket_info.versioning_enabled();
2660 mfa_enabled = s->bucket_info.mfa_enabled();
2661 }
2662
2663 int RGWSetBucketVersioning::verify_permission()
2664 {
2665 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
2666 }
2667
2668 void RGWSetBucketVersioning::pre_exec()
2669 {
2670 rgw_bucket_object_pre_exec(s);
2671 }
2672
2673 void RGWSetBucketVersioning::execute()
2674 {
2675 op_ret = get_params();
2676 if (op_ret < 0)
2677 return;
2678
2679 if (s->bucket_info.obj_lock_enabled() && versioning_status != VersioningEnabled) {
2680 op_ret = -ERR_INVALID_BUCKET_STATE;
2681 return;
2682 }
2683
2684 bool cur_mfa_status = (s->bucket_info.flags & BUCKET_MFA_ENABLED) != 0;
2685
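  // only treat this as an MFA status change when the requested status actually
  // differs from the bucket's current one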
2686 mfa_set_status &= (mfa_status != cur_mfa_status);
2687
2688 if (mfa_set_status &&
2689 !s->mfa_verified) {
2690 op_ret = -ERR_MFA_REQUIRED;
2691 return;
2692 }
2693 // if MFA is enabled for the bucket, make sure the MFA code was validated in case the versioning status is being changed
2694 if (cur_mfa_status) {
2695 bool req_versioning_status = false;
2696 // if the requested versioning status differs from the one currently set on the bucket, a verified MFA code is required
2697 if (versioning_status == VersioningEnabled) {
2698 req_versioning_status = (s->bucket_info.flags & BUCKET_VERSIONS_SUSPENDED) != 0;
2699 } else if (versioning_status == VersioningSuspended) {
2700 req_versioning_status = (s->bucket_info.flags & BUCKET_VERSIONS_SUSPENDED) == 0;
2701 }
2702 if (req_versioning_status && !s->mfa_verified) {
2703 op_ret = -ERR_MFA_REQUIRED;
2704 return;
2705 }
2706 }
2707
2708 if (!store->svc()->zone->is_meta_master()) {
2709 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2710 if (op_ret < 0) {
2711 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
2712 return;
2713 }
2714 }
2715
2716 bool modified = mfa_set_status;
2717
2718 op_ret = retry_raced_bucket_write(store->getRados(), s, [&] {
2719 if (mfa_set_status) {
2720 if (mfa_status) {
2721 s->bucket_info.flags |= BUCKET_MFA_ENABLED;
2722 } else {
2723 s->bucket_info.flags &= ~BUCKET_MFA_ENABLED;
2724 }
2725 }
2726
2727 if (versioning_status == VersioningEnabled) {
2728 s->bucket_info.flags |= BUCKET_VERSIONED;
2729 s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
2730 modified = true;
2731 } else if (versioning_status == VersioningSuspended) {
2732 s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
2733 modified = true;
2734 } else {
2735 return op_ret;
2736 }
2737 return store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
2738 &s->bucket_attrs);
2739 });
2740
2741 if (!modified) {
2742 return;
2743 }
2744
2745 if (op_ret < 0) {
2746 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2747 << " returned err=" << op_ret << dendl;
2748 return;
2749 }
2750 }
2751
2752 int RGWGetBucketWebsite::verify_permission()
2753 {
2754 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
2755 }
2756
2757 void RGWGetBucketWebsite::pre_exec()
2758 {
2759 rgw_bucket_object_pre_exec(s);
2760 }
2761
2762 void RGWGetBucketWebsite::execute()
2763 {
2764 if (!s->bucket_info.has_website) {
2765 op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION;
2766 }
2767 }
2768
2769 int RGWSetBucketWebsite::verify_permission()
2770 {
2771 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
2772 }
2773
2774 void RGWSetBucketWebsite::pre_exec()
2775 {
2776 rgw_bucket_object_pre_exec(s);
2777 }
2778
2779 void RGWSetBucketWebsite::execute()
2780 {
2781 op_ret = get_params();
2782
2783 if (op_ret < 0)
2784 return;
2785
2786 if (!store->svc()->zone->is_meta_master()) {
2787 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2788 if (op_ret < 0) {
2789 ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
2790 return;
2791 }
2792 }
2793
2794 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
2795 s->bucket_info.has_website = true;
2796 s->bucket_info.website_conf = website_conf;
2797 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
2798 real_time(), &s->bucket_attrs);
2799 return op_ret;
2800 });
2801
2802 if (op_ret < 0) {
2803 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2804 << " returned err=" << op_ret << dendl;
2805 return;
2806 }
2807 }
2808
2809 int RGWDeleteBucketWebsite::verify_permission()
2810 {
2811 return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
2812 }
2813
2814 void RGWDeleteBucketWebsite::pre_exec()
2815 {
2816 rgw_bucket_object_pre_exec(s);
2817 }
2818
2819 void RGWDeleteBucketWebsite::execute()
2820 {
2821
2822 if (!store->svc()->zone->is_meta_master()) {
2823 bufferlist in_data;
2824 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
2825 if (op_ret < 0) {
2826 ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket.name
2827 << "returned err=" << op_ret << dendl;
2828 return;
2829 }
2830 }
2831 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
2832 s->bucket_info.has_website = false;
2833 s->bucket_info.website_conf = RGWBucketWebsiteConf();
2834 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
2835 real_time(), &s->bucket_attrs);
2836 return op_ret;
2837 });
2838 if (op_ret < 0) {
2839 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2840 << " returned err=" << op_ret << dendl;
2841 return;
2842 }
2843 }
2844
2845 int RGWStatBucket::verify_permission()
2846 {
2847 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2848 if (!verify_bucket_permission(this, s, rgw::IAM::s3ListBucket)) {
2849 return -EACCES;
2850 }
2851
2852 return 0;
2853 }
2854
2855 void RGWStatBucket::pre_exec()
2856 {
2857 rgw_bucket_object_pre_exec(s);
2858 }
2859
2860 void RGWStatBucket::execute()
2861 {
2862 if (!s->bucket_exists) {
2863 op_ret = -ERR_NO_SUCH_BUCKET;
2864 return;
2865 }
2866
2867 rgw::sal::RGWRadosUser user(store, s->user->get_id());
2868 bucket = new rgw::sal::RGWRadosBucket(store, user, s->bucket);
2869 op_ret = bucket->update_container_stats();
2870 }
2871
2872 int RGWListBucket::verify_permission()
2873 {
2874 op_ret = get_params();
2875 if (op_ret < 0) {
2876 return op_ret;
2877 }
2878 if (!prefix.empty())
2879 s->env.emplace("s3:prefix", prefix);
2880
2881 if (!delimiter.empty())
2882 s->env.emplace("s3:delimiter", delimiter);
2883
2884 s->env.emplace("s3:max-keys", std::to_string(max));
2885
2886 if (!verify_bucket_permission(this,
2887 s,
2888 list_versions ?
2889 rgw::IAM::s3ListBucketVersions :
2890 rgw::IAM::s3ListBucket)) {
2891 return -EACCES;
2892 }
2893
2894 return 0;
2895 }
2896
2897 int RGWListBucket::parse_max_keys()
2898 {
2899 // Bound the max value of max-keys to the configured value for security.
2900 // Bound the min value of max-keys to '0'.
2901 // Some S3 clients explicitly send max-keys=0 to detect if the bucket is
2902 // empty without listing any items.
2903 return parse_value_and_bound(max_keys, max, 0,
2904 g_conf().get_val<uint64_t>("rgw_max_listing_results"),
2905 default_max);
2906 }
2907
2908 void RGWListBucket::pre_exec()
2909 {
2910 rgw_bucket_object_pre_exec(s);
2911 }
2912
2913 void RGWListBucket::execute()
2914 {
2915 if (!s->bucket_exists) {
2916 op_ret = -ERR_NO_SUCH_BUCKET;
2917 return;
2918 }
2919
2920 if (allow_unordered && !delimiter.empty()) {
2921 ldpp_dout(this, 0) <<
2922 "ERROR: unordered bucket listing requested with a delimiter" << dendl;
2923 op_ret = -EINVAL;
2924 return;
2925 }
2926
2927 if (need_container_stats()) {
2928 op_ret = bucket->update_container_stats();
2929 }
2930
2931 RGWRados::Bucket target(store->getRados(), s->bucket_info);
2932 if (shard_id >= 0) {
2933 target.set_shard_id(shard_id);
2934 }
2935 RGWRados::Bucket::List list_op(&target);
2936
2937 list_op.params.prefix = prefix;
2938 list_op.params.delim = delimiter;
2939 list_op.params.marker = marker;
2940 list_op.params.end_marker = end_marker;
2941 list_op.params.list_versions = list_versions;
2942 list_op.params.allow_unordered = allow_unordered;
2943
2944 op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated, s->yield);
2945 if (op_ret >= 0) {
2946 next_marker = list_op.get_next_marker();
2947 }
2948 }
2949
2950 int RGWGetBucketLogging::verify_permission()
2951 {
2952 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
2953 }
2954
2955 int RGWGetBucketLocation::verify_permission()
2956 {
2957 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
2958 }
2959
2960 int RGWCreateBucket::verify_permission()
2961 {
2962 /* This check is mostly needed for S3, which doesn't support account ACLs.
2963 * Swift doesn't allow delegating any permission to an anonymous user,
2964 * so this becomes an early exit in that case. */
2965 if (s->auth.identity->is_anonymous()) {
2966 return -EACCES;
2967 }
2968
2969 rgw_bucket bucket;
2970 bucket.name = s->bucket_name;
2971 bucket.tenant = s->bucket_tenant;
2972 ARN arn = ARN(bucket);
2973 if (!verify_user_permission(this, s, arn, rgw::IAM::s3CreateBucket)) {
2974 return -EACCES;
2975 }
2976
2977 if (s->user->get_tenant() != s->bucket_tenant) {
2978 ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
2979 << " (user_id.tenant=" << s->user->get_tenant()
2980 << " requested=" << s->bucket_tenant << ")"
2981 << dendl;
2982 return -EACCES;
2983 }
2984 if (s->user->get_max_buckets() < 0) {
2985 return -EPERM;
2986 }
2987
2988 if (s->user->get_max_buckets()) {
2989 rgw::sal::RGWBucketList buckets;
2990 string marker;
2991 op_ret = rgw_read_user_buckets(store, s->user->get_id(), buckets,
2992 marker, string(), s->user->get_max_buckets(),
2993 false);
2994 if (op_ret < 0) {
2995 return op_ret;
2996 }
2997
2998 if ((int)buckets.count() >= s->user->get_max_buckets()) {
2999 return -ERR_TOO_MANY_BUCKETS;
3000 }
3001 }
3002
3003 return 0;
3004 }
3005
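// Replays a metadata-changing request against the master zonegroup's connection
// when the local zone is not the metadata master; the response (bounded by
// MAX_REST_RESPONSE) is optionally parsed as JSON into *jp for the caller.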
3006 int forward_request_to_master(struct req_state *s, obj_version *objv,
3007 rgw::sal::RGWRadosStore *store, bufferlist& in_data,
3008 JSONParser *jp, req_info *forward_info)
3009 {
3010 if (!store->svc()->zone->get_master_conn()) {
3011 ldpp_dout(s, 0) << "rest connection is invalid" << dendl;
3012 return -EINVAL;
3013 }
3014 ldpp_dout(s, 0) << "sending request to master zonegroup" << dendl;
3015 bufferlist response;
3016 string uid_str = s->user->get_id().to_str();
3017 #define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
3018 int ret = store->svc()->zone->get_master_conn()->forward(rgw_user(uid_str), (forward_info ? *forward_info : s->info),
3019 objv, MAX_REST_RESPONSE, &in_data, &response);
3020 if (ret < 0)
3021 return ret;
3022
3023 ldpp_dout(s, 20) << "response: " << response.c_str() << dendl;
3024 if (jp && !jp->parse(response.c_str(), response.length())) {
3025 ldpp_dout(s, 0) << "failed parsing response from master zonegroup" << dendl;
3026 return -EINVAL;
3027 }
3028
3029 return 0;
3030 }
3031
3032 void RGWCreateBucket::pre_exec()
3033 {
3034 rgw_bucket_object_pre_exec(s);
3035 }
3036
3037 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
3038 map<string, bufferlist>& out_attrs,
3039 map<string, bufferlist>& out_rmattrs)
3040 {
3041 for (const auto& kv : orig_attrs) {
3042 const string& name = kv.first;
3043
3044 /* Check if the attr is a user-defined metadata item. */
3045 if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
3046 RGW_ATTR_META_PREFIX) == 0) {
3047 /* For objects, all existing meta attrs have to be removed. */
3048 out_rmattrs[name] = kv.second;
3049 } else if (out_attrs.find(name) == std::end(out_attrs)) {
3050 out_attrs[name] = kv.second;
3051 }
3052 }
3053 }
3054
3055 /* Fuse resource metadata based on the original attributes in @orig_attrs, the
3056 * set of _custom_ attribute names to remove in @rmattr_names and the attributes
3057 * in @out_attrs. Place the results in @out_attrs.
3058 *
3059 * NOTE: it's assumed that all special attrs already present in @out_attrs
3060 * will be preserved without any change. Special attributes are those whose
3061 * names do not start with RGW_ATTR_META_PREFIX; they complement the custom
3062 * ones used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta-* and so on. */
3063 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
3064 const set<string>& rmattr_names,
3065 map<string, bufferlist>& out_attrs)
3066 {
3067 for (const auto& kv : orig_attrs) {
3068 const string& name = kv.first;
3069
3070 /* Check if the attr is a user-defined metadata item. */
3071 if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
3072 RGW_ATTR_META_PREFIX) == 0) {
3073 /* For buckets, all existing meta attrs are preserved,
3074 except those that are listed in rmattr_names. */
3075 if (rmattr_names.find(name) != std::end(rmattr_names)) {
3076 const auto aiter = out_attrs.find(name);
3077
3078 if (aiter != std::end(out_attrs)) {
3079 out_attrs.erase(aiter);
3080 }
3081 } else {
3082 /* emplace() won't alter the map if the key is already present.
3083 * This behaviour is fully intentional here. */
3084 out_attrs.emplace(kv);
3085 }
3086 } else if (out_attrs.find(name) == std::end(out_attrs)) {
3087 out_attrs[name] = kv.second;
3088 }
3089 }
3090 }
3091
3092
3093 static void populate_with_generic_attrs(const req_state * const s,
3094 map<string, bufferlist>& out_attrs)
3095 {
3096 for (const auto& kv : s->generic_attrs) {
3097 bufferlist& attrbl = out_attrs[kv.first];
3098 const string& val = kv.second;
3099 attrbl.clear();
3100 attrbl.append(val.c_str(), val.size() + 1);
3101 }
3102 }
3103
3104
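// Extract quota limits (max objects / max size) from the incoming attribute map
// into @quota, consuming the corresponding attrs; names listed in @rmattr_names
// reset the matching limit back to unlimited (-1).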
3105 static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
3106 const std::set<std::string>& rmattr_names,
3107 RGWQuotaInfo& quota,
3108 bool * quota_extracted = nullptr)
3109 {
3110 bool extracted = false;
3111
3112 /* Put new limit on max objects. */
3113 auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
3114 std::string err;
3115 if (std::end(add_attrs) != iter) {
3116 quota.max_objects =
3117 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
3118 if (!err.empty()) {
3119 return -EINVAL;
3120 }
3121 add_attrs.erase(iter);
3122 extracted = true;
3123 }
3124
3125 /* Put new limit on bucket (container) size. */
3126 iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
3127 if (iter != add_attrs.end()) {
3128 quota.max_size =
3129 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
3130 if (!err.empty()) {
3131 return -EINVAL;
3132 }
3133 add_attrs.erase(iter);
3134 extracted = true;
3135 }
3136
3137 for (const auto& name : rmattr_names) {
3138 /* Remove limit on max objects. */
3139 if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
3140 quota.max_objects = -1;
3141 extracted = true;
3142 }
3143
3144 /* Remove limit on max bucket size. */
3145 if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
3146 quota.max_size = -1;
3147 extracted = true;
3148 }
3149 }
3150
3151 /* Swift requires checking the raw usage instead of the 4 KiB-rounded one. */
3152 quota.check_on_raw = true;
3153 quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
3154
3155 if (quota_extracted) {
3156 *quota_extracted = extracted;
3157 }
3158
3159 return 0;
3160 }
3161
3162
3163 static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
3164 const std::set<std::string>& rmattr_names,
3165 RGWBucketWebsiteConf& ws_conf)
3166 {
3167 std::string lstval;
3168
3169 /* Let's define a mapping between each custom attribute and the memory where
3170 * the attribute's value should be stored. The memory location is expressed by
3171 * a non-const reference. */
3172 const auto mapping = {
3173 std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
3174 std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
3175 std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
3176 std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
3177 std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
3178 };
3179
3180 for (const auto& kv : mapping) {
3181 const char * const key = kv.first;
3182 auto& target = kv.second;
3183
3184 auto iter = add_attrs.find(key);
3185
3186 if (std::end(add_attrs) != iter) {
3187 /* The "target" is a reference to ws_conf. */
3188 target = iter->second.c_str();
3189 add_attrs.erase(iter);
3190 }
3191
3192 if (rmattr_names.count(key)) {
3193 target = std::string();
3194 }
3195 }
3196
3197 if (! lstval.empty()) {
3198 ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
3199 }
3200 }
3201
3202
3203 void RGWCreateBucket::execute()
3204 {
3205 RGWAccessControlPolicy old_policy(s->cct);
3206 buffer::list aclbl;
3207 buffer::list corsbl;
3208 bool existed;
3209 string bucket_name = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name);
3210 rgw_raw_obj obj(store->svc()->zone->get_zone_params().domain_root, bucket_name);
3211 obj_version objv, *pobjv = NULL;
3212
3213 op_ret = get_params();
3214 if (op_ret < 0)
3215 return;
3216
3217 if (!relaxed_region_enforcement &&
3218 !location_constraint.empty() &&
3219 !store->svc()->zone->has_zonegroup_api(location_constraint)) {
3220 ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
3221 << " can't be found." << dendl;
3222 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
3223 s->err.message = "The specified location-constraint is not valid";
3224 return;
3225 }
3226
3227 if (!relaxed_region_enforcement && !store->svc()->zone->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
3228 store->svc()->zone->get_zonegroup().api_name != location_constraint) {
3229 ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
3230 << " doesn't match zonegroup" << " (" << store->svc()->zone->get_zonegroup().api_name << ")"
3231 << dendl;
3232 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
3233 s->err.message = "The specified location-constraint is not valid";
3234 return;
3235 }
3236
3237 const auto& zonegroup = store->svc()->zone->get_zonegroup();
3238 if (!placement_rule.name.empty() &&
3239 !zonegroup.placement_targets.count(placement_rule.name)) {
3240 ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")"
3241 << " doesn't exist in the placement targets of zonegroup"
3242 << " (" << store->svc()->zone->get_zonegroup().api_name << ")" << dendl;
3243 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
3244 s->err.message = "The specified placement target does not exist";
3245 return;
3246 }
3247
3248 /* we need to make sure we read the bucket info; it hasn't been read yet
3249 * for this specific request */
3250 s->bucket.tenant = s->bucket_tenant;
3251 s->bucket.name = s->bucket_name;
3252 rgw::sal::RGWBucket* bucket = NULL;
3253 op_ret = store->get_bucket(*s->user, s->bucket, &bucket);
3254 if (op_ret < 0 && op_ret != -ENOENT)
3255 return;
3256 s->bucket_exists = (op_ret != -ENOENT);
3257
3258 s->bucket_owner.set_id(s->user->get_id());
3259 s->bucket_owner.set_name(s->user->get_display_name());
3260 if (s->bucket_exists) {
3261 s->bucket_info = bucket->get_info();
3262 s->bucket_attrs = bucket->get_attrs();
3263 delete bucket;
3264 int r = rgw_op_get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
3265 s->bucket_attrs, &old_policy);
3266 if (r >= 0) {
3267 if (old_policy.get_owner().get_id().compare(s->user->get_id()) != 0) {
3268 op_ret = -EEXIST;
3269 return;
3270 }
3271 }
3272 }
3273
3274 RGWBucketInfo master_info;
3275 rgw_bucket *pmaster_bucket;
3276 uint32_t *pmaster_num_shards;
3277 real_time creation_time;
3278
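  // on non-master zones the bucket creation is first forwarded to the metadata
  // master; the entry-point/instance versions, creation time and shard count it
  // returns are reused locally so both sites stay in agreement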
3279 if (!store->svc()->zone->is_meta_master()) {
3280 JSONParser jp;
3281 op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
3282 if (op_ret < 0) {
3283 return;
3284 }
3285
3286 JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
3287 JSONDecoder::decode_json("object_ver", objv, &jp);
3288 JSONDecoder::decode_json("bucket_info", master_info, &jp);
3289 ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
3290 ldpp_dout(this, 20) << "got creation time: << " << master_info.creation_time << dendl;
3291 pmaster_bucket= &master_info.bucket;
3292 creation_time = master_info.creation_time;
3293 pmaster_num_shards = &master_info.num_shards;
3294 pobjv = &objv;
3295 obj_lock_enabled = master_info.obj_lock_enabled();
3296 } else {
3297 pmaster_bucket = NULL;
3298 pmaster_num_shards = NULL;
3299 }
3300
3301 string zonegroup_id;
3302
3303 if (s->system_request) {
3304 zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
3305 if (zonegroup_id.empty()) {
3306 zonegroup_id = store->svc()->zone->get_zonegroup().get_id();
3307 }
3308 } else {
3309 zonegroup_id = store->svc()->zone->get_zonegroup().get_id();
3310 }
3311
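  // re-creating an existing bucket is only tolerated when the requested
  // placement resolves to the bucket's current placement rule; otherwise the
  // request is rejected with EEXIST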
3312 if (s->bucket_exists) {
3313 rgw_placement_rule selected_placement_rule;
3314 rgw_bucket bucket;
3315 bucket.tenant = s->bucket_tenant;
3316 bucket.name = s->bucket_name;
3317 op_ret = store->svc()->zone->select_bucket_placement(s->user->get_info(),
3318 zonegroup_id,
3319 placement_rule,
3320 &selected_placement_rule, nullptr);
3321 if (selected_placement_rule != s->bucket_info.placement_rule) {
3322 op_ret = -EEXIST;
3323 return;
3324 }
3325 }
3326
3327 /* Encode special metadata first as we're using std::map::emplace under
3328 * the hood. This method will add the new items only if the map doesn't
3329 * contain such keys yet. */
3330 policy.encode(aclbl);
3331 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3332
3333 if (has_cors) {
3334 cors_config.encode(corsbl);
3335 emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
3336 }
3337
3338 RGWQuotaInfo quota_info;
3339 const RGWQuotaInfo * pquota_info = nullptr;
3340 if (need_metadata_upload()) {
3341 /* It's assumed that the following functions WILL NOT change any special
3342 * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
3343 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
3344 if (op_ret < 0) {
3345 return;
3346 }
3347 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
3348 populate_with_generic_attrs(s, attrs);
3349
3350 op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
3351 if (op_ret < 0) {
3352 return;
3353 } else {
3354 pquota_info = &quota_info;
3355 }
3356
3357 /* Web site of Swift API. */
3358 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
3359 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
3360 }
3361
3362 s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
3363 s->bucket.name = s->bucket_name;
3364
3365 /* Handle updates of the metadata for Swift's object versioning. */
3366 if (swift_ver_location) {
3367 s->bucket_info.swift_ver_location = *swift_ver_location;
3368 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
3369 }
3370 if (obj_lock_enabled) {
3371 info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
3372 }
3373
3374
3375 op_ret = store->getRados()->create_bucket(s->user->get_info(), s->bucket, zonegroup_id,
3376 placement_rule, s->bucket_info.swift_ver_location,
3377 pquota_info, attrs,
3378 info, pobjv, &ep_objv, creation_time,
3379 pmaster_bucket, pmaster_num_shards, true);
3380 /* continue on EEXIST so we can recover from a partial create by retrying it;
3381 * the existing-bucket handling below reports ERR_BUCKET_EXISTS when needed. */
3382 ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;
3383
3384 if (op_ret && op_ret != -EEXIST)
3385 return;
3386
3387 existed = (op_ret == -EEXIST);
3388
3389 if (existed) {
3390 /* the bucket already existed; we might have raced with another bucket
3391 * creation, or this might be a partial bucket creation that never completed.
3392 * Read the existing bucket info and verify that the reported bucket owner is
3393 * the current user. If all is ok then update the user's list of buckets.
3394 * Otherwise inform the client about a name conflict.
3395 */
3396 if (info.owner.compare(s->user->get_id()) != 0) {
3397 op_ret = -EEXIST;
3398 return;
3399 }
3400 s->bucket = info.bucket;
3401 }
3402
3403 op_ret = store->ctl()->bucket->link_bucket(s->user->get_id(), s->bucket,
3404 info.creation_time, s->yield, false);
3405 if (op_ret && !existed && op_ret != -EEXIST) {
3406 /* if it exists (or previously existed), don't remove it! */
3407 op_ret = store->ctl()->bucket->unlink_bucket(s->user->get_id(), s->bucket, s->yield);
3408 if (op_ret < 0) {
3409 ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
3410 << dendl;
3411 }
3412 } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
3413 op_ret = -ERR_BUCKET_EXISTS;
3414 }
3415
3416 if (need_metadata_upload() && existed) {
3417 /* OK, it looks like we lost the race with another request. As we're required
3418 * to handle metadata fusion and upload, the whole operation becomes very
3419 * similar in nature to PutMetadataBucket. However, as the attrs may have
3420 * changed in the meantime, we have to refresh. */
3421 short tries = 0;
3422 do {
3423 RGWBucketInfo binfo;
3424 map<string, bufferlist> battrs;
3425
3426 op_ret = store->getRados()->get_bucket_info(store->svc(), s->bucket_tenant, s->bucket_name,
3427 binfo, nullptr, s->yield, &battrs);
3428 if (op_ret < 0) {
3429 return;
3430 } else if (binfo.owner.compare(s->user->get_id()) != 0) {
3431 /* New bucket doesn't belong to the account we're operating on. */
3432 op_ret = -EEXIST;
3433 return;
3434 } else {
3435 s->bucket_info = binfo;
3436 s->bucket_attrs = battrs;
3437 }
3438
3439 attrs.clear();
3440
3441 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
3442 if (op_ret < 0) {
3443 return;
3444 }
3445 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
3446 populate_with_generic_attrs(s, attrs);
3447 op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
3448 if (op_ret < 0) {
3449 return;
3450 }
3451
3452 /* Handle updates of the metadata for Swift's object versioning. */
3453 if (swift_ver_location) {
3454 s->bucket_info.swift_ver_location = *swift_ver_location;
3455 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
3456 }
3457
3458 /* Web site of Swift API. */
3459 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
3460 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
3461
3462 /* This will also set the quota on the bucket. */
3463 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
3464 &s->bucket_info.objv_tracker,
3465 s->yield);
3466 } while (op_ret == -ECANCELED && tries++ < 20);
3467
3468 /* Restore the proper return code. */
3469 if (op_ret >= 0) {
3470 op_ret = -ERR_BUCKET_EXISTS;
3471 }
3472 }
3473 }
3474
3475 int RGWDeleteBucket::verify_permission()
3476 {
3477 if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucket)) {
3478 return -EACCES;
3479 }
3480
3481 return 0;
3482 }
3483
3484 void RGWDeleteBucket::pre_exec()
3485 {
3486 rgw_bucket_object_pre_exec(s);
3487 }
3488
3489 void RGWDeleteBucket::execute()
3490 {
3491 if (s->bucket_name.empty()) {
3492 op_ret = -EINVAL;
3493 return;
3494 }
3495
3496 if (!s->bucket_exists) {
3497 ldpp_dout(this, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
3498 op_ret = -ERR_NO_SUCH_BUCKET;
3499 return;
3500 }
3501 RGWObjVersionTracker ot;
3502 ot.read_version = s->bucket_ep_objv;
3503
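  // system (multisite) requests may pin the exact bucket entry-point version to
  // delete via the RGW_SYS_PARAM_PREFIX "tag"/"ver" args; the version tracker
  // then guards the actual removal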
3504 if (s->system_request) {
3505 string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
3506 string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
3507 if (!tag.empty()) {
3508 ot.read_version.tag = tag;
3509 uint64_t ver;
3510 string err;
3511 ver = strict_strtol(ver_str.c_str(), 10, &err);
3512 if (!err.empty()) {
3513 ldpp_dout(this, 0) << "failed to parse ver param" << dendl;
3514 op_ret = -EINVAL;
3515 return;
3516 }
3517 ot.read_version.ver = ver;
3518 }
3519 }
3520
3521 op_ret = store->ctl()->bucket->sync_user_stats(s->user->get_id(), s->bucket_info);
3522 if ( op_ret < 0) {
3523 ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
3524 }
3525
3526 op_ret = store->getRados()->check_bucket_empty(s->bucket_info, s->yield);
3527 if (op_ret < 0) {
3528 return;
3529 }
3530
3531 if (!store->svc()->zone->is_meta_master()) {
3532 bufferlist in_data;
3533 op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
3534 NULL);
3535 if (op_ret < 0) {
3536 if (op_ret == -ENOENT) {
3537 /* adjust error, we want to return with NoSuchBucket and not
3538 * NoSuchKey */
3539 op_ret = -ERR_NO_SUCH_BUCKET;
3540 }
3541 return;
3542 }
3543 }
3544
3545 string prefix, delimiter;
3546
3547 if (s->prot_flags & RGW_REST_SWIFT) {
3548 string path_args;
3549 path_args = s->info.args.get("path");
3550 if (!path_args.empty()) {
3551 if (!delimiter.empty() || !prefix.empty()) {
3552 op_ret = -EINVAL;
3553 return;
3554 }
3555 prefix = path_args;
3556 delimiter="/";
3557 }
3558 }
3559
3560 op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);
3561
3562 if (op_ret < 0) {
3563 return;
3564 }
3565
3566 op_ret = store->getRados()->delete_bucket(s->bucket_info, ot, s->yield, false);
3567
3568 if (op_ret == -ECANCELED) {
3569 // lost a race, either with mdlog sync or another delete bucket operation.
3570 // in either case, we've already called ctl.bucket->unlink_bucket()
3571 op_ret = 0;
3572 return;
3573 }
3574
3575 if (op_ret == 0) {
3576 op_ret = store->ctl()->bucket->unlink_bucket(s->bucket_info.owner,
3577 s->bucket, s->yield, false);
3578 if (op_ret < 0) {
3579 ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
3580 << dendl;
3581 }
3582 }
3583 }
3584
3585 int RGWPutObj::verify_permission()
3586 {
3587 if (! copy_source.empty()) {
3588
3589 RGWAccessControlPolicy cs_acl(s->cct);
3590 boost::optional<Policy> policy;
3591 map<string, bufferlist> cs_attrs;
3592 rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
3593 rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);
3594
3595 rgw_obj obj(cs_bucket, cs_object);
3596 store->getRados()->set_atomic(s->obj_ctx, obj);
3597 store->getRados()->set_prefetch_data(s->obj_ctx, obj);
3598
3599 /* check source object permissions */
3600 if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr,
3601 policy, cs_bucket, cs_object) < 0) {
3602 return -EACCES;
3603 }
3604
3605 /* admin request overrides permission checks */
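  // an explicit Deny from either the IAM user policies or the source bucket
  // policy rejects reading the copy source; an Allow from either skips the
  // fall-back ACL check below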
3606 if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
3607 if (policy || ! s->iam_user_policies.empty()) {
3608 auto usr_policy_res = Effect::Pass;
3609 for (auto& user_policy : s->iam_user_policies) {
3610 if (usr_policy_res = user_policy.eval(s->env, *s->auth.identity,
3611 cs_object.instance.empty() ?
3612 rgw::IAM::s3GetObject :
3613 rgw::IAM::s3GetObjectVersion,
3614 rgw::ARN(obj)); usr_policy_res == Effect::Deny)
3615 return -EACCES;
3616 else if (usr_policy_res == Effect::Allow)
3617 break;
3618 }
3619 rgw::IAM::Effect e = Effect::Pass;
3620 if (policy) {
3621 e = policy->eval(s->env, *s->auth.identity,
3622 cs_object.instance.empty() ?
3623 rgw::IAM::s3GetObject :
3624 rgw::IAM::s3GetObjectVersion,
3625 rgw::ARN(obj));
3626 }
3627 if (e == Effect::Deny) {
3628 return -EACCES;
3629 } else if (usr_policy_res == Effect::Pass && e == Effect::Pass &&
3630 !cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
3631 RGW_PERM_READ)) {
3632 return -EACCES;
3633 }
3634 } else if (!cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
3635 RGW_PERM_READ)) {
3636 return -EACCES;
3637 }
3638 }
3639 }
3640
3641 if (s->bucket_access_conf && s->bucket_access_conf->block_public_acls()) {
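  // with BlockPublicAcls set, reject the upload only when it explicitly
  // requests a public canned ACL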
3642 if (s->canned_acl.compare("public-read") == 0 ||
3643 s->canned_acl.compare("public-read-write") == 0 ||
3644 s->canned_acl.compare("authenticated-read") == 0)
3645 return -EACCES;
3646 }
3647
3648 auto op_ret = get_params();
3649 if (op_ret < 0) {
3650 ldpp_dout(this, 20) << "get_params() returned ret=" << op_ret << dendl;
3651 return op_ret;
3652 }
3653
3654 if (s->iam_policy || ! s->iam_user_policies.empty()) {
3655 rgw_add_grant_to_iam_environment(s->env, s);
3656
3657 rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);
3658
3659 if (obj_tags != nullptr && obj_tags->count() > 0){
3660 auto tags = obj_tags->get_tags();
3661 for (const auto& kv: tags){
3662 rgw_add_to_iam_environment(s->env, "s3:RequestObjectTag/"+kv.first, kv.second);
3663 }
3664 }
3665
3666 constexpr auto encrypt_attr = "x-amz-server-side-encryption";
3667 constexpr auto s3_encrypt_attr = "s3:x-amz-server-side-encryption";
3668 auto enc_header = s->info.x_meta_map.find(encrypt_attr);
3669 if (enc_header != s->info.x_meta_map.end()){
3670 rgw_add_to_iam_environment(s->env, s3_encrypt_attr, enc_header->second);
3671 }
3672
3673 constexpr auto kms_attr = "x-amz-server-side-encryption-aws-kms-key-id";
3674 constexpr auto s3_kms_attr = "s3:x-amz-server-side-encryption-aws-kms-key-id";
3675 auto kms_header = s->info.x_meta_map.find(kms_attr);
3676 if (kms_header != s->info.x_meta_map.end()){
3677 rgw_add_to_iam_environment(s->env, s3_kms_attr, kms_header->second);
3678 }
3679
3680 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
3681 boost::none,
3682 rgw::IAM::s3PutObject,
3683 rgw_obj(s->bucket, s->object));
3684 if (usr_policy_res == Effect::Deny)
3685 return -EACCES;
3686
3687 rgw::IAM::Effect e = Effect::Pass;
3688 if (s->iam_policy) {
3689 e = s->iam_policy->eval(s->env, *s->auth.identity,
3690 rgw::IAM::s3PutObject,
3691 rgw_obj(s->bucket, s->object));
3692 }
3693 if (e == Effect::Allow) {
3694 return 0;
3695 } else if (e == Effect::Deny) {
3696 return -EACCES;
3697 } else if (usr_policy_res == Effect::Allow) {
3698 return 0;
3699 }
3700 }
3701
3702 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
3703 return -EACCES;
3704 }
3705
3706 return 0;
3707 }
3708
3709
3710 void RGWPutObj::pre_exec()
3711 {
3712 rgw_bucket_object_pre_exec(s);
3713 }
3714
3715 class RGWPutObj_CB : public RGWGetObj_Filter
3716 {
3717 RGWPutObj *op;
3718 public:
3719 explicit RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
3720 ~RGWPutObj_CB() override {}
3721
3722 int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
3723 return op->get_data_cb(bl, bl_ofs, bl_len);
3724 }
3725 };
3726
3727 int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3728 {
3729 bufferlist bl_tmp;
3730 bl.begin(bl_ofs).copy(bl_len, bl_tmp);
3731
3732 bl_aux.append(bl_tmp);
3733
3734 return bl_len;
3735 }
3736
3737 int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3738 {
3739 RGWPutObj_CB cb(this);
3740 RGWGetObj_Filter* filter = &cb;
3741 boost::optional<RGWGetObj_Decompress> decompress;
3742 std::unique_ptr<RGWGetObj_Filter> decrypt;
3743 RGWCompressionInfo cs_info;
3744 map<string, bufferlist> attrs;
3745 map<string, bufferlist>::iterator attr_iter;
3746 int ret = 0;
3747
3748 uint64_t obj_size;
3749 int64_t new_ofs, new_end;
3750
3751 new_ofs = fst;
3752 new_end = lst;
3753
3754 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3755 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3756
3757 RGWRados::Object op_target(store->getRados(), copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3758 RGWRados::Object::Read read_op(&op_target);
3759 read_op.params.obj_size = &obj_size;
3760 read_op.params.attrs = &attrs;
3761
3762 ret = read_op.prepare(s->yield);
3763 if (ret < 0)
3764 return ret;
3765
3766 bool need_decompress;
3767 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3768 if (op_ret < 0) {
3769 ldpp_dout(s, 0) << "ERROR: failed to decode compression info" << dendl;
3770 return -EIO;
3771 }
3772
3773 bool partial_content = true;
3774 if (need_decompress)
3775 {
3776 obj_size = cs_info.orig_size;
3777 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3778 filter = &*decompress;
3779 }
3780
3781 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3782 op_ret = this->get_decrypt_filter(&decrypt,
3783 filter,
3784 attrs,
3785 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3786 if (decrypt != nullptr) {
3787 filter = decrypt.get();
3788 }
3789 if (op_ret < 0) {
3790 return op_ret;
3791 }
3792
3793 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3794 if (ret < 0)
3795 return ret;
3796
3797 filter->fixup_range(new_ofs, new_end);
3798 ret = read_op.iterate(new_ofs, new_end, filter, s->yield);
3799
3800 if (ret >= 0)
3801 ret = filter->flush();
3802
3803 bl.claim_append(bl_aux);
3804
3805 return ret;
3806 }
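// Data read back from the copy source flows through the chain assembled above:
//   read_op.iterate() -> (decrypt) -> (decompress) -> RGWPutObj_CB::handle_data()
// and is accumulated in bl_aux, which is finally claimed into the caller's bl.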
3807
3808 // special handling for compression type = "random" with multipart uploads
3809 static CompressorRef get_compressor_plugin(const req_state *s,
3810 const std::string& compression_type)
3811 {
3812 if (compression_type != "random") {
3813 return Compressor::create(s->cct, compression_type);
3814 }
3815
3816 bool is_multipart{false};
3817 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3818
3819 if (!is_multipart) {
3820 return Compressor::create(s->cct, compression_type);
3821 }
3822
3823 // use a hash of the multipart upload id so all parts use the same plugin
3824 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3825 if (alg == Compressor::COMP_ALG_NONE) {
3826 return nullptr;
3827 }
3828 return Compressor::create(s->cct, alg);
3829 }
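// Sketch of the "random" selection above: for a fixed upload id the expression
//   std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST
// always yields the same algorithm index, so e.g. part 1 and part 7 of the same
// multipart upload end up compressed with the same plugin (or left uncompressed
// if the hash lands on COMP_ALG_NONE).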
3830
3831 void RGWPutObj::execute()
3832 {
3833 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3834 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3835 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3836 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3837 MD5 hash;
3838 bufferlist bl, aclbl, bs;
3839 int len;
3840
3841 off_t fst;
3842 off_t lst;
3843
3844 bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
3845 perfcounter->inc(l_rgw_put);
3846 // report latency on return
3847 auto put_lat = make_scope_guard([&] {
3848 perfcounter->tinc(l_rgw_put_lat, s->time_elapsed());
3849 });
3850
3851 op_ret = -EINVAL;
3852 if (s->object.empty()) {
3853 return;
3854 }
3855
3856 if (!s->bucket_exists) {
3857 op_ret = -ERR_NO_SUCH_BUCKET;
3858 return;
3859 }
3860
3861
3862 op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
3863 if (op_ret < 0) {
3864 ldpp_dout(this, 20) << "get_system_versioning_params() returned ret="
3865 << op_ret << dendl;
3866 return;
3867 }
3868
3869 if (supplied_md5_b64) {
3870 need_calc_md5 = true;
3871
3872 ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3873 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3874 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3875 ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl;
3876 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3877 op_ret = -ERR_INVALID_DIGEST;
3878 return;
3879 }
3880
3881 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
3882 ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
3883 }
3884
3885 if (!chunked_upload) { /* with chunked upload we don't know how big the upload will be.
3886 we also check sizes at the end anyway */
3887 op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
3888 user_quota, bucket_quota, s->content_length);
3889 if (op_ret < 0) {
3890 ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl;
3891 return;
3892 }
3893 }
3894
3895 if (supplied_etag) {
3896 strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
3897 supplied_md5[sizeof(supplied_md5) - 1] = '\0';
3898 }
3899
3900 const bool multipart = !multipart_upload_id.empty();
3901 auto& obj_ctx = *static_cast<RGWObjectCtx*>(s->obj_ctx);
3902 rgw_obj obj{s->bucket, s->object};
3903
3904 /* Handle object versioning of the Swift API. */
3905 if (! multipart) {
3906 op_ret = store->getRados()->swift_versioning_copy(obj_ctx,
3907 s->bucket_owner.get_id(),
3908 s->bucket_info,
3909 obj,
3910 this,
3911 s->yield);
3912 if (op_ret < 0) {
3913 return;
3914 }
3915 }
3916
3917 // create the object processor
3918 auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
3919 s->yield);
3920 using namespace rgw::putobj;
3921 constexpr auto max_processor_size = std::max({sizeof(MultipartObjectProcessor),
3922 sizeof(AtomicObjectProcessor),
3923 sizeof(AppendObjectProcessor)});
3924 ceph::static_ptr<ObjectProcessor, max_processor_size> processor;
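// ceph::static_ptr keeps the chosen processor in inline storage, so its
// capacity must cover the largest of the three concrete processor types
// emplaced below (hence the std::max over their sizes).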
3925
3926 rgw_placement_rule *pdest_placement;
3927
3928 multipart_upload_info upload_info;
3929 if (multipart) {
3930 RGWMPObj mp(s->object.name, multipart_upload_id);
3931
3932 op_ret = get_multipart_info(store, s, mp.get_meta(), nullptr, nullptr, &upload_info);
3933 if (op_ret < 0) {
3934 if (op_ret != -ENOENT) {
3935 ldpp_dout(this, 0) << "ERROR: get_multipart_info returned " << op_ret << ": " << cpp_strerror(-op_ret) << dendl;
3936 } else {// -ENOENT: raced with upload complete/cancel, no need to spam log
3937 ldpp_dout(this, 20) << "failed to get multipart info (returned " << op_ret << ": " << cpp_strerror(-op_ret) << "): probably raced with upload complete / cancel" << dendl;
3938 }
3939 return;
3940 }
3941 pdest_placement = &upload_info.dest_placement;
3942 ldpp_dout(this, 20) << "dest_placement for part=" << upload_info.dest_placement << dendl;
3943 processor.emplace<MultipartObjectProcessor>(
3944 &*aio, store, s->bucket_info, pdest_placement,
3945 s->owner.get_id(), obj_ctx, obj,
3946 multipart_upload_id, multipart_part_num, multipart_part_str,
3947 this, s->yield);
3948 } else if (append) {
3949 if (s->bucket_info.versioned()) {
3950 op_ret = -ERR_INVALID_BUCKET_STATE;
3951 return;
3952 }
3953 pdest_placement = &s->dest_placement;
3954 processor.emplace<AppendObjectProcessor>(
3955 &*aio, store, s->bucket_info, pdest_placement, s->bucket_owner.get_id(), obj_ctx, obj,
3956 s->req_id, position, &cur_accounted_size, this, s->yield);
3957 } else {
3958 if (s->bucket_info.versioning_enabled()) {
3959 if (!version_id.empty()) {
3960 obj.key.set_instance(version_id);
3961 } else {
3962 store->getRados()->gen_rand_obj_instance_name(&obj);
3963 version_id = obj.key.instance;
3964 }
3965 }
3966 pdest_placement = &s->dest_placement;
3967 processor.emplace<AtomicObjectProcessor>(
3968 &*aio, store, s->bucket_info, pdest_placement,
3969 s->bucket_owner.get_id(), obj_ctx, obj, olh_epoch,
3970 s->req_id, this, s->yield);
3971 }
3972
3973 op_ret = processor->prepare(s->yield);
3974 if (op_ret < 0) {
3975 ldpp_dout(this, 20) << "processor->prepare() returned ret=" << op_ret
3976 << dendl;
3977 return;
3978 }
3979
3980 if ((! copy_source.empty()) && !copy_source_range) {
3981 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3982 rgw_obj obj(copy_source_bucket_info.bucket, obj_key.name);
3983
3984 RGWObjState *astate;
3985 op_ret = store->getRados()->get_obj_state(&obj_ctx, copy_source_bucket_info, obj,
3986 &astate, true, s->yield, false);
3987 if (op_ret < 0) {
3988 ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
3989 return;
3990 }
3991 if (!astate->exists){
3992 op_ret = -ENOENT;
3993 return;
3994 }
3995 lst = astate->accounted_size - 1;
3996 } else {
3997 lst = copy_source_range_lst;
3998 }
3999
4000 fst = copy_source_range_fst;
4001
4002 // no filters by default
4003 DataProcessor *filter = processor.get();
4004
4005 const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(*pdest_placement);
4006 CompressorRef plugin;
4007 boost::optional<RGWPutObj_Compress> compressor;
4008
4009 std::unique_ptr<DataProcessor> encrypt;
4010
4011 if (!append) { // compression and encryption only apply to full object uploads
4012 op_ret = get_encrypt_filter(&encrypt, filter);
4013 if (op_ret < 0) {
4014 return;
4015 }
4016 if (encrypt != nullptr) {
4017 filter = &*encrypt;
4018 } else if (compression_type != "none") {
4019 plugin = get_compressor_plugin(s, compression_type);
4020 if (!plugin) {
4021 ldpp_dout(this, 1) << "Cannot load plugin for compression type "
4022 << compression_type << dendl;
4023 } else {
4024 compressor.emplace(s->cct, plugin, filter);
4025 filter = &*compressor;
4026 }
4027 }
4028 }
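// At this point the upload pipeline is one of:
//   client data -> processor                 (append, or no SSE and no compression)
//   client data -> encrypt -> processor      (SSE requested)
//   client data -> compressor -> processor   (zone compression, no SSE)
// Encryption takes precedence over compression here, since compressing
// ciphertext would gain little.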
4029 tracepoint(rgw_op, before_data_transfer, s->req_id.c_str());
4030 do {
4031 bufferlist data;
4032 if (fst > lst)
4033 break;
4034 if (copy_source.empty()) {
4035 len = get_data(data);
4036 } else {
4037 uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
4038 op_ret = get_data(fst, cur_lst, data);
4039 if (op_ret < 0)
4040 return;
4041 len = data.length();
4042 s->content_length += len;
4043 fst += len;
4044 }
4045 if (len < 0) {
4046 op_ret = len;
4047 ldpp_dout(this, 20) << "get_data() returned ret=" << op_ret << dendl;
4048 return;
4049 } else if (len == 0) {
4050 break;
4051 }
4052
4053 if (need_calc_md5) {
4054 hash.Update((const unsigned char *)data.c_str(), data.length());
4055 }
4056
4057 /* update torrent */
4058 torrent.update(data);
4059
4060 op_ret = filter->process(std::move(data), ofs);
4061 if (op_ret < 0) {
4062 ldpp_dout(this, 20) << "processor->process() returned ret="
4063 << op_ret << dendl;
4064 return;
4065 }
4066
4067 ofs += len;
4068 } while (len > 0);
4069 tracepoint(rgw_op, after_data_transfer, s->req_id.c_str(), ofs);
4070
4071 // flush any data in filters
4072 op_ret = filter->process({}, ofs);
4073 if (op_ret < 0) {
4074 return;
4075 }
4076
4077 if (!chunked_upload && ofs != s->content_length) {
4078 op_ret = -ERR_REQUEST_TIMEOUT;
4079 return;
4080 }
4081 s->obj_size = ofs;
4082
4083 perfcounter->inc(l_rgw_put_b, s->obj_size);
4084
4085 op_ret = do_aws4_auth_completion();
4086 if (op_ret < 0) {
4087 return;
4088 }
4089
4090 op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
4091 user_quota, bucket_quota, s->obj_size);
4092 if (op_ret < 0) {
4093 ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
4094 return;
4095 }
4096
4097 hash.Final(m);
4098
4099 if (compressor && compressor->is_compressed()) {
4100 bufferlist tmp;
4101 RGWCompressionInfo cs_info;
4102 cs_info.compression_type = plugin->get_type_name();
4103 cs_info.orig_size = s->obj_size;
4104 cs_info.blocks = move(compressor->get_compression_blocks());
4105 encode(cs_info, tmp);
4106 attrs[RGW_ATTR_COMPRESSION] = tmp;
4107 ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION
4108 << " with type=" << cs_info.compression_type
4109 << ", orig_size=" << cs_info.orig_size
4110 << ", blocks=" << cs_info.blocks.size() << dendl;
4111 }
4112
4113 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
4114
4115 etag = calc_md5;
4116
4117 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
4118 op_ret = -ERR_BAD_DIGEST;
4119 return;
4120 }
4121
4122 policy.encode(aclbl);
4123 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
4124
4125 if (dlo_manifest) {
4126 op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
4127 if (op_ret < 0) {
4128 ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
4129 return;
4130 }
4131 }
4132
4133 if (slo_info) {
4134 bufferlist manifest_bl;
4135 encode(*slo_info, manifest_bl);
4136 emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));
4137 }
4138
4139 if (supplied_etag && etag.compare(supplied_etag) != 0) {
4140 op_ret = -ERR_UNPROCESSABLE_ENTITY;
4141 return;
4142 }
4143 bl.append(etag.c_str(), etag.size());
4144 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
4145
4146 populate_with_generic_attrs(s, attrs);
4147 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
4148 if (op_ret < 0) {
4149 return;
4150 }
4151 encode_delete_at_attr(delete_at, attrs);
4152 encode_obj_tags_attr(obj_tags.get(), attrs);
4153 rgw_cond_decode_objtags(s, attrs);
4154
4155 /* Add custom metadata to expose whether an object is an SLO or not.
4156 * Appending the attribute must be performed AFTER processing any input
4157 * from the user in order to prevent it from being overwritten. */
4158 if (slo_info) {
4159 bufferlist slo_userindicator_bl;
4160 slo_userindicator_bl.append("True", 4);
4161 emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
4162 }
4163 if (obj_legal_hold) {
4164 bufferlist obj_legal_hold_bl;
4165 obj_legal_hold->encode(obj_legal_hold_bl);
4166 emplace_attr(RGW_ATTR_OBJECT_LEGAL_HOLD, std::move(obj_legal_hold_bl));
4167 }
4168 if (obj_retention) {
4169 bufferlist obj_retention_bl;
4170 obj_retention->encode(obj_retention_bl);
4171 emplace_attr(RGW_ATTR_OBJECT_RETENTION, std::move(obj_retention_bl));
4172 }
4173
4174 tracepoint(rgw_op, processor_complete_enter, s->req_id.c_str());
4175 op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
4176 (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
4177 (user_data.empty() ? nullptr : &user_data), nullptr, nullptr,
4178 s->yield);
4179 tracepoint(rgw_op, processor_complete_exit, s->req_id.c_str());
4180
4181 /* produce torrent */
4182 if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
4183 {
4184 torrent.init(s, store);
4185 torrent.set_create_date(mtime);
4186 op_ret = torrent.complete();
4187 if (0 != op_ret)
4188 {
4189 ldpp_dout(this, 0) << "ERROR: torrent.complete() returned " << op_ret << dendl;
4190 return;
4191 }
4192 }
4193
4194 // send request to notification manager
4195 const auto ret = rgw::notify::publish(s, obj.key, s->obj_size, mtime, etag, rgw::notify::ObjectCreatedPut, store);
4196 if (ret < 0) {
4197 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
4198 // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error in case sending failed
4199 // this should be a global conf (probably returning a different handler)
4200 // so we don't need to read the configured values before we perform it
4201 }
4202 }
4203
4204 int RGWPostObj::verify_permission()
4205 {
4206 return 0;
4207 }
4208
4209 void RGWPostObj::pre_exec()
4210 {
4211 rgw_bucket_object_pre_exec(s);
4212 }
4213
4214 void RGWPostObj::execute()
4215 {
4216 boost::optional<RGWPutObj_Compress> compressor;
4217 CompressorRef plugin;
4218 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
4219
4220 /* Read in the data from the POST form. */
4221 op_ret = get_params();
4222 if (op_ret < 0) {
4223 return;
4224 }
4225
4226 op_ret = verify_params();
4227 if (op_ret < 0) {
4228 return;
4229 }
4230
4231 if (s->iam_policy || ! s->iam_user_policies.empty()) {
4232 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
4233 boost::none,
4234 rgw::IAM::s3PutObject,
4235 rgw_obj(s->bucket, s->object));
4236 if (usr_policy_res == Effect::Deny) {
4237 op_ret = -EACCES;
4238 return;
4239 }
4240
4241 rgw::IAM::Effect e = Effect::Pass;
4242 if (s->iam_policy) {
4243 e = s->iam_policy->eval(s->env, *s->auth.identity,
4244 rgw::IAM::s3PutObject,
4245 rgw_obj(s->bucket, s->object));
4246 }
4247 if (e == Effect::Deny) {
4248 op_ret = -EACCES;
4249 return;
4250 } else if (usr_policy_res == Effect::Pass && e == Effect::Pass && !verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4251 op_ret = -EACCES;
4252 return;
4253 }
4254 } else if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4255 op_ret = -EACCES;
4256 return;
4257 }
4258
4259 /* Start iteration over data fields. It's necessary as Swift's FormPost
4260 * is capable of handling multiple files in a single form. */
4261 do {
4262 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
4263 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
4264 MD5 hash;
4265 ceph::buffer::list bl, aclbl;
4266 int len = 0;
4267
4268 op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(),
4269 s->bucket,
4270 user_quota,
4271 bucket_quota,
4272 s->content_length);
4273 if (op_ret < 0) {
4274 return;
4275 }
4276
4277 if (supplied_md5_b64) {
4278 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
4279 ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
4280 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
4281 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
4282 ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl;
4283 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
4284 op_ret = -ERR_INVALID_DIGEST;
4285 return;
4286 }
4287
4288 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
4289 ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
4290 }
4291
4292 rgw_obj obj(s->bucket, get_current_filename());
4293 if (s->bucket_info.versioning_enabled()) {
4294 store->getRados()->gen_rand_obj_instance_name(&obj);
4295 }
4296
4297 auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
4298 s->yield);
4299
4300 using namespace rgw::putobj;
4301 AtomicObjectProcessor processor(&*aio, store, s->bucket_info,
4302 &s->dest_placement,
4303 s->bucket_owner.get_id(),
4304 *static_cast<RGWObjectCtx*>(s->obj_ctx),
4305 obj, 0, s->req_id, this, s->yield);
4306 op_ret = processor.prepare(s->yield);
4307 if (op_ret < 0) {
4308 return;
4309 }
4310
4311 /* No filters by default. */
4312 DataProcessor *filter = &processor;
4313
4314 std::unique_ptr<DataProcessor> encrypt;
4315 op_ret = get_encrypt_filter(&encrypt, filter);
4316 if (op_ret < 0) {
4317 return;
4318 }
4319 if (encrypt != nullptr) {
4320 filter = encrypt.get();
4321 } else {
4322 const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(
4323 s->dest_placement);
4324 if (compression_type != "none") {
4325 plugin = Compressor::create(s->cct, compression_type);
4326 if (!plugin) {
4327 ldpp_dout(this, 1) << "Cannot load plugin for compression type "
4328 << compression_type << dendl;
4329 } else {
4330 compressor.emplace(s->cct, plugin, filter);
4331 filter = &*compressor;
4332 }
4333 }
4334 }
4335
4336 bool again;
4337 do {
4338 ceph::bufferlist data;
4339 len = get_data(data, again);
4340
4341 if (len < 0) {
4342 op_ret = len;
4343 return;
4344 }
4345
4346 if (!len) {
4347 break;
4348 }
4349
4350 hash.Update((const unsigned char *)data.c_str(), data.length());
4351 op_ret = filter->process(std::move(data), ofs);
4352
4353 ofs += len;
4354
4355 if (ofs > max_len) {
4356 op_ret = -ERR_TOO_LARGE;
4357 return;
4358 }
4359 } while (again);
4360
4361 // flush
4362 op_ret = filter->process({}, ofs);
4363 if (op_ret < 0) {
4364 return;
4365 }
4366
4367 if (len < min_len) {
4368 op_ret = -ERR_TOO_SMALL;
4369 return;
4370 }
4371
4372 s->obj_size = ofs;
4373
4374
4375 op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
4376 user_quota, bucket_quota, s->obj_size);
4377 if (op_ret < 0) {
4378 return;
4379 }
4380
4381 hash.Final(m);
4382 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
4383
4384 etag = calc_md5;
4385
4386 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
4387 op_ret = -ERR_BAD_DIGEST;
4388 return;
4389 }
4390
4391 bl.append(etag.c_str(), etag.size());
4392 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
4393
4394 policy.encode(aclbl);
4395 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
4396
4397 const std::string content_type = get_current_content_type();
4398 if (! content_type.empty()) {
4399 ceph::bufferlist ct_bl;
4400 ct_bl.append(content_type.c_str(), content_type.size() + 1);
4401 emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
4402 }
4403
4404 if (compressor && compressor->is_compressed()) {
4405 ceph::bufferlist tmp;
4406 RGWCompressionInfo cs_info;
4407 cs_info.compression_type = plugin->get_type_name();
4408 cs_info.orig_size = s->obj_size;
4409 cs_info.blocks = move(compressor->get_compression_blocks());
4410 encode(cs_info, tmp);
4411 emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
4412 }
4413
4414 op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(), attrs,
4415 (delete_at ? *delete_at : real_time()),
4416 nullptr, nullptr, nullptr, nullptr, nullptr,
4417 s->yield);
4418 if (op_ret < 0) {
4419 return;
4420 }
4421 } while (is_next_file_to_upload());
4422
4423 const auto ret = rgw::notify::publish(s, s->object, ofs, ceph::real_clock::now(), etag, rgw::notify::ObjectCreatedPost, store);
4424 if (ret < 0) {
4425 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
4426 // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error in case sending failed
4427 // this should be a global conf (probably returning a different handler)
4428 // so we don't need to read the configured values before we perform it
4429 }
4430 }
4431
4432
4433 void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
4434 const set<string>& rmattr_names,
4435 map<int, string>& temp_url_keys)
4436 {
4437 map<string, bufferlist>::iterator iter;
4438
4439 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
4440 if (iter != add_attrs.end()) {
4441 temp_url_keys[0] = iter->second.c_str();
4442 add_attrs.erase(iter);
4443 }
4444
4445 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
4446 if (iter != add_attrs.end()) {
4447 temp_url_keys[1] = iter->second.c_str();
4448 add_attrs.erase(iter);
4449 }
4450
4451 for (const string& name : rmattr_names) {
4452 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
4453 temp_url_keys[0] = string();
4454 }
4455 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
4456 temp_url_keys[1] = string();
4457 }
4458 }
4459 }
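// temp_url_keys[0] and temp_url_keys[1] correspond to the two Swift TempURL
// account keys (RGW_ATTR_TEMPURL_KEY1/KEY2); an entry set to an empty string
// via rmattr_names effectively clears that key when it is later applied in
// RGWPutMetadataAccount::execute().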
4460
4461 int RGWPutMetadataAccount::init_processing()
4462 {
4463 /* First, go to the base class. At the time of writing the method was
4464 * responsible only for initializing the quota. This isn't necessary
4465 * here as we are touching metadata only. The call is kept here only
4466 * for the future. */
4467 op_ret = RGWOp::init_processing();
4468 if (op_ret < 0) {
4469 return op_ret;
4470 }
4471
4472 op_ret = get_params();
4473 if (op_ret < 0) {
4474 return op_ret;
4475 }
4476
4477 op_ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &orig_attrs,
4478 s->yield,
4479 &acct_op_tracker);
4480 if (op_ret < 0) {
4481 return op_ret;
4482 }
4483
4484 if (has_policy) {
4485 bufferlist acl_bl;
4486 policy.encode(acl_bl);
4487 attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
4488 }
4489
4490 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
4491 if (op_ret < 0) {
4492 return op_ret;
4493 }
4494 prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
4495 populate_with_generic_attrs(s, attrs);
4496
4497 /* Try to extract the TempURL-related stuff now to allow verify_permission
4498 * to evaluate whether we need FULL_CONTROL or not. */
4499 filter_out_temp_url(attrs, rmattr_names, temp_url_keys);
4500
4501 /* The same for quota, except that the client needs to be a reseller admin. */
4502 op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
4503 &new_quota_extracted);
4504 if (op_ret < 0) {
4505 return op_ret;
4506 }
4507
4508 return 0;
4509 }
4510
4511 int RGWPutMetadataAccount::verify_permission()
4512 {
4513 if (s->auth.identity->is_anonymous()) {
4514 return -EACCES;
4515 }
4516
4517 if (!verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4518 return -EACCES;
4519 }
4520
4521 /* Altering TempURL keys requires FULL_CONTROL. */
4522 if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
4523 return -EPERM;
4524 }
4525
4526 /* We are failing this intentionally to allow system user/reseller admin
4527 * override in rgw_process.cc. This is the way to specify that a given
4528 * RGWOp expects extra privileges. */
4529 if (new_quota_extracted) {
4530 return -EACCES;
4531 }
4532
4533 return 0;
4534 }
4535
4536 void RGWPutMetadataAccount::execute()
4537 {
4538 /* Params have been extracted earlier. See init_processing(). */
4539 RGWUserInfo new_uinfo;
4540 op_ret = store->ctl()->user->get_info_by_uid(s->user->get_id(), &new_uinfo, s->yield,
4541 RGWUserCtl::GetParams()
4542 .set_objv_tracker(&acct_op_tracker));
4543 if (op_ret < 0) {
4544 return;
4545 }
4546
4547 /* Handle the TempURL-related stuff. */
4548 if (!temp_url_keys.empty()) {
4549 for (auto& pair : temp_url_keys) {
4550 new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
4551 }
4552 }
4553
4554 /* Handle the quota extracted at the init_processing step. */
4555 if (new_quota_extracted) {
4556 new_uinfo.user_quota = std::move(new_quota);
4557 }
4558
4559 /* We are passing the current (old) user info here to allow the function
4560 * to optimize out some operations. */
4561 op_ret = store->ctl()->user->store_info(new_uinfo, s->yield,
4562 RGWUserCtl::PutParams()
4563 .set_old_info(&s->user->get_info())
4564 .set_objv_tracker(&acct_op_tracker)
4565 .set_attrs(&attrs));
4566 }
4567
4568 int RGWPutMetadataBucket::verify_permission()
4569 {
4570 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4571 return -EACCES;
4572 }
4573
4574 return 0;
4575 }
4576
4577 void RGWPutMetadataBucket::pre_exec()
4578 {
4579 rgw_bucket_object_pre_exec(s);
4580 }
4581
4582 void RGWPutMetadataBucket::execute()
4583 {
4584 op_ret = get_params();
4585 if (op_ret < 0) {
4586 return;
4587 }
4588
4589 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
4590 if (op_ret < 0) {
4591 return;
4592 }
4593
4594 if (!placement_rule.empty() &&
4595 placement_rule != s->bucket_info.placement_rule) {
4596 op_ret = -EEXIST;
4597 return;
4598 }
4599
4600 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
4601 /* Encode special metadata first as we're using std::map::emplace under
4602 * the hood. This method will add new items only if the map doesn't
4603 * already contain such keys. */
4604 if (has_policy) {
4605 if (s->dialect.compare("swift") == 0) {
4606 auto old_policy = \
4607 static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
4608 auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
4609 new_policy->filter_merge(policy_rw_mask, old_policy);
4610 policy = *new_policy;
4611 }
4612 buffer::list bl;
4613 policy.encode(bl);
4614 emplace_attr(RGW_ATTR_ACL, std::move(bl));
4615 }
4616
4617 if (has_cors) {
4618 buffer::list bl;
4619 cors_config.encode(bl);
4620 emplace_attr(RGW_ATTR_CORS, std::move(bl));
4621 }
4622
4623 /* It is assumed that the following functions WILL NOT change any
4624 * special attributes (like RGW_ATTR_ACL) if they are already
4625 * present in attrs. */
4626 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
4627 populate_with_generic_attrs(s, attrs);
4628
4629 /* According to Swift's behaviour and its container_quota
4630 * WSGI middleware implementation, anyone with write permissions
4631 * is able to set the bucket quota. This is in contrast to
4632 * account quotas, which can be set only by clients holding
4633 * reseller admin privileges. */
4634 op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
4635 if (op_ret < 0) {
4636 return op_ret;
4637 }
4638
4639 if (swift_ver_location) {
4640 s->bucket_info.swift_ver_location = *swift_ver_location;
4641 s->bucket_info.swift_versioning = (!swift_ver_location->empty());
4642 }
4643
4644 /* Static website configuration of the Swift API. */
4645 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
4646 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
4647
4648 /* Setting attributes also stores the provided bucket info. Due
4649 * to this fact, the new quota settings can be serialized with
4650 * the same call. */
4651 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
4652 &s->bucket_info.objv_tracker,
4653 s->yield);
4654 return op_ret;
4655 });
4656 }
4657
4658 int RGWPutMetadataObject::verify_permission()
4659 {
4660 // This looks to be something specific to Swift. We could add
4661 // operations like swift:PutMetadataObject to the Policy Engine.
4662 if (!verify_object_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4663 return -EACCES;
4664 }
4665
4666 return 0;
4667 }
4668
4669 void RGWPutMetadataObject::pre_exec()
4670 {
4671 rgw_bucket_object_pre_exec(s);
4672 }
4673
4674 void RGWPutMetadataObject::execute()
4675 {
4676 rgw_obj obj(s->bucket, s->object);
4677 rgw_obj target_obj;
4678 map<string, bufferlist> attrs, orig_attrs, rmattrs;
4679
4680 store->getRados()->set_atomic(s->obj_ctx, obj);
4681
4682 op_ret = get_params();
4683 if (op_ret < 0) {
4684 return;
4685 }
4686
4687 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
4688 if (op_ret < 0) {
4689 return;
4690 }
4691
4692 /* check if obj exists, read orig attrs */
4693 op_ret = get_obj_attrs(store, s, obj, orig_attrs, &target_obj);
4694 if (op_ret < 0) {
4695 return;
4696 }
4697
4698 /* Check whether the object has expired. Swift API documentation
4699 * states that we should return 404 Not Found in such a case. */
4700 if (need_object_expiration() && object_is_expired(orig_attrs)) {
4701 op_ret = -ENOENT;
4702 return;
4703 }
4704
4705 /* Filter currently existing attributes. */
4706 prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
4707 populate_with_generic_attrs(s, attrs);
4708 encode_delete_at_attr(delete_at, attrs);
4709
4710 if (dlo_manifest) {
4711 op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
4712 if (op_ret < 0) {
4713 ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
4714 return;
4715 }
4716 }
4717
4718 op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, target_obj,
4719 attrs, &rmattrs, s->yield);
4720 }
4721
4722 int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
4723 {
4724 RGWSLOInfo slo_info;
4725 auto bliter = bl.cbegin();
4726 try {
4727 decode(slo_info, bliter);
4728 } catch (buffer::error& err) {
4729 ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
4730 return -EIO;
4731 }
4732
4733 try {
4734 deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
4735 new RGWBulkDelete::Deleter(this, store, s));
4736 } catch (const std::bad_alloc&) {
4737 return -ENOMEM;
4738 }
4739
4740 list<RGWBulkDelete::acct_path_t> items;
4741 for (const auto& iter : slo_info.entries) {
4742 const string& path_str = iter.path;
4743
4744 const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
4745 if (boost::string_view::npos == sep_pos) {
4746 return -EINVAL;
4747 }
4748
4749 RGWBulkDelete::acct_path_t path;
4750
4751 path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
4752 path.obj_key = url_decode(path_str.substr(sep_pos + 1));
4753
4754 items.push_back(path);
4755 }
4756
4757 /* Request removal of the manifest object itself. */
4758 RGWBulkDelete::acct_path_t path;
4759 path.bucket_name = s->bucket_name;
4760 path.obj_key = s->object;
4761 items.push_back(path);
4762
4763 int ret = deleter->delete_chunk(items);
4764 if (ret < 0) {
4765 return ret;
4766 }
4767
4768 return 0;
4769 }
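// Example (hypothetical values): an SLO segment entry with path
// "/segments/part-001" is split into bucket_name "segments" and obj_key
// "part-001" and queued for bulk deletion together with the manifest
// object itself.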
4770
4771 int RGWDeleteObj::verify_permission()
4772 {
4773 int op_ret = get_params();
4774 if (op_ret) {
4775 return op_ret;
4776 }
4777 if (s->iam_policy || ! s->iam_user_policies.empty()) {
4778 if (s->bucket_info.obj_lock_enabled() && bypass_governance_mode) {
4779 auto r = eval_user_policies(s->iam_user_policies, s->env, boost::none,
4780 rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket, s->object.name));
4781 if (r == Effect::Deny) {
4782 bypass_perm = false;
4783 } else if (r == Effect::Pass && s->iam_policy) {
4784 r = s->iam_policy->eval(s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention,
4785 ARN(s->bucket, s->object.name));
4786 if (r == Effect::Deny) {
4787 bypass_perm = false;
4788 }
4789 }
4790 }
4791 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
4792 boost::none,
4793 s->object.instance.empty() ?
4794 rgw::IAM::s3DeleteObject :
4795 rgw::IAM::s3DeleteObjectVersion,
4796 ARN(s->bucket, s->object.name));
4797 if (usr_policy_res == Effect::Deny) {
4798 return -EACCES;
4799 }
4800
4801 rgw::IAM::Effect r = Effect::Pass;
4802 if (s->iam_policy) {
4803 r = s->iam_policy->eval(s->env, *s->auth.identity,
4804 s->object.instance.empty() ?
4805 rgw::IAM::s3DeleteObject :
4806 rgw::IAM::s3DeleteObjectVersion,
4807 ARN(s->bucket, s->object.name));
4808 }
4809 if (r == Effect::Allow)
4810 return 0;
4811 else if (r == Effect::Deny)
4812 return -EACCES;
4813 else if (usr_policy_res == Effect::Allow)
4814 return 0;
4815 }
4816
4817 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4818 return -EACCES;
4819 }
4820
4821 if (s->bucket_info.mfa_enabled() &&
4822 !s->object.instance.empty() &&
4823 !s->mfa_verified) {
4824 ldpp_dout(this, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl;
4825 return -ERR_MFA_REQUIRED;
4826 }
4827
4828 return 0;
4829 }
4830
4831 void RGWDeleteObj::pre_exec()
4832 {
4833 rgw_bucket_object_pre_exec(s);
4834 }
4835
4836 void RGWDeleteObj::execute()
4837 {
4838 if (!s->bucket_exists) {
4839 op_ret = -ERR_NO_SUCH_BUCKET;
4840 return;
4841 }
4842
4843 rgw_obj obj(s->bucket, s->object);
4844 map<string, bufferlist> attrs;
4845
4846 bool check_obj_lock = obj.key.have_instance() && s->bucket_info.obj_lock_enabled();
4847
4848 if (!s->object.empty()) {
4849 op_ret = get_obj_attrs(store, s, obj, attrs);
4850
4851 if (need_object_expiration() || multipart_delete) {
4852 /* check if obj exists, read orig attrs */
4853 if (op_ret < 0) {
4854 return;
4855 }
4856 }
4857
4858 if (check_obj_lock) {
4859 /* check if obj exists, read orig attrs */
4860 if (op_ret < 0) {
4861 if (op_ret == -ENOENT) {
4862 /* object may be a delete marker; skip check_obj_lock */
4863 check_obj_lock = false;
4864 } else {
4865 return;
4866 }
4867 }
4868 }
4869
4870 // ignore return value from get_obj_attrs in all other cases
4871 op_ret = 0;
4872
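// S3 object-lock enforcement: a versioned delete is refused while the
// retain-until date lies in the future, unless the retention mode is
// GOVERNANCE, s3:BypassGovernanceRetention was not denied (bypass_perm)
// and the client explicitly requested the bypass (bypass_governance_mode).
// An active legal hold blocks the delete unconditionally.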
4873 if (check_obj_lock) {
4874 auto aiter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
4875 if (aiter != attrs.end()) {
4876 RGWObjectRetention obj_retention;
4877 try {
4878 decode(obj_retention, aiter->second);
4879 } catch (buffer::error& err) {
4880 ldpp_dout(this, 0) << "ERROR: failed to decode RGWObjectRetention" << dendl;
4881 op_ret = -EIO;
4882 return;
4883 }
4884 if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) > ceph_clock_now()) {
4885 if (obj_retention.get_mode().compare("GOVERNANCE") != 0 || !bypass_perm || !bypass_governance_mode) {
4886 op_ret = -EACCES;
4887 return;
4888 }
4889 }
4890 }
4891 aiter = attrs.find(RGW_ATTR_OBJECT_LEGAL_HOLD);
4892 if (aiter != attrs.end()) {
4893 RGWObjectLegalHold obj_legal_hold;
4894 try {
4895 decode(obj_legal_hold, aiter->second);
4896 } catch (buffer::error& err) {
4897 ldpp_dout(this, 0) << "ERROR: failed to decode RGWObjectLegalHold" << dendl;
4898 op_ret = -EIO;
4899 return;
4900 }
4901 if (obj_legal_hold.is_enabled()) {
4902 op_ret = -EACCES;
4903 return;
4904 }
4905 }
4906 }
4907
4908 if (multipart_delete) {
4909 const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);
4910
4911 if (slo_attr != attrs.end()) {
4912 op_ret = handle_slo_manifest(slo_attr->second);
4913 if (op_ret < 0) {
4914 ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
4915 }
4916 } else {
4917 op_ret = -ERR_NOT_SLO_MANIFEST;
4918 }
4919
4920 return;
4921 }
4922
4923 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
4924 obj_ctx->set_atomic(obj);
4925
4926 bool ver_restored = false;
4927 op_ret = store->getRados()->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
4928 s->bucket_info, obj, ver_restored, this);
4929 if (op_ret < 0) {
4930 return;
4931 }
4932
4933 if (!ver_restored) {
4934 /* Swift's versioning mechanism hasn't found any previous version of
4935 * the object that could be restored. This means we should proceed
4936 * with the regular delete path. */
4937 RGWRados::Object del_target(store->getRados(), s->bucket_info, *obj_ctx, obj);
4938 RGWRados::Object::Delete del_op(&del_target);
4939
4940 op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
4941 &del_op.params.marker_version_id);
4942 if (op_ret < 0) {
4943 return;
4944 }
4945
4946 del_op.params.bucket_owner = s->bucket_owner.get_id();
4947 del_op.params.versioning_status = s->bucket_info.versioning_status();
4948 del_op.params.obj_owner = s->owner;
4949 del_op.params.unmod_since = unmod_since;
4950 del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */
4951
4952 op_ret = del_op.delete_obj(s->yield);
4953 if (op_ret >= 0) {
4954 delete_marker = del_op.result.delete_marker;
4955 version_id = del_op.result.version_id;
4956 }
4957
4958 /* Check whether the object has expired. Swift API documentation
4959 * states that we should return 404 Not Found in such a case. */
4960 if (need_object_expiration() && object_is_expired(attrs)) {
4961 op_ret = -ENOENT;
4962 return;
4963 }
4964 }
4965
4966 if (op_ret == -ECANCELED) {
4967 op_ret = 0;
4968 }
4969 if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
4970 op_ret = 0;
4971 }
4972
4973 // cache the object's tags and metadata in the request
4974 // so they can be used by the notification mechanism
4975 try {
4976 populate_tags_in_request(s, attrs);
4977 } catch (buffer::error& err) {
4978 ldpp_dout(this, 5) << "WARNING: failed to populate delete request with object tags: " << err.what() << dendl;
4979 }
4980 populate_metadata_in_request(s, attrs);
4981 const auto obj_state = obj_ctx->get_state(obj);
4982
4983 const auto ret = rgw::notify::publish(s, s->object, obj_state->size, obj_state->mtime, attrs[RGW_ATTR_ETAG].to_str(),
4984 delete_marker && s->object.instance.empty() ? rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete,
4985 store);
4986 if (ret < 0) {
4987 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
4988 // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error in case sending failed
4989 // this should be a global conf (probably returning a different handler)
4990 // so we don't need to read the configured values before we perform it
4991 }
4992 } else {
4993 op_ret = -EINVAL;
4994 }
4995 }
4996
4997 bool RGWCopyObj::parse_copy_location(const boost::string_view& url_src,
4998 string& bucket_name,
4999 rgw_obj_key& key)
5000 {
5001 boost::string_view name_str;
5002 boost::string_view params_str;
5003
5004 // search for ? before url-decoding so we don't accidentally match %3F
5005 size_t pos = url_src.find('?');
5006 if (pos == string::npos) {
5007 name_str = url_src;
5008 } else {
5009 name_str = url_src.substr(0, pos);
5010 params_str = url_src.substr(pos + 1);
5011 }
5012
5013 boost::string_view dec_src{name_str};
5014 if (dec_src[0] == '/')
5015 dec_src.remove_prefix(1);
5016
5017 pos = dec_src.find('/');
5018 if (pos == string::npos)
5019 return false;
5020
5021 bucket_name = url_decode(dec_src.substr(0, pos));
5022 key.name = url_decode(dec_src.substr(pos + 1));
5023
5024 if (key.name.empty()) {
5025 return false;
5026 }
5027
5028 if (! params_str.empty()) {
5029 RGWHTTPArgs args;
5030 args.set(params_str.to_string());
5031 args.parse();
5032
5033 key.instance = args.get("versionId", NULL);
5034 }
5035
5036 return true;
5037 }
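// Example (hypothetical values): a copy-source of
//   "/srcbucket/photos/cat.png?versionId=v2"
// yields bucket_name == "srcbucket", key.name == "photos/cat.png" and
// key.instance == "v2"; a source without an object part makes this return false.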
5038
5039 int RGWCopyObj::verify_permission()
5040 {
5041 RGWAccessControlPolicy src_acl(s->cct);
5042 boost::optional<Policy> src_policy;
5043 op_ret = get_params();
5044 if (op_ret < 0)
5045 return op_ret;
5046
5047 op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
5048 if (op_ret < 0) {
5049 return op_ret;
5050 }
5051 map<string, bufferlist> src_attrs;
5052
5053 if (s->bucket_instance_id.empty()) {
5054 op_ret = store->getRados()->get_bucket_info(store->svc(), src_tenant_name, src_bucket_name, src_bucket_info, NULL, s->yield, &src_attrs);
5055 } else {
5056 /* will only happen in intra-region sync where the source and dest bucket are the same */
5057 rgw_bucket b(rgw_bucket_key(src_tenant_name, src_bucket_name, s->bucket_instance_id));
5058 op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, src_bucket_info, NULL, &src_attrs, s->yield);
5059 }
5060 if (op_ret < 0) {
5061 if (op_ret == -ENOENT) {
5062 op_ret = -ERR_NO_SUCH_BUCKET;
5063 }
5064 return op_ret;
5065 }
5066
5067 src_bucket = src_bucket_info.bucket;
5068
5069 /* get buckets info (source and dest) */
5070 if (s->local_source && source_zone.empty()) {
5071 rgw_obj src_obj(src_bucket, src_object);
5072 store->getRados()->set_atomic(s->obj_ctx, src_obj);
5073 store->getRados()->set_prefetch_data(s->obj_ctx, src_obj);
5074
5075 rgw_placement_rule src_placement;
5076
5077 /* check source object permissions */
5078 op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl, &src_placement.storage_class,
5079 src_policy, src_bucket, src_object);
5080 if (op_ret < 0) {
5081 return op_ret;
5082 }
5083
5084 /* follow up on previous checks that required reading source object head */
5085 if (need_to_check_storage_class) {
5086 src_placement.inherit_from(src_bucket_info.placement_rule);
5087
5088 op_ret = check_storage_class(src_placement);
5089 if (op_ret < 0) {
5090 return op_ret;
5091 }
5092 }
5093
5094 /* admin request overrides permission checks */
5095 if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
5096 if (src_policy) {
5097 auto e = src_policy->eval(s->env, *s->auth.identity,
5098 src_object.instance.empty() ?
5099 rgw::IAM::s3GetObject :
5100 rgw::IAM::s3GetObjectVersion,
5101 ARN(src_obj));
5102 if (e == Effect::Deny) {
5103 return -EACCES;
5104 } else if (e == Effect::Pass &&
5105 !src_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
5106 RGW_PERM_READ)) {
5107 return -EACCES;
5108 }
5109 } else if (!src_acl.verify_permission(this, *s->auth.identity,
5110 s->perm_mask,
5111 RGW_PERM_READ)) {
5112 return -EACCES;
5113 }
5114 }
5115 }
5116
5117 RGWAccessControlPolicy dest_bucket_policy(s->cct);
5118 map<string, bufferlist> dest_attrs;
5119
5120 if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
5121 or intra region sync */
5122 dest_bucket_info = src_bucket_info;
5123 dest_attrs = src_attrs;
5124 } else {
5125 op_ret = store->getRados()->get_bucket_info(store->svc(), dest_tenant_name, dest_bucket_name,
5126 dest_bucket_info, nullptr, s->yield, &dest_attrs);
5127 if (op_ret < 0) {
5128 if (op_ret == -ENOENT) {
5129 op_ret = -ERR_NO_SUCH_BUCKET;
5130 }
5131 return op_ret;
5132 }
5133 }
5134
5135 dest_bucket = dest_bucket_info.bucket;
5136
5137 rgw_obj dest_obj(dest_bucket, dest_object);
5138 store->getRados()->set_atomic(s->obj_ctx, dest_obj);
5139
5140 /* check dest bucket permissions */
5141 op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
5142 &dest_bucket_policy, dest_bucket);
5143 if (op_ret < 0) {
5144 return op_ret;
5145 }
5146 auto dest_iam_policy = get_iam_policy_from_attr(s->cct, store, dest_attrs, dest_bucket.tenant);
5147 /* admin request overrides permission checks */
5148 if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id())){
5149 if (dest_iam_policy != boost::none) {
5150 rgw_add_to_iam_environment(s->env, "s3:x-amz-copy-source", copy_source);
5151 if (md_directive)
5152 rgw_add_to_iam_environment(s->env, "s3:x-amz-metadata-directive",
5153 *md_directive);
5154
5155 auto e = dest_iam_policy->eval(s->env, *s->auth.identity,
5156 rgw::IAM::s3PutObject,
5157 ARN(dest_obj));
5158 if (e == Effect::Deny) {
5159 return -EACCES;
5160 } else if (e == Effect::Pass &&
5161 ! dest_bucket_policy.verify_permission(this,
5162 *s->auth.identity,
5163 s->perm_mask,
5164 RGW_PERM_WRITE)){
5165 return -EACCES;
5166 }
5167 } else if (! dest_bucket_policy.verify_permission(this, *s->auth.identity, s->perm_mask,
5168 RGW_PERM_WRITE)) {
5169 return -EACCES;
5170 }
5171
5172 }
5173
5174 op_ret = init_dest_policy();
5175 if (op_ret < 0) {
5176 return op_ret;
5177 }
5178
5179 return 0;
5180 }
5181
5182
5183 int RGWCopyObj::init_common()
5184 {
5185 if (if_mod) {
5186 if (parse_time(if_mod, &mod_time) < 0) {
5187 op_ret = -EINVAL;
5188 return op_ret;
5189 }
5190 mod_ptr = &mod_time;
5191 }
5192
5193 if (if_unmod) {
5194 if (parse_time(if_unmod, &unmod_time) < 0) {
5195 op_ret = -EINVAL;
5196 return op_ret;
5197 }
5198 unmod_ptr = &unmod_time;
5199 }
5200
5201 bufferlist aclbl;
5202 dest_policy.encode(aclbl);
5203 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
5204
5205 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
5206 if (op_ret < 0) {
5207 return op_ret;
5208 }
5209 populate_with_generic_attrs(s, attrs);
5210
5211 return 0;
5212 }
5213
5214 static void copy_obj_progress_cb(off_t ofs, void *param)
5215 {
5216 RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
5217 op->progress_cb(ofs);
5218 }
5219
5220 void RGWCopyObj::progress_cb(off_t ofs)
5221 {
5222 if (!s->cct->_conf->rgw_copy_obj_progress)
5223 return;
5224
5225 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
5226 return;
5227
5228 send_partial_response(ofs);
5229
5230 last_ofs = ofs;
5231 }
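// Progress callbacks are throttled: send_partial_response() fires only once per
// rgw_copy_obj_progress_every_bytes of copied data, and not at all when
// rgw_copy_obj_progress is disabled.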
5232
5233 void RGWCopyObj::pre_exec()
5234 {
5235 rgw_bucket_object_pre_exec(s);
5236 }
5237
5238 void RGWCopyObj::execute()
5239 {
5240 if (init_common() < 0)
5241 return;
5242
5243 rgw_obj src_obj(src_bucket, src_object);
5244 rgw_obj dst_obj(dest_bucket, dest_object);
5245
5246 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
5247 if ( ! version_id.empty()) {
5248 dst_obj.key.set_instance(version_id);
5249 } else if (dest_bucket_info.versioning_enabled()) {
5250 store->getRados()->gen_rand_obj_instance_name(&dst_obj);
5251 }
5252
5253 obj_ctx.set_atomic(src_obj);
5254 obj_ctx.set_atomic(dst_obj);
5255
5256 encode_delete_at_attr(delete_at, attrs);
5257
5258 if (!s->system_request) { // no quota enforcement for system requests
5259 // get src object size (cached in obj_ctx from verify_permission())
5260 RGWObjState* astate = nullptr;
5261 op_ret = store->getRados()->get_obj_state(s->obj_ctx, src_bucket_info, src_obj,
5262 &astate, true, s->yield, false);
5263 if (op_ret < 0) {
5264 return;
5265 }
5266 // enforce quota against the destination bucket owner
5267 op_ret = store->getRados()->check_quota(dest_bucket_info.owner,
5268 dest_bucket_info.bucket,
5269 user_quota, bucket_quota,
5270 astate->accounted_size);
5271 if (op_ret < 0) {
5272 return;
5273 }
5274 }
5275
5276 bool high_precision_time = (s->system_request);
5277
5278 /* Handle object versioning of the Swift API. In case of copying to a remote this
5279 * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
5280 op_ret = store->getRados()->swift_versioning_copy(obj_ctx,
5281 dest_bucket_info.owner,
5282 dest_bucket_info,
5283 dst_obj,
5284 this,
5285 s->yield);
5286 if (op_ret < 0) {
5287 return;
5288 }
5289
5290 op_ret = store->getRados()->copy_obj(obj_ctx,
5291 s->user->get_id(),
5292 &s->info,
5293 source_zone,
5294 dst_obj,
5295 src_obj,
5296 dest_bucket_info,
5297 src_bucket_info,
5298 s->dest_placement,
5299 &src_mtime,
5300 &mtime,
5301 mod_ptr,
5302 unmod_ptr,
5303 high_precision_time,
5304 if_match,
5305 if_nomatch,
5306 attrs_mod,
5307 copy_if_newer,
5308 attrs, RGWObjCategory::Main,
5309 olh_epoch,
5310 (delete_at ? *delete_at : real_time()),
5311 (version_id.empty() ? NULL : &version_id),
5312 &s->req_id, /* use req_id as tag */
5313 &etag,
5314 copy_obj_progress_cb, (void *)this,
5315 this,
5316 s->yield);
5317
5318 const auto ret = rgw::notify::publish(s, s->object, s->obj_size, mtime, etag, rgw::notify::ObjectCreatedCopy, store);
5319 if (ret < 0) {
5320 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
5321 // TODO: we should have a conf option to make the send a blocking coroutine and reply with an error in case sending failed
5322 // this should be a global conf (probably returning a different handler)
5323 // so we don't need to read the configured values before we perform it
5324 }
5325 }
5326
5327 int RGWGetACLs::verify_permission()
5328 {
5329 bool perm;
5330 if (!s->object.empty()) {
5331 auto iam_action = s->object.instance.empty() ?
5332 rgw::IAM::s3GetObjectAcl :
5333 rgw::IAM::s3GetObjectVersionAcl;
5334
5335 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
5336 rgw_obj obj = rgw_obj(s->bucket, s->object);
5337 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
5338 }
5339 if (! s->iam_user_policies.empty()) {
5340 for (auto& user_policy : s->iam_user_policies) {
5341 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
5342 rgw_obj obj = rgw_obj(s->bucket, s->object);
5343 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
5344 }
5345 }
5346 }
5347 perm = verify_object_permission(this, s, iam_action);
5348 } else {
5349 if (!s->bucket_exists) {
5350 return -ERR_NO_SUCH_BUCKET;
5351 }
5352 perm = verify_bucket_permission(this, s, rgw::IAM::s3GetBucketAcl);
5353 }
5354 if (!perm)
5355 return -EACCES;
5356
5357 return 0;
5358 }
5359
5360 void RGWGetACLs::pre_exec()
5361 {
5362 rgw_bucket_object_pre_exec(s);
5363 }
5364
5365 void RGWGetACLs::execute()
5366 {
5367 stringstream ss;
5368 RGWAccessControlPolicy* const acl = \
5369 (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get());
5370 RGWAccessControlPolicy_S3* const s3policy = \
5371 static_cast<RGWAccessControlPolicy_S3*>(acl);
5372 s3policy->to_xml(ss);
5373 acls = ss.str();
5374 }
5375
5376
5377
5378 int RGWPutACLs::verify_permission()
5379 {
5380 bool perm;
5381
5382 rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);
5383
5384 rgw_add_grant_to_iam_environment(s->env, s);
5385 if (!s->object.empty()) {
5386 auto iam_action = s->object.instance.empty() ? rgw::IAM::s3PutObjectAcl : rgw::IAM::s3PutObjectVersionAcl;
5387 auto obj = rgw_obj(s->bucket, s->object);
5388 op_ret = rgw_iam_add_existing_objtags(store, s, obj, iam_action);
5389 perm = verify_object_permission(this, s, iam_action);
5390 } else {
5391 perm = verify_bucket_permission(this, s, rgw::IAM::s3PutBucketAcl);
5392 }
5393 if (!perm)
5394 return -EACCES;
5395
5396 return 0;
5397 }
5398
5399 int RGWGetLC::verify_permission()
5400 {
5401 bool perm;
5402 perm = verify_bucket_permission(this, s, rgw::IAM::s3GetLifecycleConfiguration);
5403 if (!perm)
5404 return -EACCES;
5405
5406 return 0;
5407 }
5408
5409 int RGWPutLC::verify_permission()
5410 {
5411 bool perm;
5412 perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
5413 if (!perm)
5414 return -EACCES;
5415
5416 return 0;
5417 }
5418
5419 int RGWDeleteLC::verify_permission()
5420 {
5421 bool perm;
5422 perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
5423 if (!perm)
5424 return -EACCES;
5425
5426 return 0;
5427 }
5428
5429 void RGWPutACLs::pre_exec()
5430 {
5431 rgw_bucket_object_pre_exec(s);
5432 }
5433
5434 void RGWGetLC::pre_exec()
5435 {
5436 rgw_bucket_object_pre_exec(s);
5437 }
5438
5439 void RGWPutLC::pre_exec()
5440 {
5441 rgw_bucket_object_pre_exec(s);
5442 }
5443
5444 void RGWDeleteLC::pre_exec()
5445 {
5446 rgw_bucket_object_pre_exec(s);
5447 }
5448
5449 void RGWPutACLs::execute()
5450 {
5451 bufferlist bl;
5452
5453 RGWAccessControlPolicy_S3 *policy = NULL;
5454 RGWACLXMLParser_S3 parser(s->cct);
5455 RGWAccessControlPolicy_S3 new_policy(s->cct);
5456 stringstream ss;
5457 rgw_obj obj;
5458
5459 op_ret = 0; /* XXX redundant? */
5460
5461 if (!parser.init()) {
5462 op_ret = -EINVAL;
5463 return;
5464 }
5465
5466
5467 RGWAccessControlPolicy* const existing_policy = \
5468 (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());
5469
5470 owner = existing_policy->get_owner();
5471
5472 op_ret = get_params();
5473 if (op_ret < 0) {
5474 if (op_ret == -ERANGE) {
5475 ldpp_dout(this, 4) << "The size of request xml data is larger than the max limitation, data size = "
5476 << s->length << dendl;
5477 op_ret = -ERR_MALFORMED_XML;
5478 s->err.message = "The XML you provided was larger than the maximum " +
5479 std::to_string(s->cct->_conf->rgw_max_put_param_size) +
5480 " bytes allowed.";
5481 }
5482 return;
5483 }
5484
5485 char* buf = data.c_str();
5486 ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;
5487
5488 if (!s->canned_acl.empty() && data.length() > 0) {
5489 op_ret = -EINVAL;
5490 return;
5491 }
5492
5493 if (!s->canned_acl.empty() || s->has_acl_header) {
5494 op_ret = get_policy_from_state(store, s, ss);
5495 if (op_ret < 0)
5496 return;
5497
5498 data.clear();
5499 data.append(ss.str());
5500 }
5501
5502 if (!parser.parse(data.c_str(), data.length(), 1)) {
5503 op_ret = -EINVAL;
5504 return;
5505 }
5506 policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
5507 if (!policy) {
5508 op_ret = -EINVAL;
5509 return;
5510 }
5511
5512 const RGWAccessControlList& req_acl = policy->get_acl();
5513 const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
5514 #define ACL_GRANTS_MAX_NUM 100
5515 int max_num = s->cct->_conf->rgw_acl_grants_max_num;
5516 if (max_num < 0) {
5517 max_num = ACL_GRANTS_MAX_NUM;
5518 }
5519
5520 int grants_num = req_grant_map.size();
5521 if (grants_num > max_num) {
5522 ldpp_dout(this, 4) << "An acl can have up to " << max_num
5523 << " grants, request acl grants num: " << grants_num << dendl;
5524 op_ret = -ERR_LIMIT_EXCEEDED;
5525 s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
5526 + std::to_string(max_num)
5527 + " grants allowed in an acl.";
5528 return;
5529 }
5530
5531 // forward bucket acl requests to meta master zone
5532 if (s->object.empty() && !store->svc()->zone->is_meta_master()) {
5533 bufferlist in_data;
5534 // include acl data unless it was generated from a canned_acl
5535 if (s->canned_acl.empty()) {
5536 in_data.append(data);
5537 }
5538 op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
5539 if (op_ret < 0) {
5540 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5541 return;
5542 }
5543 }
5544
5545 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
5546 ldpp_dout(this, 15) << "Old AccessControlPolicy:";
5547 policy->to_xml(*_dout);
5548 *_dout << dendl;
5549 }
5550
5551 op_ret = policy->rebuild(store->ctl()->user, &owner, new_policy, s->err.message);
5552 if (op_ret < 0)
5553 return;
5554
5555 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
5556 ldpp_dout(this, 15) << "New AccessControlPolicy:";
5557 new_policy.to_xml(*_dout);
5558 *_dout << dendl;
5559 }
5560
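// Honour the bucket's public access block configuration: reject any new ACL
// that would be public while BlockPublicAcls is in effect.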
5561 if (s->bucket_access_conf &&
5562 s->bucket_access_conf->block_public_acls() &&
5563 new_policy.is_public()) {
5564 op_ret = -EACCES;
5565 return;
5566 }
5567 new_policy.encode(bl);
5568 if (!s->object.empty()) {
5569 obj = rgw_obj(s->bucket, s->object);
5570 store->getRados()->set_atomic(s->obj_ctx, obj);
5571 // if the instance is empty, we should modify the latest object
5572 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
5573 } else {
5574 map<string,bufferlist> attrs = s->bucket_attrs;
5575 attrs[RGW_ATTR_ACL] = bl;
5576 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
5577 &s->bucket_info.objv_tracker,
5578 s->yield);
5579 }
5580 if (op_ret == -ECANCELED) {
5581 op_ret = 0; /* lost a race, but it's ok because acls are immutable */
5582 }
5583 }
5584
5585 void RGWPutLC::execute()
5586 {
5587 bufferlist bl;
5588
5589 RGWLifecycleConfiguration_S3 config(s->cct);
5590 RGWXMLParser parser;
5591 RGWLifecycleConfiguration_S3 new_config(s->cct);
5592
5593 content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
5594 if (content_md5 == nullptr) {
5595 op_ret = -ERR_INVALID_REQUEST;
5596 s->err.message = "Missing required header for this request: Content-MD5";
5597 ldpp_dout(this, 5) << s->err.message << dendl;
5598 return;
5599 }
5600
5601 std::string content_md5_bin;
5602 try {
5603 content_md5_bin = rgw::from_base64(boost::string_view(content_md5));
5604 } catch (...) {
5605 s->err.message = "Request header Content-MD5 contains character "
5606 "that is not base64 encoded.";
5607 ldpp_dout(this, 5) << s->err.message << dendl;
5608 op_ret = -ERR_BAD_DIGEST;
5609 return;
5610 }
5611
5612 if (!parser.init()) {
5613 op_ret = -EINVAL;
5614 return;
5615 }
5616
5617 op_ret = get_params();
5618 if (op_ret < 0)
5619 return;
5620
5621 char* buf = data.c_str();
5622 ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;
5623
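// Compute the MD5 of the request body and compare it against the
// base64-decoded Content-MD5 header; a mismatch is rejected as a bad digest.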
5624 MD5 data_hash;
5625 unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
5626 data_hash.Update(reinterpret_cast<const unsigned char*>(buf), data.length());
5627 data_hash.Final(data_hash_res);
5628
5629 if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
5630 op_ret = -ERR_BAD_DIGEST;
5631 s->err.message = "The Content-MD5 you specified did not match what we received.";
5632 ldpp_dout(this, 5) << s->err.message
5633 << " Specified content md5: " << content_md5
5634 << ", calculated content md5: " << data_hash_res
5635 << dendl;
5636 return;
5637 }
5638
5639 if (!parser.parse(buf, data.length(), 1)) {
5640 op_ret = -ERR_MALFORMED_XML;
5641 return;
5642 }
5643
5644 try {
5645 RGWXMLDecoder::decode_xml("LifecycleConfiguration", config, &parser);
5646 } catch (RGWXMLDecoder::err& err) {
5647 ldpp_dout(this, 5) << "Bad lifecycle configuration: " << err << dendl;
5648 op_ret = -ERR_MALFORMED_XML;
5649 return;
5650 }
5651
5652 op_ret = config.rebuild(store->getRados(), new_config);
5653 if (op_ret < 0)
5654 return;
5655
5656 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
5657 XMLFormatter xf;
5658 new_config.dump_xml(&xf);
5659 stringstream ss;
5660 xf.flush(ss);
5661 ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
5662 }
5663
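// On a non-master zone, forward the lifecycle change to the metadata master
// before applying it locally.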
5664 if (!store->svc()->zone->is_meta_master()) {
5665 op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
5666 if (op_ret < 0) {
5667 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5668 return;
5669 }
5670 }
5671
5672 op_ret = store->getRados()->get_lc()->set_bucket_config(s->bucket_info, s->bucket_attrs, &new_config);
5673 if (op_ret < 0) {
5674 return;
5675 }
5676 return;
5677 }
5678
5679 void RGWDeleteLC::execute()
5680 {
5681 if (!store->svc()->zone->is_meta_master()) {
5682 bufferlist data;
5683 op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
5684 if (op_ret < 0) {
5685 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5686 return;
5687 }
5688 }
5689
5690 op_ret = store->getRados()->get_lc()->remove_bucket_config(s->bucket_info, s->bucket_attrs);
5691 if (op_ret < 0) {
5692 return;
5693 }
5694 return;
5695 }
5696
5697 int RGWGetCORS::verify_permission()
5698 {
5699 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
5700 }
5701
5702 void RGWGetCORS::execute()
5703 {
5704 op_ret = read_bucket_cors();
5705 if (op_ret < 0)
5706 return ;
5707
5708 if (!cors_exist) {
5709 ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
5710 op_ret = -ERR_NO_CORS_FOUND;
5711 return;
5712 }
5713 }
5714
5715 int RGWPutCORS::verify_permission()
5716 {
5717 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
5718 }
5719
5720 void RGWPutCORS::execute()
5721 {
5722 rgw_raw_obj obj;
5723
5724 op_ret = get_params();
5725 if (op_ret < 0)
5726 return;
5727
5728 if (!store->svc()->zone->is_meta_master()) {
5729 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
5730 if (op_ret < 0) {
5731 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5732 return;
5733 }
5734 }
5735
5736 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
5737 map<string, bufferlist> attrs = s->bucket_attrs;
5738 attrs[RGW_ATTR_CORS] = cors_bl;
5739 return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
5740 &s->bucket_info.objv_tracker,
5741 s->yield);
5742 });
5743 }
5744
5745 int RGWDeleteCORS::verify_permission()
5746 {
5747 // No separate delete permission
5748 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
5749 }
5750
5751 void RGWDeleteCORS::execute()
5752 {
5753 if (!store->svc()->zone->is_meta_master()) {
5754 bufferlist data;
5755 op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
5756 if (op_ret < 0) {
5757 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5758 return;
5759 }
5760 }
5761
5762 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
5763 op_ret = read_bucket_cors();
5764 if (op_ret < 0)
5765 return op_ret;
5766
5767 if (!cors_exist) {
5768 ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
5769 op_ret = -ENOENT;
5770 return op_ret;
5771 }
5772
5773 map<string, bufferlist> attrs = s->bucket_attrs;
5774 attrs.erase(RGW_ATTR_CORS);
5775 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
5776 &s->bucket_info.objv_tracker,
5777 s->yield);
5778 if (op_ret < 0) {
5779 ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket.name
5780 << " returned err=" << op_ret << dendl;
5781 }
5782 return op_ret;
5783 });
5784 }
5785
5786 void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
5787 get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
5788 }
5789
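/* Match a CORS rule by the request's Origin, then check that the requested
 * method and headers are permitted by that rule. */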
5790 int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
5791 rule = cc->host_name_rule(origin);
5792 if (!rule) {
5793 ldpp_dout(this, 10) << "There is no cors rule present for " << origin << dendl;
5794 return -ENOENT;
5795 }
5796
5797 if (!validate_cors_rule_method(rule, req_meth)) {
5798 return -ENOENT;
5799 }
5800
5801 if (!validate_cors_rule_header(rule, req_hdrs)) {
5802 return -ENOENT;
5803 }
5804
5805 return 0;
5806 }
5807
5808 void RGWOptionsCORS::execute()
5809 {
5810 op_ret = read_bucket_cors();
5811 if (op_ret < 0)
5812 return;
5813
5814 origin = s->info.env->get("HTTP_ORIGIN");
5815 if (!origin) {
5816 ldpp_dout(this, 0) << "Missing mandatory Origin header" << dendl;
5817 op_ret = -EINVAL;
5818 return;
5819 }
5820 req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
5821 if (!req_meth) {
5822 ldpp_dout(this, 0) << "Missing mandatory Access-control-request-method header" << dendl;
5823 op_ret = -EINVAL;
5824 return;
5825 }
5826 if (!cors_exist) {
5827 ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
5828 op_ret = -ENOENT;
5829 return;
5830 }
5831 req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
5832 op_ret = validate_cors_request(&bucket_cors);
5833 if (!rule) {
5834 origin = req_meth = NULL;
5835 return;
5836 }
5837 return;
5838 }
5839
5840 int RGWGetRequestPayment::verify_permission()
5841 {
5842 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
5843 }
5844
5845 void RGWGetRequestPayment::pre_exec()
5846 {
5847 rgw_bucket_object_pre_exec(s);
5848 }
5849
5850 void RGWGetRequestPayment::execute()
5851 {
5852 requester_pays = s->bucket_info.requester_pays;
5853 }
5854
5855 int RGWSetRequestPayment::verify_permission()
5856 {
5857 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
5858 }
5859
5860 void RGWSetRequestPayment::pre_exec()
5861 {
5862 rgw_bucket_object_pre_exec(s);
5863 }
5864
5865 void RGWSetRequestPayment::execute()
5866 {
5867
5868 if (!store->svc()->zone->is_meta_master()) {
5869 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
5870 if (op_ret < 0) {
5871 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5872 return;
5873 }
5874 }
5875
5876 op_ret = get_params();
5877
5878 if (op_ret < 0)
5879 return;
5880
5881 s->bucket_info.requester_pays = requester_pays;
5882 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
5883 &s->bucket_attrs);
5884 if (op_ret < 0) {
5885 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
5886 << " returned err=" << op_ret << dendl;
5887 return;
5888 }
5889 }
5890
5891 int RGWInitMultipart::verify_permission()
5892 {
5893 if (s->iam_policy || ! s->iam_user_policies.empty()) {
5894 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
5895 boost::none,
5896 rgw::IAM::s3PutObject,
5897 rgw_obj(s->bucket, s->object));
5898 if (usr_policy_res == Effect::Deny) {
5899 return -EACCES;
5900 }
5901
5902 rgw::IAM::Effect e = Effect::Pass;
5903 if (s->iam_policy) {
5904 e = s->iam_policy->eval(s->env, *s->auth.identity,
5905 rgw::IAM::s3PutObject,
5906 rgw_obj(s->bucket, s->object));
5907 }
5908 if (e == Effect::Allow) {
5909 return 0;
5910 } else if (e == Effect::Deny) {
5911 return -EACCES;
5912 } else if (usr_policy_res == Effect::Allow) {
5913 return 0;
5914 }
5915 }
5916
5917 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
5918 return -EACCES;
5919 }
5920
5921 return 0;
5922 }
5923
5924 void RGWInitMultipart::pre_exec()
5925 {
5926 rgw_bucket_object_pre_exec(s);
5927 }
5928
5929 void RGWInitMultipart::execute()
5930 {
5931 bufferlist aclbl;
5932 map<string, bufferlist> attrs;
5933 rgw_obj obj;
5934
5935 if (get_params() < 0)
5936 return;
5937
5938 if (s->object.empty())
5939 return;
5940
5941 policy.encode(aclbl);
5942 attrs[RGW_ATTR_ACL] = aclbl;
5943
5944 populate_with_generic_attrs(s, attrs);
5945
5946 /* select encryption mode */
5947 op_ret = prepare_encryption(attrs);
5948 if (op_ret != 0)
5949 return;
5950
5951 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
5952 if (op_ret < 0) {
5953 return;
5954 }
5955
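// Generate a random multipart upload id and create the meta object with
// PUT_OBJ_CREATE_EXCL; on an -EEXIST collision, loop and retry with a new id.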
5956 do {
5957 char buf[33];
5958 gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
5959 upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
5960 upload_id.append(buf);
5961
5962 string tmp_obj_name;
5963 RGWMPObj mp(s->object.name, upload_id);
5964 tmp_obj_name = mp.get_meta();
5965
5966 obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
5967 // the meta object will be indexed with 0 size
5968 obj.set_in_extra_data(true);
5969 obj.index_hash_source = s->object.name;
5970
5971 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
5972 op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
5973
5974 RGWRados::Object::Write obj_op(&op_target);
5975
5976 obj_op.meta.owner = s->owner.get_id();
5977 obj_op.meta.category = RGWObjCategory::MultiMeta;
5978 obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;
5979 obj_op.meta.mtime = &mtime;
5980
5981 multipart_upload_info upload_info;
5982 upload_info.dest_placement = s->dest_placement;
5983
5984 bufferlist bl;
5985 encode(upload_info, bl);
5986 obj_op.meta.data = &bl;
5987
5988 op_ret = obj_op.write_meta(bl.length(), 0, attrs, s->yield);
5989 } while (op_ret == -EEXIST);
5990
5991 const auto ret = rgw::notify::publish(s, s->object, s->obj_size, ceph::real_clock::now(), attrs[RGW_ATTR_ETAG].to_str(), rgw::notify::ObjectCreatedPost, store);
5992 if (ret < 0) {
5993 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
5994 // TODO: add a configuration option to make the send a blocking coroutine and reply with an error in case sending failed.
5995 // This should be a global configuration option (probably returning a different handler)
5996 // so we don't need to read the configured values before we perform it.
5997 }
5998 }
5999
6000 int RGWCompleteMultipart::verify_permission()
6001 {
6002 if (s->iam_policy || ! s->iam_user_policies.empty()) {
6003 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
6004 boost::none,
6005 rgw::IAM::s3PutObject,
6006 rgw_obj(s->bucket, s->object));
6007 if (usr_policy_res == Effect::Deny) {
6008 return -EACCES;
6009 }
6010
6011 rgw::IAM::Effect e = Effect::Pass;
6012 if (s->iam_policy) {
6013 e = s->iam_policy->eval(s->env, *s->auth.identity,
6014 rgw::IAM::s3PutObject,
6015 rgw_obj(s->bucket, s->object));
6016 }
6017 if (e == Effect::Allow) {
6018 return 0;
6019 } else if (e == Effect::Deny) {
6020 return -EACCES;
6021 } else if (usr_policy_res == Effect::Allow) {
6022 return 0;
6023 }
6024 }
6025
6026 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
6027 return -EACCES;
6028 }
6029
6030 return 0;
6031 }
6032
6033 void RGWCompleteMultipart::pre_exec()
6034 {
6035 rgw_bucket_object_pre_exec(s);
6036 }
6037
6038 void RGWCompleteMultipart::execute()
6039 {
6040 RGWMultiCompleteUpload *parts;
6041 map<int, string>::iterator iter;
6042 RGWMultiXMLParser parser;
6043 string meta_oid;
6044 map<uint32_t, RGWUploadPartInfo> obj_parts;
6045 map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
6046 map<string, bufferlist> attrs;
6047 off_t ofs = 0;
6048 MD5 hash;
6049 char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
6050 char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
6051 bufferlist etag_bl;
6052 rgw_obj meta_obj;
6053 rgw_obj target_obj;
6054 RGWMPObj mp;
6055 RGWObjManifest manifest;
6056 uint64_t olh_epoch = 0;
6057
6058 op_ret = get_params();
6059 if (op_ret < 0)
6060 return;
6061 op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
6062 if (op_ret < 0) {
6063 return;
6064 }
6065
6066 if (!data.length()) {
6067 op_ret = -ERR_MALFORMED_XML;
6068 return;
6069 }
6070
6071 if (!parser.init()) {
6072 op_ret = -EIO;
6073 return;
6074 }
6075
6076 if (!parser.parse(data.c_str(), data.length(), 1)) {
6077 op_ret = -ERR_MALFORMED_XML;
6078 return;
6079 }
6080
6081 parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
6082 if (!parts || parts->parts.empty()) {
6083 op_ret = -ERR_MALFORMED_XML;
6084 return;
6085 }
6086
6087 if ((int)parts->parts.size() >
6088 s->cct->_conf->rgw_multipart_part_upload_limit) {
6089 op_ret = -ERANGE;
6090 return;
6091 }
6092
6093 mp.init(s->object.name, upload_id);
6094 meta_oid = mp.get_meta();
6095
6096 int total_parts = 0;
6097 int handled_parts = 0;
6098 int max_parts = 1000;
6099 int marker = 0;
6100 bool truncated;
6101 RGWCompressionInfo cs_info;
6102 bool compressed = false;
6103 uint64_t accounted_size = 0;
6104
6105 uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;
6106
6107 list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */
6108
6109 bool versioned_object = s->bucket_info.versioning_enabled();
6110
6111 iter = parts->parts.begin();
6112
6113 meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
6114 meta_obj.set_in_extra_data(true);
6115 meta_obj.index_hash_source = s->object.name;
6116
6117 /* take a cls lock on meta_obj to prevent racing completions (or retries)
6118 * from deleting the parts */
6119 rgw_pool meta_pool;
6120 rgw_raw_obj raw_obj;
6121 int max_lock_secs_mp =
6122 s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
6123 utime_t dur(max_lock_secs_mp, 0);
6124
6125 store->getRados()->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
6126 store->getRados()->get_obj_data_pool((s->bucket_info).placement_rule,
6127 meta_obj,&meta_pool);
6128 store->getRados()->open_pool_ctx(meta_pool, serializer.ioctx, true);
6129
6130 op_ret = serializer.try_lock(raw_obj.oid, dur);
6131 if (op_ret < 0) {
6132 ldpp_dout(this, 0) << "failed to acquire lock" << dendl;
6133 op_ret = -ERR_INTERNAL_ERROR;
6134 s->err.message = "This multipart completion is already in progress";
6135 return;
6136 }
6137
6138 op_ret = get_obj_attrs(store, s, meta_obj, attrs);
6139
6140 if (op_ret < 0) {
6141 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
6142 << " ret=" << op_ret << dendl;
6143 return;
6144 }
6145
6146 do {
6147 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
6148 marker, obj_parts, &marker, &truncated);
6149 if (op_ret == -ENOENT) {
6150 op_ret = -ERR_NO_SUCH_UPLOAD;
6151 }
6152 if (op_ret < 0)
6153 return;
6154
6155 total_parts += obj_parts.size();
6156 if (!truncated && total_parts != (int)parts->parts.size()) {
6157 ldpp_dout(this, 0) << "NOTICE: total parts mismatch: have: " << total_parts
6158 << " expected: " << parts->parts.size() << dendl;
6159 op_ret = -ERR_INVALID_PART;
6160 return;
6161 }
6162
6163 for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
6164 uint64_t part_size = obj_iter->second.accounted_size;
6165 if (handled_parts < (int)parts->parts.size() - 1 &&
6166 part_size < min_part_size) {
6167 op_ret = -ERR_TOO_SMALL;
6168 return;
6169 }
6170
6171 char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
6172 if (iter->first != (int)obj_iter->first) {
6173 ldpp_dout(this, 0) << "NOTICE: parts num mismatch: next requested: "
6174 << iter->first << " next uploaded: "
6175 << obj_iter->first << dendl;
6176 op_ret = -ERR_INVALID_PART;
6177 return;
6178 }
6179 string part_etag = rgw_string_unquote(iter->second);
6180 if (part_etag.compare(obj_iter->second.etag) != 0) {
6181 ldpp_dout(this, 0) << "NOTICE: etag mismatch: part: " << iter->first
6182 << " etag: " << iter->second << dendl;
6183 op_ret = -ERR_INVALID_PART;
6184 return;
6185 }
6186
6187 hex_to_buf(obj_iter->second.etag.c_str(), petag,
6188 CEPH_CRYPTO_MD5_DIGESTSIZE);
6189 hash.Update((const unsigned char *)petag, sizeof(petag));
6190
6191 RGWUploadPartInfo& obj_part = obj_iter->second;
6192
6193 /* update manifest for part */
6194 string oid = mp.get_part(obj_iter->second.num);
6195 rgw_obj src_obj;
6196 src_obj.init_ns(s->bucket, oid, mp_ns);
6197
6198 if (obj_part.manifest.empty()) {
6199 ldpp_dout(this, 0) << "ERROR: empty manifest for object part: obj="
6200 << src_obj << dendl;
6201 op_ret = -ERR_INVALID_PART;
6202 return;
6203 } else {
6204 manifest.append(obj_part.manifest, store->svc()->zone);
6205 }
6206
6207 bool part_compressed = (obj_part.cs_info.compression_type != "none");
6208 if ((handled_parts > 0) &&
6209 ((part_compressed != compressed) ||
6210 (cs_info.compression_type != obj_part.cs_info.compression_type))) {
6211 ldpp_dout(this, 0) << "ERROR: compression type was changed during multipart upload ("
6212 << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
6213 op_ret = -ERR_INVALID_PART;
6214 return;
6215 }
6216
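// Rebase this part's compression blocks onto object-wide offsets: old_ofs is
// shifted by the original bytes accumulated so far, and new_ofs continues
// from the end of the previously appended compressed data.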
6217 if (part_compressed) {
6218 int64_t new_ofs; // offset in compression data for new part
6219 if (cs_info.blocks.size() > 0)
6220 new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
6221 else
6222 new_ofs = 0;
6223 for (const auto& block : obj_part.cs_info.blocks) {
6224 compression_block cb;
6225 cb.old_ofs = block.old_ofs + cs_info.orig_size;
6226 cb.new_ofs = new_ofs;
6227 cb.len = block.len;
6228 cs_info.blocks.push_back(cb);
6229 new_ofs = cb.new_ofs + cb.len;
6230 }
6231 if (!compressed)
6232 cs_info.compression_type = obj_part.cs_info.compression_type;
6233 cs_info.orig_size += obj_part.cs_info.orig_size;
6234 compressed = true;
6235 }
6236
6237 rgw_obj_index_key remove_key;
6238 src_obj.key.get_index_key(&remove_key);
6239
6240 remove_objs.push_back(remove_key);
6241
6242 ofs += obj_part.size;
6243 accounted_size += obj_part.accounted_size;
6244 }
6245 } while (truncated);
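// The final multipart ETag is the MD5 over the concatenated binary part ETags,
// suffixed with "-<number of parts>" (the usual S3 multipart ETag form).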
6246 hash.Final((unsigned char *)final_etag);
6247
6248 buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
6249 snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
6250 "-%lld", (long long)parts->parts.size());
6251 etag = final_etag_str;
6252 ldpp_dout(this, 10) << "calculated etag: " << final_etag_str << dendl;
6253
6254 etag_bl.append(final_etag_str, strlen(final_etag_str));
6255
6256 attrs[RGW_ATTR_ETAG] = etag_bl;
6257
6258 if (compressed) {
6259 // write compression attribute to full object
6260 bufferlist tmp;
6261 encode(cs_info, tmp);
6262 attrs[RGW_ATTR_COMPRESSION] = tmp;
6263 }
6264
6265 target_obj.init(s->bucket, s->object.name);
6266 if (versioned_object) {
6267 if (!version_id.empty()) {
6268 target_obj.key.set_instance(version_id);
6269 } else {
6270 store->getRados()->gen_rand_obj_instance_name(&target_obj);
6271 version_id = target_obj.key.get_instance();
6272 }
6273 }
6274
6275 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
6276
6277 obj_ctx.set_atomic(target_obj);
6278
6279 RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
6280 RGWRados::Object::Write obj_op(&op_target);
6281
6282 obj_op.meta.manifest = &manifest;
6283 obj_op.meta.remove_objs = &remove_objs;
6284
6285 obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
6286 obj_op.meta.owner = s->owner.get_id();
6287 obj_op.meta.flags = PUT_OBJ_CREATE;
6288 obj_op.meta.modify_tail = true;
6289 obj_op.meta.completeMultipart = true;
6290 obj_op.meta.olh_epoch = olh_epoch;
6291 op_ret = obj_op.write_meta(ofs, accounted_size, attrs, s->yield);
6292 if (op_ret < 0)
6293 return;
6294
6295 // remove the upload obj
6296 int r = store->getRados()->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
6297 s->bucket_info, meta_obj, 0);
6298 if (r >= 0) {
6299 /* serializer's exclusive lock is released */
6300 serializer.clear_locked();
6301 } else {
6302 ldpp_dout(this, 0) << "WARNING: failed to remove object " << meta_obj << dendl;
6303 }
6304
6305 const auto ret = rgw::notify::publish(s, s->object, ofs, ceph::real_clock::now(), final_etag_str, rgw::notify::ObjectCreatedCompleteMultipartUpload, store);
6306
6307 if (ret < 0) {
6308 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
6309 // TODO: add a configuration option to make the send a blocking coroutine and reply with an error in case sending failed.
6310 // This should be a global configuration option (probably returning a different handler)
6311 // so we don't need to read the configured values before we perform it.
6312 }
6313 }
6314
6315 int RGWCompleteMultipart::MPSerializer::try_lock(
6316 const std::string& _oid,
6317 utime_t dur)
6318 {
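// Take an exclusive cls lock on the multipart meta object; assert_exists()
// makes the operation fail if the meta object is already gone (e.g. the
// upload was completed or aborted by a racing request).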
6319 oid = _oid;
6320 op.assert_exists();
6321 lock.set_duration(dur);
6322 lock.lock_exclusive(&op);
6323 int ret = rgw_rados_operate(ioctx, oid, &op, null_yield);
6324 if (! ret) {
6325 locked = true;
6326 }
6327 return ret;
6328 }
6329
6330 void RGWCompleteMultipart::complete()
6331 {
6332 /* release the exclusive lock, if it has not been released already */
6333 if (unlikely(serializer.locked)) {
6334 int r = serializer.unlock();
6335 if (r < 0) {
6336 ldpp_dout(this, 0) << "WARNING: failed to unlock " << serializer.oid << dendl;
6337 }
6338 }
6339 send_response();
6340 }
6341
6342 int RGWAbortMultipart::verify_permission()
6343 {
6344 if (s->iam_policy || ! s->iam_user_policies.empty()) {
6345 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
6346 boost::none,
6347 rgw::IAM::s3AbortMultipartUpload,
6348 rgw_obj(s->bucket, s->object));
6349 if (usr_policy_res == Effect::Deny) {
6350 return -EACCES;
6351 }
6352
6353 rgw::IAM::Effect e = Effect::Pass;
6354 if (s->iam_policy) {
6355 e = s->iam_policy->eval(s->env, *s->auth.identity,
6356 rgw::IAM::s3AbortMultipartUpload,
6357 rgw_obj(s->bucket, s->object));
6358 }
6359 if (e == Effect::Allow) {
6360 return 0;
6361 } else if (e == Effect::Deny) {
6362 return -EACCES;
6363 } else if (usr_policy_res == Effect::Allow)
6364 return 0;
6365 }
6366
6367 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
6368 return -EACCES;
6369 }
6370
6371 return 0;
6372 }
6373
6374 void RGWAbortMultipart::pre_exec()
6375 {
6376 rgw_bucket_object_pre_exec(s);
6377 }
6378
6379 void RGWAbortMultipart::execute()
6380 {
6381 op_ret = -EINVAL;
6382 string upload_id;
6383 string meta_oid;
6384 upload_id = s->info.args.get("uploadId");
6385 rgw_obj meta_obj;
6386 RGWMPObj mp;
6387
6388 if (upload_id.empty() || s->object.empty())
6389 return;
6390
6391 mp.init(s->object.name, upload_id);
6392 meta_oid = mp.get_meta();
6393
6394 op_ret = get_multipart_info(store, s, meta_oid, nullptr, nullptr, nullptr);
6395 if (op_ret < 0)
6396 return;
6397
6398 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
6399 op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
6400 }
6401
6402 int RGWListMultipart::verify_permission()
6403 {
6404 if (!verify_object_permission(this, s, rgw::IAM::s3ListMultipartUploadParts))
6405 return -EACCES;
6406
6407 return 0;
6408 }
6409
6410 void RGWListMultipart::pre_exec()
6411 {
6412 rgw_bucket_object_pre_exec(s);
6413 }
6414
6415 void RGWListMultipart::execute()
6416 {
6417 string meta_oid;
6418 RGWMPObj mp;
6419
6420 op_ret = get_params();
6421 if (op_ret < 0)
6422 return;
6423
6424 mp.init(s->object.name, upload_id);
6425 meta_oid = mp.get_meta();
6426
6427 op_ret = get_multipart_info(store, s, meta_oid, &policy, nullptr, nullptr);
6428 if (op_ret < 0)
6429 return;
6430
6431 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
6432 marker, parts, NULL, &truncated);
6433 }
6434
6435 int RGWListBucketMultiparts::verify_permission()
6436 {
6437 if (!verify_bucket_permission(this,
6438 s,
6439 rgw::IAM::s3ListBucketMultipartUploads))
6440 return -EACCES;
6441
6442 return 0;
6443 }
6444
6445 void RGWListBucketMultiparts::pre_exec()
6446 {
6447 rgw_bucket_object_pre_exec(s);
6448 }
6449
6450 void RGWListBucketMultiparts::execute()
6451 {
6452 vector<rgw_bucket_dir_entry> objs;
6453 string marker_meta;
6454
6455 op_ret = get_params();
6456 if (op_ret < 0)
6457 return;
6458
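// For Swift requests a "path" query argument implies a prefix listing with a
// "/" delimiter, and may not be combined with explicit prefix/delimiter
// parameters.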
6459 if (s->prot_flags & RGW_REST_SWIFT) {
6460 string path_args;
6461 path_args = s->info.args.get("path");
6462 if (!path_args.empty()) {
6463 if (!delimiter.empty() || !prefix.empty()) {
6464 op_ret = -EINVAL;
6465 return;
6466 }
6467 prefix = path_args;
6468 delimiter="/";
6469 }
6470 }
6471 marker_meta = marker.get_meta();
6472
6473 op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
6474 max_uploads, &objs, &common_prefixes, &is_truncated);
6475 if (op_ret < 0) {
6476 return;
6477 }
6478
6479 if (!objs.empty()) {
6480 vector<rgw_bucket_dir_entry>::iterator iter;
6481 RGWMultipartUploadEntry entry;
6482 for (iter = objs.begin(); iter != objs.end(); ++iter) {
6483 rgw_obj_key key(iter->key);
6484 if (!entry.mp.from_meta(key.name))
6485 continue;
6486 entry.obj = *iter;
6487 uploads.push_back(entry);
6488 }
6489 next_marker = entry;
6490 }
6491 }
6492
6493 void RGWGetHealthCheck::execute()
6494 {
6495 if (!g_conf()->rgw_healthcheck_disabling_path.empty() &&
6496 (::access(g_conf()->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
6497 /* Disabling path specified & existent in the filesystem. */
6498 op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
6499 } else {
6500 op_ret = 0; /* 200 OK */
6501 }
6502 }
6503
6504 int RGWDeleteMultiObj::verify_permission()
6505 {
6506 if (s->iam_policy || ! s->iam_user_policies.empty()) {
6507 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
6508 boost::none,
6509 s->object.instance.empty() ?
6510 rgw::IAM::s3DeleteObject :
6511 rgw::IAM::s3DeleteObjectVersion,
6512 ARN(s->bucket));
6513 if (usr_policy_res == Effect::Deny) {
6514 return -EACCES;
6515 }
6516
6517 rgw::IAM::Effect r = Effect::Pass;
6518 if (s->iam_policy) {
6519 r = s->iam_policy->eval(s->env, *s->auth.identity,
6520 s->object.instance.empty() ?
6521 rgw::IAM::s3DeleteObject :
6522 rgw::IAM::s3DeleteObjectVersion,
6523 ARN(s->bucket));
6524 }
6525 if (r == Effect::Allow)
6526 return 0;
6527 else if (r == Effect::Deny)
6528 return -EACCES;
6529 else if (usr_policy_res == Effect::Allow)
6530 return 0;
6531 }
6532
6533 acl_allowed = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
6534 if (!acl_allowed)
6535 return -EACCES;
6536
6537 return 0;
6538 }
6539
6540 void RGWDeleteMultiObj::pre_exec()
6541 {
6542 rgw_bucket_object_pre_exec(s);
6543 }
6544
6545 void RGWDeleteMultiObj::execute()
6546 {
6547 RGWMultiDelDelete *multi_delete;
6548 vector<rgw_obj_key>::iterator iter;
6549 RGWMultiDelXMLParser parser;
6550 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
6551 char* buf;
6552
6553 op_ret = get_params();
6554 if (op_ret < 0) {
6555 goto error;
6556 }
6557
6558 buf = data.c_str();
6559 if (!buf) {
6560 op_ret = -EINVAL;
6561 goto error;
6562 }
6563
6564 if (!parser.init()) {
6565 op_ret = -EINVAL;
6566 goto error;
6567 }
6568
6569 if (!parser.parse(buf, data.length(), 1)) {
6570 op_ret = -EINVAL;
6571 goto error;
6572 }
6573
6574 multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
6575 if (!multi_delete) {
6576 op_ret = -EINVAL;
6577 goto error;
6578 } else {
6579 #define DELETE_MULTI_OBJ_MAX_NUM 1000
6580 int max_num = s->cct->_conf->rgw_delete_multi_obj_max_num;
6581 if (max_num < 0) {
6582 max_num = DELETE_MULTI_OBJ_MAX_NUM;
6583 }
6584 int multi_delete_object_num = multi_delete->objects.size();
6585 if (multi_delete_object_num > max_num) {
6586 op_ret = -ERR_MALFORMED_XML;
6587 goto error;
6588 }
6589 }
6590
6591 if (multi_delete->is_quiet())
6592 quiet = true;
6593
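// When bucket MFA is enabled, deleting specific object versions requires a
// verified MFA token; deletes that name no version are allowed without one.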
6594 if (s->bucket_info.mfa_enabled()) {
6595 bool has_versioned = false;
6596 for (auto i : multi_delete->objects) {
6597 if (!i.instance.empty()) {
6598 has_versioned = true;
6599 break;
6600 }
6601 }
6602 if (has_versioned && !s->mfa_verified) {
6603 ldpp_dout(this, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl;
6604 op_ret = -ERR_MFA_REQUIRED;
6605 goto error;
6606 }
6607 }
6608
6609 begin_response();
6610 if (multi_delete->objects.empty()) {
6611 goto done;
6612 }
6613
6614 for (iter = multi_delete->objects.begin();
6615 iter != multi_delete->objects.end();
6616 ++iter) {
6617 rgw_obj obj(bucket, *iter);
6618 if (s->iam_policy || ! s->iam_user_policies.empty()) {
6619 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
6620 boost::none,
6621 iter->instance.empty() ?
6622 rgw::IAM::s3DeleteObject :
6623 rgw::IAM::s3DeleteObjectVersion,
6624 ARN(obj));
6625 if (usr_policy_res == Effect::Deny) {
6626 send_partial_response(*iter, false, "", -EACCES);
6627 continue;
6628 }
6629
6630 rgw::IAM::Effect e = Effect::Pass;
6631 if (s->iam_policy) {
6632 e = s->iam_policy->eval(s->env,
6633 *s->auth.identity,
6634 iter->instance.empty() ?
6635 rgw::IAM::s3DeleteObject :
6636 rgw::IAM::s3DeleteObjectVersion,
6637 ARN(obj));
6638 }
6639 if ((e == Effect::Deny) ||
6640 (usr_policy_res == Effect::Pass && e == Effect::Pass && !acl_allowed)) {
6641 send_partial_response(*iter, false, "", -EACCES);
6642 continue;
6643 }
6644 }
6645
6646 obj_ctx->set_atomic(obj);
6647
6648 RGWRados::Object del_target(store->getRados(), s->bucket_info, *obj_ctx, obj);
6649 RGWRados::Object::Delete del_op(&del_target);
6650
6651 del_op.params.bucket_owner = s->bucket_owner.get_id();
6652 del_op.params.versioning_status = s->bucket_info.versioning_status();
6653 del_op.params.obj_owner = s->owner;
6654
6655 op_ret = del_op.delete_obj(s->yield);
6656 if (op_ret == -ENOENT) {
6657 op_ret = 0;
6658 }
6659
6660 send_partial_response(*iter, del_op.result.delete_marker,
6661 del_op.result.version_id, op_ret);
6662
6663 const auto obj_state = obj_ctx->get_state(obj);
6664 bufferlist etag_bl;
6665 const auto etag = obj_state->get_attr(RGW_ATTR_ETAG, etag_bl) ? etag_bl.to_str() : "";
6666
6667 const auto ret = rgw::notify::publish(s, obj.key, obj_state->size, obj_state->mtime, etag,
6668 del_op.result.delete_marker && s->object.instance.empty() ? rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete,
6669 store);
6670 if (ret < 0) {
6671 ldpp_dout(this, 5) << "WARNING: publishing notification failed, with error: " << ret << dendl;
6672 // TODO: add a configuration option to make the send a blocking coroutine and reply with an error in case sending failed.
6673 // This should be a global configuration option (probably returning a different handler)
6674 // so we don't need to read the configured values before we perform it.
6675 }
6676 }
6677
6678 /* set the return code to zero, errors at this point will be
6679 dumped to the response */
6680 op_ret = 0;
6681
6682 done:
6683 // will likely segfault if begin_response() has not been called
6684 end_response();
6685 return;
6686
6687 error:
6688 send_status();
6689 return;
6690
6691 }
6692
6693 bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
6694 map<string, bufferlist>& battrs,
6695 ACLOwner& bucket_owner /* out */)
6696 {
6697 RGWAccessControlPolicy bacl(store->ctx());
6698 int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
6699 if (ret < 0) {
6700 return false;
6701 }
6702
6703 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
6704
6705 bucket_owner = bacl.get_owner();
6706
6707 /* We can use global user_acl because each BulkDelete request is allowed
6708 * to work on entities from a single account only. */
6709 return verify_bucket_permission(dpp, s, binfo.bucket, s->user_acl.get(),
6710 &bacl, policy, s->iam_user_policies, rgw::IAM::s3DeleteBucket);
6711 }
6712
6713 bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
6714 {
6715 auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
6716
6717 RGWBucketInfo binfo;
6718 map<string, bufferlist> battrs;
6719 ACLOwner bowner;
6720 RGWObjVersionTracker ot;
6721
6722 rgw_bucket b(rgw_bucket_key(s->user->get_tenant(), path.bucket_name));
6723
6724 int ret = store->ctl()->bucket->read_bucket_info(b, &binfo, s->yield,
6725 RGWBucketCtl::BucketInstance::GetParams()
6726 .set_attrs(&battrs),
6727 &ot);
6728 if (ret < 0) {
6729 goto binfo_fail;
6730 }
6731
6732 if (!verify_permission(binfo, battrs, bowner)) {
6733 ret = -EACCES;
6734 goto auth_fail;
6735 }
6736
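// A path that names an object deletes that object; a bucket-only path deletes
// the bucket itself and unlinks it from the owning user.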
6737 if (!path.obj_key.empty()) {
6738 rgw_obj obj(binfo.bucket, path.obj_key);
6739 obj_ctx.set_atomic(obj);
6740
6741 RGWRados::Object del_target(store->getRados(), binfo, obj_ctx, obj);
6742 RGWRados::Object::Delete del_op(&del_target);
6743
6744 del_op.params.bucket_owner = binfo.owner;
6745 del_op.params.versioning_status = binfo.versioning_status();
6746 del_op.params.obj_owner = bowner;
6747
6748 ret = del_op.delete_obj(s->yield);
6749 if (ret < 0) {
6750 goto delop_fail;
6751 }
6752 } else {
6753 ret = store->getRados()->delete_bucket(binfo, ot, s->yield);
6754 if (0 == ret) {
6755 ret = store->ctl()->bucket->unlink_bucket(binfo.owner, binfo.bucket, s->yield, false);
6756 if (ret < 0) {
6757 ldpp_dout(s, 0) << "WARNING: failed to unlink bucket: ret=" << ret << dendl;
6758 }
6759 }
6760 if (ret < 0) {
6761 goto delop_fail;
6762 }
6763
6764 if (!store->svc()->zone->is_meta_master()) {
6765 bufferlist in_data;
6766 ret = forward_request_to_master(s, &ot.read_version, store, in_data,
6767 nullptr);
6768 if (ret < 0) {
6769 if (ret == -ENOENT) {
6770 /* adjust error, we want to return with NoSuchBucket and not
6771 * NoSuchKey */
6772 ret = -ERR_NO_SUCH_BUCKET;
6773 }
6774 goto delop_fail;
6775 }
6776 }
6777 }
6778
6779 num_deleted++;
6780 return true;
6781
6782
6783 binfo_fail:
6784 if (-ENOENT == ret) {
6785 ldpp_dout(s, 20) << "cannot find bucket = " << path.bucket_name << dendl;
6786 num_unfound++;
6787 } else {
6788 ldpp_dout(s, 20) << "cannot get bucket info, ret = " << ret << dendl;
6789
6790 fail_desc_t failed_item = {
6791 .err = ret,
6792 .path = path
6793 };
6794 failures.push_back(failed_item);
6795 }
6796 return false;
6797
6798 auth_fail:
6799 ldpp_dout(s, 20) << "wrong auth for " << path << dendl;
6800 {
6801 fail_desc_t failed_item = {
6802 .err = ret,
6803 .path = path
6804 };
6805 failures.push_back(failed_item);
6806 }
6807 return false;
6808
6809 delop_fail:
6810 if (-ENOENT == ret) {
6811 ldpp_dout(s, 20) << "cannot find entry " << path << dendl;
6812 num_unfound++;
6813 } else {
6814 fail_desc_t failed_item = {
6815 .err = ret,
6816 .path = path
6817 };
6818 failures.push_back(failed_item);
6819 }
6820 return false;
6821 }
6822
6823 bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
6824 {
6825 ldpp_dout(s, 20) << "in delete_chunk" << dendl;
6826 for (auto path : paths) {
6827 ldpp_dout(s, 20) << "bulk deleting path: " << path << dendl;
6828 delete_single(path);
6829 }
6830
6831 return true;
6832 }
6833
6834 int RGWBulkDelete::verify_permission()
6835 {
6836 return 0;
6837 }
6838
6839 void RGWBulkDelete::pre_exec()
6840 {
6841 rgw_bucket_object_pre_exec(s);
6842 }
6843
6844 void RGWBulkDelete::execute()
6845 {
6846 deleter = std::unique_ptr<Deleter>(new Deleter(this, store, s));
6847
6848 bool is_truncated = false;
6849 do {
6850 list<RGWBulkDelete::acct_path_t> items;
6851
6852 int ret = get_data(items, &is_truncated);
6853 if (ret < 0) {
6854 return;
6855 }
6856
6857 ret = deleter->delete_chunk(items);
6858 } while (!op_ret && is_truncated);
6859
6860 return;
6861 }
6862
6863
6864 constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
6865
6866 int RGWBulkUploadOp::verify_permission()
6867 {
6868 if (s->auth.identity->is_anonymous()) {
6869 return -EACCES;
6870 }
6871
6872 if (! verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
6873 return -EACCES;
6874 }
6875
6876 if (s->user->get_tenant() != s->bucket_tenant) {
6877 ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
6878 << " (user_id.tenant=" << s->user->get_tenant()
6879 << " requested=" << s->bucket_tenant << ")" << dendl;
6880 return -EACCES;
6881 }
6882
6883 if (s->user->get_max_buckets() < 0) {
6884 return -EPERM;
6885 }
6886
6887 return 0;
6888 }
6889
6890 void RGWBulkUploadOp::pre_exec()
6891 {
6892 rgw_bucket_object_pre_exec(s);
6893 }
6894
6895 boost::optional<std::pair<std::string, rgw_obj_key>>
6896 RGWBulkUploadOp::parse_path(const boost::string_ref& path)
6897 {
6898 /* We need to skip all slashes at the beginning in order to preserve
6899 * compliance with Swift. */
6900 const size_t start_pos = path.find_first_not_of('/');
6901
6902 if (boost::string_ref::npos != start_pos) {
6903 /* Separator is the first slash after the leading ones. */
6904 const size_t sep_pos = path.substr(start_pos).find('/');
6905
6906 if (boost::string_ref::npos != sep_pos) {
6907 const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
6908 const auto obj_name = path.substr(sep_pos + 1);
6909
6910 return std::make_pair(bucket_name.to_string(),
6911 rgw_obj_key(obj_name.to_string()));
6912 } else {
6913 /* It's guaranteed here that bucket name is at least one character
6914 * long and is different than slash. */
6915 return std::make_pair(path.substr(start_pos).to_string(),
6916 rgw_obj_key());
6917 }
6918 }
6919
6920 return none;
6921 }
6922
6923 std::pair<std::string, std::string>
6924 RGWBulkUploadOp::handle_upload_path(struct req_state *s)
6925 {
6926 std::string bucket_path, file_prefix;
6927 if (! s->init_state.url_bucket.empty()) {
6928 file_prefix = bucket_path = s->init_state.url_bucket + "/";
6929 if (! s->object.empty()) {
6930 std::string& object_name = s->object.name;
6931
6932 /* rgw_obj_key::empty() has already confirmed that s->object.name is
6933 * non-empty, so we can safely examine its last character. */
6934 if (object_name.back() == '/') {
6935 file_prefix.append(object_name);
6936 } else {
6937 file_prefix.append(object_name).append("/");
6938 }
6939 }
6940 }
6941 return std::make_pair(bucket_path, file_prefix);
6942 }
6943
6944 int RGWBulkUploadOp::handle_dir_verify_permission()
6945 {
6946 if (s->user->get_max_buckets() > 0) {
6947 rgw::sal::RGWBucketList buckets;
6948 std::string marker;
6949 op_ret = rgw_read_user_buckets(store, s->user->get_user(), buckets,
6950 marker, std::string(), s->user->get_max_buckets(),
6951 false);
6952 if (op_ret < 0) {
6953 return op_ret;
6954 }
6955
6956 if (buckets.count() >= static_cast<size_t>(s->user->get_max_buckets())) {
6957 return -ERR_TOO_MANY_BUCKETS;
6958 }
6959 }
6960
6961 return 0;
6962 }
6963
6964 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
6965 {
6966 /* a container- or object-level request already contains the bucket name;
6967 * only an account-level request needs the bucket name appended */
6968 if (info.script_uri.find(bucket_name) != std::string::npos) {
6969 return;
6970 }
6971
6972 ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
6973 info.script_uri.append("/").append(bucket_name);
6974 info.request_uri_aws4 = info.request_uri = info.script_uri;
6975 info.effective_uri = "/" + bucket_name;
6976 }
6977
6978 void RGWBulkUploadOp::init(rgw::sal::RGWRadosStore* const store,
6979 struct req_state* const s,
6980 RGWHandler* const h)
6981 {
6982 RGWOp::init(store, s, h);
6983 dir_ctx.emplace(store->svc()->sysobj->init_obj_ctx());
6984 }
6985
6986 int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
6987 {
6988 ldpp_dout(this, 20) << "got directory=" << path << dendl;
6989
6990 op_ret = handle_dir_verify_permission();
6991 if (op_ret < 0) {
6992 return op_ret;
6993 }
6994
6995 std::string bucket_name;
6996 rgw_obj_key object_junk;
6997 std::tie(bucket_name, object_junk) = *parse_path(path);
6998
6999 rgw_raw_obj obj(store->svc()->zone->get_zone_params().domain_root,
7000 rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));
7001
7002 /* we need to make sure we read the bucket info; it was not read earlier for
7003 * this specific request */
7004 RGWBucketInfo binfo;
7005 std::map<std::string, ceph::bufferlist> battrs;
7006 op_ret = store->getRados()->get_bucket_info(store->svc(), s->bucket_tenant, bucket_name,
7007 binfo, nullptr, s->yield, &battrs);
7008 if (op_ret < 0 && op_ret != -ENOENT) {
7009 return op_ret;
7010 }
7011 const bool bucket_exists = (op_ret != -ENOENT);
7012
7013 if (bucket_exists) {
7014 RGWAccessControlPolicy old_policy(s->cct);
7015 int r = rgw_op_get_bucket_policy_from_attr(s->cct, store, binfo,
7016 battrs, &old_policy);
7017 if (r >= 0) {
7018 if (old_policy.get_owner().get_id().compare(s->user->get_user()) != 0) {
7019 op_ret = -EEXIST;
7020 return op_ret;
7021 }
7022 }
7023 }
7024
7025 RGWBucketInfo master_info;
7026 rgw_bucket *pmaster_bucket = nullptr;
7027 uint32_t *pmaster_num_shards = nullptr;
7028 real_time creation_time;
7029 obj_version objv, ep_objv, *pobjv = nullptr;
7030
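// On a non-master zone the bucket creation is first forwarded to the metadata
// master; the bucket info it returns seeds the local create_bucket() call
// below.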
7031 if (! store->svc()->zone->is_meta_master()) {
7032 JSONParser jp;
7033 ceph::bufferlist in_data;
7034 req_info info = s->info;
7035 forward_req_info(s->cct, info, bucket_name);
7036 op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
7037 if (op_ret < 0) {
7038 return op_ret;
7039 }
7040
7041 JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
7042 JSONDecoder::decode_json("object_ver", objv, &jp);
7043 JSONDecoder::decode_json("bucket_info", master_info, &jp);
7044
7045 ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
7046 ldpp_dout(this, 20) << "got creation_time="<< master_info.creation_time << dendl;
7047
7048 pmaster_bucket= &master_info.bucket;
7049 creation_time = master_info.creation_time;
7050 pmaster_num_shards = &master_info.num_shards;
7051 pobjv = &objv;
7052 } else {
7053 pmaster_bucket = nullptr;
7054 pmaster_num_shards = nullptr;
7055 }
7056
7057 rgw_placement_rule placement_rule(binfo.placement_rule, s->info.storage_class);
7058
7059 if (bucket_exists) {
7060 rgw_placement_rule selected_placement_rule;
7061 rgw_bucket bucket;
7062 bucket.tenant = s->bucket_tenant;
7063 bucket.name = s->bucket_name;
7064 op_ret = store->svc()->zone->select_bucket_placement(s->user->get_info(),
7065 store->svc()->zone->get_zonegroup().get_id(),
7066 placement_rule,
7067 &selected_placement_rule,
7068 nullptr);
7069 if (selected_placement_rule != binfo.placement_rule) {
7070 op_ret = -EEXIST;
7071 ldpp_dout(this, 20) << "non-coherent placement rule" << dendl;
7072 return op_ret;
7073 }
7074 }
7075
7076 /* Create metadata: ACLs. */
7077 std::map<std::string, ceph::bufferlist> attrs;
7078 RGWAccessControlPolicy policy;
7079 policy.create_default(s->user->get_id(), s->user->get_display_name());
7080 ceph::bufferlist aclbl;
7081 policy.encode(aclbl);
7082 attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
7083
7084 RGWQuotaInfo quota_info;
7085 const RGWQuotaInfo * pquota_info = nullptr;
7086
7087 rgw_bucket bucket;
7088 bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
7089 bucket.name = bucket_name;
7090
7091
7092 RGWBucketInfo out_info;
7093 op_ret = store->getRados()->create_bucket(s->user->get_info(),
7094 bucket,
7095 store->svc()->zone->get_zonegroup().get_id(),
7096 placement_rule, binfo.swift_ver_location,
7097 pquota_info, attrs,
7098 out_info, pobjv, &ep_objv, creation_time,
7099 pmaster_bucket, pmaster_num_shards, true);
7100 /* continue if EEXIST and create_bucket will fail below. this way we can
7101 * recover from a partial create by retrying it. */
7102 ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret
7103 << ", bucket=" << bucket << dendl;
7104
7105 if (op_ret && op_ret != -EEXIST) {
7106 return op_ret;
7107 }
7108
7109 const bool existed = (op_ret == -EEXIST);
7110 if (existed) {
7111 /* bucket already existed, might have raced with another bucket creation, or
7112 * might be partial bucket creation that never completed. Read existing bucket
7113 * info, verify that the reported bucket owner is the current user.
7114 * If all is ok then update the user's list of buckets.
7115 * Otherwise inform client about a name conflict.
7116 */
7117 if (out_info.owner.compare(s->user->get_id()) != 0) {
7118 op_ret = -EEXIST;
7119 ldpp_dout(this, 20) << "conflicting bucket name" << dendl;
7120 return op_ret;
7121 }
7122 bucket = out_info.bucket;
7123 }
7124
7125 op_ret = store->ctl()->bucket->link_bucket(s->user->get_id(), bucket,
7126 out_info.creation_time,
7127 s->yield, false);
7128 if (op_ret && !existed && op_ret != -EEXIST) {
7129 /* if it exists (or previously existed), don't remove it! */
7130 op_ret = store->ctl()->bucket->unlink_bucket(s->user->get_id(), bucket, s->yield);
7131 if (op_ret < 0) {
7132 ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl;
7133 }
7134 } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
7135 ldpp_dout(this, 20) << "container already exists" << dendl;
7136 op_ret = -ERR_BUCKET_EXISTS;
7137 }
7138
7139 return op_ret;
7140 }
7141
7142
7143 bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
7144 const rgw_obj& obj,
7145 std::map<std::string, ceph::bufferlist>& battrs,
7146 ACLOwner& bucket_owner /* out */)
7147 {
7148 RGWAccessControlPolicy bacl(store->ctx());
7149 op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
7150 if (op_ret < 0) {
7151 ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl;
7152 return false;
7153 }
7154
7155 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
7156
7157 bucket_owner = bacl.get_owner();
7158 if (policy || ! s->iam_user_policies.empty()) {
7159 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
7160 boost::none,
7161 rgw::IAM::s3PutObject, obj);
7162 if (usr_policy_res == Effect::Deny) {
7163 return false;
7164 }
7165 auto e = policy->eval(s->env, *s->auth.identity,
7166 rgw::IAM::s3PutObject, obj);
7167 if (e == Effect::Allow) {
7168 return true;
7169 } else if (e == Effect::Deny) {
7170 return false;
7171 } else if (usr_policy_res == Effect::Allow) {
7172 return true;
7173 }
7174 }
7175
7176 return verify_bucket_permission_no_policy(this, s, s->user_acl.get(),
7177 &bacl, RGW_PERM_WRITE);
7178 }
7179
7180 int RGWBulkUploadOp::handle_file(const boost::string_ref path,
7181 const size_t size,
7182 AlignedStreamGetter& body)
7183 {
7184
7185 ldpp_dout(this, 20) << "got file=" << path << ", size=" << size << dendl;
7186
7187 if (size > static_cast<size_t>(s->cct->_conf->rgw_max_put_size)) {
7188 op_ret = -ERR_TOO_LARGE;
7189 return op_ret;
7190 }
7191
7192 std::string bucket_name;
7193 rgw_obj_key object;
7194 std::tie(bucket_name, object) = *parse_path(path);
7195
7196 auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
7197 RGWBucketInfo binfo;
7198 std::map<std::string, ceph::bufferlist> battrs;
7199 ACLOwner bowner;
7200 op_ret = store->getRados()->get_bucket_info(store->svc(), s->user->get_tenant(),
7201 bucket_name, binfo, nullptr, s->yield, &battrs);
7202 if (op_ret == -ENOENT) {
7203 ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl;
7204 } else if (op_ret < 0) {
7205 return op_ret;
7206 }
7207
7208 if (! handle_file_verify_permission(binfo,
7209 rgw_obj(binfo.bucket, object),
7210 battrs, bowner)) {
7211 ldpp_dout(this, 20) << "object creation unauthorized" << dendl;
7212 op_ret = -EACCES;
7213 return op_ret;
7214 }
7215
7216 op_ret = store->getRados()->check_quota(bowner.get_id(), binfo.bucket,
7217 user_quota, bucket_quota, size);
7218 if (op_ret < 0) {
7219 return op_ret;
7220 }
7221
7222 rgw_obj obj(binfo.bucket, object);
7223 if (s->bucket_info.versioning_enabled()) {
7224 store->getRados()->gen_rand_obj_instance_name(&obj);
7225 }
7226
7227 rgw_placement_rule dest_placement = s->dest_placement;
7228 dest_placement.inherit_from(binfo.placement_rule);
7229
7230 auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
7231 s->yield);
7232
7233 using namespace rgw::putobj;
7234 AtomicObjectProcessor processor(&*aio, store, binfo, &s->dest_placement, bowner.get_id(),
7235 obj_ctx, obj, 0, s->req_id, this, s->yield);
7236
7237 op_ret = processor.prepare(s->yield);
7238 if (op_ret < 0) {
7239 ldpp_dout(this, 20) << "cannot prepare processor due to ret=" << op_ret << dendl;
7240 return op_ret;
7241 }
7242
7243 /* No filters by default. */
7244 DataProcessor *filter = &processor;
7245
7246 const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(
7247 dest_placement);
7248 CompressorRef plugin;
7249 boost::optional<RGWPutObj_Compress> compressor;
7250 if (compression_type != "none") {
7251 plugin = Compressor::create(s->cct, compression_type);
7252 if (! plugin) {
7253 ldpp_dout(this, 1) << "Cannot load plugin for rgw_compression_type "
7254 << compression_type << dendl;
7255 } else {
7256 compressor.emplace(s->cct, plugin, filter);
7257 filter = &*compressor;
7258 }
7259 }
7260
7261 /* Upload file content. */
7262 ssize_t len = 0;
7263 size_t ofs = 0;
7264 MD5 hash;
7265 do {
7266 ceph::bufferlist data;
7267 len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);
7268
7269 ldpp_dout(this, 20) << "body=" << data.c_str() << dendl;
7270 if (len < 0) {
7271 op_ret = len;
7272 return op_ret;
7273 } else if (len > 0) {
7274 hash.Update((const unsigned char *)data.c_str(), data.length());
7275 op_ret = filter->process(std::move(data), ofs);
7276 if (op_ret < 0) {
7277 ldpp_dout(this, 20) << "filter->process() returned ret=" << op_ret << dendl;
7278 return op_ret;
7279 }
7280
7281 ofs += len;
7282 }
7283
7284 } while (len > 0);
7285
7286 // flush
7287 op_ret = filter->process({}, ofs);
7288 if (op_ret < 0) {
7289 return op_ret;
7290 }
7291
7292 if (ofs != size) {
7293 ldpp_dout(this, 10) << "real file size different from declared" << dendl;
7294 op_ret = -EINVAL;
7295 return op_ret;
7296 }
7297
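// Re-check the quota now that the data has been written; at this point the
// declared size has been verified to match the bytes actually received.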
7298 op_ret = store->getRados()->check_quota(bowner.get_id(), binfo.bucket,
7299 user_quota, bucket_quota, size);
7300 if (op_ret < 0) {
7301 ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl;
7302 return op_ret;
7303 }
7304
7305 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
7306 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
7307 hash.Final(m);
7308 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
7309
7310 /* Create metadata: ETAG. */
7311 std::map<std::string, ceph::bufferlist> attrs;
7312 std::string etag = calc_md5;
7313 ceph::bufferlist etag_bl;
7314 etag_bl.append(etag.c_str(), etag.size() + 1);
7315 attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));
7316
7317 /* Create metadata: ACLs. */
7318 RGWAccessControlPolicy policy;
7319 policy.create_default(s->user->get_id(), s->user->get_display_name());
7320 ceph::bufferlist aclbl;
7321 policy.encode(aclbl);
7322 attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
7323
7324 /* Create metadata: compression info. */
7325 if (compressor && compressor->is_compressed()) {
7326 ceph::bufferlist tmp;
7327 RGWCompressionInfo cs_info;
7328 cs_info.compression_type = plugin->get_type_name();
7329 cs_info.orig_size = s->obj_size;
7330 cs_info.blocks = std::move(compressor->get_compression_blocks());
7331 encode(cs_info, tmp);
7332 attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
7333 }
7334
7335 /* Complete the transaction. */
7336 op_ret = processor.complete(size, etag, nullptr, ceph::real_time(),
7337 attrs, ceph::real_time() /* delete_at */,
7338 nullptr, nullptr, nullptr, nullptr, nullptr,
7339 s->yield);
7340 if (op_ret < 0) {
7341 ldpp_dout(this, 20) << "processor::complete returned op_ret=" << op_ret << dendl;
7342 }
7343
7344 return op_ret;
7345 }
7346
7347 void RGWBulkUploadOp::execute()
7348 {
7349 ceph::bufferlist buffer(64 * 1024);
7350
7351 ldpp_dout(this, 20) << "start" << dendl;
7352
7353 /* Create an instance of a stream-abstracting class. Having this indirection
7354 * allows for easy introduction of decompressors like gzip and bzip2. */
7355 auto stream = create_stream();
7356 if (! stream) {
7357 return;
7358 }
7359
7360 /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See:
7361 * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
7362 std::string bucket_path, file_prefix;
7363 std::tie(bucket_path, file_prefix) = handle_upload_path(s);
7364
7365 auto status = rgw::tar::StatusIndicator::create();
7366 do {
7367 op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
7368 if (op_ret < 0) {
7369 ldpp_dout(this, 2) << "cannot read header" << dendl;
7370 return;
7371 }
7372
7373 /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
7374 * must be tracked to detect the end of the archive. It occurs when both of
7375 * them are empty (zeroed). Tracking this particular inter-block dependency
7376 * is the responsibility of the rgw::tar::StatusIndicator class. */
7377 boost::optional<rgw::tar::HeaderView> header;
7378 std::tie(status, header) = rgw::tar::interpret_block(status, buffer);
7379
7380 if (! status.empty() && header) {
7381 /* This specific block isn't empty (entirely zeroed), so we can parse
7382 * it as a TAR header and dispatch. At the moment we support only
7383 * regular files and directories. Everything else (symlinks, devices)
7384 * will be ignored but won't abort the whole upload. */
7385 switch (header->get_filetype()) {
7386 case rgw::tar::FileType::NORMAL_FILE: {
7387 ldpp_dout(this, 2) << "handling regular file" << dendl;
7388
7389 boost::string_ref filename = bucket_path.empty() ? header->get_filename() :
7390 file_prefix + header->get_filename().to_string();
7391 auto body = AlignedStreamGetter(0, header->get_filesize(),
7392 rgw::tar::BLOCK_SIZE, *stream);
7393 op_ret = handle_file(filename,
7394 header->get_filesize(),
7395 body);
7396 if (! op_ret) {
7397 /* Only regular files count. */
7398 num_created++;
7399 } else {
7400 failures.emplace_back(op_ret, filename.to_string());
7401 }
7402 break;
7403 }
7404 case rgw::tar::FileType::DIRECTORY: {
7405 ldpp_dout(this, 2) << "handling regular directory" << dendl;
7406
7407 boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
7408 op_ret = handle_dir(dirname);
7409 if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
7410 failures.emplace_back(op_ret, dirname.to_string());
7411 }
7412 break;
7413 }
7414 default: {
7415 /* Not recognized. Skip. */
7416 op_ret = 0;
7417 break;
7418 }
7419 }
7420
7421 /* In case of any problem with sub-request authorization, Swift simply
7422 * terminates the whole upload immediately. */
7423 if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
7424 terminal_errors)) {
7425 ldpp_dout(this, 2) << "terminating due to ret=" << op_ret << dendl;
7426 break;
7427 }
7428 } else {
7429 ldpp_dout(this, 2) << "an empty block" << dendl;
7430 op_ret = 0;
7431 }
7432
7433 buffer.clear();
7434 } while (! status.eof());
7435
7436 return;
7437 }
7438
7439 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
7440 {
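  /* Drain the rest of the current entry, including tar's zero padding up to
   * the next alignment boundary. Both length and alignment are unsigned and
   * alignment is a power of two, so -length % alignment is exactly the pad
   * size: e.g. length = 1300, alignment = 512 -> 1300 % 512 = 276, pad = 236,
   * aligned_length = 1536 (worked example added for illustration). */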
7441 const size_t aligned_length = length + (-length % alignment);
7442 ceph::bufferlist junk;
7443
7444 DecoratedStreamGetter::get_exactly(aligned_length - position, junk);
7445 }
7446
7447 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
7448 ceph::bufferlist& dst)
7449 {
7450 const size_t max_to_read = std::min(want, length - position);
7451 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
7452 if (len > 0) {
7453 position += len;
7454 }
7455 return len;
7456 }
7457
7458 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
7459 ceph::bufferlist& dst)
7460 {
7461 const auto len = DecoratedStreamGetter::get_exactly(want, dst);
7462 if (len > 0) {
7463 position += len;
7464 }
7465 return len;
7466 }
7467
7468 int RGWSetAttrs::verify_permission()
7469 {
7470 // This looks to be part of the RGW-NFS machinery and has no S3 or
7471 // Swift equivalent.
7472 bool perm;
7473 if (!s->object.empty()) {
7474 perm = verify_object_permission_no_policy(this, s, RGW_PERM_WRITE);
7475 } else {
7476 perm = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
7477 }
7478 if (!perm)
7479 return -EACCES;
7480
7481 return 0;
7482 }
7483
7484 void RGWSetAttrs::pre_exec()
7485 {
7486 rgw_bucket_object_pre_exec(s);
7487 }
7488
7489 void RGWSetAttrs::execute()
7490 {
7491 op_ret = get_params();
7492 if (op_ret < 0)
7493 return;
7494
7495 rgw_obj obj(s->bucket, s->object);
7496
7497 if (!s->object.empty()) {
7498 store->getRados()->set_atomic(s->obj_ctx, obj);
7499 op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr, s->yield);
7500 } else {
7501 for (auto& iter : attrs) {
7502 s->bucket_attrs[iter.first] = std::move(iter.second);
7503 }
7504 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
7505 &s->bucket_info.objv_tracker,
7506 s->yield);
7507 }
7508 }
7509
7510 void RGWGetObjLayout::pre_exec()
7511 {
7512 rgw_bucket_object_pre_exec(s);
7513 }
7514
7515 void RGWGetObjLayout::execute()
7516 {
7517 rgw_obj obj(s->bucket, s->object);
7518 RGWRados::Object target(store->getRados(),
7519 s->bucket_info,
7520 *static_cast<RGWObjectCtx *>(s->obj_ctx),
7521 rgw_obj(s->bucket, s->object));
7522 RGWRados::Object::Read stat_op(&target);
7523
7524 op_ret = stat_op.prepare(s->yield);
7525 if (op_ret < 0) {
7526 return;
7527 }
7528
7529 head_obj = stat_op.state.head_obj;
7530
7531 op_ret = target.get_manifest(&manifest, s->yield);
7532 }
7533
7534
7535 int RGWConfigBucketMetaSearch::verify_permission()
7536 {
7537 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7538 return -EACCES;
7539 }
7540
7541 return 0;
7542 }
7543
7544 void RGWConfigBucketMetaSearch::pre_exec()
7545 {
7546 rgw_bucket_object_pre_exec(s);
7547 }
7548
7549 void RGWConfigBucketMetaSearch::execute()
7550 {
7551 op_ret = get_params();
7552 if (op_ret < 0) {
7553 ldpp_dout(this, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
7554 return;
7555 }
7556
7557 s->bucket_info.mdsearch_config = mdsearch_config;
7558
7559 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
7560 if (op_ret < 0) {
7561 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
7562 << " returned err=" << op_ret << dendl;
7563 return;
7564 }
7565 }
7566
7567 int RGWGetBucketMetaSearch::verify_permission()
7568 {
7569 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7570 return -EACCES;
7571 }
7572
7573 return 0;
7574 }
7575
7576 void RGWGetBucketMetaSearch::pre_exec()
7577 {
7578 rgw_bucket_object_pre_exec(s);
7579 }
7580
7581 int RGWDelBucketMetaSearch::verify_permission()
7582 {
7583 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7584 return -EACCES;
7585 }
7586
7587 return 0;
7588 }
7589
7590 void RGWDelBucketMetaSearch::pre_exec()
7591 {
7592 rgw_bucket_object_pre_exec(s);
7593 }
7594
7595 void RGWDelBucketMetaSearch::execute()
7596 {
7597 s->bucket_info.mdsearch_config.clear();
7598
7599 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
7600 if (op_ret < 0) {
7601 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
7602 << " returned err=" << op_ret << dendl;
7603 return;
7604 }
7605 }
7606
7607
7608 RGWHandler::~RGWHandler()
7609 {
7610 }
7611
7612 int RGWHandler::init(rgw::sal::RGWRadosStore *_store,
7613 struct req_state *_s,
7614 rgw::io::BasicClient *cio)
7615 {
7616 store = _store;
7617 s = _s;
7618
7619 return 0;
7620 }
7621
7622 int RGWHandler::do_init_permissions()
7623 {
7624 int ret = rgw_build_bucket_policies(store, s);
7625 if (ret < 0) {
7626 ldpp_dout(s, 10) << "init_permissions on " << s->bucket
7627 << " failed, ret=" << ret << dendl;
7628 return ret==-ENODATA ? -EACCES : ret;
7629 }
7630
7631 rgw_build_iam_environment(store, s);
7632 return ret;
7633 }
7634
7635 int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
7636 {
7637 if (only_bucket) {
7638 /* already read bucket info */
7639 return 0;
7640 }
7641 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
7642
7643 if (ret < 0) {
7644 ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":"
7645 << s->object << " only_bucket=" << only_bucket
7646 << " ret=" << ret << dendl;
7647 if (ret == -ENODATA)
7648 ret = -EACCES;
7649 if (s->auth.identity->is_anonymous() && ret == -EACCES)
7650 ret = -EPERM;
7651 }
7652
7653 return ret;
7654 }
7655
7656 int RGWOp::error_handler(int err_no, string *error_content) {
7657 return dialect_handler->error_handler(err_no, error_content);
7658 }
7659
7660 int RGWHandler::error_handler(int err_no, string *error_content) {
7661 // This is the do-nothing error handler
7662 return err_no;
7663 }
7664
7665 std::ostream& RGWOp::gen_prefix(std::ostream& out) const
7666 {
7667 // append <dialect>:<op name> to the prefix
7668 return s->gen_prefix(out) << s->dialect << ':' << name() << ' ';
7669 }
7670
7671 void RGWDefaultResponseOp::send_response() {
7672 if (op_ret) {
7673 set_req_state_err(s, op_ret);
7674 }
7675 dump_errno(s);
7676 end_header(s);
7677 }
7678
7679 void RGWPutBucketPolicy::send_response()
7680 {
7681 if (op_ret) {
7682 set_req_state_err(s, op_ret);
7683 }
7684 dump_errno(s);
7685 end_header(s);
7686 }
7687
7688 int RGWPutBucketPolicy::verify_permission()
7689 {
7690 if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPolicy)) {
7691 return -EACCES;
7692 }
7693
7694 return 0;
7695 }
7696
7697 int RGWPutBucketPolicy::get_params()
7698 {
7699 const auto max_size = s->cct->_conf->rgw_max_put_param_size;
7700 // At some point when I have more time I want to make a version of
7701 // rgw_rest_read_all_input that doesn't use malloc and that throws
7702 // exceptions.
7703 std::tie(op_ret, data) = rgw_rest_read_all_input(s, max_size, false);
7704
7705 return op_ret;
7706 }
7707
7708 void RGWPutBucketPolicy::execute()
7709 {
7710 op_ret = get_params();
7711 if (op_ret < 0) {
7712 return;
7713 }
7714
7715 if (!store->svc()->zone->is_meta_master()) {
7716 op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
7717 if (op_ret < 0) {
7718 ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
7719 return;
7720 }
7721 }
7722
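   /* For reference, `data` here is the raw request body: an S3 bucket policy
    * document. An illustrative (not verbatim from this code base) example:
    *
    *   {
    *     "Version": "2012-10-17",
    *     "Statement": [{
    *       "Effect": "Allow",
    *       "Principal": {"AWS": ["arn:aws:iam:::user/someuser"]},
    *       "Action": ["s3:GetObject"],
    *       "Resource": ["arn:aws:s3:::examplebucket/*"]
    *     }]
    *   }
    *
    * The exact Principal form depends on the tenant/user naming in use. */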
7723 try {
7724 const Policy p(s->cct, s->bucket_tenant, data);
7725 auto attrs = s->bucket_attrs;
7726 if (s->bucket_access_conf &&
7727 s->bucket_access_conf->block_public_policy() &&
7728 rgw::IAM::is_public(p)) {
7729 op_ret = -EACCES;
7730 return;
7731 }
7732
7733 op_ret = retry_raced_bucket_write(store->getRados(), s, [&p, this, &attrs] {
7734 attrs[RGW_ATTR_IAM_POLICY].clear();
7735 attrs[RGW_ATTR_IAM_POLICY].append(p.text);
7736 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
7737 &s->bucket_info.objv_tracker,
7738 s->yield);
7739 return op_ret;
7740 });
7741 } catch (rgw::IAM::PolicyParseException& e) {
7742 ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl;
7743 op_ret = -EINVAL;
7744 }
7745 }
7746
7747 void RGWGetBucketPolicy::send_response()
7748 {
7749 if (op_ret) {
7750 set_req_state_err(s, op_ret);
7751 }
7752 dump_errno(s);
7753 end_header(s, this, "application/json");
7754 dump_body(s, policy);
7755 }
7756
7757 int RGWGetBucketPolicy::verify_permission()
7758 {
7759 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) {
7760 return -EACCES;
7761 }
7762
7763 return 0;
7764 }
7765
7766 void RGWGetBucketPolicy::execute()
7767 {
7768 auto attrs = s->bucket_attrs;
7769 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
7770 if (aiter == attrs.end()) {
7771 ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
7772 << s->bucket_name << dendl;
7773 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
7774 s->err.message = "The bucket policy does not exist";
7775 return;
7776 } else {
7777 policy = attrs[RGW_ATTR_IAM_POLICY];
7778
7779 if (policy.length() == 0) {
7780 ldpp_dout(this, 10) << "The bucket policy does not exist, bucket: "
7781 << s->bucket_name << dendl;
7782 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
7783 s->err.message = "The bucket policy does not exist";
7784 return;
7785 }
7786 }
7787 }
7788
7789 void RGWDeleteBucketPolicy::send_response()
7790 {
7791 if (op_ret) {
7792 set_req_state_err(s, op_ret);
7793 }
7794 dump_errno(s);
7795 end_header(s);
7796 }
7797
7798 int RGWDeleteBucketPolicy::verify_permission()
7799 {
7800 if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucketPolicy)) {
7801 return -EACCES;
7802 }
7803
7804 return 0;
7805 }
7806
7807 void RGWDeleteBucketPolicy::execute()
7808 {
7809 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
7810 auto attrs = s->bucket_attrs;
7811 attrs.erase(RGW_ATTR_IAM_POLICY);
7812 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
7813 &s->bucket_info.objv_tracker,
7814 s->yield);
7815 return op_ret;
7816 });
7817 }
7818
7819 void RGWPutBucketObjectLock::pre_exec()
7820 {
7821 rgw_bucket_object_pre_exec(s);
7822 }
7823
7824 int RGWPutBucketObjectLock::verify_permission()
7825 {
7826 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketObjectLockConfiguration);
7827 }
7828
7829 void RGWPutBucketObjectLock::execute()
7830 {
7831 if (!s->bucket_info.obj_lock_enabled()) {
7832 ldpp_dout(this, 0) << "ERROR: object Lock configuration cannot be enabled on existing buckets" << dendl;
7833 op_ret = -ERR_INVALID_BUCKET_STATE;
7834 return;
7835 }
7836
7837 RGWXMLDecoder::XMLParser parser;
7838 if (!parser.init()) {
7839 ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
7840 op_ret = -EINVAL;
7841 return;
7842 }
7843 op_ret = get_params();
7844 if (op_ret < 0) {
7845 return;
7846 }
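   /* The request body is expected to be an S3 ObjectLockConfiguration
    * document, e.g. (illustrative):
    *
    *   <ObjectLockConfiguration>
    *     <ObjectLockEnabled>Enabled</ObjectLockEnabled>
    *     <Rule>
    *       <DefaultRetention>
    *         <Mode>GOVERNANCE</Mode>
    *         <Days>30</Days>
    *       </DefaultRetention>
    *     </Rule>
    *   </ObjectLockConfiguration>
    */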
7847 if (!parser.parse(data.c_str(), data.length(), 1)) {
7848 op_ret = -ERR_MALFORMED_XML;
7849 return;
7850 }
7851
7852 try {
7853 RGWXMLDecoder::decode_xml("ObjectLockConfiguration", obj_lock, &parser, true);
7854 } catch (RGWXMLDecoder::err& err) {
7855 ldout(s->cct, 5) << "unexpected xml:" << err << dendl;
7856 op_ret = -ERR_MALFORMED_XML;
7857 return;
7858 }
7859 if (obj_lock.has_rule() && !obj_lock.retention_period_valid()) {
7860 ldpp_dout(this, 0) << "ERROR: retention period must be a positive integer value" << dendl;
7861 op_ret = -ERR_INVALID_RETENTION_PERIOD;
7862 return;
7863 }
7864
7865 if (!store->svc()->zone->is_meta_master()) {
7866 op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
7867 if (op_ret < 0) {
7868 ldout(s->cct, 20) << __func__ << ": forward_request_to_master returned ret=" << op_ret << dendl;
7869 return;
7870 }
7871 }
7872
7873 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
7874 s->bucket_info.obj_lock = obj_lock;
7875 op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
7876 real_time(), &s->bucket_attrs);
7877 return op_ret;
7878 });
7879 return;
7880 }
7881
7882 void RGWGetBucketObjectLock::pre_exec()
7883 {
7884 rgw_bucket_object_pre_exec(s);
7885 }
7886
7887 int RGWGetBucketObjectLock::verify_permission()
7888 {
7889 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketObjectLockConfiguration);
7890 }
7891
7892 void RGWGetBucketObjectLock::execute()
7893 {
7894 if (!s->bucket_info.obj_lock_enabled()) {
7895 op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION;
7896 return;
7897 }
7898 }
7899
7900 int RGWPutObjRetention::verify_permission()
7901 {
7902 if (!verify_object_permission(this, s, rgw::IAM::s3PutObjectRetention)) {
7903 return -EACCES;
7904 }
7905 op_ret = get_params();
7906 if (op_ret) {
7907 return op_ret;
7908 }
7909 if (bypass_governance_mode) {
7910 bypass_perm = verify_object_permission(this, s, rgw::IAM::s3BypassGovernanceRetention);
7911 }
7912 return 0;
7913 }
7914
7915 void RGWPutObjRetention::pre_exec()
7916 {
7917 rgw_bucket_object_pre_exec(s);
7918 }
7919
7920 void RGWPutObjRetention::execute()
7921 {
7922 if (!s->bucket_info.obj_lock_enabled()) {
7923 ldpp_dout(this, 0) << "ERROR: object retention can't be set if bucket object lock not configured" << dendl;
7924 op_ret = -ERR_INVALID_REQUEST;
7925 return;
7926 }
7927
7928 RGWXMLDecoder::XMLParser parser;
7929 if (!parser.init()) {
7930 ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
7931 op_ret = -EINVAL;
7932 return;
7933 }
7934
7935 if (!parser.parse(data.c_str(), data.length(), 1)) {
7936 op_ret = -ERR_MALFORMED_XML;
7937 return;
7938 }
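   /* The request body is expected to be an S3 Retention document, e.g.
    * (illustrative):
    *
    *   <Retention>
    *     <Mode>GOVERNANCE</Mode>
    *     <RetainUntilDate>2030-01-01T00:00:00.000Z</RetainUntilDate>
    *   </Retention>
    */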
7939
7940 try {
7941 RGWXMLDecoder::decode_xml("Retention", obj_retention, &parser, true);
7942 } catch (RGWXMLDecoder::err& err) {
7943 ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
7944 op_ret = -ERR_MALFORMED_XML;
7945 return;
7946 }
7947
7948 if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < ceph_clock_now()) {
7949 ldpp_dout(this, 0) << "ERROR: the retain until date must be in the future" << dendl;
7950 op_ret = -EINVAL;
7951 return;
7952 }
7953 bufferlist bl;
7954 obj_retention.encode(bl);
7955 rgw_obj obj(s->bucket, s->object);
7956
7957 //check old retention
7958 map<string, bufferlist> attrs;
7959 op_ret = get_obj_attrs(store, s, obj, attrs);
7960 if (op_ret < 0) {
7961 ldpp_dout(this, 0) << "ERROR: get obj attr error"<< dendl;
7962 return;
7963 }
7964 auto aiter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
7965 if (aiter != attrs.end()) {
7966 RGWObjectRetention old_obj_retention;
7967 try {
7968 decode(old_obj_retention, aiter->second);
7969 } catch (buffer::error& err) {
7970 ldpp_dout(this, 0) << "ERROR: failed to decode RGWObjectRetention" << dendl;
7971 op_ret = -EIO;
7972 return;
7973 }
7974 if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < ceph::real_clock::to_time_t(old_obj_retention.get_retain_until_date())) {
7975 if (old_obj_retention.get_mode().compare("GOVERNANCE") != 0 || !bypass_perm || !bypass_governance_mode) {
7976 op_ret = -EACCES;
7977 return;
7978 }
7979 }
7980 }
7981
7982 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_OBJECT_RETENTION, bl);
7983
7984 return;
7985 }
7986
7987 int RGWGetObjRetention::verify_permission()
7988 {
7989 if (!verify_object_permission(this, s, rgw::IAM::s3GetObjectRetention)) {
7990 return -EACCES;
7991 }
7992 return 0;
7993 }
7994
7995 void RGWGetObjRetention::pre_exec()
7996 {
7997 rgw_bucket_object_pre_exec(s);
7998 }
7999
8000 void RGWGetObjRetention::execute()
8001 {
8002 if (!s->bucket_info.obj_lock_enabled()) {
8003 ldpp_dout(this, 0) << "ERROR: bucket object lock not configured" << dendl;
8004 op_ret = -ERR_INVALID_REQUEST;
8005 return;
8006 }
8007 rgw_obj obj(s->bucket, s->object);
8008 map<string, bufferlist> attrs;
8009 op_ret = get_obj_attrs(store, s, obj, attrs);
8010 if (op_ret < 0) {
8011 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj
8012 << " ret=" << op_ret << dendl;
8013 return;
8014 }
8015 auto aiter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
8016 if (aiter == attrs.end()) {
8017 op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION;
8018 return;
8019 }
8020
8021 bufferlist::const_iterator iter{&aiter->second};
8022 try {
8023 obj_retention.decode(iter);
8024 } catch (const buffer::error& e) {
8025 ldout(s->cct, 0) << __func__ << ": failed to decode object retention config" << dendl;
8026 op_ret = -EIO;
8027 return;
8028 }
8029 return;
8030 }
8031
8032 int RGWPutObjLegalHold::verify_permission()
8033 {
8034 if (!verify_object_permission(this, s, rgw::IAM::s3PutObjectLegalHold)) {
8035 return -EACCES;
8036 }
8037 return 0;
8038 }
8039
8040 void RGWPutObjLegalHold::pre_exec()
8041 {
8042 rgw_bucket_object_pre_exec(s);
8043 }
8044
8045 void RGWPutObjLegalHold::execute() {
8046 if (!s->bucket_info.obj_lock_enabled()) {
8047 ldpp_dout(this, 0) << "ERROR: object legal hold can't be set if bucket object lock not configured" << dendl;
8048 op_ret = -ERR_INVALID_REQUEST;
8049 return;
8050 }
8051
8052 RGWXMLDecoder::XMLParser parser;
8053 if (!parser.init()) {
8054 ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
8055 op_ret = -EINVAL;
8056 return;
8057 }
8058
8059 op_ret = get_params();
8060 if (op_ret < 0)
8061 return;
8062
8063 if (!parser.parse(data.c_str(), data.length(), 1)) {
8064 op_ret = -ERR_MALFORMED_XML;
8065 return;
8066 }
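   /* The request body is expected to be an S3 LegalHold document, e.g.
    * (illustrative):
    *
    *   <LegalHold>
    *     <Status>ON</Status>
    *   </LegalHold>
    */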
8067
8068 try {
8069 RGWXMLDecoder::decode_xml("LegalHold", obj_legal_hold, &parser, true);
8070 } catch (RGWXMLDecoder::err &err) {
8071 ldout(s->cct, 5) << "unexpected xml:" << err << dendl;
8072 op_ret = -ERR_MALFORMED_XML;
8073 return;
8074 }
8075 bufferlist bl;
8076 obj_legal_hold.encode(bl);
8077 rgw_obj obj(s->bucket, s->object);
8078 //if instance is empty, we should modify the latest object
8079 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_OBJECT_LEGAL_HOLD, bl);
8080 return;
8081 }
8082
8083 int RGWGetObjLegalHold::verify_permission()
8084 {
8085 if (!verify_object_permission(this, s, rgw::IAM::s3GetObjectLegalHold)) {
8086 return -EACCES;
8087 }
8088 return 0;
8089 }
8090
8091 void RGWGetObjLegalHold::pre_exec()
8092 {
8093 rgw_bucket_object_pre_exec(s);
8094 }
8095
8096 void RGWGetObjLegalHold::execute()
8097 {
8098 if (!s->bucket_info.obj_lock_enabled()) {
8099 ldpp_dout(this, 0) << "ERROR: bucket object lock not configured" << dendl;
8100 op_ret = -ERR_INVALID_REQUEST;
8101 return;
8102 }
8103 rgw_obj obj(s->bucket, s->object);
8104 map<string, bufferlist> attrs;
8105 op_ret = get_obj_attrs(store, s, obj, attrs);
8106 if (op_ret < 0) {
8107 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj
8108 << " ret=" << op_ret << dendl;
8109 return;
8110 }
8111 auto aiter = attrs.find(RGW_ATTR_OBJECT_LEGAL_HOLD);
8112 if (aiter == attrs.end()) {
8113 op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION;
8114 return;
8115 }
8116
8117 bufferlist::const_iterator iter{&aiter->second};
8118 try {
8119 obj_legal_hold.decode(iter);
8120 } catch (const buffer::error& e) {
8121 ldout(s->cct, 0) << __func__ << ": failed to decode object legal hold config" << dendl;
8122 op_ret = -EIO;
8123 return;
8124 }
8125 return;
8126 }
8127
8128 void RGWGetClusterStat::execute()
8129 {
8130 op_ret = this->store->getRados()->get_rados_handle()->cluster_stat(stats_op);
8131 }
8132
8133
8134 int RGWGetBucketPolicyStatus::verify_permission()
8135 {
8136 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicyStatus)) {
8137 return -EACCES;
8138 }
8139
8140 return 0;
8141 }
8142
8143 void RGWGetBucketPolicyStatus::execute()
8144 {
8145 isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public();
8146 }
8147
8148 int RGWPutBucketPublicAccessBlock::verify_permission()
8149 {
8150 if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPublicAccessBlock)) {
8151 return -EACCES;
8152 }
8153
8154 return 0;
8155 }
8156
8157 int RGWPutBucketPublicAccessBlock::get_params()
8158 {
8159 const auto max_size = s->cct->_conf->rgw_max_put_param_size;
8160 std::tie(op_ret, data) = rgw_rest_read_all_input(s, max_size, false);
8161 return op_ret;
8162 }
8163
8164 void RGWPutBucketPublicAccessBlock::execute()
8165 {
8166 RGWXMLDecoder::XMLParser parser;
8167 if (!parser.init()) {
8168 ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl;
8169 op_ret = -EINVAL;
8170 return;
8171 }
8172
8173 op_ret = get_params();
8174 if (op_ret < 0)
8175 return;
8176
8177 if (!parser.parse(data.c_str(), data.length(), 1)) {
8178 ldpp_dout(this, 0) << "ERROR: malformed XML" << dendl;
8179 op_ret = -ERR_MALFORMED_XML;
8180 return;
8181 }
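   /* The request body is expected to be an S3 PublicAccessBlockConfiguration
    * document, e.g. (illustrative):
    *
    *   <PublicAccessBlockConfiguration>
    *     <BlockPublicAcls>true</BlockPublicAcls>
    *     <IgnorePublicAcls>true</IgnorePublicAcls>
    *     <BlockPublicPolicy>true</BlockPublicPolicy>
    *     <RestrictPublicBuckets>true</RestrictPublicBuckets>
    *   </PublicAccessBlockConfiguration>
    */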
8182
8183 try {
8184 RGWXMLDecoder::decode_xml("PublicAccessBlockConfiguration", access_conf, &parser, true);
8185 } catch (RGWXMLDecoder::err &err) {
8186 ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
8187 op_ret = -ERR_MALFORMED_XML;
8188 return;
8189 }
8190
8191 if (!store->svc()->zone->is_meta_master()) {
8192 op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
8193 if (op_ret < 0) {
8194 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
8195 return;
8196 }
8197 }
8198
8199 bufferlist bl;
8200 access_conf.encode(bl);
8201 op_ret = retry_raced_bucket_write(store->getRados(), s, [this, &bl] {
8202 map<string, bufferlist> attrs = s->bucket_attrs;
8203 attrs[RGW_ATTR_PUBLIC_ACCESS] = bl;
8204 return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
8205 });
8206
8207 }
8208
8209 int RGWGetBucketPublicAccessBlock::verify_permission()
8210 {
8211 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) {
8212 return -EACCES;
8213 }
8214
8215 return 0;
8216 }
8217
8218 void RGWGetBucketPublicAccessBlock::execute()
8219 {
8220 auto attrs = s->bucket_attrs;
8221 if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS);
8222 aiter == attrs.end()) {
8223 ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
8224 << s->bucket_name << dendl;
8225 // return the default;
8226 return;
8227 } else {
8228 bufferlist::const_iterator iter{&aiter->second};
8229 try {
8230 access_conf.decode(iter);
8231 } catch (const buffer::error& e) {
8232 ldpp_dout(this, 0) << __func__ << ": failed to decode access_conf" << dendl;
8233 op_ret = -EIO;
8234 return;
8235 }
8236 }
8237 }
8238
8239
8240 void RGWDeleteBucketPublicAccessBlock::send_response()
8241 {
8242 if (op_ret) {
8243 set_req_state_err(s, op_ret);
8244 }
8245 dump_errno(s);
8246 end_header(s);
8247 }
8248
8249 int RGWDeleteBucketPublicAccessBlock::verify_permission()
8250 {
8251 if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPublicAccessBlock)) {
8252 return -EACCES;
8253 }
8254
8255 return 0;
8256 }
8257
8258 void RGWDeleteBucketPublicAccessBlock::execute()
8259 {
8260 op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
8261 auto attrs = s->bucket_attrs;
8262 attrs.erase(RGW_ATTR_PUBLIC_ACCESS);
8263 op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
8264 &s->bucket_info.objv_tracker,
8265 s->yield);
8266 return op_ret;
8267 });
8268 }