// ceph/src/rgw/rgw_op.cc
// imported from ceph nautilus 14.2.2
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <system_error>
7 #include <unistd.h>
8
9 #include <sstream>
10
11 #include <boost/algorithm/string/predicate.hpp>
12 #include <boost/bind.hpp>
13 #include <boost/optional.hpp>
14 #include <boost/utility/in_place_factory.hpp>
15 #include <boost/utility/string_view.hpp>
16
17 #include "include/scope_guard.h"
18 #include "common/Clock.h"
19 #include "common/armor.h"
20 #include "common/errno.h"
21 #include "common/mime.h"
22 #include "common/utf8.h"
23 #include "common/ceph_json.h"
24 #include "common/static_ptr.h"
25
26 #include "rgw_rados.h"
27 #include "rgw_zone.h"
28 #include "rgw_op.h"
29 #include "rgw_rest.h"
30 #include "rgw_acl.h"
31 #include "rgw_acl_s3.h"
32 #include "rgw_acl_swift.h"
33 #include "rgw_aio_throttle.h"
34 #include "rgw_user.h"
35 #include "rgw_bucket.h"
36 #include "rgw_log.h"
37 #include "rgw_multi.h"
38 #include "rgw_multi_del.h"
39 #include "rgw_cors.h"
40 #include "rgw_cors_s3.h"
41 #include "rgw_rest_conn.h"
42 #include "rgw_rest_s3.h"
43 #include "rgw_tar.h"
44 #include "rgw_client_io.h"
45 #include "rgw_compression.h"
46 #include "rgw_role.h"
47 #include "rgw_tag_s3.h"
48 #include "rgw_putobj_processor.h"
49 #include "rgw_crypt.h"
50 #include "rgw_perf_counters.h"
51
52 #include "services/svc_zone.h"
53 #include "services/svc_quota.h"
54 #include "services/svc_sys_obj.h"
55
56 #include "cls/lock/cls_lock_client.h"
57 #include "cls/rgw/cls_rgw_client.h"
58
59
60 #include "include/ceph_assert.h"
61
62 #include "compressor/Compressor.h"
63
64 #ifdef WITH_LTTNG
65 #define TRACEPOINT_DEFINE
66 #define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
67 #include "tracing/rgw_op.h"
68 #undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
69 #undef TRACEPOINT_DEFINE
70 #else
71 #define tracepoint(...)
72 #endif
73
74 #define dout_context g_ceph_context
75 #define dout_subsys ceph_subsys_rgw
76
77 using namespace librados;
78 using ceph::crypto::MD5;
79 using boost::optional;
80 using boost::none;
81
82 using rgw::IAM::ARN;
83 using rgw::IAM::Effect;
84 using rgw::IAM::Policy;
85
86 using rgw::IAM::Policy;
87
88 static string mp_ns = RGW_OBJ_NS_MULTIPART;
89 static string shadow_ns = RGW_OBJ_NS_SHADOW;
90
91 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
92 static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
93 bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
94
95 static MultipartMetaFilter mp_filter;
96
97 // this probably should belong in the rgw_iam_policy_keywords, I'll get it to it
98 // at some point
99 static constexpr auto S3_EXISTING_OBJTAG = "s3:ExistingObjectTag";
100
/*
 * Parse the HTTP Range header held in range_str (e.g. "bytes=0-499",
 * "bytes=500-", "bytes=-500") into the member offsets `ofs` and `end`,
 * setting partial_content and range_parsed as appropriate.
 *
 * Returns 0 when the header is parsed or deliberately ignored, and
 * -ERANGE for a malformed range unless rgw_ignore_get_invalid_range is
 * set, in which case the whole object is served instead.
 */
int RGWGetObj::parse_range(void)
{
  int r = -ERANGE;
  string rs(range_str);
  string ofs_str;
  string end_str;

  ignore_invalid_range = s->cct->_conf->rgw_ignore_get_invalid_range;
  partial_content = false;

  size_t pos = rs.find("bytes=");
  if (pos == string::npos) {
    /* no literal "bytes=" -- tolerate whitespace around the '=',
     * e.g. "bytes = 0-499" */
    pos = 0;
    while (isspace(rs[pos]))
      pos++;
    int end = pos;  // local; shadows the member `end` used below
    while (isalpha(rs[end]))
      end++;
    /* NOTE(review): compares from the start of rs (not from pos) and only
     * over end-pos bytes, so a leading-whitespace header or a token that
     * is a strict prefix of "bytes" is not matched exactly -- confirm
     * this is intended */
    if (strncasecmp(rs.c_str(), "bytes", end - pos) != 0)
      return 0;  /* unrecognized unit: ignore header, serve full object */
    while (isspace(rs[end]))
      end++;
    if (rs[end] != '=')
      return 0;
    rs = rs.substr(end + 1);
  } else {
    rs = rs.substr(pos + 6); /* size of("bytes=") */
  }
  pos = rs.find('-');
  if (pos == string::npos)
    goto done;

  partial_content = true;

  ofs_str = rs.substr(0, pos);
  end_str = rs.substr(pos + 1);
  if (end_str.length()) {
    end = atoll(end_str.c_str());
    if (end < 0)
      goto done;
  }

  if (ofs_str.length()) {
    ofs = atoll(ofs_str.c_str());
  } else { // RFC2616 suffix-byte-range-spec
    /* "-N" means the final N bytes; encoded as negative ofs, open end */
    ofs = -end;
    end = -1;
  }

  /* an explicit end before the start is unsatisfiable */
  if (end >= 0 && end < ofs)
    goto done;

  range_parsed = true;
  return 0;

done:
  if (ignore_invalid_range) {
    /* config says to serve the full object on a bad Range header */
    partial_content = false;
    ofs = 0;
    end = -1;
    range_parsed = false; // allow retry
    r = 0;
  }

  return r;
}
167
168 static int decode_policy(CephContext *cct,
169 bufferlist& bl,
170 RGWAccessControlPolicy *policy)
171 {
172 auto iter = bl.cbegin();
173 try {
174 policy->decode(iter);
175 } catch (buffer::error& err) {
176 ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
177 return -EIO;
178 }
179 if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
180 ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
181 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
182 s3policy->to_xml(*_dout);
183 *_dout << dendl;
184 }
185 return 0;
186 }
187
188
189 static int get_user_policy_from_attr(CephContext * const cct,
190 RGWRados * const store,
191 map<string, bufferlist>& attrs,
192 RGWAccessControlPolicy& policy /* out */)
193 {
194 auto aiter = attrs.find(RGW_ATTR_ACL);
195 if (aiter != attrs.end()) {
196 int ret = decode_policy(cct, aiter->second, &policy);
197 if (ret < 0) {
198 return ret;
199 }
200 } else {
201 return -ENOENT;
202 }
203
204 return 0;
205 }
206
207 static int get_bucket_instance_policy_from_attr(CephContext *cct,
208 RGWRados *store,
209 RGWBucketInfo& bucket_info,
210 map<string, bufferlist>& bucket_attrs,
211 RGWAccessControlPolicy *policy)
212 {
213 map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
214
215 if (aiter != bucket_attrs.end()) {
216 int ret = decode_policy(cct, aiter->second, policy);
217 if (ret < 0)
218 return ret;
219 } else {
220 ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
221 RGWUserInfo uinfo;
222 /* object exists, but policy is broken */
223 int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
224 if (r < 0)
225 return r;
226
227 policy->create_default(bucket_info.owner, uinfo.display_name);
228 }
229 return 0;
230 }
231
232 static int get_obj_policy_from_attr(CephContext *cct,
233 RGWRados *store,
234 RGWObjectCtx& obj_ctx,
235 RGWBucketInfo& bucket_info,
236 map<string, bufferlist>& bucket_attrs,
237 RGWAccessControlPolicy *policy,
238 string *storage_class,
239 rgw_obj& obj)
240 {
241 bufferlist bl;
242 int ret = 0;
243
244 RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
245 RGWRados::Object::Read rop(&op_target);
246
247 ret = rop.get_attr(RGW_ATTR_ACL, bl);
248 if (ret >= 0) {
249 ret = decode_policy(cct, bl, policy);
250 if (ret < 0)
251 return ret;
252 } else if (ret == -ENODATA) {
253 /* object exists, but policy is broken */
254 ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
255 RGWUserInfo uinfo;
256 ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
257 if (ret < 0)
258 return ret;
259
260 policy->create_default(bucket_info.owner, uinfo.display_name);
261 }
262
263 if (storage_class) {
264 bufferlist scbl;
265 int r = rop.get_attr(RGW_ATTR_STORAGE_CLASS, scbl);
266 if (r >= 0) {
267 *storage_class = scbl.to_str();
268 } else {
269 storage_class->clear();
270 }
271 }
272
273 return ret;
274 }
275
276
/**
 * Get the AccessControlPolicy for a bucket off of disk.
 * policy: must point to a valid RGWACL, and will be filled upon return.
 * bucket_info/bucket_attrs: identify the bucket whose ACL attribute is read.
 * Returns: 0 on success, -ERR# otherwise.
 */
int rgw_op_get_bucket_policy_from_attr(CephContext *cct,
                                       RGWRados *store,
                                       RGWBucketInfo& bucket_info,
                                       map<string, bufferlist>& bucket_attrs,
                                       RGWAccessControlPolicy *policy)
{
  // thin public wrapper over the file-local helper
  return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs, policy);
}
292
293 static boost::optional<Policy> get_iam_policy_from_attr(CephContext* cct,
294 RGWRados* store,
295 map<string, bufferlist>& attrs,
296 const string& tenant) {
297 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
298 if (i != attrs.end()) {
299 return Policy(cct, tenant, i->second);
300 } else {
301 return none;
302 }
303 }
304
305 vector<Policy> get_iam_user_policy_from_attr(CephContext* cct,
306 RGWRados* store,
307 map<string, bufferlist>& attrs,
308 const string& tenant) {
309 vector<Policy> policies;
310 if (auto it = attrs.find(RGW_ATTR_USER_POLICY); it != attrs.end()) {
311 bufferlist out_bl = attrs[RGW_ATTR_USER_POLICY];
312 map<string, string> policy_map;
313 decode(policy_map, out_bl);
314 for (auto& it : policy_map) {
315 bufferlist bl = bufferlist::static_from_string(it.second);
316 Policy p(cct, tenant, bl);
317 policies.push_back(std::move(p));
318 }
319 }
320 return policies;
321 }
322
323 static int get_obj_attrs(RGWRados *store, struct req_state *s, const rgw_obj& obj, map<string, bufferlist>& attrs)
324 {
325 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
326 RGWRados::Object::Read read_op(&op_target);
327
328 read_op.params.attrs = &attrs;
329
330 return read_op.prepare();
331 }
332
333 static int get_obj_head(RGWRados *store, struct req_state *s,
334 const rgw_obj& obj,
335 map<string, bufferlist> *attrs,
336 bufferlist *pbl)
337 {
338 store->set_prefetch_data(s->obj_ctx, obj);
339
340 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
341 RGWRados::Object::Read read_op(&op_target);
342
343 read_op.params.attrs = attrs;
344
345 int ret = read_op.prepare();
346 if (ret < 0) {
347 return ret;
348 }
349
350 if (!pbl) {
351 return 0;
352 }
353
354 ret = read_op.read(0, s->cct->_conf->rgw_max_chunk_size, *pbl);
355
356 return 0;
357 }
358
/*
 * Metadata stored in the head of a multipart upload's meta object;
 * currently just the destination placement rule chosen at initiation.
 */
struct multipart_upload_info
{
  rgw_placement_rule dest_placement;

  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
    encode(dest_placement, bl);
    ENCODE_FINISH(bl);
  }

  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(1, bl);
    decode(dest_placement, bl);
    DECODE_FINISH(bl);
  }
};
WRITE_CLASS_ENCODER(multipart_upload_info)
376
377 static int get_multipart_info(RGWRados *store, struct req_state *s,
378 const rgw_obj& obj,
379 RGWAccessControlPolicy *policy,
380 map<string, bufferlist> *attrs,
381 multipart_upload_info *upload_info)
382 {
383 bufferlist header;
384
385 bufferlist headbl;
386 bufferlist *pheadbl = (upload_info ? &headbl : nullptr);
387
388 int op_ret = get_obj_head(store, s, obj, attrs, pheadbl);
389 if (op_ret < 0) {
390 if (op_ret == -ENOENT) {
391 return -ERR_NO_SUCH_UPLOAD;
392 }
393 return op_ret;
394 }
395
396 if (upload_info && headbl.length() > 0) {
397 auto hiter = headbl.cbegin();
398 try {
399 decode(*upload_info, hiter);
400 } catch (buffer::error& err) {
401 ldpp_dout(s, 0) << "ERROR: failed to decode multipart upload info" << dendl;
402 return -EIO;
403 }
404 }
405
406 if (policy && attrs) {
407 for (auto& iter : *attrs) {
408 string name = iter.first;
409 if (name.compare(RGW_ATTR_ACL) == 0) {
410 bufferlist& bl = iter.second;
411 auto bli = bl.cbegin();
412 try {
413 decode(*policy, bli);
414 } catch (buffer::error& err) {
415 ldpp_dout(s, 0) << "ERROR: could not decode policy" << dendl;
416 return -EIO;
417 }
418 break;
419 }
420 }
421 }
422
423 return 0;
424 }
425
426 static int get_multipart_info(RGWRados *store, struct req_state *s,
427 const string& meta_oid,
428 RGWAccessControlPolicy *policy,
429 map<string, bufferlist> *attrs,
430 multipart_upload_info *upload_info)
431 {
432 map<string, bufferlist>::iterator iter;
433 bufferlist header;
434
435 rgw_obj meta_obj;
436 meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
437 meta_obj.set_in_extra_data(true);
438
439 return get_multipart_info(store, s, meta_obj, policy, attrs, upload_info);
440 }
441
442 static int modify_obj_attr(RGWRados *store, struct req_state *s, const rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
443 {
444 map<string, bufferlist> attrs;
445 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
446 RGWRados::Object::Read read_op(&op_target);
447
448 read_op.params.attrs = &attrs;
449
450 int r = read_op.prepare();
451 if (r < 0) {
452 return r;
453 }
454 store->set_atomic(s->obj_ctx, read_op.state.obj);
455 attrs[attr_name] = attr_val;
456 return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL);
457 }
458
459 static int read_bucket_policy(RGWRados *store,
460 struct req_state *s,
461 RGWBucketInfo& bucket_info,
462 map<string, bufferlist>& bucket_attrs,
463 RGWAccessControlPolicy *policy,
464 rgw_bucket& bucket)
465 {
466 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
467 ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
468 << " is suspended" << dendl;
469 return -ERR_USER_SUSPENDED;
470 }
471
472 if (bucket.name.empty()) {
473 return 0;
474 }
475
476 int ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
477 if (ret == -ENOENT) {
478 ret = -ERR_NO_SUCH_BUCKET;
479 }
480
481 return ret;
482 }
483
/* Load the object ACL (or the multipart meta object's ACL when an
 * uploadId is present) plus the bucket's IAM policy. When the object
 * does not exist, falls back to the bucket ACL/policy to decide whether
 * the caller may learn that (-ENOENT) or must get -EACCES instead. */
static int read_obj_policy(RGWRados *store,
                           struct req_state *s,
                           RGWBucketInfo& bucket_info,
                           map<string, bufferlist>& bucket_attrs,
                           RGWAccessControlPolicy* acl,
                           string *storage_class,
                           boost::optional<Policy>& policy,
                           rgw_bucket& bucket,
                           rgw_obj_key& object)
{
  string upload_id;
  upload_id = s->info.args.get("uploadId");
  rgw_obj obj;

  if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
    ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
        << " is suspended" << dendl;
    return -ERR_USER_SUSPENDED;
  }

  if (!upload_id.empty()) {
    /* multipart upload: target the upload's meta object instead */
    RGWMPObj mp(object.name, upload_id);
    string oid = mp.get_meta();
    obj.init_ns(bucket, oid, mp_ns);
    obj.set_in_extra_data(true);
  } else {
    obj = rgw_obj(bucket, object);
  }
  policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
                                     bucket_info, bucket_attrs, acl, storage_class, obj);
  if (ret == -ENOENT) {
    /* object does not exist checking the bucket's ACL to make sure
       that we send a proper error code */
    RGWAccessControlPolicy bucket_policy(s->cct);
    ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
    if (ret < 0) {
      return ret;
    }
    const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
    if (bucket_owner.compare(s->user->user_id) != 0 &&
        ! s->auth.identity->is_admin_of(bucket_owner)) {
      /* non-owner, non-admin: only reveal -ENOENT to identities that
       * could list the bucket; otherwise report -EACCES */
      if (policy) {
        auto r = policy->eval(s->env, *s->auth.identity, rgw::IAM::s3ListBucket, ARN(bucket));
        if (r == Effect::Allow)
          return -ENOENT;
        if (r == Effect::Deny)
          return -EACCES;
      }
      if (! bucket_policy.verify_permission(s, *s->auth.identity, s->perm_mask, RGW_PERM_READ))
        ret = -EACCES;
      else
        ret = -ENOENT;
    } else {
      ret = -ENOENT;
    }
  }

  return ret;
}
547
/**
 * Load the user and bucket ACLs, the bucket's IAM policy and the user's
 * IAM policies into req_state, and resolve the bucket/zonegroup
 * information the request needs (redirects, dest placement).
 * s: The req_state to draw information from.
 * Returns: 0 on success, -ERR# otherwise.
 */
int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
{
  int ret = 0;
  rgw_obj_key obj;
  RGWUserInfo bucket_owner_info;
  auto obj_ctx = store->svc.sysobj->init_obj_ctx();

  /* explicit bucket-instance override (system parameter) */
  string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
  if (!bi.empty()) {
    ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id);
    if (ret < 0) {
      return ret;
    }
  }

  /* allocate the ACL flavor matching the frontend dialect */
  if(s->dialect.compare("s3") == 0) {
    s->bucket_acl = std::make_unique<RGWAccessControlPolicy_S3>(s->cct);
  } else if(s->dialect.compare("swift") == 0) {
    /* We aren't allocating the account policy for those operations using
     * the Swift's infrastructure that don't really need req_state::user.
     * Typical example here is the implementation of /info. */
    if (!s->user->user_id.empty()) {
      s->user_acl = std::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
    }
    s->bucket_acl = std::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
  } else {
    s->bucket_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
  }

  /* check if copy source is within the current domain */
  if (!s->src_bucket_name.empty()) {
    RGWBucketInfo source_info;

    if (s->bucket_instance_id.empty()) {
      ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL);
    } else {
      ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL);
    }
    if (ret == 0) {
      string& zonegroup = source_info.zonegroup;
      s->local_source = store->svc.zone->get_zonegroup().equals(zonegroup);
    }
  }

  /* identity used for the default user ACL below; starts as the
   * authenticated user and switches to the bucket owner once the bucket
   * is known to exist */
  struct {
    rgw_user uid;
    std::string display_name;
  } acct_acl_user = {
    s->user->user_id,
    s->user->display_name,
  };

  if (!s->bucket_name.empty()) {
    s->bucket_exists = true;
    if (s->bucket_instance_id.empty()) {
      ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                   s->bucket_info, &s->bucket_mtime,
                                   &s->bucket_attrs);
    } else {
      ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id,
                                            s->bucket_info, &s->bucket_mtime,
                                            &s->bucket_attrs);
    }
    if (ret < 0) {
      if (ret != -ENOENT) {
        string bucket_log;
        rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
        ldpp_dout(s, 0) << "NOTICE: couldn't get bucket from bucket_name (name="
            << bucket_log << ")" << dendl;
        return ret;
      }
      s->bucket_exists = false;
    }
    s->bucket = s->bucket_info.bucket;

    if (s->bucket_exists) {
      ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs,
                               s->bucket_acl.get(), s->bucket);
      acct_acl_user = {
        s->bucket_info.owner,
        s->bucket_acl->get_owner().get_display_name(),
      };
    } else {
      /* missing bucket: install a default ACL so later checks still work,
       * but remember the error */
      s->bucket_acl->create_default(s->user->user_id, s->user->display_name);
      ret = -ERR_NO_SUCH_BUCKET;
    }

    s->bucket_owner = s->bucket_acl->get_owner();

    /* resolve an endpoint for possible redirects to the bucket's zonegroup */
    RGWZoneGroup zonegroup;
    int r = store->svc.zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
    if (!r) {
      if (!zonegroup.endpoints.empty()) {
        s->zonegroup_endpoint = zonegroup.endpoints.front();
      } else {
        // use zonegroup's master zone endpoints
        auto z = zonegroup.zones.find(zonegroup.master_zone);
        if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
          s->zonegroup_endpoint = z->second.endpoints.front();
        }
      }
      s->zonegroup_name = zonegroup.get_name();
    }
    if (r < 0 && ret == 0) {
      ret = r;
    }

    if (s->bucket_exists && !store->svc.zone->get_zonegroup().equals(s->bucket_info.zonegroup)) {
      ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup ("
          << s->bucket_info.zonegroup << " != "
          << store->svc.zone->get_zonegroup().get_id() << ")" << dendl;
      /* we now need to make sure that the operation actually requires copy source, that is
       * it's a copy operation
       */
      if (store->svc.zone->get_zonegroup().is_master_zonegroup() && s->system_request) {
        /*If this is the master, don't redirect*/
      } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
        /* If op is get bucket location, don't redirect */
      } else if (!s->local_source ||
                 (s->op != OP_PUT && s->op != OP_COPY) ||
                 s->object.empty()) {
        return -ERR_PERMANENT_REDIRECT;
      }
    }

    /* init dest placement -- only if bucket exists, otherwise request is either not relevant, or
     * it's a create_bucket request, in which case the op will deal with the placement later */
    if (s->bucket_exists) {
      s->dest_placement.storage_class = s->info.storage_class;
      s->dest_placement.inherit_from(s->bucket_info.placement_rule);

      if (!store->svc.zone->get_zone_params().valid_placement(s->dest_placement)) {
        ldpp_dout(s, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl;
        return -EINVAL;
      }
    }
  }

  /* handle user ACL only for those APIs which support it */
  if (s->user_acl) {
    map<string, bufferlist> uattrs;
    ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs);
    if (!ret) {
      ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
    }
    if (-ENOENT == ret) {
      /* In already existing clusters users won't have ACL. In such case
       * assuming that only account owner has the rights seems to be
       * reasonable. That allows to have only one verification logic.
       * NOTE: there is small compatibility kludge for global, empty tenant:
       *  1. if we try to reach an existing bucket, its owner is considered
       *     as account owner.
       *  2. otherwise account owner is identity stored in s->user->user_id. */
      s->user_acl->create_default(acct_acl_user.uid,
                                  acct_acl_user.display_name);
      ret = 0;
    } else if (ret < 0) {
      ldpp_dout(s, 0) << "NOTICE: couldn't get user attrs for handling ACL "
          "(user_id=" << s->user->user_id << ", ret=" << ret << ")" << dendl;
      return ret;
    }
  }
  // We don't need user policies in case of STS token returned by AssumeRole,
  // hence the check for user type
  if (! s->user->user_id.empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
    try {
      map<string, bufferlist> uattrs;
      if (ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, uattrs); ! ret) {
        if (s->iam_user_policies.empty()) {
          s->iam_user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->user_id.tenant);
        } else {
          // This scenario can happen when a STS token has a policy, then we need to append other user policies
          // to the existing ones. (e.g. token returned by GetSessionToken)
          auto user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->user_id.tenant);
          s->iam_user_policies.insert(s->iam_user_policies.end(), user_policies.begin(), user_policies.end());
        }
      } else {
        if (ret == -ENOENT)
          ret = 0;
        else ret = -EACCES;
      }
    } catch (const std::exception& e) {
      lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl;
      ret = -EACCES;
    }
  }

  try {
    s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
                                             s->bucket_tenant);
  } catch (const std::exception& e) {
    // Really this is a can't happen condition. We parse the policy
    // when it's given to us, so perhaps we should abort or otherwise
    // raise bloody murder.
    ldpp_dout(s, 0) << "Error reading IAM Policy: " << e.what() << dendl;
    ret = -EACCES;
  }

  bool success = store->svc.zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
  if (success) {
    ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
  }

  return ret;
}
759
/**
 * Get the AccessControlPolicy for the request's object off of disk.
 * s: The req_state to draw information from.
 * prefetch_data: If true, also hint the store to prefetch the object data.
 * Returns: 0 on success, -ERR# otherwise.
 */
766 int rgw_build_object_policies(RGWRados *store, struct req_state *s,
767 bool prefetch_data)
768 {
769 int ret = 0;
770
771 if (!s->object.empty()) {
772 if (!s->bucket_exists) {
773 return -ERR_NO_SUCH_BUCKET;
774 }
775 s->object_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
776 rgw_obj obj(s->bucket, s->object);
777
778 store->set_atomic(s->obj_ctx, obj);
779 if (prefetch_data) {
780 store->set_prefetch_data(s->obj_ctx, obj);
781 }
782 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
783 s->object_acl.get(), nullptr, s->iam_policy, s->bucket,
784 s->object);
785 }
786
787 return ret;
788 }
789
790 void rgw_add_to_iam_environment(rgw::IAM::Environment& e, std::string_view key, std::string_view val){
791 // This variant just adds non empty key pairs to IAM env., values can be empty
792 // in certain cases like tagging
793 if (!key.empty())
794 e.emplace(key,val);
795 }
796
797 static int rgw_iam_add_tags_from_bl(struct req_state* s, bufferlist& bl){
798 RGWObjTags tagset;
799 try {
800 auto bliter = bl.cbegin();
801 tagset.decode(bliter);
802 } catch (buffer::error& err) {
803 ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
804 return -EIO;
805 }
806
807 for (const auto& tag: tagset.get_tags()){
808 rgw_add_to_iam_environment(s->env, "s3:ExistingObjectTag/" + tag.first, tag.second);
809 }
810 return 0;
811 }
812
813 static int rgw_iam_add_existing_objtags(RGWRados* store, struct req_state* s, rgw_obj& obj, std::uint64_t action){
814 map <string, bufferlist> attrs;
815 store->set_atomic(s->obj_ctx, obj);
816 int op_ret = get_obj_attrs(store, s, obj, attrs);
817 if (op_ret < 0)
818 return op_ret;
819 auto tags = attrs.find(RGW_ATTR_TAGS);
820 if (tags != attrs.end()){
821 return rgw_iam_add_tags_from_bl(s, tags->second);
822 }
823 return 0;
824 }
825
826 static void rgw_add_grant_to_iam_environment(rgw::IAM::Environment& e, struct req_state *s){
827
828 using header_pair_t = std::pair <const char*, const char*>;
829 static const std::initializer_list <header_pair_t> acl_header_conditionals {
830 {"HTTP_X_AMZ_GRANT_READ", "s3:x-amz-grant-read"},
831 {"HTTP_X_AMZ_GRANT_WRITE", "s3:x-amz-grant-write"},
832 {"HTTP_X_AMZ_GRANT_READ_ACP", "s3:x-amz-grant-read-acp"},
833 {"HTTP_X_AMZ_GRANT_WRITE_ACP", "s3:x-amz-grant-write-acp"},
834 {"HTTP_X_AMZ_GRANT_FULL_CONTROL", "s3:x-amz-grant-full-control"}
835 };
836
837 if (s->has_acl_header){
838 for (const auto& c: acl_header_conditionals){
839 auto hdr = s->info.env->get(c.first);
840 if(hdr) {
841 e[c.second] = hdr;
842 }
843 }
844 }
845 }
846
847 void rgw_build_iam_environment(RGWRados* store,
848 struct req_state* s)
849 {
850 const auto& m = s->info.env->get_map();
851 auto t = ceph::real_clock::now();
852 s->env.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t)));
853 s->env.emplace("aws:EpochTime", ceph::to_iso_8601(t));
854 // TODO: This is fine for now, but once we have STS we'll need to
855 // look and see. Also this won't work with the IdentityApplier
856 // model, since we need to know the actual credential.
857 s->env.emplace("aws:PrincipalType", "User");
858
859 auto i = m.find("HTTP_REFERER");
860 if (i != m.end()) {
861 s->env.emplace("aws:Referer", i->second);
862 }
863
864 if (rgw_transport_is_secure(s->cct, *s->info.env)) {
865 s->env.emplace("aws:SecureTransport", "true");
866 }
867
868 const auto remote_addr_param = s->cct->_conf->rgw_remote_addr_param;
869 if (remote_addr_param.length()) {
870 i = m.find(remote_addr_param);
871 } else {
872 i = m.find("REMOTE_ADDR");
873 }
874 if (i != m.end()) {
875 const string* ip = &(i->second);
876 string temp;
877 if (remote_addr_param == "HTTP_X_FORWARDED_FOR") {
878 const auto comma = ip->find(',');
879 if (comma != string::npos) {
880 temp.assign(*ip, 0, comma);
881 ip = &temp;
882 }
883 }
884 s->env.emplace("aws:SourceIp", *ip);
885 }
886
887 i = m.find("HTTP_USER_AGENT"); {
888 if (i != m.end())
889 s->env.emplace("aws:UserAgent", i->second);
890 }
891
892 if (s->user) {
893 // What to do about aws::userid? One can have multiple access
894 // keys so that isn't really suitable. Do we have a durable
895 // identifier that can persist through name changes?
896 s->env.emplace("aws:username", s->user->user_id.id);
897 }
898
899 i = m.find("HTTP_X_AMZ_SECURITY_TOKEN");
900 if (i != m.end()) {
901 s->env.emplace("sts:authentication", "true");
902 } else {
903 s->env.emplace("sts:authentication", "false");
904 }
905 }
906
907 void rgw_bucket_object_pre_exec(struct req_state *s)
908 {
909 if (s->expect_cont)
910 dump_continue(s);
911
912 dump_bucket_from_state(s);
913 }
914
915 // So! Now and then when we try to update bucket information, the
916 // bucket has changed during the course of the operation. (Or we have
917 // a cache consistency problem that Watch/Notify isn't ruling out
918 // completely.)
919 //
920 // When this happens, we need to update the bucket info and try
921 // again. We have, however, to try the right *part* again. We can't
922 // simply re-send, since that will obliterate the previous update.
923 //
924 // Thus, callers of this function should include everything that
925 // merges information to be changed into the bucket information as
926 // well as the call to set it.
927 //
928 // The called function must return an integer, negative on error. In
929 // general, they should just return op_ret.
930 namespace {
931 template<typename F>
932 int retry_raced_bucket_write(RGWRados* g, req_state* s, const F& f) {
933 auto r = f();
934 for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) {
935 r = g->try_refresh_bucket_info(s->bucket_info, nullptr,
936 &s->bucket_attrs);
937 if (r >= 0) {
938 r = f();
939 }
940 }
941 return r;
942 }
943 }
944
945
946 int RGWGetObj::verify_permission()
947 {
948 obj = rgw_obj(s->bucket, s->object);
949 store->set_atomic(s->obj_ctx, obj);
950 if (get_data) {
951 store->set_prefetch_data(s->obj_ctx, obj);
952 }
953
954 if (torrent.get_flag()) {
955 if (obj.key.instance.empty()) {
956 action = rgw::IAM::s3GetObjectTorrent;
957 } else {
958 action = rgw::IAM::s3GetObjectVersionTorrent;
959 }
960 } else {
961 if (obj.key.instance.empty()) {
962 action = rgw::IAM::s3GetObject;
963 } else {
964 action = rgw::IAM::s3GetObjectVersion;
965 }
966 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG))
967 rgw_iam_add_existing_objtags(store, s, obj, action);
968 if (! s->iam_user_policies.empty()) {
969 for (auto& user_policy : s->iam_user_policies) {
970 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG))
971 rgw_iam_add_existing_objtags(store, s, obj, action);
972 }
973 }
974 }
975
976 if (!verify_object_permission(this, s, action)) {
977 return -EACCES;
978 }
979
980 return 0;
981 }
982
983
984 int RGWOp::verify_op_mask()
985 {
986 uint32_t required_mask = op_mask();
987
988 ldpp_dout(this, 20) << "required_mask= " << required_mask
989 << " user.op_mask=" << s->user->op_mask << dendl;
990
991 if ((s->user->op_mask & required_mask) != required_mask) {
992 return -EPERM;
993 }
994
995 if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->svc.zone->zone_is_writeable()) {
996 ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
997 "non-system user, permission denied" << dendl;
998 return -EPERM;
999 }
1000
1001 return 0;
1002 }
1003
1004 int RGWGetObjTags::verify_permission()
1005 {
1006 auto iam_action = s->object.instance.empty()?
1007 rgw::IAM::s3GetObjectTagging:
1008 rgw::IAM::s3GetObjectVersionTagging;
1009 // TODO since we are parsing the bl now anyway, we probably change
1010 // the send_response function to accept RGWObjTag instead of a bl
1011 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1012 rgw_obj obj = rgw_obj(s->bucket, s->object);
1013 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1014 }
1015 if (! s->iam_user_policies.empty()) {
1016 for (auto& user_policy : s->iam_user_policies) {
1017 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1018 rgw_obj obj = rgw_obj(s->bucket, s->object);
1019 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1020 }
1021 }
1022 }
1023 if (!verify_object_permission(this, s,iam_action))
1024 return -EACCES;
1025
1026 return 0;
1027 }
1028
// Standard bucket/object pre-exec: 100-continue ack and bucket headers.
void RGWGetObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
1033
1034 void RGWGetObjTags::execute()
1035 {
1036 rgw_obj obj;
1037 map<string,bufferlist> attrs;
1038
1039 obj = rgw_obj(s->bucket, s->object);
1040
1041 store->set_atomic(s->obj_ctx, obj);
1042
1043 op_ret = get_obj_attrs(store, s, obj, attrs);
1044 if (op_ret < 0) {
1045 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj
1046 << " ret=" << op_ret << dendl;
1047 return;
1048 }
1049
1050 auto tags = attrs.find(RGW_ATTR_TAGS);
1051 if(tags != attrs.end()){
1052 has_tags = true;
1053 tags_bl.append(tags->second);
1054 }
1055 send_response_data(tags_bl);
1056 }
1057
1058 int RGWPutObjTags::verify_permission()
1059 {
1060 auto iam_action = s->object.instance.empty() ?
1061 rgw::IAM::s3PutObjectTagging:
1062 rgw::IAM::s3PutObjectVersionTagging;
1063
1064 if(s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1065 auto obj = rgw_obj(s->bucket, s->object);
1066 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1067 }
1068 if (! s->iam_user_policies.empty()) {
1069 for (auto& user_policy : s->iam_user_policies) {
1070 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1071 rgw_obj obj = rgw_obj(s->bucket, s->object);
1072 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1073 }
1074 }
1075 }
1076 if (!verify_object_permission(this, s,iam_action))
1077 return -EACCES;
1078 return 0;
1079 }
1080
1081 void RGWPutObjTags::execute()
1082 {
1083 op_ret = get_params();
1084 if (op_ret < 0)
1085 return;
1086
1087 if (s->object.empty()){
1088 op_ret= -EINVAL; // we only support tagging on existing objects
1089 return;
1090 }
1091
1092 rgw_obj obj;
1093 obj = rgw_obj(s->bucket, s->object);
1094 store->set_atomic(s->obj_ctx, obj);
1095 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
1096 if (op_ret == -ECANCELED){
1097 op_ret = -ERR_TAG_CONFLICT;
1098 }
1099 }
1100
// Common pre-execution hook for bucket/object ops; delegates to
// rgw_bucket_object_pre_exec().
void RGWDeleteObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
1105
1106
1107 int RGWDeleteObjTags::verify_permission()
1108 {
1109 if (!s->object.empty()) {
1110 auto iam_action = s->object.instance.empty() ?
1111 rgw::IAM::s3DeleteObjectTagging:
1112 rgw::IAM::s3DeleteObjectVersionTagging;
1113
1114 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1115 auto obj = rgw_obj(s->bucket, s->object);
1116 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1117 }
1118 if (! s->iam_user_policies.empty()) {
1119 for (auto& user_policy : s->iam_user_policies) {
1120 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1121 auto obj = rgw_obj(s->bucket, s->object);
1122 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1123 }
1124 }
1125 }
1126 if (!verify_object_permission(this, s, iam_action))
1127 return -EACCES;
1128 }
1129 return 0;
1130 }
1131
1132 void RGWDeleteObjTags::execute()
1133 {
1134 if (s->object.empty())
1135 return;
1136
1137 rgw_obj obj;
1138 obj = rgw_obj(s->bucket, s->object);
1139 store->set_atomic(s->obj_ctx, obj);
1140 map <string, bufferlist> attrs;
1141 map <string, bufferlist> rmattr;
1142 bufferlist bl;
1143 rmattr[RGW_ATTR_TAGS] = bl;
1144 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr);
1145 }
1146
1147 int RGWOp::do_aws4_auth_completion()
1148 {
1149 ldpp_dout(this, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
1150 if (s->auth.completer) {
1151 if (!s->auth.completer->complete()) {
1152 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
1153 } else {
1154 ldpp_dout(this, 10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
1155 }
1156
1157 /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
1158 * call passes, so we disable second one. This is old behaviour, sorry!
1159 * Plan for tomorrow: seek and destroy. */
1160 s->auth.completer = nullptr;
1161 }
1162
1163 return 0;
1164 }
1165
1166 int RGWOp::init_quota()
1167 {
1168 /* no quota enforcement for system requests */
1169 if (s->system_request)
1170 return 0;
1171
1172 /* init quota related stuff */
1173 if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
1174 return 0;
1175 }
1176
1177 /* only interested in object related ops */
1178 if (s->object.empty()) {
1179 return 0;
1180 }
1181
1182 RGWUserInfo owner_info;
1183 RGWUserInfo *uinfo;
1184
1185 if (s->user->user_id == s->bucket_owner.get_id()) {
1186 uinfo = s->user;
1187 } else {
1188 int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
1189 if (r < 0)
1190 return r;
1191 uinfo = &owner_info;
1192 }
1193
1194 if (s->bucket_info.quota.enabled) {
1195 bucket_quota = s->bucket_info.quota;
1196 } else if (uinfo->bucket_quota.enabled) {
1197 bucket_quota = uinfo->bucket_quota;
1198 } else {
1199 bucket_quota = store->svc.quota->get_bucket_quota();
1200 }
1201
1202 if (uinfo->user_quota.enabled) {
1203 user_quota = uinfo->user_quota;
1204 } else {
1205 user_quota = store->svc.quota->get_user_quota();
1206 }
1207
1208 return 0;
1209 }
1210
1211 static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
1212 uint8_t flags = 0;
1213
1214 if (!req_meth) {
1215 dout(5) << "req_meth is null" << dendl;
1216 return false;
1217 }
1218
1219 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
1220 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
1221 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
1222 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
1223 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
1224
1225 if (rule->get_allowed_methods() & flags) {
1226 dout(10) << "Method " << req_meth << " is supported" << dendl;
1227 } else {
1228 dout(5) << "Method " << req_meth << " is not supported" << dendl;
1229 return false;
1230 }
1231
1232 return true;
1233 }
1234
1235 static bool validate_cors_rule_header(RGWCORSRule *rule, const char *req_hdrs) {
1236 if (req_hdrs) {
1237 vector<string> hdrs;
1238 get_str_vec(req_hdrs, hdrs);
1239 for (const auto& hdr : hdrs) {
1240 if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) {
1241 dout(5) << "Header " << hdr << " is not registered in this rule" << dendl;
1242 return false;
1243 }
1244 }
1245 }
1246 return true;
1247 }
1248
1249 int RGWOp::read_bucket_cors()
1250 {
1251 bufferlist bl;
1252
1253 map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
1254 if (aiter == s->bucket_attrs.end()) {
1255 ldpp_dout(this, 20) << "no CORS configuration attr found" << dendl;
1256 cors_exist = false;
1257 return 0; /* no CORS configuration found */
1258 }
1259
1260 cors_exist = true;
1261
1262 bl = aiter->second;
1263
1264 auto iter = bl.cbegin();
1265 try {
1266 bucket_cors.decode(iter);
1267 } catch (buffer::error& err) {
1268 ldpp_dout(this, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
1269 return -EIO;
1270 }
1271 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
1272 RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
1273 ldpp_dout(this, 15) << "Read RGWCORSConfiguration";
1274 s3cors->to_xml(*_dout);
1275 *_dout << dendl;
1276 }
1277 return 0;
1278 }
1279
1280 /** CORS 6.2.6.
1281 * If any of the header field-names is not a ASCII case-insensitive match for
1282 * any of the values in list of headers do not set any additional headers and
1283 * terminate this set of steps.
1284 * */
1285 static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
1286 if (req_hdrs) {
1287 list<string> hl;
1288 get_str_list(req_hdrs, hl);
1289 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
1290 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
1291 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
1292 } else {
1293 if (hdrs.length() > 0) hdrs.append(",");
1294 hdrs.append((*it));
1295 }
1296 }
1297 }
1298 rule->format_exp_headers(exp_hdrs);
1299 *max_age = rule->get_max_age();
1300 }
1301
/**
 * Generate the CORS header response
 *
 * This is described in the CORS standard, section 6.2.
 *
 * Fills origin/method/headers/exp_headers/max_age from the bucket's
 * CORS configuration; returns true iff CORS headers should be emitted.
 * Side effect: op_ret is set when reading the bucket CORS config fails.
 */
bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
{
  /* CORS 6.2.1. */
  const char *orig = s->info.env->get("HTTP_ORIGIN");
  if (!orig) {
    return false;
  }

  /* Custom: */
  origin = orig;
  op_ret = read_bucket_cors();
  if (op_ret < 0) {
    return false;
  }

  if (!cors_exist) {
    ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
    return false;
  }

  /* CORS 6.2.2. */
  RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
  if (!rule)
    return false;

  /*
   * Set the Allowed-Origin header to an asterisk if this is allowed in the
   * rule and no Authorization was sent by the client
   *
   * The origin parameter specifies a URI that may access the resource. The browser must enforce this.
   * For requests without credentials, the server may specify "*" as a wildcard,
   * thereby allowing any origin to access the resource.
   */
  const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
  if (!authorization && rule->has_wildcard_origin())
    origin = "*";

  /* CORS 6.2.3. */
  const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    /* non-preflight requests carry no Request-Method header; validate the
     * actual request method instead */
    req_meth = s->info.method;
  }

  if (req_meth) {
    method = req_meth;
    /* CORS 6.2.5. */
    if (!validate_cors_rule_method(rule, req_meth)) {
      return false;
    }
  }

  /* CORS 6.2.4. */
  const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");

  /* CORS 6.2.6. */
  get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);

  return true;
}
1366
1367 int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
1368 const rgw_bucket_dir_entry& ent,
1369 RGWAccessControlPolicy * const bucket_acl,
1370 const boost::optional<Policy>& bucket_policy,
1371 const off_t start_ofs,
1372 const off_t end_ofs,
1373 bool swift_slo)
1374 {
1375 ldpp_dout(this, 20) << "user manifest obj=" << ent.key.name
1376 << "[" << ent.key.instance << "]" << dendl;
1377 RGWGetObj_CB cb(this);
1378 RGWGetObj_Filter* filter = &cb;
1379 boost::optional<RGWGetObj_Decompress> decompress;
1380
1381 int64_t cur_ofs = start_ofs;
1382 int64_t cur_end = end_ofs;
1383
1384 rgw_obj part(bucket, ent.key);
1385
1386 map<string, bufferlist> attrs;
1387
1388 uint64_t obj_size;
1389 RGWObjectCtx obj_ctx(store);
1390 RGWAccessControlPolicy obj_policy(s->cct);
1391
1392 ldpp_dout(this, 20) << "reading obj=" << part << " ofs=" << cur_ofs
1393 << " end=" << cur_end << dendl;
1394
1395 obj_ctx.set_atomic(part);
1396 store->set_prefetch_data(&obj_ctx, part);
1397
1398 RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
1399 RGWRados::Object::Read read_op(&op_target);
1400
1401 if (!swift_slo) {
1402 /* SLO etag is optional */
1403 read_op.conds.if_match = ent.meta.etag.c_str();
1404 }
1405 read_op.params.attrs = &attrs;
1406 read_op.params.obj_size = &obj_size;
1407
1408 op_ret = read_op.prepare();
1409 if (op_ret < 0)
1410 return op_ret;
1411 op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
1412 if (op_ret < 0)
1413 return op_ret;
1414 bool need_decompress;
1415 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
1416 if (op_ret < 0) {
1417 ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl;
1418 return -EIO;
1419 }
1420
1421 if (need_decompress)
1422 {
1423 if (cs_info.orig_size != ent.meta.accounted_size) {
1424 // hmm.. something wrong, object not as expected, abort!
1425 ldpp_dout(this, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size
1426 << ", actual read size=" << ent.meta.size << dendl;
1427 return -EIO;
1428 }
1429 decompress.emplace(s->cct, &cs_info, partial_content, filter);
1430 filter = &*decompress;
1431 }
1432 else
1433 {
1434 if (obj_size != ent.meta.size) {
1435 // hmm.. something wrong, object not as expected, abort!
1436 ldpp_dout(this, 0) << "ERROR: expected obj_size=" << obj_size
1437 << ", actual read size=" << ent.meta.size << dendl;
1438 return -EIO;
1439 }
1440 }
1441
1442 op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
1443 if (op_ret < 0)
1444 return op_ret;
1445
1446 /* We can use global user_acl because LOs cannot have segments
1447 * stored inside different accounts. */
1448 if (s->system_request) {
1449 ldpp_dout(this, 2) << "overriding permissions due to system operation" << dendl;
1450 } else if (s->auth.identity->is_admin_of(s->user->user_id)) {
1451 ldpp_dout(this, 2) << "overriding permissions due to admin operation" << dendl;
1452 } else if (!verify_object_permission(this, s, part, s->user_acl.get(), bucket_acl,
1453 &obj_policy, bucket_policy, s->iam_user_policies, action)) {
1454 return -EPERM;
1455 }
1456 if (ent.meta.size == 0) {
1457 return 0;
1458 }
1459
1460 perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
1461 filter->fixup_range(cur_ofs, cur_end);
1462 op_ret = read_op.iterate(cur_ofs, cur_end, filter);
1463 if (op_ret >= 0)
1464 op_ret = filter->flush();
1465 return op_ret;
1466 }
1467
/**
 * Walk all objects under @obj_prefix in @pbucket_info's bucket — the
 * parts of a Swift DLO — in listing order, optionally invoking @cb for
 * every part that overlaps the byte range [ofs, end].
 *
 * Depending on which output pointers are non-null, also computes:
 *  - *ptotal_len: bytes of [ofs, end] covered by the parts,
 *  - *pobj_size:  sum of all part sizes (the DLO's full size),
 *  - *pobj_sum:   MD5 over the parts' etags (the Swift large-object etag).
 *
 * @return 0 on success, or the negative error from listing / @cb.
 */
static int iterate_user_manifest_parts(CephContext * const cct,
                                       RGWRados * const store,
                                       const off_t ofs,
                                       const off_t end,
                                       RGWBucketInfo *pbucket_info,
                                       const string& obj_prefix,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const boost::optional<Policy>& bucket_policy,
                                       uint64_t * const ptotal_len,
                                       uint64_t * const pobj_size,
                                       string * const pobj_sum,
                                       int (*cb)(rgw_bucket& bucket,
                                                 const rgw_bucket_dir_entry& ent,
                                                 RGWAccessControlPolicy * const bucket_acl,
                                                 const boost::optional<Policy>& bucket_policy,
                                                 off_t start_ofs,
                                                 off_t end_ofs,
                                                 void *param,
                                                 bool swift_slo),
                                       void * const cb_param)
{
  rgw_bucket& bucket = pbucket_info->bucket;
  uint64_t obj_ofs = 0, len_count = 0;
  bool found_start = false, found_end = false, handled_end = false;
  string delim;
  bool is_truncated;
  vector<rgw_bucket_dir_entry> objs;

  utime_t start_time = ceph_clock_now();

  RGWRados::Bucket target(store, *pbucket_info);
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = obj_prefix;
  list_op.params.delim = delim;

  MD5 etag_sum;
  do {
#define MAX_LIST_OBJS 100
    int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
    if (r < 0) {
      return r;
    }

    for (rgw_bucket_dir_entry& ent : objs) {
      // obj_ofs is the running byte offset of this part within the DLO
      const uint64_t cur_total_len = obj_ofs;
      const uint64_t obj_size = ent.meta.accounted_size;
      uint64_t start_ofs = 0, end_ofs = obj_size;

      // first part overlapping ofs: trim its leading bytes
      if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
        start_ofs = ofs - obj_ofs;
        found_start = true;
      }

      obj_ofs += obj_size;
      if (pobj_sum) {
        etag_sum.Update((const unsigned char *)ent.meta.etag.c_str(),
                        ent.meta.etag.length());
      }

      // last part overlapping end: trim its trailing bytes
      if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
        end_ofs = end - cur_total_len + 1;
        found_end = true;
      }

      perfcounter->tinc(l_rgw_get_lat,
                        (ceph_clock_now() - start_time));

      // only parts inside [ofs, end] contribute / get streamed;
      // handled_end lags found_end by one part so the final part is
      // still processed before iteration effectively stops
      if (found_start && !handled_end) {
        len_count += end_ofs - start_ofs;

        if (cb) {
          r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs,
                 cb_param, false /* swift_slo */);
          if (r < 0) {
            return r;
          }
        }
      }

      handled_end = found_end;
      start_time = ceph_clock_now();
    }
  } while (is_truncated);

  if (ptotal_len) {
    *ptotal_len = len_count;
  }
  if (pobj_size) {
    *pobj_size = obj_ofs;
  }
  if (pobj_sum) {
    complete_etag(etag_sum, pobj_sum);
  }

  return 0;
}
1565
/* One segment of a Swift SLO, keyed in a map by its starting byte offset
 * within the assembled object. The ACL/policy pointers are non-owning and
 * must outlive the map (see RGWGetObj::handle_slo_manifest). */
struct rgw_slo_part {
  RGWAccessControlPolicy *bucket_acl = nullptr; // ACL of the bucket holding this part (non-owning)
  Policy* bucket_policy = nullptr;              // bucket policy, may stay null (non-owning)
  rgw_bucket bucket;                            // bucket holding the part object
  string obj_name;                              // part object name within that bucket
  uint64_t size = 0;                            // part size in bytes, from the manifest
  string etag;                                  // expected etag, from the manifest
};
1574
/**
 * Invoke @cb for every SLO part that overlaps the byte range [ofs, end].
 * @slo_parts maps each part's starting byte offset (within the assembled
 * object) to its description, so the first candidate part is located via
 * upper_bound(ofs) stepped back by one.
 *
 * @return 0 on success, or the first negative error returned by @cb.
 */
static int iterate_slo_parts(CephContext *cct,
                             RGWRados *store,
                             off_t ofs,
                             off_t end,
                             map<uint64_t, rgw_slo_part>& slo_parts,
                             int (*cb)(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy *bucket_acl,
                                       const boost::optional<Policy>& bucket_policy,
                                       off_t start_ofs,
                                       off_t end_ofs,
                                       void *param,
                                       bool swift_slo),
                             void *cb_param)
{
  bool found_start = false, found_end = false;

  if (slo_parts.empty()) {
    return 0;
  }

  utime_t start_time = ceph_clock_now();

  // position on the last part whose start offset is <= ofs
  map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
  if (iter != slo_parts.begin()) {
    --iter;
  }

  uint64_t obj_ofs = iter->first;

  for (; iter != slo_parts.end() && !found_end; ++iter) {
    rgw_slo_part& part = iter->second;
    rgw_bucket_dir_entry ent;

    // synthesize a dir entry so @cb can share its signature with the
    // DLO iteration path
    ent.key.name = part.obj_name;
    ent.meta.accounted_size = ent.meta.size = part.size;
    ent.meta.etag = part.etag;

    uint64_t cur_total_len = obj_ofs;
    uint64_t start_ofs = 0, end_ofs = ent.meta.size;

    // trim the first overlapping part to begin at the requested offset
    if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
      start_ofs = ofs - obj_ofs;
      found_start = true;
    }

    obj_ofs += ent.meta.size;

    // trim the last overlapping part to stop at the requested end
    if (!found_end && obj_ofs > (uint64_t)end) {
      end_ofs = end - cur_total_len + 1;
      found_end = true;
    }

    perfcounter->tinc(l_rgw_get_lat,
                      (ceph_clock_now() - start_time));

    if (found_start) {
      if (cb) {
        // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
        int r = cb(part.bucket, ent, part.bucket_acl,
                   (part.bucket_policy ?
                    boost::optional<Policy>(*part.bucket_policy) : none),
                   start_ofs, end_ofs, cb_param, true /* swift_slo */);
        if (r < 0)
          return r;
      }
    }

    start_time = ceph_clock_now();
  }

  return 0;
}
1648
// C-style trampoline for the DLO/SLO part iterators: @param carries the
// issuing RGWGetObj, which streams the part's [start_ofs, end_ofs) range.
static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
                                            const rgw_bucket_dir_entry& ent,
                                            RGWAccessControlPolicy * const bucket_acl,
                                            const boost::optional<Policy>& bucket_policy,
                                            const off_t start_ofs,
                                            const off_t end_ofs,
                                            void * const param,
                                            bool swift_slo = false)
{
  RGWGetObj *op = static_cast<RGWGetObj *>(param);
  return op->read_user_manifest_part(
    bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, swift_slo);
}
1662
/**
 * Resolve a Swift DLO: @prefix has the form "<container>/<object-prefix>"
 * (from X-Object-Manifest); every object under that prefix is a part of
 * the large object. Computes overall size and etag, clamps the requested
 * range, and (for data requests) streams the overlapping parts.
 *
 * @return 0 on success or a negative error code.
 */
int RGWGetObj::handle_user_manifest(const char *prefix)
{
  const boost::string_view prefix_view(prefix);
  ldpp_dout(this, 2) << "RGWGetObj::handle_user_manifest() prefix="
      << prefix_view << dendl;

  const size_t pos = prefix_view.find('/');
  if (pos == string::npos) {
    return -EINVAL;
  }

  const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
  const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));

  rgw_bucket bucket;

  RGWAccessControlPolicy _bucket_acl(s->cct);
  RGWAccessControlPolicy *bucket_acl;
  boost::optional<Policy> _bucket_policy;
  boost::optional<Policy>* bucket_policy;
  RGWBucketInfo bucket_info;
  RGWBucketInfo *pbucket_info;

  // the parts may live in a different bucket than the manifest object;
  // in that case its info/ACL/policy have to be fetched explicitly
  if (bucket_name.compare(s->bucket.name) != 0) {
    map<string, bufferlist> bucket_attrs;
    auto obj_ctx = store->svc.sysobj->init_obj_ctx();
    int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   bucket_name, bucket_info, NULL,
                                   &bucket_attrs);
    if (r < 0) {
      ldpp_dout(this, 0) << "could not get bucket info for bucket="
          << bucket_name << dendl;
      return r;
    }
    bucket = bucket_info.bucket;
    pbucket_info = &bucket_info;
    bucket_acl = &_bucket_acl;
    r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
    if (r < 0) {
      ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
      return r;
    }
    _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
                                              bucket_info.bucket.tenant);
    bucket_policy = &_bucket_policy;
  } else {
    bucket = s->bucket;
    pbucket_info = &s->bucket_info;
    bucket_acl = s->bucket_acl.get();
    bucket_policy = &s->iam_policy;
  }

  /* dry run to find out:
   * - total length (of the parts we are going to send to client),
   * - overall DLO's content size,
   * - md5 sum of overall DLO's content (for etag of Swift API). */
  int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, &s->obj_size, &lo_etag,
        nullptr /* cb */, nullptr /* cb arg */);
  if (r < 0) {
    return r;
  }

  // clamp [ofs, end] against the now-known full object size
  r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
  if (r < 0) {
    return r;
  }

  // second pass: compute just the length of the clamped range
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        &total_len, nullptr, nullptr,
        nullptr, nullptr);
  if (r < 0) {
    return r;
  }

  if (!get_data) {
    // HEAD request: headers only
    bufferlist bl;
    send_response_data(bl, 0, 0);
    return 0;
  }

  // final pass: actually stream every part overlapping the range
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, nullptr, nullptr,
        get_obj_user_manifest_iterate_cb, (void *)this);
  if (r < 0) {
    return r;
  }

  if (!total_len) {
    // zero-length result: still emit the (empty) response
    bufferlist bl;
    send_response_data(bl, 0, 0);
  }

  return 0;
}
1761
1762 int RGWGetObj::handle_slo_manifest(bufferlist& bl)
1763 {
1764 RGWSLOInfo slo_info;
1765 auto bliter = bl.cbegin();
1766 try {
1767 decode(slo_info, bliter);
1768 } catch (buffer::error& err) {
1769 ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
1770 return -EIO;
1771 }
1772 ldpp_dout(this, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
1773
1774 vector<RGWAccessControlPolicy> allocated_acls;
1775 map<string, pair<RGWAccessControlPolicy *, boost::optional<Policy>>> policies;
1776 map<string, rgw_bucket> buckets;
1777
1778 map<uint64_t, rgw_slo_part> slo_parts;
1779
1780 MD5 etag_sum;
1781 total_len = 0;
1782
1783 for (const auto& entry : slo_info.entries) {
1784 const string& path = entry.path;
1785
1786 /* If the path starts with slashes, strip them all. */
1787 const size_t pos_init = path.find_first_not_of('/');
1788 /* According to the documentation of std::string::find following check
1789 * is not necessary as we should get the std::string::npos propagation
1790 * here. This might be true with the accuracy to implementation's bugs.
1791 * See following question on SO:
1792 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
1793 */
1794 if (pos_init == string::npos) {
1795 return -EINVAL;
1796 }
1797
1798 const size_t pos_sep = path.find('/', pos_init);
1799 if (pos_sep == string::npos) {
1800 return -EINVAL;
1801 }
1802
1803 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
1804 string obj_name = path.substr(pos_sep + 1);
1805
1806 rgw_bucket bucket;
1807 RGWAccessControlPolicy *bucket_acl;
1808 Policy* bucket_policy;
1809
1810 if (bucket_name.compare(s->bucket.name) != 0) {
1811 const auto& piter = policies.find(bucket_name);
1812 if (piter != policies.end()) {
1813 bucket_acl = piter->second.first;
1814 bucket_policy = piter->second.second.get_ptr();
1815 bucket = buckets[bucket_name];
1816 } else {
1817 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
1818 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
1819
1820 RGWBucketInfo bucket_info;
1821 map<string, bufferlist> bucket_attrs;
1822 auto obj_ctx = store->svc.sysobj->init_obj_ctx();
1823 int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
1824 bucket_name, bucket_info, nullptr,
1825 &bucket_attrs);
1826 if (r < 0) {
1827 ldpp_dout(this, 0) << "could not get bucket info for bucket="
1828 << bucket_name << dendl;
1829 return r;
1830 }
1831 bucket = bucket_info.bucket;
1832 bucket_acl = &_bucket_acl;
1833 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
1834 bucket);
1835 if (r < 0) {
1836 ldpp_dout(this, 0) << "failed to read bucket ACL for bucket "
1837 << bucket << dendl;
1838 return r;
1839 }
1840 auto _bucket_policy = get_iam_policy_from_attr(
1841 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
1842 bucket_policy = _bucket_policy.get_ptr();
1843 buckets[bucket_name] = bucket;
1844 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
1845 }
1846 } else {
1847 bucket = s->bucket;
1848 bucket_acl = s->bucket_acl.get();
1849 bucket_policy = s->iam_policy.get_ptr();
1850 }
1851
1852 rgw_slo_part part;
1853 part.bucket_acl = bucket_acl;
1854 part.bucket_policy = bucket_policy;
1855 part.bucket = bucket;
1856 part.obj_name = obj_name;
1857 part.size = entry.size_bytes;
1858 part.etag = entry.etag;
1859 ldpp_dout(this, 20) << "slo_part: ofs=" << ofs
1860 << " bucket=" << part.bucket
1861 << " obj=" << part.obj_name
1862 << " size=" << part.size
1863 << " etag=" << part.etag
1864 << dendl;
1865
1866 etag_sum.Update((const unsigned char *)entry.etag.c_str(),
1867 entry.etag.length());
1868
1869 slo_parts[total_len] = part;
1870 total_len += part.size;
1871 } /* foreach entry */
1872
1873 complete_etag(etag_sum, &lo_etag);
1874
1875 s->obj_size = slo_info.total_size;
1876 ldpp_dout(this, 20) << "s->obj_size=" << s->obj_size << dendl;
1877
1878 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
1879 if (r < 0) {
1880 return r;
1881 }
1882
1883 total_len = end - ofs + 1;
1884
1885 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
1886 get_obj_user_manifest_iterate_cb, (void *)this);
1887 if (r < 0) {
1888 return r;
1889 }
1890
1891 return 0;
1892 }
1893
1894 int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
1895 {
1896 /* garbage collection related handling */
1897 utime_t start_time = ceph_clock_now();
1898 if (start_time > gc_invalidate_time) {
1899 int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
1900 if (r < 0) {
1901 ldpp_dout(this, 0) << "WARNING: could not defer gc entry for obj" << dendl;
1902 }
1903 gc_invalidate_time = start_time;
1904 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1905 }
1906 return send_response_data(bl, bl_ofs, bl_len);
1907 }
1908
1909 bool RGWGetObj::prefetch_data()
1910 {
1911 /* HEAD request, stop prefetch*/
1912 if (!get_data) {
1913 return false;
1914 }
1915
1916 bool prefetch_first_chunk = true;
1917 range_str = s->info.env->get("HTTP_RANGE");
1918
1919 if (range_str) {
1920 int r = parse_range();
1921 /* error on parsing the range, stop prefetch and will fail in execute() */
1922 if (r < 0) {
1923 return false; /* range_parsed==false */
1924 }
1925 /* range get goes to shadow objects, stop prefetch */
1926 if (ofs >= s->cct->_conf->rgw_max_chunk_size) {
1927 prefetch_first_chunk = false;
1928 }
1929 }
1930
1931 return get_data && prefetch_first_chunk;
1932 }
1933
// Common pre-execution hook for bucket/object ops; delegates to
// rgw_bucket_object_pre_exec().
void RGWGetObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
1938
1939 static bool object_is_expired(map<string, bufferlist>& attrs) {
1940 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
1941 if (iter != attrs.end()) {
1942 utime_t delete_at;
1943 try {
1944 decode(delete_at, iter->second);
1945 } catch (buffer::error& err) {
1946 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
1947 return false;
1948 }
1949
1950 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
1951 return true;
1952 }
1953 }
1954
1955 return false;
1956 }
1957
/**
 * Main GET-object flow: prepare the conditional read, then dispatch to
 * the special cases (STAT, torrent, DLO/SLO manifests) or stream the
 * object data through the decompression/decryption filter chain.
 * On any error control jumps to done_err, which emits the error
 * response; op_ret carries the result throughout.
 */
void RGWGetObj::execute()
{
  bufferlist bl;
  // arm the GC-deferral timer at half the configured GC min wait
  gc_invalidate_time = ceph_clock_now();
  gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);

  bool need_decompress;
  int64_t ofs_x, end_x;

  // filter chain: decrypt -> decompress -> client callback
  RGWGetObj_CB cb(this);
  RGWGetObj_Filter* filter = (RGWGetObj_Filter *)&cb;
  boost::optional<RGWGetObj_Decompress> decompress;
  std::unique_ptr<RGWGetObj_Filter> decrypt;
  map<string, bufferlist>::iterator attr_iter;

  perfcounter->inc(l_rgw_get);

  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
  RGWRados::Object::Read read_op(&op_target);

  op_ret = get_params();
  if (op_ret < 0)
    goto done_err;

  op_ret = init_common();
  if (op_ret < 0)
    goto done_err;

  // wire up the client's conditional-GET inputs
  read_op.conds.mod_ptr = mod_ptr;
  read_op.conds.unmod_ptr = unmod_ptr;
  read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
  read_op.conds.mod_zone_id = mod_zone_id;
  read_op.conds.mod_pg_ver = mod_pg_ver;
  read_op.conds.if_match = if_match;
  read_op.conds.if_nomatch = if_nomatch;
  read_op.params.attrs = &attrs;
  read_op.params.lastmod = &lastmod;
  read_op.params.obj_size = &s->obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    goto done_err;
  version_id = read_op.state.obj.key.instance;

  /* STAT ops don't need data, and do no i/o */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  /* start gettorrent */
  if (torrent.get_flag())
  {
    attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
    if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
      ldpp_dout(this, 0) << "ERROR: torrents are not supported for objects "
          "encrypted with SSE-C" << dendl;
      op_ret = -EINVAL;
      goto done_err;
    }
    torrent.init(s, store);
    op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj);
    if (op_ret < 0)
    {
      ldpp_dout(this, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
          << dendl;
      goto done_err;
    }
    op_ret = send_response_data(bl, 0, total_len);
    if (op_ret < 0)
    {
      ldpp_dout(this, 0) << "ERROR: failed to send_response_data ret= " << op_ret << dendl;
      goto done_err;
    }
    return;
  }
  /* end gettorrent */

  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    ldpp_dout(s, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    goto done_err;
  }
  if (need_decompress) {
    // report the uncompressed size to the client
    s->obj_size = cs_info.orig_size;
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }

  // Swift DLO: the object is a manifest pointing at its parts
  attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    op_ret = handle_user_manifest(attr_iter->second.c_str());
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "ERROR: failed to handle user manifest ret="
          << op_ret << dendl;
      goto done_err;
    }
    return;
  }

  // Swift SLO: static large object manifest
  attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    is_slo = true;
    op_ret = handle_slo_manifest(attr_iter->second);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
          << dendl;
      goto done_err;
    }
    return;
  }

  // for range requests with obj size 0
  if (range_str && !(s->obj_size)) {
    total_len = 0;
    op_ret = -ERANGE;
    goto done_err;
  }

  op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
  if (op_ret < 0)
    goto done_err;
  total_len = (ofs <= end ? end + 1 - ofs : 0);

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(attrs)) {
    op_ret = -ENOENT;
    goto done_err;
  }

  start = ofs;

  // splice a decryption filter in front if the object is encrypted
  attr_iter = attrs.find(RGW_ATTR_MANIFEST);
  op_ret = this->get_decrypt_filter(&decrypt, filter,
                                    attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
  if (decrypt != nullptr) {
    filter = decrypt.get();
  }
  if (op_ret < 0) {
    goto done_err;
  }

  if (!get_data || ofs > end) {
    // HEAD or empty range: headers only
    send_response_data(bl, 0, 0);
    return;
  }

  perfcounter->inc(l_rgw_get_b, end - ofs);

  // let the filters translate the logical range to the on-disk range
  ofs_x = ofs;
  end_x = end;
  filter->fixup_range(ofs_x, end_x);
  op_ret = read_op.iterate(ofs_x, end_x, filter);

  if (op_ret >= 0)
    op_ret = filter->flush();

  perfcounter->tinc(l_rgw_get_lat, s->time_elapsed());
  if (op_ret < 0) {
    goto done_err;
  }

  op_ret = send_response_data(bl, 0, 0);
  if (op_ret < 0) {
    goto done_err;
  }
  return;

done_err:
  send_response_data_error();
}
2129
2130 int RGWGetObj::init_common()
2131 {
2132 if (range_str) {
2133 /* range parsed error when prefetch */
2134 if (!range_parsed) {
2135 int r = parse_range();
2136 if (r < 0)
2137 return r;
2138 }
2139 }
2140 if (if_mod) {
2141 if (parse_time(if_mod, &mod_time) < 0)
2142 return -EINVAL;
2143 mod_ptr = &mod_time;
2144 }
2145
2146 if (if_unmod) {
2147 if (parse_time(if_unmod, &unmod_time) < 0)
2148 return -EINVAL;
2149 unmod_ptr = &unmod_time;
2150 }
2151
2152 return 0;
2153 }
2154
2155 int RGWListBuckets::verify_permission()
2156 {
2157 rgw::IAM::Partition partition = rgw::IAM::Partition::aws;
2158 rgw::IAM::Service service = rgw::IAM::Service::s3;
2159
2160 if (!verify_user_permission(this, s, ARN(partition, service, "", s->user->user_id.tenant, "*"), rgw::IAM::s3ListAllMyBuckets)) {
2161 return -EACCES;
2162 }
2163
2164 return 0;
2165 }
2166
2167 int RGWGetUsage::verify_permission()
2168 {
2169 if (s->auth.identity->is_anonymous()) {
2170 return -EACCES;
2171 }
2172
2173 return 0;
2174 }
2175
void RGWListBuckets::execute()
{
  bool done;
  bool started = false;
  uint64_t total_count = 0;

  // Buckets are fetched in chunks of at most this many entries per
  // rgw_read_user_buckets() call.
  const uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  op_ret = get_params();
  if (op_ret < 0) {
    goto send_end;
  }

  // Swift-style account listing also returns account metadata.
  if (supports_account_metadata()) {
    op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
    if (op_ret < 0) {
      goto send_end;
    }
  }

  is_truncated = false;
  do {
    RGWUserBuckets buckets;
    uint64_t read_count;
    if (limit >= 0) {
      // Never read past the caller-imposed limit.
      read_count = min(limit - total_count, max_buckets);
    } else {
      read_count = max_buckets;
    }

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, end_marker, read_count,
                                   should_get_stats(), &is_truncated,
                                   get_default_max());
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
                          << s->user->user_id << dendl;
      break;
    }

    /* We need to have stats for all our policies - even if a given policy
     * isn't actually used in a given account. In such situation its usage
     * stats would be simply full of zeros. */
    for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
      policies_stats.emplace(policy.second.name,
                             decltype(policies_stats)::mapped_type());
    }

    std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
    for (const auto& kv : m) {
      const auto& bucket = kv.second;

      global_stats.bytes_used += bucket.size;
      global_stats.bytes_used_rounded += bucket.size_rounded;
      global_stats.objects_count += bucket.count;

      /* operator[] still can create a new entry for storage policy seen
       * for first time. */
      auto& policy_stats = policies_stats[bucket.placement_rule.to_str()];
      policy_stats.bytes_used += bucket.size;
      policy_stats.bytes_used_rounded += bucket.size_rounded;
      policy_stats.buckets_count++;
      policy_stats.objects_count += bucket.count;
    }
    global_stats.buckets_count += m.size();
    total_count += m.size();

    // Stop when a short chunk came back or the explicit limit is reached.
    done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));

    // The response header is emitted lazily, on the first chunk.
    if (!started) {
      send_response_begin(buckets.count() > 0);
      started = true;
    }

    if (!m.empty()) {
      // Advance the pagination marker to the last key seen in this chunk.
      map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
      marker = riter->first;

      handle_listing_chunk(std::move(buckets));
    }
  } while (is_truncated && !done);

send_end:
  // Always emit a (possibly empty) response, even on early failure.
  if (!started) {
    send_response_begin(false);
  }
  send_response_end();
}
2266
void RGWGetUsage::execute()
{
  uint64_t start_epoch = 0;
  uint64_t end_epoch = (uint64_t)-1; // default: unbounded end of the window
  op_ret = get_params();
  if (op_ret < 0)
    return;

  // Optional date bounds narrow the usage window.
  if (!start_date.empty()) {
    op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "ERROR: failed to parse start date" << dendl;
      return;
    }
  }

  if (!end_date.empty()) {
    op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "ERROR: failed to parse end date" << dendl;
      return;
    }
  }

  uint32_t max_entries = 1000;

  bool is_truncated = true;

  RGWUsageIter usage_iter;

  // Page through the usage log until the listing is no longer truncated;
  // usage_iter carries the cursor between iterations.
  while (is_truncated) {
    op_ret = store->read_usage(s->user->user_id, s->bucket_name, start_epoch, end_epoch, max_entries,
                               &is_truncated, usage_iter, usage);

    if (op_ret == -ENOENT) {
      // No usage recorded yet: report an empty (successful) result.
      op_ret = 0;
      is_truncated = false;
    }

    if (op_ret < 0) {
      return;
    }
  }

  // Refresh aggregate user stats before reporting them.
  op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl;
    return;
  }

  op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl;
    return;
  }

  // Totals (entries/bytes) come from the user header object.
  string user_str = s->user->user_id.to_str();
  op_ret = store->cls_user_get_header(user_str, &header);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl;
    return;
  }

  return;
}
2332
2333 int RGWStatAccount::verify_permission()
2334 {
2335 if (!verify_user_permission_no_policy(this, s, RGW_PERM_READ)) {
2336 return -EACCES;
2337 }
2338
2339 return 0;
2340 }
2341
2342 void RGWStatAccount::execute()
2343 {
2344 string marker;
2345 bool is_truncated = false;
2346 uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
2347
2348 do {
2349 RGWUserBuckets buckets;
2350
2351 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker,
2352 string(), max_buckets, true, &is_truncated);
2353 if (op_ret < 0) {
2354 /* hmm.. something wrong here.. the user was authenticated, so it
2355 should exist */
2356 ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
2357 << s->user->user_id << dendl;
2358 break;
2359 } else {
2360 /* We need to have stats for all our policies - even if a given policy
2361 * isn't actually used in a given account. In such situation its usage
2362 * stats would be simply full of zeros. */
2363 for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
2364 policies_stats.emplace(policy.second.name,
2365 decltype(policies_stats)::mapped_type());
2366 }
2367
2368 std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
2369 for (const auto& kv : m) {
2370 const auto& bucket = kv.second;
2371
2372 global_stats.bytes_used += bucket.size;
2373 global_stats.bytes_used_rounded += bucket.size_rounded;
2374 global_stats.objects_count += bucket.count;
2375
2376 /* operator[] still can create a new entry for storage policy seen
2377 * for first time. */
2378 auto& policy_stats = policies_stats[bucket.placement_rule.to_str()];
2379 policy_stats.bytes_used += bucket.size;
2380 policy_stats.bytes_used_rounded += bucket.size_rounded;
2381 policy_stats.buckets_count++;
2382 policy_stats.objects_count += bucket.count;
2383 }
2384 global_stats.buckets_count += m.size();
2385
2386 }
2387 } while (is_truncated);
2388 }
2389
int RGWGetBucketVersioning::verify_permission()
{
  // Allowed for the bucket owner or via an s3:GetBucketVersioning grant
  // in the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
}
2394
void RGWGetBucketVersioning::pre_exec()
{
  // Common pre-execution hook shared by all bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2399
void RGWGetBucketVersioning::execute()
{
  // Report the versioning flags straight from the already-loaded bucket
  // info; no extra RADOS round-trip is needed.
  versioned = s->bucket_info.versioned();
  versioning_enabled = s->bucket_info.versioning_enabled();
  mfa_enabled = s->bucket_info.mfa_enabled();
}
2406
int RGWSetBucketVersioning::verify_permission()
{
  // Allowed for the bucket owner or via an s3:PutBucketVersioning grant
  // in the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
}
2411
void RGWSetBucketVersioning::pre_exec()
{
  // Common pre-execution hook shared by all bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2416
void RGWSetBucketVersioning::execute()
{
  op_ret = get_params();
  if (op_ret < 0)
    return;

  bool cur_mfa_status = (s->bucket_info.flags & BUCKET_MFA_ENABLED) != 0;

  // Only treat this as an MFA-delete change when the requested state
  // actually differs from the current one.
  mfa_set_status &= (mfa_status != cur_mfa_status);

  // Flipping the MFA-delete state requires a verified MFA token on the
  // request.
  if (mfa_set_status &&
      !s->mfa_verified) {
    op_ret = -ERR_MFA_REQUIRED;
    return;
  }

  // Non-master zones forward metadata writes to the metadata master.
  if (!store->svc.zone->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  bool modified = mfa_set_status;

  // Apply the flag changes under retry_raced_bucket_write() so a racing
  // bucket-info writer triggers a reload-and-retry instead of a lost
  // update.
  op_ret = retry_raced_bucket_write(store, s, [&] {
    if (mfa_set_status) {
      if (mfa_status) {
        s->bucket_info.flags |= BUCKET_MFA_ENABLED;
      } else {
        s->bucket_info.flags &= ~BUCKET_MFA_ENABLED;
      }
    }

    if (versioning_status == VersioningEnabled) {
      s->bucket_info.flags |= BUCKET_VERSIONED;
      s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
      modified = true;
    } else if (versioning_status == VersioningSuspended) {
      // A suspended bucket keeps BUCKET_VERSIONED set alongside the
      // suspension flag.
      s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
      modified = true;
    } else {
      // No versioning change requested; nothing to persist here.
      return op_ret;
    }
    return store->put_bucket_instance_info(s->bucket_info, false, real_time(),
                                           &s->bucket_attrs);
  });

  // Nothing was (supposed to be) written; success regardless of op_ret.
  if (!modified) {
    return;
  }

  if (op_ret < 0) {
    ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
                       << " returned err=" << op_ret << dendl;
    return;
  }
}
2476
int RGWGetBucketWebsite::verify_permission()
{
  // Allowed for the bucket owner or via an s3:GetBucketWebsite grant in
  // the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
}
2481
void RGWGetBucketWebsite::pre_exec()
{
  // Common pre-execution hook shared by all bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2486
void RGWGetBucketWebsite::execute()
{
  // The website configuration already lives in s->bucket_info; the only
  // work here is to report its absence as an error.
  if (!s->bucket_info.has_website) {
    op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION;
  }
}
2493
int RGWSetBucketWebsite::verify_permission()
{
  // Allowed for the bucket owner or via an s3:PutBucketWebsite grant in
  // the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
}
2498
void RGWSetBucketWebsite::pre_exec()
{
  // Common pre-execution hook shared by all bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2503
void RGWSetBucketWebsite::execute()
{
  op_ret = get_params();

  if (op_ret < 0)
    return;

  // Non-master zones forward metadata writes to the metadata master.
  if (!store->svc.zone->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  // Persist the parsed website configuration; retried if the bucket info
  // write races with another writer.
  op_ret = retry_raced_bucket_write(store, s, [this] {
    s->bucket_info.has_website = true;
    s->bucket_info.website_conf = website_conf;
    op_ret = store->put_bucket_instance_info(s->bucket_info, false,
                                             real_time(), &s->bucket_attrs);
    return op_ret;
  });

  if (op_ret < 0) {
    ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
                       << " returned err=" << op_ret << dendl;
    return;
  }
}
2533
int RGWDeleteBucketWebsite::verify_permission()
{
  // Allowed for the bucket owner or via an s3:DeleteBucketWebsite grant
  // in the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
}
2538
void RGWDeleteBucketWebsite::pre_exec()
{
  // Common pre-execution hook shared by all bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2543
void RGWDeleteBucketWebsite::execute()
{
  // Clear the website configuration and persist; retried if the bucket
  // info write races with another writer.
  op_ret = retry_raced_bucket_write(store, s, [this] {
    s->bucket_info.has_website = false;
    s->bucket_info.website_conf = RGWBucketWebsiteConf();
    op_ret = store->put_bucket_instance_info(s->bucket_info, false,
                                             real_time(), &s->bucket_attrs);
    return op_ret;
  });
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
                       << " returned err=" << op_ret << dendl;
    return;
  }
}
2559
2560 int RGWStatBucket::verify_permission()
2561 {
2562 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2563 if (!verify_bucket_permission(this, s, rgw::IAM::s3ListBucket)) {
2564 return -EACCES;
2565 }
2566
2567 return 0;
2568 }
2569
void RGWStatBucket::pre_exec()
{
  // Common pre-execution hook shared by all bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2574
void RGWStatBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  RGWUserBuckets buckets;
  bucket.bucket = s->bucket;
  buckets.add(bucket);
  map<string, RGWBucketEnt>& m = buckets.get_buckets();
  // update_containers_stats() appears to return the number of entries it
  // populated (positive == success). We just added exactly one bucket, so
  // 0 means the bucket's stats were not found — surfaced as -EEXIST.
  op_ret = store->update_containers_stats(m);
  if (! op_ret)
    op_ret = -EEXIST;
  if (op_ret > 0) {
    op_ret = 0;
    // Pull the refreshed entry back out of the map.
    map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
    if (iter != m.end()) {
      bucket = iter->second;
    } else {
      op_ret = -EINVAL;
    }
  }
}
2599
int RGWListBucket::verify_permission()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }
  // Expose the listing parameters to IAM policy condition evaluation
  // (s3:prefix, s3:delimiter, s3:max-keys) before checking permission.
  if (!prefix.empty())
    s->env.emplace("s3:prefix", prefix);

  if (!delimiter.empty())
    s->env.emplace("s3:delimiter", delimiter);

  s->env.emplace("s3:max-keys", std::to_string(max));

  // A versioned listing is governed by its own IAM action.
  if (!verify_bucket_permission(this,
                                s,
                                list_versions ?
                                rgw::IAM::s3ListBucketVersions :
                                rgw::IAM::s3ListBucket)) {
    return -EACCES;
  }

  return 0;
}
2624
// Parse the max-keys request parameter into `max`, falling back to
// default_max when absent. Returns 0 on success, negative on parse error.
int RGWListBucket::parse_max_keys()
{
  // Bound max value of max-keys to configured value for security
  // Bound min value of max-keys to '0'
  // Some S3 clients explicitly send max-keys=0 to detect if the bucket is
  // empty without listing any items.
  return parse_value_and_bound(max_keys, max, 0,
                               g_conf().get_val<uint64_t>("rgw_max_listing_results"),
                               default_max);
}
2635
void RGWListBucket::pre_exec()
{
  // Common pre-execution hook shared by all bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2640
void RGWListBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  // Unordered listing cannot honour delimiter-based grouping, so the
  // combination is rejected up front.
  if (allow_unordered && !delimiter.empty()) {
    ldpp_dout(this, 0) <<
      "ERROR: unordered bucket listing requested with a delimiter" << dendl;
    op_ret = -EINVAL;
    return;
  }

  // Swift container GETs also report container stats in the response.
  if (need_container_stats()) {
    map<string, RGWBucketEnt> m;
    m[s->bucket.name] = RGWBucketEnt();
    m.begin()->second.bucket = s->bucket;
    // Positive return == stats found; on failure we keep listing anyway.
    op_ret = store->update_containers_stats(m);
    if (op_ret > 0) {
      bucket = m.begin()->second;
    }
  }

  RGWRados::Bucket target(store, s->bucket_info);
  if (shard_id >= 0) {
    // Restrict the listing to a single bucket-index shard when requested.
    target.set_shard_id(shard_id);
  }
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = prefix;
  list_op.params.delim = delimiter;
  list_op.params.marker = marker;
  list_op.params.end_marker = end_marker;
  list_op.params.list_versions = list_versions;
  list_op.params.allow_unordered = allow_unordered;

  op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
  if (op_ret >= 0) {
    next_marker = list_op.get_next_marker();
  }
}
2683
int RGWGetBucketLogging::verify_permission()
{
  // Allowed for the bucket owner or via an s3:GetBucketLogging grant in
  // the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
}
2688
int RGWGetBucketLocation::verify_permission()
{
  // Allowed for the bucket owner or via an s3:GetBucketLocation grant in
  // the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
}
2693
int RGWCreateBucket::verify_permission()
{
  /* This check is mostly needed for S3 that doesn't support account ACL.
   * Swift doesn't allow to delegate any permission to an anonymous user,
   * so it will become an early exit in such case. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  // IAM check against the ARN of the bucket being created.
  rgw_bucket bucket;
  bucket.name = s->bucket_name;
  bucket.tenant = s->bucket_tenant;
  ARN arn = ARN(bucket);
  if (!verify_user_permission(this, s, arn, rgw::IAM::s3CreateBucket)) {
    return -EACCES;
  }

  // Cross-tenant bucket creation is not permitted.
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
                        << " (user_id.tenant=" << s->user->user_id.tenant
                        << " requested=" << s->bucket_tenant << ")"
                        << dendl;
    return -EACCES;
  }
  // A negative max_buckets disables bucket creation for this user.
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  // max_buckets == 0 leaves the count unrestricted; otherwise enforce the
  // quota by counting the user's existing buckets.
  if (s->user->max_buckets) {
    RGWUserBuckets buckets;
    string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, string(), s->user->max_buckets,
                                   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if ((int)buckets.count() >= s->user->max_buckets) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
2740
/* Replay a request on the metadata master zone's RGW and optionally parse
 * the JSON response.
 *
 * @param s             request state (supplies uid and the default req_info)
 * @param objv          optional object version passed through to the master
 * @param in_data       request body to forward
 * @param jp            if non-null, receives the parsed JSON response
 * @param forward_info  if non-null, forwarded instead of s->info
 * @return 0 on success, negative error code otherwise
 */
static int forward_request_to_master(struct req_state *s, obj_version *objv,
                                     RGWRados *store, bufferlist& in_data,
                                     JSONParser *jp, req_info *forward_info)
{
  if (!store->svc.zone->get_master_conn()) {
    ldpp_dout(s, 0) << "rest connection is invalid" << dendl;
    return -EINVAL;
  }
  ldpp_dout(s, 0) << "sending request to master zonegroup" << dendl;
  bufferlist response;
  string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
  int ret = store->svc.zone->get_master_conn()->forward(uid_str, (forward_info ? *forward_info : s->info),
                                                        objv, MAX_REST_RESPONSE, &in_data, &response);
  if (ret < 0)
    return ret;

  ldpp_dout(s, 20) << "response: " << response.c_str() << dendl;
  if (jp && !jp->parse(response.c_str(), response.length())) {
    ldpp_dout(s, 0) << "failed parsing response from master zonegroup" << dendl;
    return -EINVAL;
  }

  return 0;
}
2766
void RGWCreateBucket::pre_exec()
{
  // Common pre-execution hook shared by all bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2771
2772 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2773 map<string, bufferlist>& out_attrs,
2774 map<string, bufferlist>& out_rmattrs)
2775 {
2776 for (const auto& kv : orig_attrs) {
2777 const string& name = kv.first;
2778
2779 /* Check if the attr is user-defined metadata item. */
2780 if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
2781 RGW_ATTR_META_PREFIX) == 0) {
2782 /* For the objects all existing meta attrs have to be removed. */
2783 out_rmattrs[name] = kv.second;
2784 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2785 out_attrs[name] = kv.second;
2786 }
2787 }
2788 }
2789
2790 /* Fuse resource metadata basing on original attributes in @orig_attrs, set
2791 * of _custom_ attribute names to remove in @rmattr_names and attributes in
2792 * @out_attrs. Place results in @out_attrs.
2793 *
2794 * NOTE: it's supposed that all special attrs already present in @out_attrs
2795 * will be preserved without any change. Special attributes are those which
2796 * names start with RGW_ATTR_META_PREFIX. They're complement to custom ones
2797 * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta and so on. */
static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
                                  const set<string>& rmattr_names,
                                  map<string, bufferlist>& out_attrs)
{
  for (const auto& kv : orig_attrs) {
    const string& name = kv.first;

    /* Check if the attr is user-defined metadata item. */
    if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
                     RGW_ATTR_META_PREFIX) == 0) {
      /* For the buckets all existing meta attrs are preserved,
         except those that are listed in rmattr_names. */
      if (rmattr_names.find(name) != std::end(rmattr_names)) {
        // Explicitly requested for removal: drop any value the caller
        // may have staged for this key as well.
        const auto aiter = out_attrs.find(name);

        if (aiter != std::end(out_attrs)) {
          out_attrs.erase(aiter);
        }
      } else {
        /* emplace() won't alter the map if the key is already present.
         * This behaviour is fully intentional here. */
        out_attrs.emplace(kv);
      }
    } else if (out_attrs.find(name) == std::end(out_attrs)) {
      // Non-meta attrs carry over only when the caller didn't already
      // provide a replacement value.
      out_attrs[name] = kv.second;
    }
  }
}
2826
2827
/* Copy the request's generic attributes into @out_attrs, overwriting any
 * value already present for the same key. The value is stored with its
 * trailing NUL (val.size() + 1) so the bufferlist holds a C string. */
static void populate_with_generic_attrs(const req_state * const s,
                                        map<string, bufferlist>& out_attrs)
{
  for (const auto& kv : s->generic_attrs) {
    bufferlist& attrbl = out_attrs[kv.first];
    const string& val = kv.second;
    attrbl.clear();
    attrbl.append(val.c_str(), val.size() + 1);
  }
}
2838
2839
/* Extract Swift quota settings from the request attributes into @quota,
 * removing the corresponding entries from @add_attrs so they are not
 * persisted as plain xattrs. Names listed in @rmattr_names reset the
 * matching limit to unlimited (-1).
 *
 * @param quota_extracted  if non-null, set to whether any quota attr was
 *                         actually seen in @add_attrs.
 * @return 0 on success, -EINVAL when a quota value fails integer parsing.
 */
static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
                                 const std::set<std::string>& rmattr_names,
                                 RGWQuotaInfo& quota,
                                 bool * quota_extracted = nullptr)
{
  bool extracted = false;

  /* Put new limit on max objects. */
  auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
  std::string err;
  if (std::end(add_attrs) != iter) {
    quota.max_objects =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  /* Put new limit on bucket (container) size. */
  iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
  if (iter != add_attrs.end()) {
    quota.max_size =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  for (const auto& name : rmattr_names) {
    /* Remove limit on max objects. */
    if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
      quota.max_objects = -1;
      extracted = true;
    }

    /* Remove limit on max bucket size. */
    if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
      quota.max_size = -1;
      extracted = true;
    }
  }

  /* Swift requires checking on raw usage instead of the 4 KiB rounded one. */
  quota.check_on_raw = true;
  // The quota is active only while at least one positive limit is set.
  quota.enabled = quota.max_size > 0 || quota.max_objects > 0;

  if (quota_extracted) {
    *quota_extracted = extracted;
  }

  return 0;
}
2896
2897
/* Extract Swift static-website attributes from @add_attrs into @ws_conf,
 * removing them from the attr map. Names listed in @rmattr_names reset the
 * corresponding ws_conf field to an empty string. The listings flag goes
 * through a string staging variable and is folded to a bool at the end. */
static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
                               const std::set<std::string>& rmattr_names,
                               RGWBucketWebsiteConf& ws_conf)
{
  std::string lstval;

  /* Let's define a mapping between each custom attribute and the memory where
   * attribute's value should be stored. The memory location is expressed by
   * a non-const reference. */
  const auto mapping = {
    std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
    std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
    std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
    std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
    std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
  };

  for (const auto& kv : mapping) {
    const char * const key = kv.first;
    auto& target = kv.second;

    auto iter = add_attrs.find(key);

    if (std::end(add_attrs) != iter) {
      /* The "target" is a reference to ws_conf. */
      target = iter->second.c_str();
      add_attrs.erase(iter);
    }

    // Removal requests clear the field even if a new value was also sent.
    if (rmattr_names.count(key)) {
      target = std::string();
    }
  }

  // Fold the staged listings value into the boolean flag (case-insensitive
  // "true" enables listings); an absent/removed value leaves it untouched.
  if (! lstval.empty()) {
    ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
  }
}
2936
2937
2938 void RGWCreateBucket::execute()
2939 {
2940 RGWAccessControlPolicy old_policy(s->cct);
2941 buffer::list aclbl;
2942 buffer::list corsbl;
2943 bool existed;
2944 string bucket_name;
2945 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
2946 rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root, bucket_name);
2947 obj_version objv, *pobjv = NULL;
2948
2949 op_ret = get_params();
2950 if (op_ret < 0)
2951 return;
2952
2953 if (!relaxed_region_enforcement &&
2954 !location_constraint.empty() &&
2955 !store->svc.zone->has_zonegroup_api(location_constraint)) {
2956 ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
2957 << " can't be found." << dendl;
2958 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2959 s->err.message = "The specified location-constraint is not valid";
2960 return;
2961 }
2962
2963 if (!relaxed_region_enforcement && !store->svc.zone->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
2964 store->svc.zone->get_zonegroup().api_name != location_constraint) {
2965 ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
2966 << " doesn't match zonegroup" << " (" << store->svc.zone->get_zonegroup().api_name << ")"
2967 << dendl;
2968 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2969 s->err.message = "The specified location-constraint is not valid";
2970 return;
2971 }
2972
2973 const auto& zonegroup = store->svc.zone->get_zonegroup();
2974 if (!placement_rule.name.empty() &&
2975 !zonegroup.placement_targets.count(placement_rule.name)) {
2976 ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")"
2977 << " doesn't exist in the placement targets of zonegroup"
2978 << " (" << store->svc.zone->get_zonegroup().api_name << ")" << dendl;
2979 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2980 s->err.message = "The specified placement target does not exist";
2981 return;
2982 }
2983
2984 /* we need to make sure we read bucket info, it's not read before for this
2985 * specific request */
2986 op_ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
2987 s->bucket_info, nullptr, &s->bucket_attrs);
2988 if (op_ret < 0 && op_ret != -ENOENT)
2989 return;
2990 s->bucket_exists = (op_ret != -ENOENT);
2991
2992 s->bucket_owner.set_id(s->user->user_id);
2993 s->bucket_owner.set_name(s->user->display_name);
2994 if (s->bucket_exists) {
2995 int r = rgw_op_get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
2996 s->bucket_attrs, &old_policy);
2997 if (r >= 0) {
2998 if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
2999 op_ret = -EEXIST;
3000 return;
3001 }
3002 }
3003 }
3004
3005 RGWBucketInfo master_info;
3006 rgw_bucket *pmaster_bucket;
3007 uint32_t *pmaster_num_shards;
3008 real_time creation_time;
3009
3010 if (!store->svc.zone->is_meta_master()) {
3011 JSONParser jp;
3012 op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
3013 if (op_ret < 0) {
3014 return;
3015 }
3016
3017 JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
3018 JSONDecoder::decode_json("object_ver", objv, &jp);
3019 JSONDecoder::decode_json("bucket_info", master_info, &jp);
3020 ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
3021 ldpp_dout(this, 20) << "got creation time: << " << master_info.creation_time << dendl;
3022 pmaster_bucket= &master_info.bucket;
3023 creation_time = master_info.creation_time;
3024 pmaster_num_shards = &master_info.num_shards;
3025 pobjv = &objv;
3026 } else {
3027 pmaster_bucket = NULL;
3028 pmaster_num_shards = NULL;
3029 }
3030
3031 string zonegroup_id;
3032
3033 if (s->system_request) {
3034 zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
3035 if (zonegroup_id.empty()) {
3036 zonegroup_id = store->svc.zone->get_zonegroup().get_id();
3037 }
3038 } else {
3039 zonegroup_id = store->svc.zone->get_zonegroup().get_id();
3040 }
3041
3042 if (s->bucket_exists) {
3043 rgw_placement_rule selected_placement_rule;
3044 rgw_bucket bucket;
3045 bucket.tenant = s->bucket_tenant;
3046 bucket.name = s->bucket_name;
3047 op_ret = store->svc.zone->select_bucket_placement(*(s->user), zonegroup_id,
3048 placement_rule,
3049 &selected_placement_rule, nullptr);
3050 if (selected_placement_rule != s->bucket_info.placement_rule) {
3051 op_ret = -EEXIST;
3052 return;
3053 }
3054 }
3055
3056 /* Encode special metadata first as we're using std::map::emplace under
3057 * the hood. This method will add the new items only if the map doesn't
3058 * contain such keys yet. */
3059 policy.encode(aclbl);
3060 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3061
3062 if (has_cors) {
3063 cors_config.encode(corsbl);
3064 emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
3065 }
3066
3067 RGWQuotaInfo quota_info;
3068 const RGWQuotaInfo * pquota_info = nullptr;
3069 if (need_metadata_upload()) {
3070 /* It's supposed that following functions WILL NOT change any special
3071 * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
3072 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
3073 if (op_ret < 0) {
3074 return;
3075 }
3076 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
3077 populate_with_generic_attrs(s, attrs);
3078
3079 op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
3080 if (op_ret < 0) {
3081 return;
3082 } else {
3083 pquota_info = &quota_info;
3084 }
3085
3086 /* Web site of Swift API. */
3087 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
3088 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
3089 }
3090
3091 s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
3092 s->bucket.name = s->bucket_name;
3093
3094 /* Handle updates of the metadata for Swift's object versioning. */
3095 if (swift_ver_location) {
3096 s->bucket_info.swift_ver_location = *swift_ver_location;
3097 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
3098 }
3099
3100 op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
3101 placement_rule, s->bucket_info.swift_ver_location,
3102 pquota_info, attrs,
3103 info, pobjv, &ep_objv, creation_time,
3104 pmaster_bucket, pmaster_num_shards, true);
3105 /* continue if EEXIST and create_bucket will fail below. this way we can
3106 * recover from a partial create by retrying it. */
3107 ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;
3108
3109 if (op_ret && op_ret != -EEXIST)
3110 return;
3111
3112 existed = (op_ret == -EEXIST);
3113
3114 if (existed) {
3115 /* bucket already existed, might have raced with another bucket creation, or
3116 * might be partial bucket creation that never completed. Read existing bucket
3117 * info, verify that the reported bucket owner is the current user.
3118 * If all is ok then update the user's list of buckets.
3119 * Otherwise inform client about a name conflict.
3120 */
3121 if (info.owner.compare(s->user->user_id) != 0) {
3122 op_ret = -EEXIST;
3123 return;
3124 }
3125 s->bucket = info.bucket;
3126 }
3127
3128 op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket,
3129 info.creation_time, false);
3130 if (op_ret && !existed && op_ret != -EEXIST) {
3131 /* if it exists (or previously existed), don't remove it! */
3132 op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
3133 s->bucket.name);
3134 if (op_ret < 0) {
3135 ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
3136 << dendl;
3137 }
3138 } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
3139 op_ret = -ERR_BUCKET_EXISTS;
3140 }
3141
3142 if (need_metadata_upload() && existed) {
3143 /* OK, it looks we lost race with another request. As it's required to
3144 * handle metadata fusion and upload, the whole operation becomes very
3145 * similar in nature to PutMetadataBucket. However, as the attrs may
3146 * changed in the meantime, we have to refresh. */
3147 short tries = 0;
3148 do {
3149 RGWBucketInfo binfo;
3150 map<string, bufferlist> battrs;
3151
3152 op_ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
3153 binfo, nullptr, &battrs);
3154 if (op_ret < 0) {
3155 return;
3156 } else if (binfo.owner.compare(s->user->user_id) != 0) {
3157 /* New bucket doesn't belong to the account we're operating on. */
3158 op_ret = -EEXIST;
3159 return;
3160 } else {
3161 s->bucket_info = binfo;
3162 s->bucket_attrs = battrs;
3163 }
3164
3165 attrs.clear();
3166
3167 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
3168 if (op_ret < 0) {
3169 return;
3170 }
3171 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
3172 populate_with_generic_attrs(s, attrs);
3173 op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
3174 if (op_ret < 0) {
3175 return;
3176 }
3177
3178 /* Handle updates of the metadata for Swift's object versioning. */
3179 if (swift_ver_location) {
3180 s->bucket_info.swift_ver_location = *swift_ver_location;
3181 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
3182 }
3183
3184 /* Web site of Swift API. */
3185 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
3186 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
3187
3188 /* This will also set the quota on the bucket. */
3189 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
3190 &s->bucket_info.objv_tracker);
3191 } while (op_ret == -ECANCELED && tries++ < 20);
3192
3193 /* Restore the proper return code. */
3194 if (op_ret >= 0) {
3195 op_ret = -ERR_BUCKET_EXISTS;
3196 }
3197 }
3198 }
3199
3200 int RGWDeleteBucket::verify_permission()
3201 {
3202 if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucket)) {
3203 return -EACCES;
3204 }
3205
3206 return 0;
3207 }
3208
void RGWDeleteBucket::pre_exec()
{
  // Standard pre-execution hook shared by bucket/object-scoped ops.
  rgw_bucket_object_pre_exec(s);
}
3213
void RGWDeleteBucket::execute()
{
  // Deletes the bucket named in the request. Sequence: validate the request,
  // flush user stats, require the bucket to be empty, forward to the metadata
  // master when this zone is not it, abort stale multipart uploads, delete
  // the bucket entrypoint, and finally unlink it from the owner's bucket list.
  // Result is reported through op_ret.
  if (s->bucket_name.empty()) {
    op_ret = -EINVAL;
    return;
  }

  if (!s->bucket_exists) {
    ldpp_dout(this, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }
  RGWObjVersionTracker ot;
  ot.read_version = s->bucket_info.ep_objv;

  if (s->system_request) {
    // System requests (e.g. multisite metadata sync) may pin the exact
    // entrypoint version they expect to delete via tag/ver query params.
    string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
    string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
    if (!tag.empty()) {
      ot.read_version.tag = tag;
      uint64_t ver;
      string err;
      ver = strict_strtol(ver_str.c_str(), 10, &err);
      if (!err.empty()) {
        ldpp_dout(this, 0) << "failed to parse ver param" << dendl;
        op_ret = -EINVAL;
        return;
      }
      ot.read_version.ver = ver;
    }
  }

  // Best-effort stats flush; failure is logged but does not block deletion.
  op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
  if ( op_ret < 0) {
     ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
  }

  op_ret = store->check_bucket_empty(s->bucket_info);
  if (op_ret < 0) {
    return;
  }

  if (!store->svc.zone->is_meta_master()) {
    // Metadata mutations must be applied on the master zone first.
    bufferlist in_data;
    op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       NULL);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        /* adjust error, we want to return with NoSuchBucket and not
         * NoSuchKey */
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return;
    }
  }

  string prefix, delimiter;

  if (s->prot_flags & RGW_REST_SWIFT) {
    // Swift bulk delete may restrict the multipart cleanup to a sub-path.
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }

  // Clean up any in-flight multipart uploads so their parts don't leak.
  op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);

  if (op_ret < 0) {
    return;
  }

  op_ret = store->delete_bucket(s->bucket_info, ot, false);

  if (op_ret == -ECANCELED) {
    // lost a race, either with mdlog sync or another delete bucket operation.
    // in either case, we've already called rgw_unlink_bucket()
    op_ret = 0;
    return;
  }

  if (op_ret == 0) {
    // Unlink failure is logged but not fatal: the bucket itself is gone.
    op_ret = rgw_unlink_bucket(store, s->bucket_info.owner, s->bucket.tenant,
                               s->bucket.name, false);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                         << dendl;
    }
  }
}
3309
int RGWPutObj::verify_permission()
{
  // Authorizes a PUT: for server-side copies, read access to the source is
  // verified first (user policies, then bucket policy, then plain ACL);
  // then s3:PutObject is evaluated for the destination the same way.
  // Returns 0 when allowed, -EACCES otherwise.
  if (! copy_source.empty()) {

    RGWAccessControlPolicy cs_acl(s->cct);
    boost::optional<Policy> policy;
    map<string, bufferlist> cs_attrs;
    rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
    rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);

    rgw_obj obj(cs_bucket, cs_object);
    store->set_atomic(s->obj_ctx, obj);
    store->set_prefetch_data(s->obj_ctx, obj);

    /* check source object permissions */
    if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr,
                        policy, cs_bucket, cs_object) < 0) {
      return -EACCES;
    }

    /* admin request overrides permission checks */
    if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
      if (policy || ! s->iam_user_policies.empty()) {
        // Identity-based (user) policies run first: an explicit Deny
        // short-circuits; an Allow is remembered and stops the loop.
        auto usr_policy_res = Effect::Pass;
        for (auto& user_policy : s->iam_user_policies) {
          if (usr_policy_res = user_policy.eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj)); usr_policy_res == Effect::Deny)
            return -EACCES;
          else if (usr_policy_res == Effect::Allow)
            break;
        }
        // Then the source bucket's resource policy, if any.
        rgw::IAM::Effect e = Effect::Pass;
        if (policy) {
          e = policy->eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj));
        }
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (usr_policy_res == Effect::Pass && e == Effect::Pass &&
                   !cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
                                                RGW_PERM_READ)) {
          // Neither policy decided; fall back to the source object ACL.
          return -EACCES;
        }
      } else if (!cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
                                           RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  auto op_ret = get_params();
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "get_params() returned ret=" << op_ret << dendl;
    return op_ret;
  }

  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    // Populate the IAM request environment with condition keys derived from
    // the request: grant headers, canned ACL, object tags, SSE headers.
    rgw_add_grant_to_iam_environment(s->env, s);

    rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);

    if (obj_tags != nullptr && obj_tags->count() > 0){
      auto tags = obj_tags->get_tags();
      for (const auto& kv: tags){
        rgw_add_to_iam_environment(s->env, "s3:RequestObjectTag/"+kv.first, kv.second);
      }
    }

    constexpr auto encrypt_attr = "x-amz-server-side-encryption";
    constexpr auto s3_encrypt_attr = "s3:x-amz-server-side-encryption";
    auto enc_header = s->info.x_meta_map.find(encrypt_attr);
    if (enc_header != s->info.x_meta_map.end()){
      rgw_add_to_iam_environment(s->env, s3_encrypt_attr, enc_header->second);
    }

    constexpr auto kms_attr = "x-amz-server-side-encryption-aws-kms-key-id";
    constexpr auto s3_kms_attr = "s3:x-amz-server-side-encryption-aws-kms-key-id";
    auto kms_header = s->info.x_meta_map.find(kms_attr);
    if (kms_header != s->info.x_meta_map.end()){
      rgw_add_to_iam_environment(s->env, s3_kms_attr, kms_header->second);
    }

    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                            boost::none,
                                            rgw::IAM::s3PutObject,
                                            rgw_obj(s->bucket, s->object));
    if (usr_policy_res == Effect::Deny)
      return -EACCES;

    rgw::IAM::Effect e = Effect::Pass;
    if (s->iam_policy) {
      e = s->iam_policy->eval(s->env, *s->auth.identity,
                              rgw::IAM::s3PutObject,
                              rgw_obj(s->bucket, s->object));
    }
    // Bucket-policy Allow or Deny is final; a user-policy Allow also wins.
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    } else if (usr_policy_res == Effect::Allow) {
      return 0;
    }
  }

  // No policy decided the request — fall back to the legacy ACL check.
  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
3426
3427
void RGWPutObj::pre_exec()
{
  // Standard pre-execution hook shared by bucket/object-scoped ops.
  rgw_bucket_object_pre_exec(s);
}
3432
3433 class RGWPutObj_CB : public RGWGetObj_Filter
3434 {
3435 RGWPutObj *op;
3436 public:
3437 explicit RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
3438 ~RGWPutObj_CB() override {}
3439
3440 int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
3441 return op->get_data_cb(bl, bl_ofs, bl_len);
3442 }
3443 };
3444
3445 int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3446 {
3447 bufferlist bl_tmp;
3448 bl.copy(bl_ofs, bl_len, bl_tmp);
3449
3450 bl_aux.append(bl_tmp);
3451
3452 return bl_len;
3453 }
3454
3455 int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3456 {
3457 RGWPutObj_CB cb(this);
3458 RGWGetObj_Filter* filter = &cb;
3459 boost::optional<RGWGetObj_Decompress> decompress;
3460 std::unique_ptr<RGWGetObj_Filter> decrypt;
3461 RGWCompressionInfo cs_info;
3462 map<string, bufferlist> attrs;
3463 map<string, bufferlist>::iterator attr_iter;
3464 int ret = 0;
3465
3466 uint64_t obj_size;
3467 int64_t new_ofs, new_end;
3468
3469 new_ofs = fst;
3470 new_end = lst;
3471
3472 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3473 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3474
3475 RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3476 RGWRados::Object::Read read_op(&op_target);
3477 read_op.params.obj_size = &obj_size;
3478 read_op.params.attrs = &attrs;
3479
3480 ret = read_op.prepare();
3481 if (ret < 0)
3482 return ret;
3483
3484 bool need_decompress;
3485 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3486 if (op_ret < 0) {
3487 ldpp_dout(s, 0) << "ERROR: failed to decode compression info" << dendl;
3488 return -EIO;
3489 }
3490
3491 bool partial_content = true;
3492 if (need_decompress)
3493 {
3494 obj_size = cs_info.orig_size;
3495 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3496 filter = &*decompress;
3497 }
3498
3499 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3500 op_ret = this->get_decrypt_filter(&decrypt,
3501 filter,
3502 attrs,
3503 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3504 if (decrypt != nullptr) {
3505 filter = decrypt.get();
3506 }
3507 if (op_ret < 0) {
3508 return ret;
3509 }
3510
3511 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3512 if (ret < 0)
3513 return ret;
3514
3515 filter->fixup_range(new_ofs, new_end);
3516 ret = read_op.iterate(new_ofs, new_end, filter);
3517
3518 if (ret >= 0)
3519 ret = filter->flush();
3520
3521 bl.claim_append(bl_aux);
3522
3523 return ret;
3524 }
3525
3526 // special handling for compression type = "random" with multipart uploads
3527 static CompressorRef get_compressor_plugin(const req_state *s,
3528 const std::string& compression_type)
3529 {
3530 if (compression_type != "random") {
3531 return Compressor::create(s->cct, compression_type);
3532 }
3533
3534 bool is_multipart{false};
3535 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3536
3537 if (!is_multipart) {
3538 return Compressor::create(s->cct, compression_type);
3539 }
3540
3541 // use a hash of the multipart upload id so all parts use the same plugin
3542 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3543 if (alg == Compressor::COMP_ALG_NONE) {
3544 return nullptr;
3545 }
3546 return Compressor::create(s->cct, alg);
3547 }
3548
void RGWPutObj::execute()
{
  // Entry point for object PUT, covering the plain, multipart-part, append
  // and server-side-copy variants. Flow: validate request and quotas, pick
  // the appropriate object processor, stream the body through the optional
  // encryption/compression filter chain, verify digests, assemble the attr
  // set and complete the write. Result is reported through op_ret.
  char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
  char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  MD5 hash;
  bufferlist bl, aclbl, bs;
  int len;

  off_t fst;
  off_t lst;

  // DLO/SLO manifest PUTs skip the digest by default; a client-supplied
  // Content-MD5 re-enables the check below.
  bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
  perfcounter->inc(l_rgw_put);
  // report latency on return
  auto put_lat = make_scope_guard([&] {
      perfcounter->tinc(l_rgw_put_lat, s->time_elapsed());
    });

  op_ret = -EINVAL;
  if (s->object.empty()) {
    return;
  }

  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }


  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "get_system_versioning_params() returned ret="
                      << op_ret << dendl;
    return;
  }

  if (supplied_md5_b64) {
    // Decode the client's base64 Content-MD5 into binary and hex forms.
    need_calc_md5 = true;

    ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
    op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
                       supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
    ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl;
    if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
      op_ret = -ERR_INVALID_DIGEST;
      return;
    }

    buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
    ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
  }

  if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
                            we also check sizes at the end anyway */
    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                                user_quota, bucket_quota, s->content_length);
    if (op_ret < 0) {
      ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl;
      return;
    }
    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
    if (op_ret < 0) {
      ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (supplied_etag) {
    strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
    supplied_md5[sizeof(supplied_md5) - 1] = '\0';
  }

  const bool multipart = !multipart_upload_id.empty();
  auto& obj_ctx = *static_cast<RGWObjectCtx*>(s->obj_ctx);
  rgw_obj obj{s->bucket, s->object};

  /* Handle object versioning of Swift API. */
  if (! multipart) {
    op_ret = store->swift_versioning_copy(obj_ctx,
                                          s->bucket_owner.get_id(),
                                          s->bucket_info,
                                          obj);
    if (op_ret < 0) {
      return;
    }
  }

  // create the object processor, sized to hold any of the three variants
  rgw::AioThrottle aio(store->ctx()->_conf->rgw_put_obj_min_window_size);
  using namespace rgw::putobj;
  constexpr auto max_processor_size = std::max({sizeof(MultipartObjectProcessor),
                                                sizeof(AtomicObjectProcessor),
                                                sizeof(AppendObjectProcessor)});
  ceph::static_ptr<ObjectProcessor, max_processor_size> processor;

  rgw_placement_rule *pdest_placement;

  if (multipart) {
    // Part upload: the placement was fixed when the upload was initiated.
    RGWMPObj mp(s->object.name, multipart_upload_id);

    multipart_upload_info upload_info;
    op_ret = get_multipart_info(store, s, mp.get_meta(), nullptr, nullptr, &upload_info);
    if (op_ret < 0) {
      if (op_ret != -ENOENT) {
        ldpp_dout(this, 0) << "ERROR: get_multipart_info returned " << op_ret << ": " << cpp_strerror(-op_ret) << dendl;
      } else {// -ENOENT: raced with upload complete/cancel, no need to spam log
        ldpp_dout(this, 20) << "failed to get multipart info (returned " << op_ret << ": " << cpp_strerror(-op_ret) << "): probably raced with upload complete / cancel" << dendl;
      }
      return;
    }
    pdest_placement = &upload_info.dest_placement;
    ldpp_dout(this, 20) << "dest_placement for part=" << upload_info.dest_placement << dendl;
    processor.emplace<MultipartObjectProcessor>(
        &aio, store, s->bucket_info, pdest_placement,
        s->owner.get_id(), obj_ctx, obj,
        multipart_upload_id, multipart_part_num, multipart_part_str);
  } else if(append) {
    // Append writes are incompatible with bucket versioning.
    if (s->bucket_info.versioned()) {
      op_ret = -ERR_INVALID_BUCKET_STATE;
      return;
    }
    pdest_placement = &s->dest_placement;
    processor.emplace<AppendObjectProcessor>(
            &aio, store, s->bucket_info, pdest_placement, s->bucket_owner.get_id(),obj_ctx, obj,
            s->req_id, position, &cur_accounted_size);
  } else {
    // Plain atomic PUT; mint a version instance when versioning is on.
    if (s->bucket_info.versioning_enabled()) {
      if (!version_id.empty()) {
        obj.key.set_instance(version_id);
      } else {
        store->gen_rand_obj_instance_name(&obj);
        version_id = obj.key.instance;
      }
    }
    pdest_placement = &s->dest_placement;
    processor.emplace<AtomicObjectProcessor>(
        &aio, store, s->bucket_info, pdest_placement,
        s->bucket_owner.get_id(), obj_ctx, obj, olh_epoch, s->req_id);
  }

  op_ret = processor->prepare();
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "processor->prepare() returned ret=" << op_ret
                      << dendl;
    return;
  }

  if ((! copy_source.empty()) && !copy_source_range) {
    // Full-object copy: derive the byte range from the source's size.
    rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
    rgw_obj obj(copy_source_bucket_info.bucket, obj_key.name);

    RGWObjState *astate;
    op_ret = store->get_obj_state(&obj_ctx, copy_source_bucket_info, obj,
                                  &astate, true, false);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
      return;
    }
    if (!astate->exists){
      op_ret = -ENOENT;
      return;
    }
    lst = astate->accounted_size - 1;
  } else {
    lst = copy_source_range_lst;
  }

  fst = copy_source_range_fst;

  // no filters by default
  DataProcessor *filter = processor.get();

  const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(*pdest_placement);
  CompressorRef plugin;
  boost::optional<RGWPutObj_Compress> compressor;

  // Encryption and compression are mutually exclusive: encrypted payloads
  // don't compress, so compression is only attached when not encrypting.
  std::unique_ptr<DataProcessor> encrypt;
  op_ret = get_encrypt_filter(&encrypt, filter);
  if (op_ret < 0) {
    return;
  }
  if (encrypt != nullptr) {
    filter = &*encrypt;
  } else if (compression_type != "none") {
    plugin = get_compressor_plugin(s, compression_type);
    if (!plugin) {
      ldpp_dout(this, 1) << "Cannot load plugin for compression type "
          << compression_type << dendl;
    } else {
      compressor.emplace(s->cct, plugin, filter);
      filter = &*compressor;
    }
  }
  tracepoint(rgw_op, before_data_transfer, s->req_id.c_str());
  // Main transfer loop: pull a chunk (from the client or the copy source),
  // hash it, feed the torrent accumulator, and push it down the filters.
  do {
    bufferlist data;
    if (fst > lst)
      break;
    if (copy_source.empty()) {
      len = get_data(data);
    } else {
      uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
      op_ret = get_data(fst, cur_lst, data);
      if (op_ret < 0)
        return;
      len = data.length();
      s->content_length += len;
      fst += len;
    }
    if (len < 0) {
      op_ret = len;
      ldpp_dout(this, 20) << "get_data() returned ret=" << op_ret << dendl;
      return;
    } else if (len == 0) {
      break;
    }

    if (need_calc_md5) {
      hash.Update((const unsigned char *)data.c_str(), data.length());
    }

    /* update torrent */
    torrent.update(data);

    op_ret = filter->process(std::move(data), ofs);
    if (op_ret < 0) {
      ldpp_dout(this, 20) << "processor->process() returned ret="
          << op_ret << dendl;
      return;
    }

    ofs += len;
  } while (len > 0);
  tracepoint(rgw_op, after_data_transfer, s->req_id.c_str(), ofs);

  // flush any data in filters
  op_ret = filter->process({}, ofs);
  if (op_ret < 0) {
    return;
  }

  // For non-chunked uploads a short body means the client went away.
  if (!chunked_upload && ofs != s->content_length) {
    op_ret = -ERR_REQUEST_TIMEOUT;
    return;
  }
  s->obj_size = ofs;

  perfcounter->inc(l_rgw_put_b, s->obj_size);

  op_ret = do_aws4_auth_completion();
  if (op_ret < 0) {
    return;
  }

  // Re-check quota/shards now that the actual object size is known.
  op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                              user_quota, bucket_quota, s->obj_size);
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
    return;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
    return;
  }

  hash.Final(m);

  if (compressor && compressor->is_compressed()) {
    // Persist compression metadata so reads know how to decompress.
    bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = move(compressor->get_compression_blocks());
    encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
    ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION
        << " with type=" << cs_info.compression_type
        << ", orig_size=" << cs_info.orig_size
        << ", blocks=" << cs_info.blocks.size() << dendl;
  }

  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  etag = calc_md5;

  if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
    op_ret = -ERR_BAD_DIGEST;
    return;
  }

  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  if (slo_info) {
    bufferlist manifest_bl;
    encode(*slo_info, manifest_bl);
    emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));
  }

  if (supplied_etag && etag.compare(supplied_etag) != 0) {
    op_ret = -ERR_UNPROCESSABLE_ENTITY;
    return;
  }
  bl.append(etag.c_str(), etag.size());
  emplace_attr(RGW_ATTR_ETAG, std::move(bl));

  populate_with_generic_attrs(s, attrs);
  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }
  encode_delete_at_attr(delete_at, attrs);
  encode_obj_tags_attr(obj_tags.get(), attrs);

  /* Add a custom metadata to expose the information whether an object
   * is an SLO or not. Appending the attribute must be performed AFTER
   * processing any input from user in order to prohibit overwriting. */
  if (slo_info) {
    bufferlist slo_userindicator_bl;
    slo_userindicator_bl.append("True", 4);
    emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
  }

  tracepoint(rgw_op, processor_complete_enter, s->req_id.c_str());
  op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
                               (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
                               (user_data.empty() ? nullptr : &user_data), nullptr, nullptr);
  tracepoint(rgw_op, processor_complete_exit, s->req_id.c_str());

  /* produce torrent */
  if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
  {
    torrent.init(s, store);
    torrent.set_create_date(mtime);
    op_ret = torrent.complete();
    if (0 != op_ret)
    {
      ldpp_dout(this, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
      return;
    }
  }
}
3903
int RGWPostObj::verify_permission()
{
  // Intentionally a no-op: POST-form uploads are authorized inside
  // execute(), where the IAM-policy and ACL checks are performed once the
  // form fields have been parsed.
  return 0;
}
3908
void RGWPostObj::pre_exec()
{
  // Standard pre-execution hook shared by bucket/object-scoped ops.
  rgw_bucket_object_pre_exec(s);
}
3913
void RGWPostObj::execute()
{
  // Handles browser-form POST uploads (S3 POST Object / Swift FormPost).
  // Authorization happens here (not in verify_permission); the outer loop
  // then uploads each file field in the form as its own object.
  boost::optional<RGWPutObj_Compress> compressor;
  CompressorRef plugin;
  char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];

  /* Read in the data from the POST form. */
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = verify_params();
  if (op_ret < 0) {
    return;
  }

  // s3:PutObject evaluation: user policies first, then the bucket policy,
  // finally the legacy ACL when neither policy decided.
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                            boost::none,
                                            rgw::IAM::s3PutObject,
                                            rgw_obj(s->bucket, s->object));
    if (usr_policy_res == Effect::Deny) {
      op_ret = -EACCES;
      return;
    }

    rgw::IAM::Effect e = Effect::Pass;
    if (s->iam_policy) {
      e = s->iam_policy->eval(s->env, *s->auth.identity,
                                 rgw::IAM::s3PutObject,
                                 rgw_obj(s->bucket, s->object));
    }
    if (e == Effect::Deny) {
      op_ret = -EACCES;
      return;
    } else if (usr_policy_res == Effect::Pass && e == Effect::Pass && !verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
      op_ret = -EACCES;
      return;
    }
  } else if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    op_ret = -EACCES;
    return;
  }

  /* Start iteration over data fields. It's necessary as Swift's FormPost
   * is capable to handle multiple files in single form. */
  do {
    char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
    unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
    MD5 hash;
    ceph::buffer::list bl, aclbl;
    int len = 0;

    op_ret = store->check_quota(s->bucket_owner.get_id(),
                                s->bucket,
                                user_quota,
                                bucket_quota,
                                s->content_length);
    if (op_ret < 0) {
      return;
    }

    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
    if (op_ret < 0) {
      return;
    }

    if (supplied_md5_b64) {
      // Decode the client's base64 Content-MD5 for the digest check below.
      char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
      ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
      op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
                            supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
      ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl;
      if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
        op_ret = -ERR_INVALID_DIGEST;
        return;
      }

      buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
      ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
    }

    rgw_obj obj(s->bucket, get_current_filename());
    if (s->bucket_info.versioning_enabled()) {
      store->gen_rand_obj_instance_name(&obj);
    }

    rgw::AioThrottle aio(s->cct->_conf->rgw_put_obj_min_window_size);

    using namespace rgw::putobj;
    AtomicObjectProcessor processor(&aio, store, s->bucket_info,
                                    &s->dest_placement,
                                    s->bucket_owner.get_id(),
                                    *static_cast<RGWObjectCtx*>(s->obj_ctx),
                                    obj, 0, s->req_id);
    op_ret = processor.prepare();
    if (op_ret < 0) {
      return;
    }

    /* No filters by default. */
    DataProcessor *filter = &processor;

    // Encryption and compression are mutually exclusive here as well.
    std::unique_ptr<DataProcessor> encrypt;
    op_ret = get_encrypt_filter(&encrypt, filter);
    if (op_ret < 0) {
      return;
    }
    if (encrypt != nullptr) {
      filter = encrypt.get();
    } else {
      const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
          s->dest_placement);
      if (compression_type != "none") {
        plugin = Compressor::create(s->cct, compression_type);
        if (!plugin) {
          ldpp_dout(this, 1) << "Cannot load plugin for compression type "
                           << compression_type << dendl;
        } else {
          compressor.emplace(s->cct, plugin, filter);
          filter = &*compressor;
        }
      }
    }

    // Stream the current file field, hashing and size-capping as we go.
    bool again;
    do {
      ceph::bufferlist data;
      len = get_data(data, again);

      if (len < 0) {
        op_ret = len;
        return;
      }

      if (!len) {
        break;
      }

      hash.Update((const unsigned char *)data.c_str(), data.length());
      op_ret = filter->process(std::move(data), ofs);

      ofs += len;

      if (ofs > max_len) {
        op_ret = -ERR_TOO_LARGE;
        return;
      }
    } while (again);

    // flush
    op_ret = filter->process({}, ofs);
    if (op_ret < 0) {
      return;
    }

    if (len < min_len) {
      op_ret = -ERR_TOO_SMALL;
      return;
    }

    s->obj_size = ofs;


    // Re-check quota/shards against the actual uploaded size.
    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                                user_quota, bucket_quota, s->obj_size);
    if (op_ret < 0) {
      return;
    }

    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
    if (op_ret < 0) {
      return;
    }

    hash.Final(m);
    buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

    etag = calc_md5;

    if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
      op_ret = -ERR_BAD_DIGEST;
      return;
    }

    bl.append(etag.c_str(), etag.size());
    emplace_attr(RGW_ATTR_ETAG, std::move(bl));

    policy.encode(aclbl);
    emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

    const std::string content_type = get_current_content_type();
    if (! content_type.empty()) {
      ceph::bufferlist ct_bl;
      ct_bl.append(content_type.c_str(), content_type.size() + 1);
      emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
    }

    if (compressor && compressor->is_compressed()) {
      // Persist compression metadata so reads know how to decompress.
      ceph::bufferlist tmp;
      RGWCompressionInfo cs_info;
      cs_info.compression_type = plugin->get_type_name();
      cs_info.orig_size = s->obj_size;
      cs_info.blocks = move(compressor->get_compression_blocks());
      encode(cs_info, tmp);
      emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
    }

    op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(), attrs,
                                (delete_at ? *delete_at : real_time()),
                                nullptr, nullptr, nullptr, nullptr, nullptr);
    if (op_ret < 0) {
      return;
    }
  } while (is_next_file_to_upload());
}
4131
4132
4133 void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
4134 const set<string>& rmattr_names,
4135 map<int, string>& temp_url_keys)
4136 {
4137 map<string, bufferlist>::iterator iter;
4138
4139 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
4140 if (iter != add_attrs.end()) {
4141 temp_url_keys[0] = iter->second.c_str();
4142 add_attrs.erase(iter);
4143 }
4144
4145 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
4146 if (iter != add_attrs.end()) {
4147 temp_url_keys[1] = iter->second.c_str();
4148 add_attrs.erase(iter);
4149 }
4150
4151 for (const string& name : rmattr_names) {
4152 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
4153 temp_url_keys[0] = string();
4154 }
4155 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
4156 temp_url_keys[1] = string();
4157 }
4158 }
4159 }
4160
int RGWPutMetadataAccount::init_processing()
{
  // Gathers and pre-filters everything verify_permission()/execute() need:
  // the user's current attrs, the request metadata, and the special-cased
  // TempURL keys and quota settings. Returns 0 on success, negative error
  // code otherwise (also mirrored into op_ret).
  /* First, go to the base class. At the time of writing the method was
   * responsible only for initializing the quota. This isn't necessary
   * here as we are touching metadata only. I'm putting this call only
   * for the future. */
  op_ret = RGWOp::init_processing();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
                                     &acct_op_tracker);
  if (op_ret < 0) {
    return op_ret;
  }

  if (has_policy) {
    bufferlist acl_bl;
    policy.encode(acl_bl);
    attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return op_ret;
  }
  // Merge request attrs with the existing ones, honoring removals.
  prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* Try extract the TempURL-related stuff now to allow verify_permission
   * evaluate whether we need FULL_CONTROL or not. */
  filter_out_temp_url(attrs, rmattr_names, temp_url_keys);

  /* The same with quota except a client needs to be reseller admin. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
                                 &new_quota_extracted);
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4209
/* Authorize an account-metadata update.  Plain metadata needs WRITE;
 * TempURL key changes need FULL_CONTROL; quota changes are rejected here
 * on purpose (see below). */
int RGWPutMetadataAccount::verify_permission()
{
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (!verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Altering TempURL keys requires FULL_CONTROL. */
  if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
    return -EPERM;
  }

  /* We are failing this intentionally to allow system user/reseller admin
   * override in rgw_process.cc. This is the way to specify a given RGWOp
   * expects extra privileges. */
  if (new_quota_extracted) {
    return -EACCES;
  }

  return 0;
}
4234
/* Apply the account-metadata update prepared in init_processing(). */
void RGWPutMetadataAccount::execute()
{
  /* Params have been extracted earlier. See init_processing(). */
  /* Re-read the user info immediately before writing to narrow the race
   * window; acct_op_tracker carries the object version used on store. */
  RGWUserInfo new_uinfo;
  op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
                                    &acct_op_tracker);
  if (op_ret < 0) {
    return;
  }

  /* Handle the TempURL-related stuff.  Slots were filled by
   * filter_out_temp_url(); an empty string clears the key. */
  if (!temp_url_keys.empty()) {
    for (auto& pair : temp_url_keys) {
      new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
    }
  }

  /* Handle the quota extracted at the verify_permission step. */
  if (new_quota_extracted) {
    new_uinfo.user_quota = std::move(new_quota);
  }

  /* We are passing here the current (old) user info to allow the function
   * optimize-out some operations. */
  op_ret = rgw_store_user_info(store, new_uinfo, s->user,
                               &acct_op_tracker, real_time(), false, &attrs);
}
4262
4263 int RGWPutMetadataBucket::verify_permission()
4264 {
4265 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4266 return -EACCES;
4267 }
4268
4269 return 0;
4270 }
4271
4272 void RGWPutMetadataBucket::pre_exec()
4273 {
4274 rgw_bucket_object_pre_exec(s);
4275 }
4276
/* Update bucket metadata: ACL, CORS, quota, Swift versioning location,
 * website config and generic attrs, all serialized by a single
 * rgw_bucket_set_attrs() inside a raced-write retry loop. */
void RGWPutMetadataBucket::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return;
  }

  /* The placement rule is immutable after bucket creation; a differing
   * rule is rejected.  NOTE(review): -EEXIST as the error code here looks
   * unusual -- presumably translated to a dialect-specific status by the
   * REST layer; confirm against the caller. */
  if (!placement_rule.empty() &&
      placement_rule != s->bucket_info.placement_rule) {
    op_ret = -EEXIST;
    return;
  }

  /* retry_raced_bucket_write() re-runs the lambda if the bucket info was
   * concurrently modified (version mismatch). */
  op_ret = retry_raced_bucket_write(store, s, [this] {
    /* Encode special metadata first as we're using std::map::emplace under
     * the hood. This method will add the new items only if the map doesn't
     * contain such keys yet. */
    if (has_policy) {
      if (s->dialect.compare("swift") == 0) {
        /* Swift merges the incoming read/write grants into the existing
         * ACL instead of replacing it wholesale. */
        auto old_policy = \
          static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
        auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
        new_policy->filter_merge(policy_rw_mask, old_policy);
        policy = *new_policy;
      }
      buffer::list bl;
      policy.encode(bl);
      emplace_attr(RGW_ATTR_ACL, std::move(bl));
    }

    if (has_cors) {
      buffer::list bl;
      cors_config.encode(bl);
      emplace_attr(RGW_ATTR_CORS, std::move(bl));
    }

    /* It's supposed that following functions WILL NOT change any
     * special attributes (like RGW_ATTR_ACL) if they are already
     * present in attrs. */
    prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
    populate_with_generic_attrs(s, attrs);

    /* According to the Swift's behaviour and its container_quota
     * WSGI middleware implementation: anyone with write permissions
     * is able to set the bucket quota. This stays in contrast to
     * account quotas that can be set only by clients holding
     * reseller admin privileges. */
    op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
    if (op_ret < 0) {
      return op_ret;
    }

    /* An empty ver_location string disables Swift versioning. */
    if (swift_ver_location) {
      s->bucket_info.swift_ver_location = *swift_ver_location;
      s->bucket_info.swift_versioning = (!swift_ver_location->empty());
    }

    /* Web site of Swift API. */
    filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
    s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

    /* Setting attributes also stores the provided bucket info. Due
     * to this fact, the new quota settings can be serialized with
     * the same call. */
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                  &s->bucket_info.objv_tracker);
    return op_ret;
  });
}
4351
4352 int RGWPutMetadataObject::verify_permission()
4353 {
4354 // This looks to be something specific to Swift. We could add
4355 // operations like swift:PutMetadataObject to the Policy Engine.
4356 if (!verify_object_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4357 return -EACCES;
4358 }
4359
4360 return 0;
4361 }
4362
4363 void RGWPutMetadataObject::pre_exec()
4364 {
4365 rgw_bucket_object_pre_exec(s);
4366 }
4367
/* Replace an object's user metadata in place (Swift POST semantics):
 * merge new attrs against the object's current xattrs and write the
 * result without rewriting object data. */
void RGWPutMetadataObject::execute()
{
  rgw_obj obj(s->bucket, s->object);
  /* attrs: metadata to set; orig_attrs: current on-disk xattrs;
   * rmattrs: attrs to delete (filled by prepare_add_del_attrs). */
  map<string, bufferlist> attrs, orig_attrs, rmattrs;

  store->set_atomic(s->obj_ctx, obj);

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  /* check if obj exists, read orig attrs */
  op_ret = get_obj_attrs(store, s, obj, orig_attrs);
  if (op_ret < 0) {
    return;
  }

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(orig_attrs)) {
    op_ret = -ENOENT;
    return;
  }

  /* Filter currently existing attributes. */
  prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
  populate_with_generic_attrs(s, attrs);
  encode_delete_at_attr(delete_at, attrs);

  /* Swift DLO: X-Object-Manifest points at the segment prefix; validate
   * and encode it before storing. */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
}
4413
/* Delete every segment referenced by a Swift SLO (Static Large Object)
 * manifest, then the manifest object itself, through the bulk-delete
 * machinery.
 *
 * @param bl  encoded RGWSLOInfo taken from the RGW_ATTR_SLO_MANIFEST xattr.
 * @return 0 on success; -EIO if the manifest fails to decode; -EINVAL if a
 *         segment path lacks a '/' separator; -ENOMEM if the deleter cannot
 *         be allocated; otherwise the deleter's error code.
 */
int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
{
  RGWSLOInfo slo_info;
  auto bliter = bl.cbegin();
  try {
    decode(slo_info, bliter);
  } catch (buffer::error& err) {
    ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
    return -EIO;
  }

  try {
    deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
          new RGWBulkDelete::Deleter(this, store, s));
  } catch (const std::bad_alloc&) {
    return -ENOMEM;
  }

  /* Each manifest entry is "/bucket/object"; split it into the
   * (bucket, key) pair the bulk deleter expects. */
  list<RGWBulkDelete::acct_path_t> items;
  for (const auto& iter : slo_info.entries) {
    const string& path_str = iter.path;

    const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
    if (boost::string_view::npos == sep_pos) {
      return -EINVAL;
    }

    RGWBulkDelete::acct_path_t path;

    path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
    path.obj_key = url_decode(path_str.substr(sep_pos + 1));

    items.push_back(path);
  }

  /* Request removal of the manifest object itself. */
  RGWBulkDelete::acct_path_t path;
  path.bucket_name = s->bucket_name;
  path.obj_key = s->object;
  items.push_back(path);

  int ret = deleter->delete_chunk(items);
  if (ret < 0) {
    return ret;
  }

  return 0;
}
4462
/* Authorize object deletion.  Evaluation order: identity-based (user)
 * policies first, then the bucket policy; an explicit Deny from either
 * wins, an Allow from either short-circuits to success, and Pass falls
 * through to the ACL-based check below. */
int RGWDeleteObj::verify_permission()
{
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    /* The IAM action differs for versioned deletes (DeleteObjectVersion
     * when an explicit version/instance is addressed). */
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                             boost::none,
                                             s->object.instance.empty() ?
                                             rgw::IAM::s3DeleteObject :
                                             rgw::IAM::s3DeleteObjectVersion,
                                             ARN(s->bucket, s->object.name));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    rgw::IAM::Effect r = Effect::Pass;
    if (s->iam_policy) {
      r = s->iam_policy->eval(s->env, *s->auth.identity,
                              s->object.instance.empty() ?
                              rgw::IAM::s3DeleteObject :
                              rgw::IAM::s3DeleteObjectVersion,
                              ARN(s->bucket, s->object.name));
    }
    if (r == Effect::Allow)
      return 0;
    else if (r == Effect::Deny)
      return -EACCES;
    else if (usr_policy_res == Effect::Allow)
      return 0;
    /* Both policies passed -> fall through to the ACL check. */
  }

  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Deleting a specific version of an MFA-protected bucket requires a
   * verified MFA token on the request. */
  if (s->bucket_info.mfa_enabled() &&
      !s->object.instance.empty() &&
      !s->mfa_verified) {
    ldpp_dout(this, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl;
    return -ERR_MFA_REQUIRED;
  }

  return 0;
}
4505
4506 void RGWDeleteObj::pre_exec()
4507 {
4508 rgw_bucket_object_pre_exec(s);
4509 }
4510
/* Delete an object.  Handles three flavours visible in this body:
 *  - Swift multipart_delete: dispatch to the SLO-manifest bulk deleter;
 *  - Swift object versioning: try restoring the previous version first;
 *  - the regular delete path (with S3 versioning status / delete markers).
 */
void RGWDeleteObj::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs;


  if (!s->object.empty()) {
    /* Head attrs are needed only for expiration checks or to find an
     * SLO manifest; skip the read otherwise. */
    if (need_object_expiration() || multipart_delete) {
      /* check if obj exists, read orig attrs */
      op_ret = get_obj_attrs(store, s, obj, attrs);
      if (op_ret < 0) {
        return;
      }
    }

    if (multipart_delete) {
      const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);

      if (slo_attr != attrs.end()) {
        op_ret = handle_slo_manifest(slo_attr->second);
        if (op_ret < 0) {
          ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
        }
      } else {
        /* multipart-manifest=delete on a non-SLO object is an error. */
        op_ret = -ERR_NOT_SLO_MANIFEST;
      }

      return;
    }

    RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
    obj_ctx->set_atomic(obj);

    /* Swift versioning: if an archived copy exists, restore it in place
     * of the object being deleted. */
    bool ver_restored = false;
    op_ret = store->swift_versioning_restore(*s->sysobj_ctx, *obj_ctx, s->bucket_owner.get_id(),
                                             s->bucket_info, obj, ver_restored);
    if (op_ret < 0) {
      return;
    }

    if (!ver_restored) {
      /* Swift's versioning mechanism hasn't found any previous version of
       * the object that could be restored. This means we should proceed
       * with the regular delete path. */
      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
      RGWRados::Object::Delete del_op(&del_target);

      op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
                                            &del_op.params.marker_version_id);
      if (op_ret < 0) {
        return;
      }

      del_op.params.bucket_owner = s->bucket_owner.get_id();
      del_op.params.versioning_status = s->bucket_info.versioning_status();
      del_op.params.obj_owner = s->owner;
      del_op.params.unmod_since = unmod_since;
      del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */

      op_ret = del_op.delete_obj();
      if (op_ret >= 0) {
        /* Propagate versioning results to the response layer. */
        delete_marker = del_op.result.delete_marker;
        version_id = del_op.result.version_id;
      }

      /* Check whether the object has expired. Swift API documentation
       * stands that we should return 404 Not Found in such case. */
      if (need_object_expiration() && object_is_expired(attrs)) {
        op_ret = -ENOENT;
        return;
      }
    }

    /* A raced delete means someone else removed it -- treat as success. */
    if (op_ret == -ECANCELED) {
      op_ret = 0;
    }
    if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
      op_ret = 0;
    }
  } else {
    op_ret = -EINVAL;
  }
}
4604
4605 bool RGWCopyObj::parse_copy_location(const boost::string_view& url_src,
4606 string& bucket_name,
4607 rgw_obj_key& key)
4608 {
4609 boost::string_view name_str;
4610 boost::string_view params_str;
4611
4612 size_t pos = url_src.find('?');
4613 if (pos == string::npos) {
4614 name_str = url_src;
4615 } else {
4616 name_str = url_src.substr(0, pos);
4617 params_str = url_src.substr(pos + 1);
4618 }
4619
4620 boost::string_view dec_src{name_str};
4621 if (dec_src[0] == '/')
4622 dec_src.remove_prefix(1);
4623
4624 pos = dec_src.find('/');
4625 if (pos ==string::npos)
4626 return false;
4627
4628 boost::string_view bn_view{dec_src.substr(0, pos)};
4629 bucket_name = std::string{bn_view.data(), bn_view.size()};
4630
4631 boost::string_view kn_view{dec_src.substr(pos + 1)};
4632 key.name = std::string{kn_view.data(), kn_view.size()};
4633
4634 if (key.name.empty()) {
4635 return false;
4636 }
4637
4638 if (! params_str.empty()) {
4639 RGWHTTPArgs args;
4640 args.set(params_str.to_string());
4641 args.parse();
4642
4643 key.instance = args.get("versionId", NULL);
4644 }
4645
4646 return true;
4647 }
4648
/* Authorize a server-side copy: read access on the source object (IAM
 * policy, then ACL) and write access on the destination bucket (bucket
 * policy, then ACL).  Also resolves source/dest bucket info and marks the
 * objects atomic/prefetch. */
int RGWCopyObj::verify_permission()
{
  RGWAccessControlPolicy src_acl(s->cct);
  boost::optional<Policy> src_policy;
  op_ret = get_params();
  if (op_ret < 0)
    return op_ret;

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return op_ret;
  }
  map<string, bufferlist> src_attrs;

  if (s->bucket_instance_id.empty()) {
    op_ret = store->get_bucket_info(*s->sysobj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
  } else {
    /* will only happen in intra region sync where the source and dest bucket is the same */
    op_ret = store->get_bucket_instance_info(*s->sysobj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
  }
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_BUCKET;
    }
    return op_ret;
  }

  src_bucket = src_bucket_info.bucket;

  /* get buckets info (source and dest) */
  /* Source-side checks only apply when the copy source is local; for
   * cross-zone sync the source is fetched remotely. */
  if (s->local_source && source_zone.empty()) {
    rgw_obj src_obj(src_bucket, src_object);
    store->set_atomic(s->obj_ctx, src_obj);
    store->set_prefetch_data(s->obj_ctx, src_obj);

    rgw_placement_rule src_placement;

    /* check source object permissions */
    op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl, &src_placement.storage_class,
                             src_policy, src_bucket, src_object);
    if (op_ret < 0) {
      return op_ret;
    }

    /* follow up on previous checks that required reading source object head */
    if (need_to_check_storage_class) {
      src_placement.inherit_from(src_bucket_info.placement_rule);

      op_ret = check_storage_class(src_placement);
      if (op_ret < 0) {
        return op_ret;
      }
    }

    /* admin request overrides permission checks */
    if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
      if (src_policy) {
        /* Bucket policy on the source: Deny blocks, Pass falls back to
         * the source object's ACL. */
        auto e = src_policy->eval(s->env, *s->auth.identity,
                                  src_object.instance.empty() ?
                                  rgw::IAM::s3GetObject :
                                  rgw::IAM::s3GetObjectVersion,
                                  ARN(src_obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !src_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
                                              RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!src_acl.verify_permission(this, *s->auth.identity,
                                            s->perm_mask,
                                            RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  RGWAccessControlPolicy dest_bucket_policy(s->cct);
  map<string, bufferlist> dest_attrs;

  if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
                                                           or intra region sync */
    dest_bucket_info = src_bucket_info;
    dest_attrs = src_attrs;
  } else {
    op_ret = store->get_bucket_info(*s->sysobj_ctx, dest_tenant_name, dest_bucket_name,
                                    dest_bucket_info, nullptr, &dest_attrs);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return op_ret;
    }
  }

  dest_bucket = dest_bucket_info.bucket;

  rgw_obj dest_obj(dest_bucket, dest_object);
  store->set_atomic(s->obj_ctx, dest_obj);

  /* check dest bucket permissions */
  op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
                              &dest_bucket_policy, dest_bucket);
  if (op_ret < 0) {
    return op_ret;
  }
  auto dest_iam_policy = get_iam_policy_from_attr(s->cct, store, dest_attrs, dest_bucket.tenant);
  /* admin request overrides permission checks */
  /* NOTE(review): this consults dest_policy (the op's destination-object
   * ACL member) for the owner, while the checks below use
   * dest_bucket_policy; and a non-admin with no bucket policy attached
   * appears to skip the ACL check entirely (the else-if binds to the
   * admin test). Looks suspicious -- confirm against upstream history
   * before relying on it. */
  if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id())){
    if (dest_iam_policy != boost::none) {
      rgw_add_to_iam_environment(s->env, "s3:x-amz-copy-source", copy_source);
      rgw_add_to_iam_environment(s->env, "s3:x-amz-metadata-directive", md_directive);

      auto e = dest_iam_policy->eval(s->env, *s->auth.identity,
                                     rgw::IAM::s3PutObject,
                                     ARN(dest_obj));
      if (e == Effect::Deny) {
        return -EACCES;
      } else if (e == Effect::Pass &&
                 ! dest_bucket_policy.verify_permission(this,
                                                        *s->auth.identity,
                                                        s->perm_mask,
                                                        RGW_PERM_WRITE)){
        return -EACCES;
      }
    }
  } else if (! dest_bucket_policy.verify_permission(this, *s->auth.identity, s->perm_mask,
                                                    RGW_PERM_WRITE)) {
    return -EACCES;
  }

  op_ret = init_dest_policy();
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4787
4788
/* Shared pre-copy setup: parse the conditional-copy timestamps
 * (If-[Un]Modified-Since), encode the destination ACL, and collect the
 * request's metadata attrs.  Returns 0 or a negative error (also stored
 * in op_ret). */
int RGWCopyObj::init_common()
{
  if (if_mod) {
    if (parse_time(if_mod, &mod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    /* mod_ptr/unmod_ptr stay null when the header is absent, which is how
     * copy_obj() distinguishes "no condition" from a parsed time. */
    mod_ptr = &mod_time;
  }

  if (if_unmod) {
    if (parse_time(if_unmod, &unmod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    unmod_ptr = &unmod_time;
  }

  /* dest_policy was prepared by init_dest_policy() in verify_permission(). */
  bufferlist aclbl;
  dest_policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return op_ret;
  }
  populate_with_generic_attrs(s, attrs);

  return 0;
}
4819
4820 static void copy_obj_progress_cb(off_t ofs, void *param)
4821 {
4822 RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
4823 op->progress_cb(ofs);
4824 }
4825
4826 void RGWCopyObj::progress_cb(off_t ofs)
4827 {
4828 if (!s->cct->_conf->rgw_copy_obj_progress)
4829 return;
4830
4831 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
4832 return;
4833
4834 send_partial_response(ofs);
4835
4836 last_ofs = ofs;
4837 }
4838
4839 void RGWCopyObj::pre_exec()
4840 {
4841 rgw_bucket_object_pre_exec(s);
4842 }
4843
/* Perform the server-side copy prepared by verify_permission() and
 * init_common(): pick/generate the destination version, run Swift
 * version archiving on the destination, then hand everything to
 * RGWRados::copy_obj(). */
void RGWCopyObj::execute()
{
  if (init_common() < 0)
    return;

  rgw_obj src_obj(src_bucket, src_object);
  rgw_obj dst_obj(dest_bucket, dest_object);

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  /* Explicit version wins; otherwise a random instance is generated on
   * versioning-enabled destination buckets. */
  if ( ! version_id.empty()) {
    dst_obj.key.set_instance(version_id);
  } else if (dest_bucket_info.versioning_enabled()) {
    store->gen_rand_obj_instance_name(&dst_obj);
  }

  obj_ctx.set_atomic(src_obj);
  obj_ctx.set_atomic(dst_obj);

  encode_delete_at_attr(delete_at, attrs);

  bool high_precision_time = (s->system_request);

  /* Handle object versioning of Swift API. In case of copying to remote this
   * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
  op_ret = store->swift_versioning_copy(obj_ctx,
                                        dest_bucket_info.owner,
                                        dest_bucket_info,
                                        dst_obj);
  if (op_ret < 0) {
    return;
  }

  op_ret = store->copy_obj(obj_ctx,
                           s->user->user_id,
                           &s->info,
                           source_zone,
                           dst_obj,
                           src_obj,
                           dest_bucket_info,
                           src_bucket_info,
                           s->dest_placement,
                           &src_mtime,
                           &mtime,
                           mod_ptr,
                           unmod_ptr,
                           high_precision_time,
                           if_match,
                           if_nomatch,
                           attrs_mod,
                           copy_if_newer,
                           attrs, RGWObjCategory::Main,
                           olh_epoch,
                           (delete_at ? *delete_at : real_time()),
                           (version_id.empty() ? NULL : &version_id),
                           &s->req_id, /* use req_id as tag */
                           &etag,
                           copy_obj_progress_cb, (void *)this
    );
}
4903
/* Authorize reading the ACL of the addressed object (or of the bucket
 * when no object is named). */
int RGWGetACLs::verify_permission()
{
  bool perm;
  if (!s->object.empty()) {
    auto iam_action = s->object.instance.empty() ?
      rgw::IAM::s3GetObjectAcl :
      rgw::IAM::s3GetObjectVersionAcl;

    /* If any attached policy conditions on existing object tags, load the
     * object's tags into the IAM environment before evaluation.
     * NOTE(review): the helper's return value is ignored here --
     * presumably best-effort (a tag-load failure just leaves the
     * condition unmatched); confirm. */
    if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
      rgw_obj obj = rgw_obj(s->bucket, s->object);
      rgw_iam_add_existing_objtags(store, s, obj, iam_action);
    }
    if (! s->iam_user_policies.empty()) {
      for (auto& user_policy : s->iam_user_policies) {
        if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
          rgw_obj obj = rgw_obj(s->bucket, s->object);
          rgw_iam_add_existing_objtags(store, s, obj, iam_action);
        }
      }
    }
    perm = verify_object_permission(this, s, iam_action);
  } else {
    if (!s->bucket_exists) {
      return -ERR_NO_SUCH_BUCKET;
    }
    perm = verify_bucket_permission(this, s, rgw::IAM::s3GetBucketAcl);
  }
  if (!perm)
    return -EACCES;

  return 0;
}
4936
4937 void RGWGetACLs::pre_exec()
4938 {
4939 rgw_bucket_object_pre_exec(s);
4940 }
4941
4942 void RGWGetACLs::execute()
4943 {
4944 stringstream ss;
4945 RGWAccessControlPolicy* const acl = \
4946 (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get());
4947 RGWAccessControlPolicy_S3* const s3policy = \
4948 static_cast<RGWAccessControlPolicy_S3*>(acl);
4949 s3policy->to_xml(ss);
4950 acls = ss.str();
4951 }
4952
4953
4954
/* Authorize writing the ACL of the addressed object (or bucket).  The
 * canned ACL and any grant headers are exported into the IAM environment
 * so policies can condition on them. */
int RGWPutACLs::verify_permission()
{
  bool perm;

  rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);

  rgw_add_grant_to_iam_environment(s->env, s);
  if (!s->object.empty()) {
    auto iam_action = s->object.instance.empty() ? rgw::IAM::s3PutObjectAcl : rgw::IAM::s3PutObjectVersionAcl;
    auto obj = rgw_obj(s->bucket, s->object);
    /* Load existing object tags for tag-conditioned policies.
     * NOTE(review): op_ret captures the helper's result but is not
     * checked here (and execute() overwrites it) -- presumably a
     * best-effort load; confirm. */
    op_ret = rgw_iam_add_existing_objtags(store, s, obj, iam_action);
    perm = verify_object_permission(this, s, iam_action);
  } else {
    perm = verify_bucket_permission(this, s, rgw::IAM::s3PutBucketAcl);
  }
  if (!perm)
    return -EACCES;

  return 0;
}
4975
4976 int RGWGetLC::verify_permission()
4977 {
4978 bool perm;
4979 perm = verify_bucket_permission(this, s, rgw::IAM::s3GetLifecycleConfiguration);
4980 if (!perm)
4981 return -EACCES;
4982
4983 return 0;
4984 }
4985
4986 int RGWPutLC::verify_permission()
4987 {
4988 bool perm;
4989 perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
4990 if (!perm)
4991 return -EACCES;
4992
4993 return 0;
4994 }
4995
4996 int RGWDeleteLC::verify_permission()
4997 {
4998 bool perm;
4999 perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
5000 if (!perm)
5001 return -EACCES;
5002
5003 return 0;
5004 }
5005
5006 void RGWPutACLs::pre_exec()
5007 {
5008 rgw_bucket_object_pre_exec(s);
5009 }
5010
5011 void RGWGetLC::pre_exec()
5012 {
5013 rgw_bucket_object_pre_exec(s);
5014 }
5015
5016 void RGWPutLC::pre_exec()
5017 {
5018 rgw_bucket_object_pre_exec(s);
5019 }
5020
5021 void RGWDeleteLC::pre_exec()
5022 {
5023 rgw_bucket_object_pre_exec(s);
5024 }
5025
/* Apply a new ACL to the addressed object or bucket.  The ACL can come
 * from a request body (XML), a canned ACL, or grant headers; it is parsed,
 * capped in grant count, rebuilt against real user records, and stored as
 * the RGW_ATTR_ACL xattr. */
void RGWPutACLs::execute()
{
  bufferlist bl;

  RGWAccessControlPolicy_S3 *policy = NULL;
  RGWACLXMLParser_S3 parser(s->cct);
  RGWAccessControlPolicy_S3 new_policy(s->cct);
  stringstream ss;
  rgw_obj obj;

  op_ret = 0; /* XXX redundant? */

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }


  /* The owner of the new ACL is inherited from the existing one. */
  RGWAccessControlPolicy* const existing_policy = \
    (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());

  owner = existing_policy->get_owner();

  op_ret = get_params();
  if (op_ret < 0) {
    if (op_ret == -ERANGE) {
      ldpp_dout(this, 4) << "The size of request xml data is larger than the max limitation, data size = "
                         << s->length << dendl;
      op_ret = -ERR_MALFORMED_XML;
      s->err.message = "The XML you provided was larger than the maximum " +
                       std::to_string(s->cct->_conf->rgw_max_put_param_size) +
                       " bytes allowed.";
    }
    return;
  }

  char* buf = data.c_str();
  ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;

  /* A canned ACL and an XML body are mutually exclusive. */
  if (!s->canned_acl.empty() && data.length() > 0) {
    op_ret = -EINVAL;
    return;
  }

  /* For canned ACLs / grant headers, synthesize the XML body and fall
   * through to the common parsing path. */
  if (!s->canned_acl.empty() || s->has_acl_header) {
    op_ret = get_policy_from_state(store, s, ss);
    if (op_ret < 0)
      return;

    data.clear();
    data.append(ss.str());
  }

  if (!parser.parse(data.c_str(), data.length(), 1)) {
    op_ret = -EINVAL;
    return;
  }
  policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
  if (!policy) {
    op_ret = -EINVAL;
    return;
  }

  /* Enforce the maximum grant count (config override, else 100 -- the
   * same cap AWS documents for an ACL). */
  const RGWAccessControlList& req_acl = policy->get_acl();
  const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
#define ACL_GRANTS_MAX_NUM      100
  int max_num = s->cct->_conf->rgw_acl_grants_max_num;
  if (max_num < 0) {
    max_num = ACL_GRANTS_MAX_NUM;
  }

  int grants_num = req_grant_map.size();
  if (grants_num > max_num) {
    ldpp_dout(this, 4) << "An acl can have up to " << max_num
                       << " grants, request acl grants num: " << grants_num << dendl;
    op_ret = -ERR_MALFORMED_ACL_ERROR;
    s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
                     + std::to_string(max_num)
                     + " grants allowed in an acl.";
    return;
  }

  // forward bucket acl requests to meta master zone
  if (s->object.empty() && !store->svc.zone->is_meta_master()) {
    bufferlist in_data;
    // include acl data unless it was generated from a canned_acl
    if (s->canned_acl.empty()) {
      in_data.append(data);
    }
    op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
    ldpp_dout(this, 15) << "Old AccessControlPolicy";
    policy->to_xml(*_dout);
    *_dout << dendl;
  }

  /* Resolve grantees against real user records and re-own the policy. */
  op_ret = policy->rebuild(store, &owner, new_policy);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
    ldpp_dout(this, 15) << "New AccessControlPolicy:";
    new_policy.to_xml(*_dout);
    *_dout << dendl;
  }

  new_policy.encode(bl);
  map<string, bufferlist> attrs;

  if (!s->object.empty()) {
    obj = rgw_obj(s->bucket, s->object);
    store->set_atomic(s->obj_ctx, obj);
    //if instance is empty, we should modify the latest object
    op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
  } else {
    attrs = s->bucket_attrs;
    attrs[RGW_ATTR_ACL] = bl;
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  }
  if (op_ret == -ECANCELED) {
    op_ret = 0; /* lost a race, but it's ok because acls are immutable */
  }
}
5155
/* Store a bucket lifecycle configuration: require and verify the
 * Content-MD5 header against the body, parse the XML, rebuild/validate
 * the configuration, and persist it via the lifecycle subsystem. */
void RGWPutLC::execute()
{
  bufferlist bl;

  RGWLifecycleConfiguration_S3 config(s->cct);
  RGWXMLParser parser;
  RGWLifecycleConfiguration_S3 new_config(s->cct);

  /* Content-MD5 is mandatory for this request (matches S3). */
  content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
  if (content_md5 == nullptr) {
    op_ret = -ERR_INVALID_REQUEST;
    s->err.message = "Missing required header for this request: Content-MD5";
    ldpp_dout(this, 5) << s->err.message << dendl;
    return;
  }

  std::string content_md5_bin;
  try {
    content_md5_bin = rgw::from_base64(boost::string_view(content_md5));
  } catch (...) {
    s->err.message = "Request header Content-MD5 contains character "
                     "that is not base64 encoded.";
    ldpp_dout(this, 5) << s->err.message << dendl;
    op_ret = -ERR_BAD_DIGEST;
    return;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0)
    return;

  char* buf = data.c_str();
  ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;

  /* Verify the body against the client-supplied digest before parsing. */
  MD5 data_hash;
  unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
  data_hash.Update(reinterpret_cast<const unsigned char*>(buf), data.length());
  data_hash.Final(data_hash_res);

  if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
    op_ret = -ERR_BAD_DIGEST;
    s->err.message = "The Content-MD5 you specified did not match what we received.";
    ldpp_dout(this, 5) << s->err.message
                       << " Specified content md5: " << content_md5
                       << ", calculated content md5: " << data_hash_res
                       << dendl;
    return;
  }

  if (!parser.parse(buf, data.length(), 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  try {
    RGWXMLDecoder::decode_xml("LifecycleConfiguration", config, &parser);
  } catch (RGWXMLDecoder::err& err) {
    ldpp_dout(this, 5) << "Bad lifecycle configuration: " << err << dendl;
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  /* rebuild() validates the decoded rules and produces the canonical
   * configuration that gets stored. */
  op_ret = config.rebuild(store, new_config);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
    XMLFormatter xf;
    new_config.dump_xml(&xf);
    stringstream ss;
    xf.flush(ss);
    ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
  }

  op_ret = store->get_lc()->set_bucket_config(s->bucket_info, s->bucket_attrs, &new_config);
  if (op_ret < 0) {
    return;
  }
  return;
}
5241
5242 void RGWDeleteLC::execute()
5243 {
5244 map<string, bufferlist> attrs = s->bucket_attrs;
5245 attrs.erase(RGW_ATTR_LC);
5246 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
5247 &s->bucket_info.objv_tracker);
5248 if (op_ret < 0) {
5249 ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket="
5250 << s->bucket.name << " returned err=" << op_ret << dendl;
5251 return;
5252 }
5253
5254 op_ret = store->get_lc()->remove_bucket_config(s->bucket_info, s->bucket_attrs);
5255 if (op_ret < 0) {
5256 return;
5257 }
5258 return;
5259 }
5260
5261 int RGWGetCORS::verify_permission()
5262 {
5263 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
5264 }
5265
5266 void RGWGetCORS::execute()
5267 {
5268 op_ret = read_bucket_cors();
5269 if (op_ret < 0)
5270 return ;
5271
5272 if (!cors_exist) {
5273 ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
5274 op_ret = -ERR_NO_CORS_FOUND;
5275 return;
5276 }
5277 }
5278
5279 int RGWPutCORS::verify_permission()
5280 {
5281 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
5282 }
5283
5284 void RGWPutCORS::execute()
5285 {
5286 rgw_raw_obj obj;
5287
5288 op_ret = get_params();
5289 if (op_ret < 0)
5290 return;
5291
5292 if (!store->svc.zone->is_meta_master()) {
5293 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
5294 if (op_ret < 0) {
5295 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5296 return;
5297 }
5298 }
5299
5300 op_ret = retry_raced_bucket_write(store, s, [this] {
5301 map<string, bufferlist> attrs = s->bucket_attrs;
5302 attrs[RGW_ATTR_CORS] = cors_bl;
5303 return rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
5304 });
5305 }
5306
5307 int RGWDeleteCORS::verify_permission()
5308 {
5309 // No separate delete permission
5310 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
5311 }
5312
// Delete the bucket's CORS configuration by erasing the CORS xattr.
// The whole read-check-write sequence runs inside retry_raced_bucket_write
// so a racing bucket-instance update triggers a clean retry; op_ret is
// assigned inside the lambda and also returned so the retry helper can see
// the result.
void RGWDeleteCORS::execute()
{
  op_ret = retry_raced_bucket_write(store, s, [this] {
      op_ret = read_bucket_cors();
      if (op_ret < 0)
	return op_ret;

      if (!cors_exist) {
	// Nothing to delete; surface ENOENT to the caller.
	ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
	op_ret = -ENOENT;
	return op_ret;
      }

      // Rewrite the bucket attrs without the CORS entry.
      map<string, bufferlist> attrs = s->bucket_attrs;
      attrs.erase(RGW_ATTR_CORS);
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
				    &s->bucket_info.objv_tracker);
      if (op_ret < 0) {
	ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket.name
			   << " returned err=" << op_ret << dendl;
      }
      return op_ret;
    });
}
5337
// Fill the allowed/exposed header lists and max-age for the CORS rule
// matched earlier by validate_cors_request() (stored in `rule`).
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
  get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
5341
5342 int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
5343 rule = cc->host_name_rule(origin);
5344 if (!rule) {
5345 ldpp_dout(this, 10) << "There is no cors rule present for " << origin << dendl;
5346 return -ENOENT;
5347 }
5348
5349 if (!validate_cors_rule_method(rule, req_meth)) {
5350 return -ENOENT;
5351 }
5352
5353 if (!validate_cors_rule_header(rule, req_hdrs)) {
5354 return -ENOENT;
5355 }
5356
5357 return 0;
5358 }
5359
// Handle a CORS preflight (OPTIONS) request: load the bucket's CORS config,
// validate the mandatory Origin and Access-Control-Request-Method headers,
// then match them against the configuration. The check order determines
// which error the client sees, so it must not be rearranged.
void RGWOptionsCORS::execute()
{
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  origin = s->info.env->get("HTTP_ORIGIN");
  if (!origin) {
    ldpp_dout(this, 0) << "Missing mandatory Origin header" << dendl;
    op_ret = -EINVAL;
    return;
  }
  req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    ldpp_dout(this, 0) << "Missing mandatory Access-control-request-method header" << dendl;
    op_ret = -EINVAL;
    return;
  }
  // Header validation happens before the existence check so malformed
  // preflights get EINVAL even on buckets without CORS.
  if (!cors_exist) {
    ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
  op_ret = validate_cors_request(&bucket_cors);
  if (!rule) {
    // No rule matched the Origin: clear the echo-back pointers so the
    // response carries no CORS headers.
    origin = req_meth = NULL;
    return;
  }
  return;
}
5391
5392 int RGWGetRequestPayment::verify_permission()
5393 {
5394 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
5395 }
5396
// Shared pre-exec hook for bucket/object ops.
void RGWGetRequestPayment::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5401
// Report the bucket's Requester Pays flag straight from the cached
// bucket info; cannot fail.
void RGWGetRequestPayment::execute()
{
  requester_pays = s->bucket_info.requester_pays;
}
5406
5407 int RGWSetRequestPayment::verify_permission()
5408 {
5409 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
5410 }
5411
// Shared pre-exec hook for bucket/object ops.
void RGWSetRequestPayment::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5416
5417 void RGWSetRequestPayment::execute()
5418 {
5419 op_ret = get_params();
5420
5421 if (op_ret < 0)
5422 return;
5423
5424 s->bucket_info.requester_pays = requester_pays;
5425 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
5426 &s->bucket_attrs);
5427 if (op_ret < 0) {
5428 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
5429 << " returned err=" << op_ret << dendl;
5430 return;
5431 }
5432 }
5433
// Authorize initiating a multipart upload. Multipart initiation is gated on
// s3:PutObject for the target object.
// Precedence: an explicit Deny from user or bucket policy wins; an explicit
// Allow from either wins next; if both evaluate to Pass, fall back to the
// ACL-based bucket write check.
int RGWInitMultipart::verify_permission()
{
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    // Identity-based (user) policies are evaluated first.
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                              boost::none,
                                              rgw::IAM::s3PutObject,
                                              rgw_obj(s->bucket, s->object));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    // Then the resource (bucket) policy, if one is attached.
    rgw::IAM::Effect e = Effect::Pass;
    if (s->iam_policy) {
      e = s->iam_policy->eval(s->env, *s->auth.identity,
				 rgw::IAM::s3PutObject,
				 rgw_obj(s->bucket, s->object));
    }
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    } else if (usr_policy_res == Effect::Allow) {
      return 0;
    }
  }

  // No policy decision: fall back to the legacy ACL write permission.
  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
5466
// Shared pre-exec hook for bucket/object ops.
void RGWInitMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5471
5472 void RGWInitMultipart::execute()
5473 {
5474 bufferlist aclbl;
5475 map<string, bufferlist> attrs;
5476 rgw_obj obj;
5477
5478 if (get_params() < 0)
5479 return;
5480
5481 if (s->object.empty())
5482 return;
5483
5484 policy.encode(aclbl);
5485 attrs[RGW_ATTR_ACL] = aclbl;
5486
5487 populate_with_generic_attrs(s, attrs);
5488
5489 /* select encryption mode */
5490 op_ret = prepare_encryption(attrs);
5491 if (op_ret != 0)
5492 return;
5493
5494 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
5495 if (op_ret < 0) {
5496 return;
5497 }
5498
5499 do {
5500 char buf[33];
5501 gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
5502 upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
5503 upload_id.append(buf);
5504
5505 string tmp_obj_name;
5506 RGWMPObj mp(s->object.name, upload_id);
5507 tmp_obj_name = mp.get_meta();
5508
5509 obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
5510 // the meta object will be indexed with 0 size, we c
5511 obj.set_in_extra_data(true);
5512 obj.index_hash_source = s->object.name;
5513
5514 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
5515 op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
5516
5517 RGWRados::Object::Write obj_op(&op_target);
5518
5519 obj_op.meta.owner = s->owner.get_id();
5520 obj_op.meta.category = RGWObjCategory::MultiMeta;
5521 obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;
5522
5523 multipart_upload_info upload_info;
5524 upload_info.dest_placement = s->dest_placement;
5525
5526 bufferlist bl;
5527 encode(upload_info, bl);
5528 obj_op.meta.data = &bl;
5529
5530 op_ret = obj_op.write_meta(bl.length(), 0, attrs);
5531 } while (op_ret == -EEXIST);
5532 }
5533
// Authorize completing a multipart upload; like initiation, completion is
// gated on s3:PutObject for the target object. Deny from either user or
// bucket policy wins, then Allow, then the ACL fallback.
int RGWCompleteMultipart::verify_permission()
{
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                              boost::none,
                                              rgw::IAM::s3PutObject,
                                              rgw_obj(s->bucket, s->object));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    rgw::IAM::Effect e = Effect::Pass;
    if (s->iam_policy) {
      e = s->iam_policy->eval(s->env, *s->auth.identity,
				 rgw::IAM::s3PutObject,
				 rgw_obj(s->bucket, s->object));
    }
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    } else if (usr_policy_res == Effect::Allow) {
      return 0;
    }
  }

  // No policy decision: fall back to the legacy ACL write permission.
  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
5566
// Shared pre-exec hook for bucket/object ops.
void RGWCompleteMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5571
// Complete a multipart upload: parse the client's part list, validate it
// against the uploaded parts, stitch the per-part manifests (and compression
// maps) into one object, compute the S3-style multipart ETag
// (md5-of-part-md5s + "-<nparts>"), write the final head object, and remove
// the multipart meta object. A cls exclusive lock on the meta object
// serializes racing completions/retries. Left as-is: the control flow is
// long and order-critical.
void RGWCompleteMultipart::execute()
{
  RGWMultiCompleteUpload *parts;
  map<int, string>::iterator iter;
  RGWMultiXMLParser parser;
  string meta_oid;
  map<uint32_t, RGWUploadPartInfo> obj_parts;
  map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
  map<string, bufferlist> attrs;
  off_t ofs = 0;
  MD5 hash;
  char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
  char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
  bufferlist etag_bl;
  rgw_obj meta_obj;
  rgw_obj target_obj;
  RGWMPObj mp;
  RGWObjManifest manifest;
  uint64_t olh_epoch = 0;

  op_ret = get_params();
  if (op_ret < 0)
    return;
  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return;
  }

  // An empty body is malformed XML per S3 semantics.
  if (!data.length()) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (!parser.init()) {
    op_ret = -EIO;
    return;
  }

  if (!parser.parse(data.c_str(), data.length(), 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
  if (!parts || parts->parts.empty()) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  // Reject requests listing more parts than the configured limit.
  if ((int)parts->parts.size() >
      s->cct->_conf->rgw_multipart_part_upload_limit) {
    op_ret = -ERANGE;
    return;
  }

  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  int total_parts = 0;
  int handled_parts = 0;
  int max_parts = 1000;        // page size for listing uploaded parts
  int marker = 0;
  bool truncated;
  RGWCompressionInfo cs_info;
  bool compressed = false;
  uint64_t accounted_size = 0;

  uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;

  list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */

  bool versioned_object = s->bucket_info.versioning_enabled();

  iter = parts->parts.begin();

  meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
  meta_obj.set_in_extra_data(true);
  meta_obj.index_hash_source = s->object.name;

  /*take a cls lock on meta_obj to prevent racing completions (or retries)
    from deleting the parts*/
  rgw_pool meta_pool;
  rgw_raw_obj raw_obj;
  int max_lock_secs_mp =
    s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
  utime_t dur(max_lock_secs_mp, 0);

  store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
  store->get_obj_data_pool((s->bucket_info).placement_rule,
			   meta_obj,&meta_pool);
  // NOTE(review): open_pool_ctx()'s return value is ignored here; on failure
  // the subsequent try_lock would operate on an invalid ioctx — confirm.
  store->open_pool_ctx(meta_pool, serializer.ioctx);

  op_ret = serializer.try_lock(raw_obj.oid, dur);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "failed to acquire lock" << dendl;
    op_ret = -ERR_INTERNAL_ERROR;
    s->err.message = "This multipart completion is already in progress";
    return;
  }

  op_ret = get_obj_attrs(store, s, meta_obj, attrs);

  if (op_ret < 0) {
    ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
		       << " ret=" << op_ret << dendl;
    return;
  }

  // Walk the uploaded parts page by page, pairing each against the client's
  // requested part list.
  do {
    op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
				  marker, obj_parts, &marker, &truncated);
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_UPLOAD;
    }
    if (op_ret < 0)
      return;

    total_parts += obj_parts.size();
    if (!truncated && total_parts != (int)parts->parts.size()) {
      ldpp_dout(this, 0) << "NOTICE: total parts mismatch: have: " << total_parts
		       << " expected: " << parts->parts.size() << dendl;
      op_ret = -ERR_INVALID_PART;
      return;
    }

    for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
      uint64_t part_size = obj_iter->second.accounted_size;
      // Every part except the last must meet the minimum part size.
      if (handled_parts < (int)parts->parts.size() - 1 &&
          part_size < min_part_size) {
        op_ret = -ERR_TOO_SMALL;
        return;
      }

      char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
      // Requested and uploaded part numbers must line up exactly.
      if (iter->first != (int)obj_iter->first) {
        ldpp_dout(this, 0) << "NOTICE: parts num mismatch: next requested: "
			 << iter->first << " next uploaded: "
			 << obj_iter->first << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }
      string part_etag = rgw_string_unquote(iter->second);
      if (part_etag.compare(obj_iter->second.etag) != 0) {
        ldpp_dout(this, 0) << "NOTICE: etag mismatch: part: " << iter->first
			 << " etag: " << iter->second << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }

      // Fold this part's binary MD5 into the final multipart ETag.
      hex_to_buf(obj_iter->second.etag.c_str(), petag,
		CEPH_CRYPTO_MD5_DIGESTSIZE);
      hash.Update((const unsigned char *)petag, sizeof(petag));

      RGWUploadPartInfo& obj_part = obj_iter->second;

      /* update manifest for part */
      string oid = mp.get_part(obj_iter->second.num);
      rgw_obj src_obj;
      src_obj.init_ns(s->bucket, oid, mp_ns);

      if (obj_part.manifest.empty()) {
        ldpp_dout(this, 0) << "ERROR: empty manifest for object part: obj="
			 << src_obj << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      } else {
        manifest.append(obj_part.manifest, store->svc.zone);
      }

      // All parts must share one compression type throughout the upload.
      bool part_compressed = (obj_part.cs_info.compression_type != "none");
      if ((obj_iter != obj_parts.begin()) &&
          ((part_compressed != compressed) ||
            (cs_info.compression_type != obj_part.cs_info.compression_type))) {
          ldpp_dout(this, 0) << "ERROR: compression type was changed during multipart upload ("
                           << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
          op_ret = -ERR_INVALID_PART;
          return;
      }

      if (part_compressed) {
        // Rebase this part's compression block map onto the end of the
        // accumulated compressed stream.
        int64_t new_ofs; // offset in compression data for new part
        if (cs_info.blocks.size() > 0)
          new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
        else
          new_ofs = 0;
        for (const auto& block : obj_part.cs_info.blocks) {
          compression_block cb;
          cb.old_ofs = block.old_ofs + cs_info.orig_size;
          cb.new_ofs = new_ofs;
          cb.len = block.len;
          cs_info.blocks.push_back(cb);
          new_ofs = cb.new_ofs + cb.len;
        }
        if (!compressed)
          cs_info.compression_type = obj_part.cs_info.compression_type;
        cs_info.orig_size += obj_part.cs_info.orig_size;
        compressed = true;
      }

      // Queue the part object for removal from the bucket index listing.
      rgw_obj_index_key remove_key;
      src_obj.key.get_index_key(&remove_key);

      remove_objs.push_back(remove_key);

      ofs += obj_part.size;
      accounted_size += obj_part.accounted_size;
    }
  } while (truncated);
  hash.Final((unsigned char *)final_etag);

  // S3 multipart ETag format: hex(md5 of concatenated part md5s) + "-N".
  buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
  snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2],  sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
           "-%lld", (long long)parts->parts.size());
  etag = final_etag_str;
  ldpp_dout(this, 10) << "calculated etag: " << final_etag_str << dendl;

  etag_bl.append(final_etag_str, strlen(final_etag_str));

  attrs[RGW_ATTR_ETAG] = etag_bl;

  if (compressed) {
    // write compression attribute to full object
    bufferlist tmp;
    encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
  }

  target_obj.init(s->bucket, s->object.name);
  if (versioned_object) {
    if (!version_id.empty()) {
      target_obj.key.set_instance(version_id);
    } else {
      // No version requested: mint one and report it back to the client.
      store->gen_rand_obj_instance_name(&target_obj);
      version_id = target_obj.key.get_instance();
    }
  }

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  obj_ctx.set_atomic(target_obj);

  // Write the final head object carrying the stitched manifest; completing
  // the write also prunes the listed part entries from the bucket index.
  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
  RGWRados::Object::Write obj_op(&op_target);

  obj_op.meta.manifest = &manifest;
  obj_op.meta.remove_objs = &remove_objs;

  obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
  obj_op.meta.owner = s->owner.get_id();
  obj_op.meta.flags = PUT_OBJ_CREATE;
  obj_op.meta.modify_tail = true;
  obj_op.meta.completeMultipart = true;
  obj_op.meta.olh_epoch = olh_epoch;
  op_ret = obj_op.write_meta(ofs, accounted_size, attrs);
  if (op_ret < 0)
    return;

  // remove the upload obj
  int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
			    s->bucket_info, meta_obj, 0);
  if (r >= 0)  {
    /* serializer's exclusive lock is released */
    serializer.clear_locked();
  } else {
    ldpp_dout(this, 0) << "WARNING: failed to remove object " << meta_obj << dendl;
  }
}
5839
5840 int RGWCompleteMultipart::MPSerializer::try_lock(
5841 const std::string& _oid,
5842 utime_t dur)
5843 {
5844 oid = _oid;
5845 op.assert_exists();
5846 lock.set_duration(dur);
5847 lock.lock_exclusive(&op);
5848 int ret = ioctx.operate(oid, &op);
5849 if (! ret) {
5850 locked = true;
5851 }
5852 return ret;
5853 }
5854
5855 void RGWCompleteMultipart::complete()
5856 {
5857 /* release exclusive lock iff not already */
5858 if (unlikely(serializer.locked)) {
5859 int r = serializer.unlock();
5860 if (r < 0) {
5861 ldpp_dout(this, 0) << "WARNING: failed to unlock " << serializer.oid << dendl;
5862 }
5863 }
5864 send_response();
5865 }
5866
// Authorize aborting a multipart upload (s3:AbortMultipartUpload on the
// object). Deny from user or bucket policy wins, then Allow, then the
// legacy ACL write check.
int RGWAbortMultipart::verify_permission()
{
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                              boost::none,
                                              rgw::IAM::s3AbortMultipartUpload,
                                              rgw_obj(s->bucket, s->object));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    rgw::IAM::Effect e = Effect::Pass;
    if (s->iam_policy) {
      e = s->iam_policy->eval(s->env, *s->auth.identity,
				 rgw::IAM::s3AbortMultipartUpload,
				 rgw_obj(s->bucket, s->object));
    }
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    } else if (usr_policy_res == Effect::Allow)
      return 0;
  }

  // No policy decision: fall back to the legacy ACL write permission.
  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
5898
// Shared pre-exec hook for bucket/object ops.
void RGWAbortMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5903
5904 void RGWAbortMultipart::execute()
5905 {
5906 op_ret = -EINVAL;
5907 string upload_id;
5908 string meta_oid;
5909 upload_id = s->info.args.get("uploadId");
5910 rgw_obj meta_obj;
5911 RGWMPObj mp;
5912
5913 if (upload_id.empty() || s->object.empty())
5914 return;
5915
5916 mp.init(s->object.name, upload_id);
5917 meta_oid = mp.get_meta();
5918
5919 op_ret = get_multipart_info(store, s, meta_oid, nullptr, nullptr, nullptr);
5920 if (op_ret < 0)
5921 return;
5922
5923 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
5924 op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
5925 }
5926
5927 int RGWListMultipart::verify_permission()
5928 {
5929 if (!verify_object_permission(this, s, rgw::IAM::s3ListMultipartUploadParts))
5930 return -EACCES;
5931
5932 return 0;
5933 }
5934
// Shared pre-exec hook for bucket/object ops.
void RGWListMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5939
5940 void RGWListMultipart::execute()
5941 {
5942 string meta_oid;
5943 RGWMPObj mp;
5944
5945 op_ret = get_params();
5946 if (op_ret < 0)
5947 return;
5948
5949 mp.init(s->object.name, upload_id);
5950 meta_oid = mp.get_meta();
5951
5952 op_ret = get_multipart_info(store, s, meta_oid, &policy, nullptr, nullptr);
5953 if (op_ret < 0)
5954 return;
5955
5956 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
5957 marker, parts, NULL, &truncated);
5958 }
5959
5960 int RGWListBucketMultiparts::verify_permission()
5961 {
5962 if (!verify_bucket_permission(this,
5963 s,
5964 rgw::IAM::s3ListBucketMultipartUploads))
5965 return -EACCES;
5966
5967 return 0;
5968 }
5969
// Shared pre-exec hook for bucket/object ops.
void RGWListBucketMultiparts::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5974
// List in-progress multipart uploads in the bucket by scanning the meta
// objects, honoring prefix/delimiter/marker paging.
void RGWListBucketMultiparts::execute()
{
  vector<rgw_bucket_dir_entry> objs;
  string marker_meta;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  // Swift's "path" query arg is a shorthand for prefix + '/' delimiter and
  // may not be combined with either.
  if (s->prot_flags & RGW_REST_SWIFT) {
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }
  marker_meta = marker.get_meta();

  op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
                                  max_uploads, &objs, &common_prefixes, &is_truncated);
  if (op_ret < 0) {
    return;
  }

  if (!objs.empty()) {
    vector<rgw_bucket_dir_entry>::iterator iter;
    RGWMultipartUploadEntry entry;
    for (iter = objs.begin(); iter != objs.end(); ++iter) {
      rgw_obj_key key(iter->key);
      // Skip entries whose names don't decode as multipart meta objects.
      if (!entry.mp.from_meta(key.name))
        continue;
      entry.obj = *iter;
      uploads.push_back(entry);
    }
    // `entry` still holds the last listed upload; it becomes the paging
    // marker for the next request.
    next_marker = entry;
  }
}
6017
6018 void RGWGetHealthCheck::execute()
6019 {
6020 if (!g_conf()->rgw_healthcheck_disabling_path.empty() &&
6021 (::access(g_conf()->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
6022 /* Disabling path specified & existent in the filesystem. */
6023 op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
6024 } else {
6025 op_ret = 0; /* 200 OK */
6026 }
6027 }
6028
// Request-level authorization for multi-object delete. Policies are checked
// here against the bucket ARN (per-object policy checks happen again inside
// execute() for each key). The ACL result is cached in acl_allowed for the
// per-object loop. Deny wins, then Allow, then the ACL fallback.
int RGWDeleteMultiObj::verify_permission()
{
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                              boost::none,
                                              s->object.instance.empty() ?
                                              rgw::IAM::s3DeleteObject :
                                              rgw::IAM::s3DeleteObjectVersion,
                                              ARN(s->bucket));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    rgw::IAM::Effect r = Effect::Pass;
    if (s->iam_policy) {
      r = s->iam_policy->eval(s->env, *s->auth.identity,
				 s->object.instance.empty() ?
				 rgw::IAM::s3DeleteObject :
				 rgw::IAM::s3DeleteObjectVersion,
				 ARN(s->bucket));
    }
    if (r == Effect::Allow)
      return 0;
    else if (r == Effect::Deny)
      return -EACCES;
    else if (usr_policy_res == Effect::Allow)
      return 0;
  }

  // Cache the ACL verdict; execute() reuses it when both policies Pass.
  acl_allowed = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
  if (!acl_allowed)
    return -EACCES;

  return 0;
}
6064
// Shared pre-exec hook for bucket/object ops.
void RGWDeleteMultiObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6069
// S3 multi-object delete: parse the XML body, enforce the object-count and
// MFA constraints, then delete each key, streaming a per-key result into
// the response. Uses goto for the two exit paths (error before the response
// has started vs. done after it has); left byte-identical because C++ goto
// over declarations makes restructuring risky.
void RGWDeleteMultiObj::execute()
{
  RGWMultiDelDelete *multi_delete;
  vector<rgw_obj_key>::iterator iter;
  RGWMultiDelXMLParser parser;
  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  char* buf;

  op_ret = get_params();
  if (op_ret < 0) {
    goto error;
  }

  buf = data.c_str();
  if (!buf) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.parse(buf, data.length(), 1)) {
    op_ret = -EINVAL;
    goto error;
  }

  multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
  if (!multi_delete) {
    op_ret = -EINVAL;
    goto error;
  } else {
#define DELETE_MULTI_OBJ_MAX_NUM      1000
    // Cap the number of keys per request (config, falling back to 1000).
    int max_num = s->cct->_conf->rgw_delete_multi_obj_max_num;
    if (max_num < 0) {
      max_num = DELETE_MULTI_OBJ_MAX_NUM;
    }
    int multi_delete_object_num = multi_delete->objects.size();
    if (multi_delete_object_num > max_num) {
      op_ret = -ERR_MALFORMED_XML;
      goto error;
    }
  }

  if (multi_delete->is_quiet())
    quiet = true;

  // With MFA-delete enabled, any versioned delete requires MFA verification.
  if (s->bucket_info.mfa_enabled()) {
    bool has_versioned = false;
    for (auto i : multi_delete->objects) {
      if (!i.instance.empty()) {
        has_versioned = true;
        break;
      }
    }
    if (has_versioned && !s->mfa_verified) {
      ldpp_dout(this, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl;
      op_ret = -ERR_MFA_REQUIRED;
      goto error;
    }
  }

  begin_response();
  if (multi_delete->objects.empty()) {
    goto done;
  }

  for (iter = multi_delete->objects.begin();
        iter != multi_delete->objects.end();
        ++iter) {
    rgw_obj obj(bucket, *iter);
    // Re-evaluate policies per object (verify_permission only checked the
    // bucket ARN); a denied key gets a partial-failure entry, not an abort.
    if (s->iam_policy || ! s->iam_user_policies.empty()) {
      auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                              boost::none,
                                              iter->instance.empty() ?
                                              rgw::IAM::s3DeleteObject :
                                              rgw::IAM::s3DeleteObjectVersion,
                                              ARN(obj));
      if (usr_policy_res == Effect::Deny) {
        send_partial_response(*iter, false, "", -EACCES);
	continue;
      }

      rgw::IAM::Effect e = Effect::Pass;
      if (s->iam_policy) {
        e = s->iam_policy->eval(s->env,
				   *s->auth.identity,
				   iter->instance.empty() ?
				   rgw::IAM::s3DeleteObject :
				   rgw::IAM::s3DeleteObjectVersion,
				   ARN(obj));
      }
      if ((e == Effect::Deny) ||
	  (usr_policy_res == Effect::Pass && e == Effect::Pass && !acl_allowed)) {
	send_partial_response(*iter, false, "", -EACCES);
	continue;
      }
    }

    obj_ctx->set_atomic(obj);

    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = s->bucket_owner.get_id();
    del_op.params.versioning_status = s->bucket_info.versioning_status();
    del_op.params.obj_owner = s->owner;

    op_ret = del_op.delete_obj();
    // A missing key counts as successfully deleted (idempotent delete).
    if (op_ret == -ENOENT) {
      op_ret = 0;
    }

    send_partial_response(*iter, del_op.result.delete_marker,
			  del_op.result.version_id, op_ret);
  }

  /*  set the return code to zero, errors at this point will be
  dumped to the response */
  op_ret = 0;

done:
  // will likely segfault if begin_response() has not been called
  end_response();
  return;

error:
  send_status();
  return;

}
6203
// Authorize one bulk-delete target: read the bucket's ACL and IAM policy,
// report the bucket owner through `bucket_owner`, then run the combined
// permission check.
// NOTE(review): the action checked is s3:DeleteBucket even when the path
// names an object (delete_single() handles both) — confirm this is the
// intended action for object paths.
bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
                                               map<string, bufferlist>& battrs,
                                               ACLOwner& bucket_owner /* out */)
{
  RGWAccessControlPolicy bacl(store->ctx());
  int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
  if (ret < 0) {
    return false;
  }

  auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);

  bucket_owner = bacl.get_owner();

  /* We can use global user_acl because each BulkDelete request is allowed
   * to work on entities from a single account only. */
  return verify_bucket_permission(dpp, s, binfo.bucket, s->user_acl.get(),
				  &bacl, policy, s->iam_user_policies, rgw::IAM::s3DeleteBucket);
}
6223
// Delete one bulk-delete path. A path with an object key deletes that
// object; a bare bucket path deletes (and unlinks) the bucket, forwarding
// to the metadata master when this zone is not it. Per-path failures are
// tallied in num_unfound / failures rather than aborting the whole request.
// Returns true only on success. Left byte-identical: goto-based error
// handling over declarations.
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
{
  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  RGWBucketInfo binfo;
  map<string, bufferlist> battrs;
  ACLOwner bowner;

  int ret = store->get_bucket_info(*s->sysobj_ctx, s->user->user_id.tenant,
				   path.bucket_name, binfo, nullptr,
				   &battrs);
  if (ret < 0) {
    goto binfo_fail;
  }

  if (!verify_permission(binfo, battrs, bowner)) {
    ret = -EACCES;
    goto auth_fail;
  }

  if (!path.obj_key.empty()) {
    // Object path: delete the single object.
    rgw_obj obj(binfo.bucket, path.obj_key);
    obj_ctx.set_atomic(obj);

    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = binfo.owner;
    del_op.params.versioning_status = binfo.versioning_status();
    del_op.params.obj_owner = bowner;

    ret = del_op.delete_obj();
    if (ret < 0) {
      goto delop_fail;
    }
  } else {
    // Bucket path: delete the bucket itself, then unlink it from the owner.
    RGWObjVersionTracker ot;
    ot.read_version = binfo.ep_objv;

    ret = store->delete_bucket(binfo, ot);
    if (0 == ret) {
      ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
			      binfo.bucket.name, false);
      // Unlink failure is logged but not fatal by itself; ret carries it on.
      if (ret < 0) {
        ldpp_dout(s, 0) << "WARNING: failed to unlink bucket: ret=" << ret << dendl;
      }
    }
    if (ret < 0) {
      goto delop_fail;
    }

    if (!store->svc.zone->is_meta_master()) {
      bufferlist in_data;
      ret = forward_request_to_master(s, &ot.read_version, store, in_data,
				      nullptr);
      if (ret < 0) {
        if (ret == -ENOENT) {
          /* adjust error, we want to return with NoSuchBucket and not
           * NoSuchKey */
          ret = -ERR_NO_SUCH_BUCKET;
        }
        goto delop_fail;
      }
    }
  }

  num_deleted++;
  return true;


binfo_fail:
    // Could not resolve the bucket: ENOENT is counted as "not found",
    // anything else is recorded as a per-path failure.
    if (-ENOENT == ret) {
      ldpp_dout(s, 20) << "cannot find bucket = " << path.bucket_name << dendl;
      num_unfound++;
    } else {
      ldpp_dout(s, 20) << "cannot get bucket info, ret = " << ret << dendl;

      fail_desc_t failed_item = {
        .err  = ret,
        .path = path
      };
      failures.push_back(failed_item);
    }
    return false;

auth_fail:
    ldpp_dout(s, 20) << "wrong auth for " << path << dendl;
    {
      fail_desc_t failed_item = {
        .err  = ret,
        .path = path
      };
      failures.push_back(failed_item);
    }
    return false;

delop_fail:
    if (-ENOENT == ret) {
      ldpp_dout(s, 20) << "cannot find entry " << path << dendl;
      num_unfound++;
    } else {
      fail_desc_t failed_item = {
        .err  = ret,
        .path = path
      };
      failures.push_back(failed_item);
    }
    return false;
}
6333
6334 bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
6335 {
6336 ldpp_dout(s, 20) << "in delete_chunk" << dendl;
6337 for (auto path : paths) {
6338 ldpp_dout(s, 20) << "bulk deleting path: " << path << dendl;
6339 delete_single(path);
6340 }
6341
6342 return true;
6343 }
6344
// No request-level check: authorization happens per path inside
// Deleter::verify_permission() during execute().
int RGWBulkDelete::verify_permission()
{
  return 0;
}
6349
// Shared pre-exec hook for bucket/object ops.
void RGWBulkDelete::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6354
// Stream the bulk-delete request body chunk by chunk, deleting each chunk's
// paths as it arrives.
void RGWBulkDelete::execute()
{
  deleter = std::unique_ptr<Deleter>(new Deleter(this, store, s));

  bool is_truncated = false;
  do {
    list<RGWBulkDelete::acct_path_t> items;

    int ret = get_data(items, &is_truncated);
    if (ret < 0) {
      return;
    }

    // NOTE(review): delete_chunk()'s result is stored but never read, and
    // the loop condition tests op_ret — presumably set inside get_data();
    // confirm op_ret is updated on this path.
    ret = deleter->delete_chunk(items);
  } while (!op_ret && is_truncated);

  return;
}
6373
/* Out-of-line definition of the static constexpr member (required by
 * pre-C++17 ODR-use rules; harmless under C++17 where it is implicit). */
constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
6376
// Request-level authorization for bulk upload: reject anonymous callers,
// require user-level write permission, forbid cross-tenant bucket creation,
// and refuse users whose bucket quota is negative (creation disabled).
int RGWBulkUploadOp::verify_permission()
{
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (! verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
                        << " (user_id.tenant=" << s->user->user_id.tenant
                        << " requested=" << s->bucket_tenant << ")" << dendl;
    return -EACCES;
  }

  // max_buckets < 0 means bucket creation is disabled for this user;
  // the positive limit is enforced later in handle_dir_verify_permission().
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  return 0;
}
6400
// Shared pre-exec hook for bucket/object ops.
void RGWBulkUploadOp::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6405
6406 boost::optional<std::pair<std::string, rgw_obj_key>>
6407 RGWBulkUploadOp::parse_path(const boost::string_ref& path)
6408 {
6409 /* We need to skip all slashes at the beginning in order to preserve
6410 * compliance with Swift. */
6411 const size_t start_pos = path.find_first_not_of('/');
6412
6413 if (boost::string_ref::npos != start_pos) {
6414 /* Seperator is the first slash after the leading ones. */
6415 const size_t sep_pos = path.substr(start_pos).find('/');
6416
6417 if (boost::string_ref::npos != sep_pos) {
6418 const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
6419 const auto obj_name = path.substr(sep_pos + 1);
6420
6421 return std::make_pair(bucket_name.to_string(),
6422 rgw_obj_key(obj_name.to_string()));
6423 } else {
6424 /* It's guaranteed here that bucket name is at least one character
6425 * long and is different than slash. */
6426 return std::make_pair(path.substr(start_pos).to_string(),
6427 rgw_obj_key());
6428 }
6429 }
6430
6431 return none;
6432 }
6433
6434 std::pair<std::string, std::string>
6435 RGWBulkUploadOp::handle_upload_path(struct req_state *s)
6436 {
6437 std::string bucket_path, file_prefix;
6438 if (! s->init_state.url_bucket.empty()) {
6439 file_prefix = bucket_path = s->init_state.url_bucket + "/";
6440 if (! s->object.empty()) {
6441 std::string& object_name = s->object.name;
6442
6443 /* As rgw_obj_key::empty() already verified emptiness of s->object.name,
6444 * we can safely examine its last element. */
6445 if (object_name.back() == '/') {
6446 file_prefix.append(object_name);
6447 } else {
6448 file_prefix.append(object_name).append("/");
6449 }
6450 }
6451 }
6452 return std::make_pair(bucket_path, file_prefix);
6453 }
6454
6455 int RGWBulkUploadOp::handle_dir_verify_permission()
6456 {
6457 if (s->user->max_buckets > 0) {
6458 RGWUserBuckets buckets;
6459 std::string marker;
6460 bool is_truncated = false;
6461 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
6462 marker, std::string(), s->user->max_buckets,
6463 false, &is_truncated);
6464 if (op_ret < 0) {
6465 return op_ret;
6466 }
6467
6468 if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) {
6469 return -ERR_TOO_MANY_BUCKETS;
6470 }
6471 }
6472
6473 return 0;
6474 }
6475
6476 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
6477 {
6478 /* the request of container or object level will contain bucket name.
6479 * only at account level need to append the bucket name */
6480 if (info.script_uri.find(bucket_name) != std::string::npos) {
6481 return;
6482 }
6483
6484 ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
6485 info.script_uri.append("/").append(bucket_name);
6486 info.request_uri_aws4 = info.request_uri = info.script_uri;
6487 info.effective_uri = "/" + bucket_name;
6488 }
6489
6490 void RGWBulkUploadOp::init(RGWRados* const store,
6491 struct req_state* const s,
6492 RGWHandler* const h)
6493 {
6494 RGWOp::init(store, s, h);
6495 dir_ctx.emplace(store->svc.sysobj->init_obj_ctx());
6496 }
6497
/* Create the bucket (Swift container) corresponding to a directory entry
 * of the uploaded TAR archive. Mirrors the regular bucket-creation flow:
 * quota/permission check, optional forwarding to the metadata master,
 * placement verification, create_bucket and user linkage.
 * Returns 0 on success, -ERR_BUCKET_EXISTS when the container already
 * exists for this user, or another negative error code. */
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
{
  ldpp_dout(this, 20) << "got directory=" << path << dendl;

  op_ret = handle_dir_verify_permission();
  if (op_ret < 0) {
    return op_ret;
  }

  /* Only the bucket part of the path is relevant here. */
  std::string bucket_name;
  rgw_obj_key object_junk;
  std::tie(bucket_name, object_junk) = *parse_path(path);

  /* NOTE(review): `obj` is constructed but never referenced afterwards in
   * this function — looks like a leftover; confirm before removing. */
  rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root,
                  rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
                                  binfo, nullptr, &battrs);
  if (op_ret < 0 && op_ret != -ENOENT) {
    return op_ret;
  }
  const bool bucket_exists = (op_ret != -ENOENT);

  if (bucket_exists) {
    /* An existing bucket is acceptable only when owned by the requesting
     * user; anything else is a name conflict. */
    RGWAccessControlPolicy old_policy(s->cct);
    int r = rgw_op_get_bucket_policy_from_attr(s->cct, store, binfo,
                                               battrs, &old_policy);
    if (r >= 0) {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return op_ret;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket = nullptr;
  uint32_t *pmaster_num_shards = nullptr;
  real_time creation_time;
  obj_version objv, ep_objv, *pobjv = nullptr;

  /* Non-master zones must forward the creation to the metadata master and
   * adopt the bucket/shard/version info the master hands back. */
  if (! store->svc.zone->is_meta_master()) {
    JSONParser jp;
    ceph::bufferlist in_data;
    req_info info = s->info;
    forward_req_info(s->cct, info, bucket_name);
    op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
    if (op_ret < 0) {
      return op_ret;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);

    ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
    ldpp_dout(this, 20) << "got creation_time="<< master_info.creation_time << dendl;

    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = nullptr;
    pmaster_num_shards = nullptr;
  }

  rgw_placement_rule placement_rule(binfo.placement_rule, s->info.storage_class);

  if (bucket_exists) {
    /* Verify the requested placement matches the existing bucket's.
     * NOTE(review): the return value of select_bucket_placement() is written
     * into op_ret but not checked before selected_placement_rule is used —
     * confirm this is intentional. */
    rgw_placement_rule selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->svc.zone->select_bucket_placement(*(s->user),
                                                      store->svc.zone->get_zonegroup().get_id(),
                                                      placement_rule,
                                                      &selected_placement_rule,
                                                      nullptr);
    if (selected_placement_rule != binfo.placement_rule) {
      op_ret = -EEXIST;
      ldpp_dout(this, 20) << "non-coherent placement rule" << dendl;
      return op_ret;
    }
  }

  /* Create metadata: ACLs. */
  std::map<std::string, ceph::bufferlist> attrs;
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;

  rgw_bucket bucket;
  bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  bucket.name = bucket_name;


  RGWBucketInfo out_info;
  op_ret = store->create_bucket(*(s->user),
                                bucket,
                                store->svc.zone->get_zonegroup().get_id(),
                                placement_rule, binfo.swift_ver_location,
                                pquota_info, attrs,
                                out_info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret
      << ", bucket=" << bucket << dendl;

  if (op_ret && op_ret != -EEXIST) {
    return op_ret;
  }

  const bool existed = (op_ret == -EEXIST);
  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (out_info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      ldpp_dout(this, 20) << "conflicting bucket name" << dendl;
      return op_ret;
    }
    bucket = out_info.bucket;
  }

  /* Record the bucket on the user's bucket list; roll the creation back on
   * failure unless the bucket pre-existed. */
  op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
                           out_info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id,
                               bucket.tenant, bucket.name);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    ldpp_dout(this, 20) << "containers already exists" << dendl;
    op_ret = -ERR_BUCKET_EXISTS;
  }

  return op_ret;
}
6653
6654
6655 bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
6656 const rgw_obj& obj,
6657 std::map<std::string, ceph::bufferlist>& battrs,
6658 ACLOwner& bucket_owner /* out */)
6659 {
6660 RGWAccessControlPolicy bacl(store->ctx());
6661 op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
6662 if (op_ret < 0) {
6663 ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl;
6664 return false;
6665 }
6666
6667 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
6668
6669 bucket_owner = bacl.get_owner();
6670 if (policy || ! s->iam_user_policies.empty()) {
6671 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
6672 boost::none,
6673 rgw::IAM::s3PutObject, obj);
6674 if (usr_policy_res == Effect::Deny) {
6675 return false;
6676 }
6677 auto e = policy->eval(s->env, *s->auth.identity,
6678 rgw::IAM::s3PutObject, obj);
6679 if (e == Effect::Allow) {
6680 return true;
6681 } else if (e == Effect::Deny) {
6682 return false;
6683 } else if (usr_policy_res == Effect::Allow) {
6684 return true;
6685 }
6686 }
6687
6688 return verify_bucket_permission_no_policy(this, s, s->user_acl.get(),
6689 &bacl, RGW_PERM_WRITE);
6690 }
6691
/* Store one regular file from the TAR stream as an object.
 * `path` names "bucket/object", `size` is the declared file size from the
 * TAR header, and `body` yields the (block-aligned) payload. Performs
 * permission and quota checks, streams the data through an optional
 * compression filter into an AtomicObjectProcessor, and writes ETag/ACL
 * (and compression) attrs. Returns 0 or a negative error. */
int RGWBulkUploadOp::handle_file(const boost::string_ref path,
                                 const size_t size,
                                 AlignedStreamGetter& body)
{

  ldpp_dout(this, 20) << "got file=" << path << ", size=" << size << dendl;

  /* Enforce the global single-object size cap up front. */
  if (size > static_cast<size_t>(s->cct->_conf->rgw_max_put_size)) {
    op_ret = -ERR_TOO_LARGE;
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object;
  std::tie(bucket_name, object) = *parse_path(path);

  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  ACLOwner bowner;
  op_ret = store->get_bucket_info(*s->sysobj_ctx, s->user->user_id.tenant,
                                  bucket_name, binfo, nullptr, &battrs);
  if (op_ret == -ENOENT) {
    /* Missing bucket is only logged here; the permission check below is
     * still run with the default-constructed binfo/battrs. */
    ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl;
  } else if (op_ret < 0) {
    return op_ret;
  }

  if (! handle_file_verify_permission(binfo,
                                      rgw_obj(binfo.bucket, object),
                                      battrs, bowner)) {
    ldpp_dout(this, 20) << "object creation unauthorized" << dendl;
    op_ret = -EACCES;
    return op_ret;
  }

  /* Pre-check quotas against the declared size (re-checked after upload). */
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
                              user_quota, bucket_quota, size);
  if (op_ret < 0) {
    return op_ret;
  }

  /* NOTE(review): this checks s->bucket_info/s->bucket, not the binfo/bucket
   * resolved above for this TAR entry — confirm that is intended. */
  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  rgw_obj obj(binfo.bucket, object);
  /* NOTE(review): versioning is read from s->bucket_info rather than binfo
   * here as well — verify. */
  if (s->bucket_info.versioning_enabled()) {
    store->gen_rand_obj_instance_name(&obj);
  }

  rgw_placement_rule dest_placement = s->dest_placement;
  dest_placement.inherit_from(binfo.placement_rule);

  /* Throttle in-flight librados writes to the configured window. */
  rgw::AioThrottle aio(store->ctx()->_conf->rgw_put_obj_min_window_size);

  using namespace rgw::putobj;

  /* NOTE(review): the locally computed dest_placement selects the
   * compression type below, but the processor receives &s->dest_placement —
   * confirm this divergence is intended. */
  AtomicObjectProcessor processor(&aio, store, binfo, &s->dest_placement, bowner.get_id(),
                                  obj_ctx, obj, 0, s->req_id);

  op_ret = processor.prepare();
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "cannot prepare processor due to ret=" << op_ret << dendl;
    return op_ret;
  }

  /* No filters by default. */
  DataProcessor *filter = &processor;

  /* Optionally chain a compression filter in front of the processor. */
  const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
      dest_placement);
  CompressorRef plugin;
  boost::optional<RGWPutObj_Compress> compressor;
  if (compression_type != "none") {
    plugin = Compressor::create(s->cct, compression_type);
    if (! plugin) {
      /* Missing plugin is non-fatal: upload proceeds uncompressed. */
      ldpp_dout(this, 1) << "Cannot load plugin for rgw_compression_type "
          << compression_type << dendl;
    } else {
      compressor.emplace(s->cct, plugin, filter);
      filter = &*compressor;
    }
  }

  /* Upload file content. */
  ssize_t len = 0;
  size_t ofs = 0;
  MD5 hash;
  do {
    ceph::bufferlist data;
    len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);

    /* NOTE(review): this logs the raw (possibly binary) body at level 20. */
    ldpp_dout(this, 20) << "body=" << data.c_str() << dendl;
    if (len < 0) {
      op_ret = len;
      return op_ret;
    } else if (len > 0) {
      /* MD5 is accumulated over the uncompressed payload for the ETag. */
      hash.Update((const unsigned char *)data.c_str(), data.length());
      op_ret = filter->process(std::move(data), ofs);
      if (op_ret < 0) {
        ldpp_dout(this, 20) << "filter->process() returned ret=" << op_ret << dendl;
        return op_ret;
      }

      ofs += len;
    }

  } while (len > 0);

  // flush
  op_ret = filter->process({}, ofs);
  if (op_ret < 0) {
    return op_ret;
  }

  /* The stream must deliver exactly what the TAR header declared. */
  if (ofs != size) {
    ldpp_dout(this, 10) << "real file size different from declared" << dendl;
    op_ret = -EINVAL;
    return op_ret;
  }

  /* Re-check quotas now that the actual size is known. */
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
                              user_quota, bucket_quota, size);
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl;
    return op_ret;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  hash.Final(m);
  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  /* Create metadata: ETAG. */
  std::map<std::string, ceph::bufferlist> attrs;
  std::string etag = calc_md5;
  ceph::bufferlist etag_bl;
  etag_bl.append(etag.c_str(), etag.size() + 1);
  attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));

  /* Create metadata: ACLs. */
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  /* Create metadata: compression info. */
  if (compressor && compressor->is_compressed()) {
    ceph::bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    /* NOTE(review): s->obj_size is request-level state; the per-file size
     * here is `size` (== ofs, validated above) — confirm which is correct. */
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = std::move(compressor->get_compression_blocks());
    encode(cs_info, tmp);
    attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
  }

  /* Complete the transaction. */
  op_ret = processor.complete(size, etag, nullptr, ceph::real_time(),
                              attrs, ceph::real_time() /* delete_at */,
                              nullptr, nullptr, nullptr, nullptr, nullptr);
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "processor::complete returned op_ret=" << op_ret << dendl;
  }

  return op_ret;
}
6867
/* Drive the bulk upload: read the request body as a TAR archive, block by
 * block, dispatching regular files to handle_file() and directories to
 * handle_dir(). Per-entry failures are collected in `failures`; only
 * errors listed in `terminal_errors` abort the whole upload. */
void RGWBulkUploadOp::execute()
{
  ceph::bufferlist buffer(64 * 1024);

  ldpp_dout(this, 20) << "start" << dendl;

  /* Create an instance of stream-abstracting class. Having this indirection
   * allows for easy introduction of decompressors like gzip and bzip2. */
  auto stream = create_stream();
  if (! stream) {
    return;
  }

  /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See:
   * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
  std::string bucket_path, file_prefix;
  std::tie(bucket_path, file_prefix) = handle_upload_path(s);

  auto status = rgw::tar::StatusIndicator::create();
  do {
    /* Each iteration consumes exactly one 512-byte TAR block. */
    op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
    if (op_ret < 0) {
      ldpp_dout(this, 2) << "cannot read header" << dendl;
      return;
    }

    /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
     * must be tracked to detect out end-of-archive. It occurs when both of
     * them are empty (zeroed). Tracing this particular inter-block dependency
     * is responsibility of the rgw::tar::StatusIndicator class. */
    boost::optional<rgw::tar::HeaderView> header;
    std::tie(status, header) = rgw::tar::interpret_block(status, buffer);

    if (! status.empty() && header) {
      /* This specific block isn't empty (entirely zeroed), so we can parse
       * it as a TAR header and dispatch. At the moment we do support only
       * regular files and directories. Everything else (symlinks, devices)
       * will be ignored but won't cease the whole upload. */
      switch (header->get_filetype()) {
        case rgw::tar::FileType::NORMAL_FILE: {
          ldpp_dout(this, 2) << "handling regular file" << dendl;

          /* With a URL-level bucket the entry name is prefixed so the file
           * lands under that bucket; otherwise the TAR path stands alone. */
          boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
                                       file_prefix + header->get_filename().to_string();
          auto body = AlignedStreamGetter(0, header->get_filesize(),
                                          rgw::tar::BLOCK_SIZE, *stream);
          op_ret = handle_file(filename,
                               header->get_filesize(),
                               body);
          if (! op_ret) {
            /* Only regular files counts. */
            num_created++;
          } else {
            failures.emplace_back(op_ret, filename.to_string());
          }
          break;
        }
        case rgw::tar::FileType::DIRECTORY: {
          ldpp_dout(this, 2) << "handling regular directory" << dendl;

          boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
          op_ret = handle_dir(dirname);
          if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
            failures.emplace_back(op_ret, dirname.to_string());
          }
          break;
        }
        default: {
          /* Not recognized. Skip. */
          op_ret = 0;
          break;
        }
      }

      /* In case of any problems with sub-request authorization Swift simply
       * terminates whole upload immediately. */
      if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
                                     terminal_errors)) {
        ldpp_dout(this, 2) << "terminating due to ret=" << op_ret << dendl;
        break;
      }
    } else {
      ldpp_dout(this, 2) << "an empty block" << dendl;
      op_ret = 0;
    }

    buffer.clear();
  } while (! status.eof());

  return;
}
6959
6960 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
6961 {
6962 const size_t aligned_legnth = length + (-length % alignment);
6963 ceph::bufferlist junk;
6964
6965 DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
6966 }
6967
6968 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
6969 ceph::bufferlist& dst)
6970 {
6971 const size_t max_to_read = std::min(want, length - position);
6972 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
6973 if (len > 0) {
6974 position += len;
6975 }
6976 return len;
6977 }
6978
6979 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
6980 ceph::bufferlist& dst)
6981 {
6982 const auto len = DecoratedStreamGetter::get_exactly(want, dst);
6983 if (len > 0) {
6984 position += len;
6985 }
6986 return len;
6987 }
6988
6989 int RGWSetAttrs::verify_permission()
6990 {
6991 // This looks to be part of the RGW-NFS machinery and has no S3 or
6992 // Swift equivalent.
6993 bool perm;
6994 if (!s->object.empty()) {
6995 perm = verify_object_permission_no_policy(this, s, RGW_PERM_WRITE);
6996 } else {
6997 perm = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
6998 }
6999 if (!perm)
7000 return -EACCES;
7001
7002 return 0;
7003 }
7004
7005 void RGWSetAttrs::pre_exec()
7006 {
7007 rgw_bucket_object_pre_exec(s);
7008 }
7009
7010 void RGWSetAttrs::execute()
7011 {
7012 op_ret = get_params();
7013 if (op_ret < 0)
7014 return;
7015
7016 rgw_obj obj(s->bucket, s->object);
7017
7018 if (!s->object.empty()) {
7019 store->set_atomic(s->obj_ctx, obj);
7020 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr);
7021 } else {
7022 for (auto& iter : attrs) {
7023 s->bucket_attrs[iter.first] = std::move(iter.second);
7024 }
7025 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs,
7026 &s->bucket_info.objv_tracker);
7027 }
7028 }
7029
7030 void RGWGetObjLayout::pre_exec()
7031 {
7032 rgw_bucket_object_pre_exec(s);
7033 }
7034
7035 void RGWGetObjLayout::execute()
7036 {
7037 rgw_obj obj(s->bucket, s->object);
7038 RGWRados::Object target(store,
7039 s->bucket_info,
7040 *static_cast<RGWObjectCtx *>(s->obj_ctx),
7041 rgw_obj(s->bucket, s->object));
7042 RGWRados::Object::Read stat_op(&target);
7043
7044 op_ret = stat_op.prepare();
7045 if (op_ret < 0) {
7046 return;
7047 }
7048
7049 head_obj = stat_op.state.head_obj;
7050
7051 op_ret = target.get_manifest(&manifest);
7052 }
7053
7054
7055 int RGWConfigBucketMetaSearch::verify_permission()
7056 {
7057 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7058 return -EACCES;
7059 }
7060
7061 return 0;
7062 }
7063
7064 void RGWConfigBucketMetaSearch::pre_exec()
7065 {
7066 rgw_bucket_object_pre_exec(s);
7067 }
7068
7069 void RGWConfigBucketMetaSearch::execute()
7070 {
7071 op_ret = get_params();
7072 if (op_ret < 0) {
7073 ldpp_dout(this, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
7074 return;
7075 }
7076
7077 s->bucket_info.mdsearch_config = mdsearch_config;
7078
7079 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
7080 if (op_ret < 0) {
7081 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
7082 << " returned err=" << op_ret << dendl;
7083 return;
7084 }
7085 }
7086
7087 int RGWGetBucketMetaSearch::verify_permission()
7088 {
7089 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7090 return -EACCES;
7091 }
7092
7093 return 0;
7094 }
7095
7096 void RGWGetBucketMetaSearch::pre_exec()
7097 {
7098 rgw_bucket_object_pre_exec(s);
7099 }
7100
7101 int RGWDelBucketMetaSearch::verify_permission()
7102 {
7103 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7104 return -EACCES;
7105 }
7106
7107 return 0;
7108 }
7109
7110 void RGWDelBucketMetaSearch::pre_exec()
7111 {
7112 rgw_bucket_object_pre_exec(s);
7113 }
7114
7115 void RGWDelBucketMetaSearch::execute()
7116 {
7117 s->bucket_info.mdsearch_config.clear();
7118
7119 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
7120 if (op_ret < 0) {
7121 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
7122 << " returned err=" << op_ret << dendl;
7123 return;
7124 }
7125 }
7126
7127
7128 RGWHandler::~RGWHandler()
7129 {
7130 }
7131
7132 int RGWHandler::init(RGWRados *_store,
7133 struct req_state *_s,
7134 rgw::io::BasicClient *cio)
7135 {
7136 store = _store;
7137 s = _s;
7138
7139 return 0;
7140 }
7141
7142 int RGWHandler::do_init_permissions()
7143 {
7144 int ret = rgw_build_bucket_policies(store, s);
7145 if (ret < 0) {
7146 ldpp_dout(s, 10) << "init_permissions on " << s->bucket
7147 << " failed, ret=" << ret << dendl;
7148 return ret==-ENODATA ? -EACCES : ret;
7149 }
7150
7151 rgw_build_iam_environment(store, s);
7152 return ret;
7153 }
7154
7155 int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
7156 {
7157 if (only_bucket) {
7158 /* already read bucket info */
7159 return 0;
7160 }
7161 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
7162
7163 if (ret < 0) {
7164 ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":"
7165 << s->object << " only_bucket=" << only_bucket
7166 << " ret=" << ret << dendl;
7167 if (ret == -ENODATA)
7168 ret = -EACCES;
7169 }
7170
7171 return ret;
7172 }
7173
7174 int RGWOp::error_handler(int err_no, string *error_content) {
7175 return dialect_handler->error_handler(err_no, error_content);
7176 }
7177
7178 int RGWHandler::error_handler(int err_no, string *error_content) {
7179 // This is the do-nothing error handler
7180 return err_no;
7181 }
7182
7183 std::ostream& RGWOp::gen_prefix(std::ostream& out) const
7184 {
7185 // append <dialect>:<op name> to the prefix
7186 return s->gen_prefix(out) << s->dialect << ':' << name() << ' ';
7187 }
7188
7189 void RGWDefaultResponseOp::send_response() {
7190 if (op_ret) {
7191 set_req_state_err(s, op_ret);
7192 }
7193 dump_errno(s);
7194 end_header(s);
7195 }
7196
7197 void RGWPutBucketPolicy::send_response()
7198 {
7199 if (op_ret) {
7200 set_req_state_err(s, op_ret);
7201 }
7202 dump_errno(s);
7203 end_header(s);
7204 }
7205
7206 int RGWPutBucketPolicy::verify_permission()
7207 {
7208 if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPolicy)) {
7209 return -EACCES;
7210 }
7211
7212 return 0;
7213 }
7214
7215 int RGWPutBucketPolicy::get_params()
7216 {
7217 const auto max_size = s->cct->_conf->rgw_max_put_param_size;
7218 // At some point when I have more time I want to make a version of
7219 // rgw_rest_read_all_input that doesn't use malloc.
7220 std::tie(op_ret, data) = rgw_rest_read_all_input(s, max_size, false);
7221
7222 // And throws exceptions.
7223 return op_ret;
7224 }
7225
/* Store a bucket policy document: read it from the request body, forward
 * to the metadata master when this zone is not it, then parse and persist
 * the policy text in the bucket attrs. Malformed documents yield -EINVAL. */
void RGWPutBucketPolicy::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  /* Metadata mutations must also be applied on the metadata master. */
  if (!store->svc.zone->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
    if (op_ret < 0) {
      ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  try {
    /* Parsing validates the document; a PolicyParseException is thrown
     * on malformed input and mapped to -EINVAL below. */
    const Policy p(s->cct, s->bucket_tenant, data);
    /* The write goes through retry_raced_bucket_write, which presumably
     * re-runs the lambda when the bucket metadata changed concurrently. */
    op_ret = retry_raced_bucket_write(store, s, [&p, this] {
      auto attrs = s->bucket_attrs;
      attrs[RGW_ATTR_IAM_POLICY].clear();
      attrs[RGW_ATTR_IAM_POLICY].append(p.text);
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                    &s->bucket_info.objv_tracker);
      return op_ret;
    });
  } catch (rgw::IAM::PolicyParseException& e) {
    ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl;
    op_ret = -EINVAL;
  }
}
7256
7257 void RGWGetBucketPolicy::send_response()
7258 {
7259 if (op_ret) {
7260 set_req_state_err(s, op_ret);
7261 }
7262 dump_errno(s);
7263 end_header(s, this, "application/json");
7264 dump_body(s, policy);
7265 }
7266
7267 int RGWGetBucketPolicy::verify_permission()
7268 {
7269 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) {
7270 return -EACCES;
7271 }
7272
7273 return 0;
7274 }
7275
7276 void RGWGetBucketPolicy::execute()
7277 {
7278 auto attrs = s->bucket_attrs;
7279 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
7280 if (aiter == attrs.end()) {
7281 ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
7282 << s->bucket_name << dendl;
7283 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
7284 s->err.message = "The bucket policy does not exist";
7285 return;
7286 } else {
7287 policy = attrs[RGW_ATTR_IAM_POLICY];
7288
7289 if (policy.length() == 0) {
7290 ldpp_dout(this, 10) << "The bucket policy does not exist, bucket: "
7291 << s->bucket_name << dendl;
7292 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
7293 s->err.message = "The bucket policy does not exist";
7294 return;
7295 }
7296 }
7297 }
7298
7299 void RGWDeleteBucketPolicy::send_response()
7300 {
7301 if (op_ret) {
7302 set_req_state_err(s, op_ret);
7303 }
7304 dump_errno(s);
7305 end_header(s);
7306 }
7307
7308 int RGWDeleteBucketPolicy::verify_permission()
7309 {
7310 if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucketPolicy)) {
7311 return -EACCES;
7312 }
7313
7314 return 0;
7315 }
7316
/* Remove the bucket's IAM policy attr and persist the attrs. The copy of
 * s->bucket_attrs is deliberate: the cached set is left untouched and the
 * mutated copy is what gets written. The write goes through
 * retry_raced_bucket_write, which presumably re-runs the lambda when the
 * bucket metadata changed concurrently (objv mismatch). */
void RGWDeleteBucketPolicy::execute()
{
  op_ret = retry_raced_bucket_write(store, s, [this] {
    auto attrs = s->bucket_attrs;
    attrs.erase(RGW_ATTR_IAM_POLICY);
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                  &s->bucket_info.objv_tracker);
    return op_ret;
  });
}
7327
7328 void RGWGetClusterStat::execute()
7329 {
7330 op_ret = this->store->get_rados_handle()->cluster_stat(stats_op);
7331 }
7332
7333