]> git.proxmox.com Git - ceph.git/blob - ceph/src/rgw/rgw_op.cc
import 14.2.4 nautilus point release
[ceph.git] / ceph / src / rgw / rgw_op.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <system_error>
7 #include <unistd.h>
8
9 #include <sstream>
10
11 #include <boost/algorithm/string/predicate.hpp>
12 #include <boost/bind.hpp>
13 #include <boost/optional.hpp>
14 #include <boost/utility/in_place_factory.hpp>
15 #include <boost/utility/string_view.hpp>
16
17 #include "include/scope_guard.h"
18 #include "common/Clock.h"
19 #include "common/armor.h"
20 #include "common/errno.h"
21 #include "common/mime.h"
22 #include "common/utf8.h"
23 #include "common/ceph_json.h"
24 #include "common/static_ptr.h"
25
26 #include "rgw_rados.h"
27 #include "rgw_zone.h"
28 #include "rgw_op.h"
29 #include "rgw_rest.h"
30 #include "rgw_acl.h"
31 #include "rgw_acl_s3.h"
32 #include "rgw_acl_swift.h"
33 #include "rgw_aio_throttle.h"
34 #include "rgw_user.h"
35 #include "rgw_bucket.h"
36 #include "rgw_log.h"
37 #include "rgw_multi.h"
38 #include "rgw_multi_del.h"
39 #include "rgw_cors.h"
40 #include "rgw_cors_s3.h"
41 #include "rgw_rest_conn.h"
42 #include "rgw_rest_s3.h"
43 #include "rgw_tar.h"
44 #include "rgw_client_io.h"
45 #include "rgw_compression.h"
46 #include "rgw_role.h"
47 #include "rgw_tag_s3.h"
48 #include "rgw_putobj_processor.h"
49 #include "rgw_crypt.h"
50 #include "rgw_perf_counters.h"
51
52 #include "services/svc_zone.h"
53 #include "services/svc_quota.h"
54 #include "services/svc_sys_obj.h"
55
56 #include "cls/lock/cls_lock_client.h"
57 #include "cls/rgw/cls_rgw_client.h"
58
59
60 #include "include/ceph_assert.h"
61
62 #include "compressor/Compressor.h"
63
64 #ifdef WITH_LTTNG
65 #define TRACEPOINT_DEFINE
66 #define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
67 #include "tracing/rgw_op.h"
68 #undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
69 #undef TRACEPOINT_DEFINE
70 #else
71 #define tracepoint(...)
72 #endif
73
74 #define dout_context g_ceph_context
75 #define dout_subsys ceph_subsys_rgw
76
77 using namespace librados;
78 using ceph::crypto::MD5;
79 using boost::optional;
80 using boost::none;
81
82 using rgw::IAM::ARN;
83 using rgw::IAM::Effect;
84 using rgw::IAM::Policy;
85
86 using rgw::IAM::Policy;
87
88 static string mp_ns = RGW_OBJ_NS_MULTIPART;
89 static string shadow_ns = RGW_OBJ_NS_SHADOW;
90
91 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
92 static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
93 bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
94
95 static MultipartMetaFilter mp_filter;
96
97 // this probably should belong in the rgw_iam_policy_keywords, I'll get it to it
98 // at some point
99 static constexpr auto S3_EXISTING_OBJTAG = "s3:ExistingObjectTag";
100
// Parse the HTTP Range header (member range_str) into the member offsets
// ofs/end. Returns 0 when the header is parsed, absent, or unrecognized as a
// byte range; returns -ERANGE for an invalid/unsatisfiable range unless
// rgw_ignore_get_invalid_range is configured, in which case the range is
// dropped and the whole object is served.
int RGWGetObj::parse_range(void)
{
  int r = -ERANGE;
  string rs(range_str);
  string ofs_str;
  string end_str;

  // Config knob: treat a bad range as "no range" instead of failing with 416.
  ignore_invalid_range = s->cct->_conf->rgw_ignore_get_invalid_range;
  partial_content = false;

  size_t pos = rs.find("bytes=");
  if (pos == string::npos) {
    // No literal "bytes=" prefix; tolerate odd spacing/case such as
    // "Bytes = 0-99": skip spaces, match the unit token case-insensitively,
    // then require '='. Anything else is treated as "no range requested".
    pos = 0;
    while (isspace(rs[pos]))
      pos++;
    int end = pos;
    while (isalpha(rs[end]))
      end++;
    if (strncasecmp(rs.c_str(), "bytes", end - pos) != 0)
      return 0;
    while (isspace(rs[end]))
      end++;
    if (rs[end] != '=')
      return 0;
    rs = rs.substr(end + 1);
  } else {
    rs = rs.substr(pos + 6); /* size of("bytes=") */
  }
  pos = rs.find('-');
  if (pos == string::npos)
    goto done;

  partial_content = true;

  ofs_str = rs.substr(0, pos);
  end_str = rs.substr(pos + 1);
  if (end_str.length()) {
    end = atoll(end_str.c_str());
    if (end < 0)
      goto done;
  }

  if (ofs_str.length()) {
    ofs = atoll(ofs_str.c_str());
  } else { // RFC2616 suffix-byte-range-spec
    // negative ofs encodes "last -ofs bytes of the object"
    ofs = -end;
    end = -1;
  }

  if (end >= 0 && end < ofs)
    goto done;

  range_parsed = true;
  return 0;

done:
  // Invalid range: either report -ERANGE or, when configured, pretend no
  // range was given and serve the full object.
  if (ignore_invalid_range) {
    partial_content = false;
    ofs = 0;
    end = -1;
    range_parsed = false; // allow retry
    r = 0;
  }

  return r;
}
167
168 static int decode_policy(CephContext *cct,
169 bufferlist& bl,
170 RGWAccessControlPolicy *policy)
171 {
172 auto iter = bl.cbegin();
173 try {
174 policy->decode(iter);
175 } catch (buffer::error& err) {
176 ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
177 return -EIO;
178 }
179 if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
180 ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
181 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
182 s3policy->to_xml(*_dout);
183 *_dout << dendl;
184 }
185 return 0;
186 }
187
188
189 static int get_user_policy_from_attr(CephContext * const cct,
190 RGWRados * const store,
191 map<string, bufferlist>& attrs,
192 RGWAccessControlPolicy& policy /* out */)
193 {
194 auto aiter = attrs.find(RGW_ATTR_ACL);
195 if (aiter != attrs.end()) {
196 int ret = decode_policy(cct, aiter->second, &policy);
197 if (ret < 0) {
198 return ret;
199 }
200 } else {
201 return -ENOENT;
202 }
203
204 return 0;
205 }
206
207 static int get_bucket_instance_policy_from_attr(CephContext *cct,
208 RGWRados *store,
209 RGWBucketInfo& bucket_info,
210 map<string, bufferlist>& bucket_attrs,
211 RGWAccessControlPolicy *policy)
212 {
213 map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
214
215 if (aiter != bucket_attrs.end()) {
216 int ret = decode_policy(cct, aiter->second, policy);
217 if (ret < 0)
218 return ret;
219 } else {
220 ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
221 RGWUserInfo uinfo;
222 /* object exists, but policy is broken */
223 int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
224 if (r < 0)
225 return r;
226
227 policy->create_default(bucket_info.owner, uinfo.display_name);
228 }
229 return 0;
230 }
231
// Read an object's ACL (and optionally its storage class) from its xattrs.
// If the object exists but carries no ACL attribute (-ENODATA), synthesize a
// default ACL owned by the bucket owner. Any other get_attr error (e.g.
// -ENOENT for a missing object) is returned to the caller.
static int get_obj_policy_from_attr(CephContext *cct,
                                    RGWRados *store,
                                    RGWObjectCtx& obj_ctx,
                                    RGWBucketInfo& bucket_info,
                                    map<string, bufferlist>& bucket_attrs,
                                    RGWAccessControlPolicy *policy,
                                    string *storage_class,
                                    rgw_obj& obj)
{
  bufferlist bl;
  int ret = 0;

  RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
  RGWRados::Object::Read rop(&op_target);

  ret = rop.get_attr(RGW_ATTR_ACL, bl);
  if (ret >= 0) {
    ret = decode_policy(cct, bl, policy);
    if (ret < 0)
      return ret;
  } else if (ret == -ENODATA) {
    /* object exists, but policy is broken */
    ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
    RGWUserInfo uinfo;
    ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
    if (ret < 0)
      return ret;

    policy->create_default(bucket_info.owner, uinfo.display_name);
  }

  // Optionally report the object's storage class; the empty string means the
  // attribute isn't present (default class).
  if (storage_class) {
    bufferlist scbl;
    int r = rop.get_attr(RGW_ATTR_STORAGE_CLASS, scbl);
    if (r >= 0) {
      *storage_class = scbl.to_str();
    } else {
      storage_class->clear();
    }
  }

  // Note: 'ret' may still hold a negative error from the first get_attr here.
  return ret;
}
275
276
277 /**
278 * Get the AccessControlPolicy for an object off of disk.
279 * policy: must point to a valid RGWACL, and will be filled upon return.
280 * bucket: name of the bucket containing the object.
281 * object: name of the object to get the ACL for.
282 * Returns: 0 on success, -ERR# otherwise.
283 */
int rgw_op_get_bucket_policy_from_attr(CephContext *cct,
                                       RGWRados *store,
                                       RGWBucketInfo& bucket_info,
                                       map<string, bufferlist>& bucket_attrs,
                                       RGWAccessControlPolicy *policy)
{
  // Public wrapper around the file-local helper of the same purpose.
  return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs, policy);
}
292
293 static boost::optional<Policy> get_iam_policy_from_attr(CephContext* cct,
294 RGWRados* store,
295 map<string, bufferlist>& attrs,
296 const string& tenant) {
297 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
298 if (i != attrs.end()) {
299 return Policy(cct, tenant, i->second);
300 } else {
301 return none;
302 }
303 }
304
305 vector<Policy> get_iam_user_policy_from_attr(CephContext* cct,
306 RGWRados* store,
307 map<string, bufferlist>& attrs,
308 const string& tenant) {
309 vector<Policy> policies;
310 if (auto it = attrs.find(RGW_ATTR_USER_POLICY); it != attrs.end()) {
311 bufferlist out_bl = attrs[RGW_ATTR_USER_POLICY];
312 map<string, string> policy_map;
313 decode(policy_map, out_bl);
314 for (auto& it : policy_map) {
315 bufferlist bl = bufferlist::static_from_string(it.second);
316 Policy p(cct, tenant, bl);
317 policies.push_back(std::move(p));
318 }
319 }
320 return policies;
321 }
322
323 static int get_obj_attrs(RGWRados *store, struct req_state *s, const rgw_obj& obj, map<string, bufferlist>& attrs)
324 {
325 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
326 RGWRados::Object::Read read_op(&op_target);
327
328 read_op.params.attrs = &attrs;
329
330 return read_op.prepare();
331 }
332
333 static int get_obj_head(RGWRados *store, struct req_state *s,
334 const rgw_obj& obj,
335 map<string, bufferlist> *attrs,
336 bufferlist *pbl)
337 {
338 store->set_prefetch_data(s->obj_ctx, obj);
339
340 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
341 RGWRados::Object::Read read_op(&op_target);
342
343 read_op.params.attrs = attrs;
344
345 int ret = read_op.prepare();
346 if (ret < 0) {
347 return ret;
348 }
349
350 if (!pbl) {
351 return 0;
352 }
353
354 ret = read_op.read(0, s->cct->_conf->rgw_max_chunk_size, *pbl);
355
356 return 0;
357 }
358
// Metadata stored in the head object of a multipart upload; records the
// destination placement rule chosen when the upload was initiated, so that
// parts and the completed object land in the right pools.
struct multipart_upload_info
{
  rgw_placement_rule dest_placement;

  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
    encode(dest_placement, bl);
    ENCODE_FINISH(bl);
  }

  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(1, bl);
    decode(dest_placement, bl);
    DECODE_FINISH(bl);
  }
};
WRITE_CLASS_ENCODER(multipart_upload_info)
376
// Read a multipart upload's meta object: optionally decode the
// multipart_upload_info stored in its head data (when upload_info != null)
// and optionally extract the upload's ACL from the returned attrs (when both
// policy and attrs are non-null). A missing meta object (-ENOENT) is mapped
// to -ERR_NO_SUCH_UPLOAD; decode failures yield -EIO.
static int get_multipart_info(RGWRados *store, struct req_state *s,
                              const rgw_obj& obj,
                              RGWAccessControlPolicy *policy,
                              map<string, bufferlist> *attrs,
                              multipart_upload_info *upload_info)
{
  bufferlist header;

  bufferlist headbl;
  // only read head data when the caller wants the upload_info decoded
  bufferlist *pheadbl = (upload_info ? &headbl : nullptr);

  int op_ret = get_obj_head(store, s, obj, attrs, pheadbl);
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      return -ERR_NO_SUCH_UPLOAD;
    }
    return op_ret;
  }

  if (upload_info && headbl.length() > 0) {
    auto hiter = headbl.cbegin();
    try {
      decode(*upload_info, hiter);
    } catch (buffer::error& err) {
      ldpp_dout(s, 0) << "ERROR: failed to decode multipart upload info" << dendl;
      return -EIO;
    }
  }

  // Locate the ACL attribute among the returned attrs and decode it.
  if (policy && attrs) {
    for (auto& iter : *attrs) {
      string name = iter.first;
      if (name.compare(RGW_ATTR_ACL) == 0) {
        bufferlist& bl = iter.second;
        auto bli = bl.cbegin();
        try {
          decode(*policy, bli);
        } catch (buffer::error& err) {
          ldpp_dout(s, 0) << "ERROR: could not decode policy" << dendl;
          return -EIO;
        }
        break;
      }
    }
  }

  return 0;
}
425
426 static int get_multipart_info(RGWRados *store, struct req_state *s,
427 const string& meta_oid,
428 RGWAccessControlPolicy *policy,
429 map<string, bufferlist> *attrs,
430 multipart_upload_info *upload_info)
431 {
432 map<string, bufferlist>::iterator iter;
433 bufferlist header;
434
435 rgw_obj meta_obj;
436 meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
437 meta_obj.set_in_extra_data(true);
438
439 return get_multipart_info(store, s, meta_obj, policy, attrs, upload_info);
440 }
441
442 static int modify_obj_attr(RGWRados *store, struct req_state *s, const rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
443 {
444 map<string, bufferlist> attrs;
445 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
446 RGWRados::Object::Read read_op(&op_target);
447
448 read_op.params.attrs = &attrs;
449
450 int r = read_op.prepare();
451 if (r < 0) {
452 return r;
453 }
454 store->set_atomic(s->obj_ctx, read_op.state.obj);
455 attrs[attr_name] = attr_val;
456 return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL);
457 }
458
459 static int read_bucket_policy(RGWRados *store,
460 struct req_state *s,
461 RGWBucketInfo& bucket_info,
462 map<string, bufferlist>& bucket_attrs,
463 RGWAccessControlPolicy *policy,
464 rgw_bucket& bucket)
465 {
466 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
467 ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
468 << " is suspended" << dendl;
469 return -ERR_USER_SUSPENDED;
470 }
471
472 if (bucket.name.empty()) {
473 return 0;
474 }
475
476 int ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
477 if (ret == -ENOENT) {
478 ret = -ERR_NO_SUCH_BUCKET;
479 }
480
481 return ret;
482 }
483
// Load the object ACL (into *acl) and the bucket's IAM policy (into
// 'policy') for the request's target. For multipart requests ("uploadId"
// present) the ACL source is the upload's meta object instead of the target
// object. When the object does not exist, the bucket ACL/policy decide
// whether to report -ENOENT (requester may list the bucket) or -EACCES
// (existence must not be revealed).
static int read_obj_policy(RGWRados *store,
                           struct req_state *s,
                           RGWBucketInfo& bucket_info,
                           map<string, bufferlist>& bucket_attrs,
                           RGWAccessControlPolicy* acl,
                           string *storage_class,
                           boost::optional<Policy>& policy,
                           rgw_bucket& bucket,
                           rgw_obj_key& object)
{
  string upload_id;
  upload_id = s->info.args.get("uploadId");
  rgw_obj obj;

  if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
    ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
        << " is suspended" << dendl;
    return -ERR_USER_SUSPENDED;
  }

  if (!upload_id.empty()) {
    /* multipart upload */
    RGWMPObj mp(object.name, upload_id);
    string oid = mp.get_meta();
    obj.init_ns(bucket, oid, mp_ns);
    obj.set_in_extra_data(true);
  } else {
    obj = rgw_obj(bucket, object);
  }
  policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
                                     bucket_info, bucket_attrs, acl, storage_class, obj);
  if (ret == -ENOENT) {
    /* object does not exist checking the bucket's ACL to make sure
       that we send a proper error code */
    RGWAccessControlPolicy bucket_policy(s->cct);
    ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
    if (ret < 0) {
      return ret;
    }
    const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
    if (bucket_owner.compare(s->user->user_id) != 0 &&
        ! s->auth.identity->is_admin_of(bucket_owner)) {
      // Requester is neither bucket owner nor an admin of the owner: return
      // -ENOENT only if they would be allowed to list the bucket, -EACCES
      // otherwise, so that object existence is not leaked.
      if (policy) {
        auto r = policy->eval(s->env, *s->auth.identity, rgw::IAM::s3ListBucket, ARN(bucket));
        if (r == Effect::Allow)
          return -ENOENT;
        if (r == Effect::Deny)
          return -EACCES;
      }
      if (! bucket_policy.verify_permission(s, *s->auth.identity, s->perm_mask, RGW_PERM_READ))
        ret = -EACCES;
      else
        ret = -ENOENT;
    } else {
      ret = -ENOENT;
    }
  }

  return ret;
}
547
548 /**
549 * Get the AccessControlPolicy for an user, bucket or object off of disk.
550 * s: The req_state to draw information from.
551 * only_bucket: If true, reads the user and bucket ACLs rather than the object ACL.
552 * Returns: 0 on success, -ERR# otherwise.
553 */
// Load all ACLs/policies needed to authorize the request: the user (account)
// ACL for Swift, the bucket ACL, the bucket IAM policy and the per-user IAM
// policies; also fills in related req_state fields (bucket info, zonegroup
// endpoint/name, destination placement, redirect endpoint).
// Returns 0 on success, negative error otherwise.
int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
{
  int ret = 0;
  rgw_obj_key obj;
  RGWUserInfo bucket_owner_info;
  auto obj_ctx = store->svc.sysobj->init_obj_ctx();

  // Optional system parameter pinning the request to a specific bucket
  // instance/shard.
  string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
  if (!bi.empty()) {
    ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id);
    if (ret < 0) {
      return ret;
    }
  }

  // Pick the ACL flavor matching the frontend dialect.
  if(s->dialect.compare("s3") == 0) {
    s->bucket_acl = std::make_unique<RGWAccessControlPolicy_S3>(s->cct);
  } else if(s->dialect.compare("swift") == 0) {
    /* We aren't allocating the account policy for those operations using
     * the Swift's infrastructure that don't really need req_state::user.
     * Typical example here is the implementation of /info. */
    if (!s->user->user_id.empty()) {
      s->user_acl = std::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
    }
    s->bucket_acl = std::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
  } else {
    s->bucket_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
  }

  /* check if copy source is within the current domain */
  if (!s->src_bucket_name.empty()) {
    RGWBucketInfo source_info;

    if (s->bucket_instance_id.empty()) {
      ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL);
    } else {
      ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL);
    }
    if (ret == 0) {
      string& zonegroup = source_info.zonegroup;
      s->local_source = store->svc.zone->get_zonegroup().equals(zonegroup);
    }
  }

  // Identity used for the account-level (user) ACL: defaults to the
  // requester, but switches to the bucket owner once the bucket is known.
  struct {
    rgw_user uid;
    std::string display_name;
  } acct_acl_user = {
    s->user->user_id,
    s->user->display_name,
  };

  if (!s->bucket_name.empty()) {
    s->bucket_exists = true;
    if (s->bucket_instance_id.empty()) {
      ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                   s->bucket_info, &s->bucket_mtime,
                                   &s->bucket_attrs);
    } else {
      ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id,
                                            s->bucket_info, &s->bucket_mtime,
                                            &s->bucket_attrs);
    }
    if (ret < 0) {
      if (ret != -ENOENT) {
        string bucket_log;
        rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
        ldpp_dout(s, 0) << "NOTICE: couldn't get bucket from bucket_name (name="
            << bucket_log << ")" << dendl;
        return ret;
      }
      // -ENOENT is not fatal yet; remember the bucket is missing and carry on
      // so later checks have a (default) ACL to evaluate.
      s->bucket_exists = false;
    }
    s->bucket = s->bucket_info.bucket;

    if (s->bucket_exists) {
      ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs,
                               s->bucket_acl.get(), s->bucket);
      acct_acl_user = {
        s->bucket_info.owner,
        s->bucket_acl->get_owner().get_display_name(),
      };
    } else {
      s->bucket_acl->create_default(s->user->user_id, s->user->display_name);
      ret = -ERR_NO_SUCH_BUCKET;
    }

    s->bucket_owner = s->bucket_acl->get_owner();

    // Resolve an endpoint for the bucket's zonegroup (used for redirects).
    RGWZoneGroup zonegroup;
    int r = store->svc.zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
    if (!r) {
      if (!zonegroup.endpoints.empty()) {
        s->zonegroup_endpoint = zonegroup.endpoints.front();
      } else {
        // use zonegroup's master zone endpoints
        auto z = zonegroup.zones.find(zonegroup.master_zone);
        if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
          s->zonegroup_endpoint = z->second.endpoints.front();
        }
      }
      s->zonegroup_name = zonegroup.get_name();
    }
    // Only surface the zonegroup lookup error if nothing worse happened.
    if (r < 0 && ret == 0) {
      ret = r;
    }

    if (s->bucket_exists && !store->svc.zone->get_zonegroup().equals(s->bucket_info.zonegroup)) {
      ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup ("
          << s->bucket_info.zonegroup << " != "
          << store->svc.zone->get_zonegroup().get_id() << ")" << dendl;
      /* we now need to make sure that the operation actually requires copy source, that is
       * it's a copy operation
       */
      if (store->svc.zone->get_zonegroup().is_master_zonegroup() && s->system_request) {
        /*If this is the master, don't redirect*/
      } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
        /* If op is get bucket location, don't redirect */
      } else if (!s->local_source ||
                 (s->op != OP_PUT && s->op != OP_COPY) ||
                 s->object.empty()) {
        return -ERR_PERMANENT_REDIRECT;
      }
    }

    /* init dest placement -- only if bucket exists, otherwise request is either not relevant, or
     * it's a create_bucket request, in which case the op will deal with the placement later */
    if (s->bucket_exists) {
      s->dest_placement.storage_class = s->info.storage_class;
      s->dest_placement.inherit_from(s->bucket_info.placement_rule);

      if (!store->svc.zone->get_zone_params().valid_placement(s->dest_placement)) {
        ldpp_dout(s, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl;
        return -EINVAL;
      }
    }
  }

  /* handle user ACL only for those APIs which support it */
  if (s->user_acl) {
    map<string, bufferlist> uattrs;
    ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs);
    if (!ret) {
      ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
    }
    if (-ENOENT == ret) {
      /* In already existing clusters users won't have ACL. In such case
       * assuming that only account owner has the rights seems to be
       * reasonable. That allows to have only one verification logic.
       * NOTE: there is small compatibility kludge for global, empty tenant:
       *  1. if we try to reach an existing bucket, its owner is considered
       *     as account owner.
       *  2. otherwise account owner is identity stored in s->user->user_id. */
      s->user_acl->create_default(acct_acl_user.uid,
                                  acct_acl_user.display_name);
      ret = 0;
    } else if (ret < 0) {
      ldpp_dout(s, 0) << "NOTICE: couldn't get user attrs for handling ACL "
          "(user_id=" << s->user->user_id << ", ret=" << ret << ")" << dendl;
      return ret;
    }
  }
  // We don't need user policies in case of STS token returned by AssumeRole,
  // hence the check for user type
  if (! s->user->user_id.empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
    try {
      map<string, bufferlist> uattrs;
      if (ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, uattrs); ! ret) {
        if (s->iam_user_policies.empty()) {
          s->iam_user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->user_id.tenant);
        } else {
          // This scenario can happen when a STS token has a policy, then we need to append other user policies
          // to the existing ones. (e.g. token returned by GetSessionToken)
          auto user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->user_id.tenant);
          s->iam_user_policies.insert(s->iam_user_policies.end(), user_policies.begin(), user_policies.end());
        }
      } else {
        if (ret == -ENOENT)
          ret = 0;
        else ret = -EACCES;
      }
    } catch (const std::exception& e) {
      lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl;
      ret = -EACCES;
    }
  }

  try {
    s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
                                             s->bucket_tenant);
  } catch (const std::exception& e) {
    // Really this is a can't happen condition. We parse the policy
    // when it's given to us, so perhaps we should abort or otherwise
    // raise bloody murder.
    ldpp_dout(s, 0) << "Error reading IAM Policy: " << e.what() << dendl;
    ret = -EACCES;
  }

  bool success = store->svc.zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
  if (success) {
    ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
  }

  return ret;
}
759
760 /**
761 * Get the AccessControlPolicy for a bucket or object off of disk.
762 * s: The req_state to draw information from.
763 * only_bucket: If true, reads the bucket ACL rather than the object ACL.
764 * Returns: 0 on success, -ERR# otherwise.
765 */
766 int rgw_build_object_policies(RGWRados *store, struct req_state *s,
767 bool prefetch_data)
768 {
769 int ret = 0;
770
771 if (!s->object.empty()) {
772 if (!s->bucket_exists) {
773 return -ERR_NO_SUCH_BUCKET;
774 }
775 s->object_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
776 rgw_obj obj(s->bucket, s->object);
777
778 store->set_atomic(s->obj_ctx, obj);
779 if (prefetch_data) {
780 store->set_prefetch_data(s->obj_ctx, obj);
781 }
782 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
783 s->object_acl.get(), nullptr, s->iam_policy, s->bucket,
784 s->object);
785 }
786
787 return ret;
788 }
789
790 void rgw_add_to_iam_environment(rgw::IAM::Environment& e, std::string_view key, std::string_view val){
791 // This variant just adds non empty key pairs to IAM env., values can be empty
792 // in certain cases like tagging
793 if (!key.empty())
794 e.emplace(key,val);
795 }
796
797 static int rgw_iam_add_tags_from_bl(struct req_state* s, bufferlist& bl){
798 RGWObjTags tagset;
799 try {
800 auto bliter = bl.cbegin();
801 tagset.decode(bliter);
802 } catch (buffer::error& err) {
803 ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
804 return -EIO;
805 }
806
807 for (const auto& tag: tagset.get_tags()){
808 rgw_add_to_iam_environment(s->env, "s3:ExistingObjectTag/" + tag.first, tag.second);
809 }
810 return 0;
811 }
812
813 static int rgw_iam_add_existing_objtags(RGWRados* store, struct req_state* s, rgw_obj& obj, std::uint64_t action){
814 map <string, bufferlist> attrs;
815 store->set_atomic(s->obj_ctx, obj);
816 int op_ret = get_obj_attrs(store, s, obj, attrs);
817 if (op_ret < 0)
818 return op_ret;
819 auto tags = attrs.find(RGW_ATTR_TAGS);
820 if (tags != attrs.end()){
821 return rgw_iam_add_tags_from_bl(s, tags->second);
822 }
823 return 0;
824 }
825
826 static void rgw_add_grant_to_iam_environment(rgw::IAM::Environment& e, struct req_state *s){
827
828 using header_pair_t = std::pair <const char*, const char*>;
829 static const std::initializer_list <header_pair_t> acl_header_conditionals {
830 {"HTTP_X_AMZ_GRANT_READ", "s3:x-amz-grant-read"},
831 {"HTTP_X_AMZ_GRANT_WRITE", "s3:x-amz-grant-write"},
832 {"HTTP_X_AMZ_GRANT_READ_ACP", "s3:x-amz-grant-read-acp"},
833 {"HTTP_X_AMZ_GRANT_WRITE_ACP", "s3:x-amz-grant-write-acp"},
834 {"HTTP_X_AMZ_GRANT_FULL_CONTROL", "s3:x-amz-grant-full-control"}
835 };
836
837 if (s->has_acl_header){
838 for (const auto& c: acl_header_conditionals){
839 auto hdr = s->info.env->get(c.first);
840 if(hdr) {
841 e[c.second] = hdr;
842 }
843 }
844 }
845 }
846
847 void rgw_build_iam_environment(RGWRados* store,
848 struct req_state* s)
849 {
850 const auto& m = s->info.env->get_map();
851 auto t = ceph::real_clock::now();
852 s->env.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t)));
853 s->env.emplace("aws:EpochTime", ceph::to_iso_8601(t));
854 // TODO: This is fine for now, but once we have STS we'll need to
855 // look and see. Also this won't work with the IdentityApplier
856 // model, since we need to know the actual credential.
857 s->env.emplace("aws:PrincipalType", "User");
858
859 auto i = m.find("HTTP_REFERER");
860 if (i != m.end()) {
861 s->env.emplace("aws:Referer", i->second);
862 }
863
864 if (rgw_transport_is_secure(s->cct, *s->info.env)) {
865 s->env.emplace("aws:SecureTransport", "true");
866 }
867
868 const auto remote_addr_param = s->cct->_conf->rgw_remote_addr_param;
869 if (remote_addr_param.length()) {
870 i = m.find(remote_addr_param);
871 } else {
872 i = m.find("REMOTE_ADDR");
873 }
874 if (i != m.end()) {
875 const string* ip = &(i->second);
876 string temp;
877 if (remote_addr_param == "HTTP_X_FORWARDED_FOR") {
878 const auto comma = ip->find(',');
879 if (comma != string::npos) {
880 temp.assign(*ip, 0, comma);
881 ip = &temp;
882 }
883 }
884 s->env.emplace("aws:SourceIp", *ip);
885 }
886
887 i = m.find("HTTP_USER_AGENT"); {
888 if (i != m.end())
889 s->env.emplace("aws:UserAgent", i->second);
890 }
891
892 if (s->user) {
893 // What to do about aws::userid? One can have multiple access
894 // keys so that isn't really suitable. Do we have a durable
895 // identifier that can persist through name changes?
896 s->env.emplace("aws:username", s->user->user_id.id);
897 }
898
899 i = m.find("HTTP_X_AMZ_SECURITY_TOKEN");
900 if (i != m.end()) {
901 s->env.emplace("sts:authentication", "true");
902 } else {
903 s->env.emplace("sts:authentication", "false");
904 }
905 }
906
void rgw_bucket_object_pre_exec(struct req_state *s)
{
  // Send "100 Continue" if the client asked for it, then emit the standard
  // bucket headers for the response.
  if (s->expect_cont)
    dump_continue(s);

  dump_bucket_from_state(s);
}
914
915 // So! Now and then when we try to update bucket information, the
916 // bucket has changed during the course of the operation. (Or we have
917 // a cache consistency problem that Watch/Notify isn't ruling out
918 // completely.)
919 //
920 // When this happens, we need to update the bucket info and try
921 // again. We have, however, to try the right *part* again. We can't
922 // simply re-send, since that will obliterate the previous update.
923 //
924 // Thus, callers of this function should include everything that
925 // merges information to be changed into the bucket information as
926 // well as the call to set it.
927 //
928 // The called function must return an integer, negative on error. In
929 // general, they should just return op_ret.
930 namespace {
931 template<typename F>
932 int retry_raced_bucket_write(RGWRados* g, req_state* s, const F& f) {
933 auto r = f();
934 for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) {
935 r = g->try_refresh_bucket_info(s->bucket_info, nullptr,
936 &s->bucket_attrs);
937 if (r >= 0) {
938 r = f();
939 }
940 }
941 return r;
942 }
943 }
944
945
// Authorize a GET on the target object; also primes the object context for
// atomic access and (when data will be returned) prefetching.
// Returns 0 when allowed, -EACCES otherwise.
int RGWGetObj::verify_permission()
{
  obj = rgw_obj(s->bucket, s->object);
  store->set_atomic(s->obj_ctx, obj);
  if (get_data) {
    store->set_prefetch_data(s->obj_ctx, obj);
  }

  // Select the IAM action: torrent vs plain GET, versioned vs current.
  if (torrent.get_flag()) {
    if (obj.key.instance.empty()) {
      action = rgw::IAM::s3GetObjectTorrent;
    } else {
      action = rgw::IAM::s3GetObjectVersionTorrent;
    }
  } else {
    if (obj.key.instance.empty()) {
      action = rgw::IAM::s3GetObject;
    } else {
      action = rgw::IAM::s3GetObjectVersion;
    }
    // Policies conditioned on s3:ExistingObjectTag need the object's current
    // tags loaded into the IAM environment before evaluation.
    if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG))
      rgw_iam_add_existing_objtags(store, s, obj, action);
    if (! s->iam_user_policies.empty()) {
      for (auto& user_policy : s->iam_user_policies) {
        if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG))
          rgw_iam_add_existing_objtags(store, s, obj, action);
      }
    }
  }

  if (!verify_object_permission(this, s, action)) {
    return -EACCES;
  }

  return 0;
}
982
983
984 int RGWOp::verify_op_mask()
985 {
986 uint32_t required_mask = op_mask();
987
988 ldpp_dout(this, 20) << "required_mask= " << required_mask
989 << " user.op_mask=" << s->user->op_mask << dendl;
990
991 if ((s->user->op_mask & required_mask) != required_mask) {
992 return -EPERM;
993 }
994
995 if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->svc.zone->zone_is_writeable()) {
996 ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
997 "non-system user, permission denied" << dendl;
998 return -EPERM;
999 }
1000
1001 return 0;
1002 }
1003
1004 int RGWGetObjTags::verify_permission()
1005 {
1006 auto iam_action = s->object.instance.empty()?
1007 rgw::IAM::s3GetObjectTagging:
1008 rgw::IAM::s3GetObjectVersionTagging;
1009 // TODO since we are parsing the bl now anyway, we probably change
1010 // the send_response function to accept RGWObjTag instead of a bl
1011 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1012 rgw_obj obj = rgw_obj(s->bucket, s->object);
1013 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1014 }
1015 if (! s->iam_user_policies.empty()) {
1016 for (auto& user_policy : s->iam_user_policies) {
1017 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1018 rgw_obj obj = rgw_obj(s->bucket, s->object);
1019 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1020 }
1021 }
1022 }
1023 if (!verify_object_permission(this, s,iam_action))
1024 return -EACCES;
1025
1026 return 0;
1027 }
1028
void RGWGetObjTags::pre_exec()
{
  // Standard per-request preamble (100-continue + bucket headers).
  rgw_bucket_object_pre_exec(s);
}
1033
1034 void RGWGetObjTags::execute()
1035 {
1036 rgw_obj obj;
1037 map<string,bufferlist> attrs;
1038
1039 obj = rgw_obj(s->bucket, s->object);
1040
1041 store->set_atomic(s->obj_ctx, obj);
1042
1043 op_ret = get_obj_attrs(store, s, obj, attrs);
1044 if (op_ret < 0) {
1045 ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj
1046 << " ret=" << op_ret << dendl;
1047 return;
1048 }
1049
1050 auto tags = attrs.find(RGW_ATTR_TAGS);
1051 if(tags != attrs.end()){
1052 has_tags = true;
1053 tags_bl.append(tags->second);
1054 }
1055 send_response_data(tags_bl);
1056 }
1057
1058 int RGWPutObjTags::verify_permission()
1059 {
1060 auto iam_action = s->object.instance.empty() ?
1061 rgw::IAM::s3PutObjectTagging:
1062 rgw::IAM::s3PutObjectVersionTagging;
1063
1064 if(s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1065 auto obj = rgw_obj(s->bucket, s->object);
1066 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1067 }
1068 if (! s->iam_user_policies.empty()) {
1069 for (auto& user_policy : s->iam_user_policies) {
1070 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1071 rgw_obj obj = rgw_obj(s->bucket, s->object);
1072 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1073 }
1074 }
1075 }
1076 if (!verify_object_permission(this, s,iam_action))
1077 return -EACCES;
1078 return 0;
1079 }
1080
1081 void RGWPutObjTags::execute()
1082 {
1083 op_ret = get_params();
1084 if (op_ret < 0)
1085 return;
1086
1087 if (s->object.empty()){
1088 op_ret= -EINVAL; // we only support tagging on existing objects
1089 return;
1090 }
1091
1092 rgw_obj obj;
1093 obj = rgw_obj(s->bucket, s->object);
1094 store->set_atomic(s->obj_ctx, obj);
1095 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
1096 if (op_ret == -ECANCELED){
1097 op_ret = -ERR_TAG_CONFLICT;
1098 }
1099 }
1100
// Standard object-op prologue shared by bucket/object operations.
void RGWDeleteObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
1105
1106
1107 int RGWDeleteObjTags::verify_permission()
1108 {
1109 if (!s->object.empty()) {
1110 auto iam_action = s->object.instance.empty() ?
1111 rgw::IAM::s3DeleteObjectTagging:
1112 rgw::IAM::s3DeleteObjectVersionTagging;
1113
1114 if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
1115 auto obj = rgw_obj(s->bucket, s->object);
1116 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1117 }
1118 if (! s->iam_user_policies.empty()) {
1119 for (auto& user_policy : s->iam_user_policies) {
1120 if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
1121 auto obj = rgw_obj(s->bucket, s->object);
1122 rgw_iam_add_existing_objtags(store, s, obj, iam_action);
1123 }
1124 }
1125 }
1126 if (!verify_object_permission(this, s, iam_action))
1127 return -EACCES;
1128 }
1129 return 0;
1130 }
1131
1132 void RGWDeleteObjTags::execute()
1133 {
1134 if (s->object.empty())
1135 return;
1136
1137 rgw_obj obj;
1138 obj = rgw_obj(s->bucket, s->object);
1139 store->set_atomic(s->obj_ctx, obj);
1140 map <string, bufferlist> attrs;
1141 map <string, bufferlist> rmattr;
1142 bufferlist bl;
1143 rmattr[RGW_ATTR_TAGS] = bl;
1144 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr);
1145 }
1146
1147 int RGWOp::do_aws4_auth_completion()
1148 {
1149 ldpp_dout(this, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
1150 if (s->auth.completer) {
1151 if (!s->auth.completer->complete()) {
1152 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
1153 } else {
1154 ldpp_dout(this, 10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
1155 }
1156
1157 /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
1158 * call passes, so we disable second one. This is old behaviour, sorry!
1159 * Plan for tomorrow: seek and destroy. */
1160 s->auth.completer = nullptr;
1161 }
1162
1163 return 0;
1164 }
1165
1166 int RGWOp::init_quota()
1167 {
1168 /* no quota enforcement for system requests */
1169 if (s->system_request)
1170 return 0;
1171
1172 /* init quota related stuff */
1173 if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
1174 return 0;
1175 }
1176
1177 /* only interested in object related ops */
1178 if (s->object.empty()) {
1179 return 0;
1180 }
1181
1182 RGWUserInfo owner_info;
1183 RGWUserInfo *uinfo;
1184
1185 if (s->user->user_id == s->bucket_owner.get_id()) {
1186 uinfo = s->user;
1187 } else {
1188 int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
1189 if (r < 0)
1190 return r;
1191 uinfo = &owner_info;
1192 }
1193
1194 if (s->bucket_info.quota.enabled) {
1195 bucket_quota = s->bucket_info.quota;
1196 } else if (uinfo->bucket_quota.enabled) {
1197 bucket_quota = uinfo->bucket_quota;
1198 } else {
1199 bucket_quota = store->svc.quota->get_bucket_quota();
1200 }
1201
1202 if (uinfo->user_quota.enabled) {
1203 user_quota = uinfo->user_quota;
1204 } else {
1205 user_quota = store->svc.quota->get_user_quota();
1206 }
1207
1208 return 0;
1209 }
1210
1211 static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
1212 uint8_t flags = 0;
1213
1214 if (!req_meth) {
1215 dout(5) << "req_meth is null" << dendl;
1216 return false;
1217 }
1218
1219 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
1220 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
1221 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
1222 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
1223 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
1224
1225 if (rule->get_allowed_methods() & flags) {
1226 dout(10) << "Method " << req_meth << " is supported" << dendl;
1227 } else {
1228 dout(5) << "Method " << req_meth << " is not supported" << dendl;
1229 return false;
1230 }
1231
1232 return true;
1233 }
1234
1235 static bool validate_cors_rule_header(RGWCORSRule *rule, const char *req_hdrs) {
1236 if (req_hdrs) {
1237 vector<string> hdrs;
1238 get_str_vec(req_hdrs, hdrs);
1239 for (const auto& hdr : hdrs) {
1240 if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) {
1241 dout(5) << "Header " << hdr << " is not registered in this rule" << dendl;
1242 return false;
1243 }
1244 }
1245 }
1246 return true;
1247 }
1248
1249 int RGWOp::read_bucket_cors()
1250 {
1251 bufferlist bl;
1252
1253 map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
1254 if (aiter == s->bucket_attrs.end()) {
1255 ldpp_dout(this, 20) << "no CORS configuration attr found" << dendl;
1256 cors_exist = false;
1257 return 0; /* no CORS configuration found */
1258 }
1259
1260 cors_exist = true;
1261
1262 bl = aiter->second;
1263
1264 auto iter = bl.cbegin();
1265 try {
1266 bucket_cors.decode(iter);
1267 } catch (buffer::error& err) {
1268 ldpp_dout(this, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
1269 return -EIO;
1270 }
1271 if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
1272 RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
1273 ldpp_dout(this, 15) << "Read RGWCORSConfiguration";
1274 s3cors->to_xml(*_dout);
1275 *_dout << dendl;
1276 }
1277 return 0;
1278 }
1279
1280 /** CORS 6.2.6.
1281 * If any of the header field-names is not a ASCII case-insensitive match for
1282 * any of the values in list of headers do not set any additional headers and
1283 * terminate this set of steps.
1284 * */
1285 static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
1286 if (req_hdrs) {
1287 list<string> hl;
1288 get_str_list(req_hdrs, hl);
1289 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
1290 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
1291 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
1292 } else {
1293 if (hdrs.length() > 0) hdrs.append(",");
1294 hdrs.append((*it));
1295 }
1296 }
1297 }
1298 rule->format_exp_headers(exp_hdrs);
1299 *max_age = rule->get_max_age();
1300 }
1301
1302 /**
1303 * Generate the CORS header response
1304 *
1305 * This is described in the CORS standard, section 6.2.
1306 */
bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
{
  /* CORS 6.2.1. */
  const char *orig = s->info.env->get("HTTP_ORIGIN");
  if (!orig) {
    // not a CORS request at all
    return false;
  }

  /* Custom: */
  origin = orig;
  // NOTE: read_bucket_cors() result is also stored in op_ret as a side effect
  op_ret = read_bucket_cors();
  if (op_ret < 0) {
    return false;
  }

  if (!cors_exist) {
    ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
    return false;
  }

  /* CORS 6.2.2. */
  RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
  if (!rule)
    return false;

  /*
   * Set the Allowed-Origin header to a asterisk if this is allowed in the rule
   * and no Authorization was send by the client
   *
   * The origin parameter specifies a URI that may access the resource.  The browser must enforce this.
   * For requests without credentials, the server may specify "*" as a wildcard,
   * thereby allowing any origin to access the resource.
   */
  const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
  if (!authorization && rule->has_wildcard_origin())
    origin = "*";

  /* CORS 6.2.3. */
  const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    // non-preflight request: validate the actual request method instead
    req_meth = s->info.method;
  }

  if (req_meth) {
    method = req_meth;
    /* CORS 6.2.5. */
    if (!validate_cors_rule_method(rule, req_meth)) {
      return false;
    }
  }

  /* CORS 6.2.4. */
  const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");

  /* CORS 6.2.6. */
  get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);

  return true;
}
1366
1367 int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
1368 const rgw_bucket_dir_entry& ent,
1369 RGWAccessControlPolicy * const bucket_acl,
1370 const boost::optional<Policy>& bucket_policy,
1371 const off_t start_ofs,
1372 const off_t end_ofs,
1373 bool swift_slo)
1374 {
1375 ldpp_dout(this, 20) << "user manifest obj=" << ent.key.name
1376 << "[" << ent.key.instance << "]" << dendl;
1377 RGWGetObj_CB cb(this);
1378 RGWGetObj_Filter* filter = &cb;
1379 boost::optional<RGWGetObj_Decompress> decompress;
1380
1381 int64_t cur_ofs = start_ofs;
1382 int64_t cur_end = end_ofs;
1383
1384 rgw_obj part(bucket, ent.key);
1385
1386 map<string, bufferlist> attrs;
1387
1388 uint64_t obj_size;
1389 RGWObjectCtx obj_ctx(store);
1390 RGWAccessControlPolicy obj_policy(s->cct);
1391
1392 ldpp_dout(this, 20) << "reading obj=" << part << " ofs=" << cur_ofs
1393 << " end=" << cur_end << dendl;
1394
1395 obj_ctx.set_atomic(part);
1396 store->set_prefetch_data(&obj_ctx, part);
1397
1398 RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
1399 RGWRados::Object::Read read_op(&op_target);
1400
1401 if (!swift_slo) {
1402 /* SLO etag is optional */
1403 read_op.conds.if_match = ent.meta.etag.c_str();
1404 }
1405 read_op.params.attrs = &attrs;
1406 read_op.params.obj_size = &obj_size;
1407
1408 op_ret = read_op.prepare();
1409 if (op_ret < 0)
1410 return op_ret;
1411 op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
1412 if (op_ret < 0)
1413 return op_ret;
1414 bool need_decompress;
1415 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
1416 if (op_ret < 0) {
1417 ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl;
1418 return -EIO;
1419 }
1420
1421 if (need_decompress)
1422 {
1423 if (cs_info.orig_size != ent.meta.accounted_size) {
1424 // hmm.. something wrong, object not as expected, abort!
1425 ldpp_dout(this, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size
1426 << ", actual read size=" << ent.meta.size << dendl;
1427 return -EIO;
1428 }
1429 decompress.emplace(s->cct, &cs_info, partial_content, filter);
1430 filter = &*decompress;
1431 }
1432 else
1433 {
1434 if (obj_size != ent.meta.size) {
1435 // hmm.. something wrong, object not as expected, abort!
1436 ldpp_dout(this, 0) << "ERROR: expected obj_size=" << obj_size
1437 << ", actual read size=" << ent.meta.size << dendl;
1438 return -EIO;
1439 }
1440 }
1441
1442 op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
1443 if (op_ret < 0)
1444 return op_ret;
1445
1446 /* We can use global user_acl because LOs cannot have segments
1447 * stored inside different accounts. */
1448 if (s->system_request) {
1449 ldpp_dout(this, 2) << "overriding permissions due to system operation" << dendl;
1450 } else if (s->auth.identity->is_admin_of(s->user->user_id)) {
1451 ldpp_dout(this, 2) << "overriding permissions due to admin operation" << dendl;
1452 } else if (!verify_object_permission(this, s, part, s->user_acl.get(), bucket_acl,
1453 &obj_policy, bucket_policy, s->iam_user_policies, action)) {
1454 return -EPERM;
1455 }
1456 if (ent.meta.size == 0) {
1457 return 0;
1458 }
1459
1460 perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
1461 filter->fixup_range(cur_ofs, cur_end);
1462 op_ret = read_op.iterate(cur_ofs, cur_end, filter);
1463 if (op_ret >= 0)
1464 op_ret = filter->flush();
1465 return op_ret;
1466 }
1467
/* Walk all objects under obj_prefix in *pbucket_info, in listing order,
 * treating their concatenation as one logical object (Swift DLO).
 *
 * For the byte window [ofs, end] this computes, per part, the sub-range
 * [start_ofs, end_ofs) that intersects the window and invokes cb on it
 * (when cb is given).  Optional out-params:
 *   - ptotal_len: bytes of the window covered by the parts,
 *   - pobj_size:  total size of all parts (the DLO's full size),
 *   - pobj_sum:   DLO etag = MD5 over the concatenated part etag strings.
 * Returns 0 on success or the first negative error from listing/cb.
 */
static int iterate_user_manifest_parts(CephContext * const cct,
                                       RGWRados * const store,
                                       const off_t ofs,
                                       const off_t end,
                                       RGWBucketInfo *pbucket_info,
                                       const string& obj_prefix,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const boost::optional<Policy>& bucket_policy,
                                       uint64_t * const ptotal_len,
                                       uint64_t * const pobj_size,
                                       string * const pobj_sum,
                                       int (*cb)(rgw_bucket& bucket,
                                                 const rgw_bucket_dir_entry& ent,
                                                 RGWAccessControlPolicy * const bucket_acl,
                                                 const boost::optional<Policy>& bucket_policy,
                                                 off_t start_ofs,
                                                 off_t end_ofs,
                                                 void *param,
                                                 bool swift_slo),
                                       void * const cb_param)
{
  rgw_bucket& bucket = pbucket_info->bucket;
  uint64_t obj_ofs = 0, len_count = 0;
  bool found_start = false, found_end = false, handled_end = false;
  string delim;
  bool is_truncated;
  vector<rgw_bucket_dir_entry> objs;

  utime_t start_time = ceph_clock_now();

  RGWRados::Bucket target(store, *pbucket_info);
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = obj_prefix;
  list_op.params.delim = delim;

  MD5 etag_sum;
  do {
#define MAX_LIST_OBJS 100
    int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
    if (r < 0) {
      return r;
    }

    for (rgw_bucket_dir_entry& ent : objs) {
      // cur_total_len: bytes of the DLO preceding this part
      const uint64_t cur_total_len = obj_ofs;
      const uint64_t obj_size = ent.meta.accounted_size;
      uint64_t start_ofs = 0, end_ofs = obj_size;

      // first part that reaches past `ofs` starts the window
      if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
        start_ofs = ofs - obj_ofs;
        found_start = true;
      }

      obj_ofs += obj_size;
      if (pobj_sum) {
        etag_sum.Update((const unsigned char *)ent.meta.etag.c_str(),
                        ent.meta.etag.length());
      }

      // first part that reaches past `end` (inclusive) closes the window
      if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
        end_ofs = end - cur_total_len + 1;
        found_end = true;
      }

      perfcounter->tinc(l_rgw_get_lat,
                        (ceph_clock_now() - start_time));

      // handled_end lags found_end by one part so the closing part itself
      // is still processed
      if (found_start && !handled_end) {
        len_count += end_ofs - start_ofs;

        if (cb) {
          r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs,
                 cb_param, false /* swift_slo */);
          if (r < 0) {
            return r;
          }
        }
      }

      handled_end = found_end;
      start_time = ceph_clock_now();
    }
  } while (is_truncated);

  if (ptotal_len) {
    *ptotal_len = len_count;
  }
  if (pobj_size) {
    *pobj_size = obj_ofs;
  }
  if (pobj_sum) {
    complete_etag(etag_sum, pobj_sum);
  }

  return 0;
}
1565
// One segment of a Swift Static Large Object manifest, resolved to the
// bucket plus the ACL/policy needed to read it.  The ACL and policy
// pointers are borrowed (owned by the caller that builds the part map) and
// must outlive the part's use; bucket_policy may be null.
struct rgw_slo_part {
  RGWAccessControlPolicy *bucket_acl = nullptr;
  Policy* bucket_policy = nullptr;
  rgw_bucket bucket;
  string obj_name;
  uint64_t size = 0;  // segment size in bytes
  string etag;        // expected segment etag; may be empty
};
1574
/* Invoke cb on every SLO segment overlapping the byte window [ofs, end].
 * slo_parts maps each segment's starting offset within the logical object
 * to its description; per-segment sub-ranges are computed the same way as
 * in iterate_user_manifest_parts().  Returns 0 or the first cb error. */
static int iterate_slo_parts(CephContext *cct,
                             RGWRados *store,
                             off_t ofs,
                             off_t end,
                             map<uint64_t, rgw_slo_part>& slo_parts,
                             int (*cb)(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy *bucket_acl,
                                       const boost::optional<Policy>& bucket_policy,
                                       off_t start_ofs,
                                       off_t end_ofs,
                                       void *param,
                                       bool swift_slo),
                             void *cb_param)
{
  bool found_start = false, found_end = false;

  if (slo_parts.empty()) {
    return 0;
  }

  utime_t start_time = ceph_clock_now();

  // position on the last part that starts at or before ofs
  map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
  if (iter != slo_parts.begin()) {
    --iter;
  }

  uint64_t obj_ofs = iter->first;

  for (; iter != slo_parts.end() && !found_end; ++iter) {
    rgw_slo_part& part = iter->second;
    rgw_bucket_dir_entry ent;

    // synthesize a dir entry so the shared manifest callback can be reused
    ent.key.name = part.obj_name;
    ent.meta.accounted_size = ent.meta.size = part.size;
    ent.meta.etag = part.etag;

    // cur_total_len: bytes of the logical object preceding this part
    uint64_t cur_total_len = obj_ofs;
    uint64_t start_ofs = 0, end_ofs = ent.meta.size;

    if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
      start_ofs = ofs - obj_ofs;
      found_start = true;
    }

    obj_ofs += ent.meta.size;

    if (!found_end && obj_ofs > (uint64_t)end) {
      end_ofs = end - cur_total_len + 1;
      found_end = true;
    }

    perfcounter->tinc(l_rgw_get_lat,
                      (ceph_clock_now() - start_time));

    if (found_start) {
      if (cb) {
        // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
        int r = cb(part.bucket, ent, part.bucket_acl,
                   (part.bucket_policy ?
                    boost::optional<Policy>(*part.bucket_policy) : none),
                   start_ofs, end_ofs, cb_param, true /* swift_slo */);
        if (r < 0)
          return r;
      }
    }

    start_time = ceph_clock_now();
  }

  return 0;
}
1648
/* Trampoline matching the C-style callback signature expected by
 * iterate_user_manifest_parts()/iterate_slo_parts(); `param` carries the
 * RGWGetObj instance whose read_user_manifest_part() does the work. */
static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
                                            const rgw_bucket_dir_entry& ent,
                                            RGWAccessControlPolicy * const bucket_acl,
                                            const boost::optional<Policy>& bucket_policy,
                                            const off_t start_ofs,
                                            const off_t end_ofs,
                                            void * const param,
                                            bool swift_slo = false)
{
  RGWGetObj *op = static_cast<RGWGetObj *>(param);
  return op->read_user_manifest_part(
    bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, swift_slo);
}
1662
/* Handle GET on a Swift Dynamic Large Object.  `prefix` has the form
 * "<container>/<object-prefix>" (URL-encoded); all objects under that
 * prefix in listing order form the logical object.  Performs up to three
 * listing passes: (1) full size + etag, (2) length of the requested range,
 * (3) for data requests, the pass that actually streams matching parts. */
int RGWGetObj::handle_user_manifest(const char *prefix)
{
  const boost::string_view prefix_view(prefix);
  ldpp_dout(this, 2) << "RGWGetObj::handle_user_manifest() prefix="
                     << prefix_view << dendl;

  const size_t pos = prefix_view.find('/');
  if (pos == string::npos) {
    return -EINVAL;
  }

  const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
  const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));

  rgw_bucket bucket;

  RGWAccessControlPolicy _bucket_acl(s->cct);
  RGWAccessControlPolicy *bucket_acl;
  boost::optional<Policy> _bucket_policy;
  boost::optional<Policy>* bucket_policy;
  RGWBucketInfo bucket_info;
  RGWBucketInfo *pbucket_info;

  if (bucket_name.compare(s->bucket.name) != 0) {
    // parts live in a different container: load its info, ACL and policy
    map<string, bufferlist> bucket_attrs;
    auto obj_ctx = store->svc.sysobj->init_obj_ctx();
    int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   bucket_name, bucket_info, NULL,
                                   &bucket_attrs);
    if (r < 0) {
      ldpp_dout(this, 0) << "could not get bucket info for bucket="
                         << bucket_name << dendl;
      return r;
    }
    bucket = bucket_info.bucket;
    pbucket_info = &bucket_info;
    bucket_acl = &_bucket_acl;
    r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
    if (r < 0) {
      ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
      return r;
    }
    _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
                                              bucket_info.bucket.tenant);
    bucket_policy = &_bucket_policy;
  } else {
    // same bucket as the manifest object: reuse the request's state
    bucket = s->bucket;
    pbucket_info = &s->bucket_info;
    bucket_acl = s->bucket_acl.get();
    bucket_policy = &s->iam_policy;
  }

  /* dry run to find out:
   * - total length (of the parts we are going to send to client),
   * - overall DLO's content size,
   * - md5 sum of overall DLO's content (for etag of Swift API). */
  int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, &s->obj_size, &lo_etag,
        nullptr /* cb */, nullptr /* cb arg */);
  if (r < 0) {
    return r;
  }

  // clamp the requested range against the now-known full object size
  r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
  if (r < 0) {
    return r;
  }

  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        &total_len, nullptr, nullptr,
        nullptr, nullptr);
  if (r < 0) {
    return r;
  }

  if (!get_data) {
    // HEAD-style request: headers only
    bufferlist bl;
    send_response_data(bl, 0, 0);
    return 0;
  }

  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, nullptr, nullptr,
        get_obj_user_manifest_iterate_cb, (void *)this);
  if (r < 0) {
    return r;
  }

  if (!total_len) {
    // nothing intersected the range; still terminate the response
    bufferlist bl;
    send_response_data(bl, 0, 0);
  }

  return 0;
}
1761
1762 int RGWGetObj::handle_slo_manifest(bufferlist& bl)
1763 {
1764 RGWSLOInfo slo_info;
1765 auto bliter = bl.cbegin();
1766 try {
1767 decode(slo_info, bliter);
1768 } catch (buffer::error& err) {
1769 ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
1770 return -EIO;
1771 }
1772 ldpp_dout(this, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
1773
1774 vector<RGWAccessControlPolicy> allocated_acls;
1775 map<string, pair<RGWAccessControlPolicy *, boost::optional<Policy>>> policies;
1776 map<string, rgw_bucket> buckets;
1777
1778 map<uint64_t, rgw_slo_part> slo_parts;
1779
1780 MD5 etag_sum;
1781 total_len = 0;
1782
1783 for (const auto& entry : slo_info.entries) {
1784 const string& path = entry.path;
1785
1786 /* If the path starts with slashes, strip them all. */
1787 const size_t pos_init = path.find_first_not_of('/');
1788 /* According to the documentation of std::string::find following check
1789 * is not necessary as we should get the std::string::npos propagation
1790 * here. This might be true with the accuracy to implementation's bugs.
1791 * See following question on SO:
1792 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
1793 */
1794 if (pos_init == string::npos) {
1795 return -EINVAL;
1796 }
1797
1798 const size_t pos_sep = path.find('/', pos_init);
1799 if (pos_sep == string::npos) {
1800 return -EINVAL;
1801 }
1802
1803 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
1804 string obj_name = path.substr(pos_sep + 1);
1805
1806 rgw_bucket bucket;
1807 RGWAccessControlPolicy *bucket_acl;
1808 Policy* bucket_policy;
1809
1810 if (bucket_name.compare(s->bucket.name) != 0) {
1811 const auto& piter = policies.find(bucket_name);
1812 if (piter != policies.end()) {
1813 bucket_acl = piter->second.first;
1814 bucket_policy = piter->second.second.get_ptr();
1815 bucket = buckets[bucket_name];
1816 } else {
1817 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
1818 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
1819
1820 RGWBucketInfo bucket_info;
1821 map<string, bufferlist> bucket_attrs;
1822 auto obj_ctx = store->svc.sysobj->init_obj_ctx();
1823 int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
1824 bucket_name, bucket_info, nullptr,
1825 &bucket_attrs);
1826 if (r < 0) {
1827 ldpp_dout(this, 0) << "could not get bucket info for bucket="
1828 << bucket_name << dendl;
1829 return r;
1830 }
1831 bucket = bucket_info.bucket;
1832 bucket_acl = &_bucket_acl;
1833 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
1834 bucket);
1835 if (r < 0) {
1836 ldpp_dout(this, 0) << "failed to read bucket ACL for bucket "
1837 << bucket << dendl;
1838 return r;
1839 }
1840 auto _bucket_policy = get_iam_policy_from_attr(
1841 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
1842 bucket_policy = _bucket_policy.get_ptr();
1843 buckets[bucket_name] = bucket;
1844 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
1845 }
1846 } else {
1847 bucket = s->bucket;
1848 bucket_acl = s->bucket_acl.get();
1849 bucket_policy = s->iam_policy.get_ptr();
1850 }
1851
1852 rgw_slo_part part;
1853 part.bucket_acl = bucket_acl;
1854 part.bucket_policy = bucket_policy;
1855 part.bucket = bucket;
1856 part.obj_name = obj_name;
1857 part.size = entry.size_bytes;
1858 part.etag = entry.etag;
1859 ldpp_dout(this, 20) << "slo_part: ofs=" << ofs
1860 << " bucket=" << part.bucket
1861 << " obj=" << part.obj_name
1862 << " size=" << part.size
1863 << " etag=" << part.etag
1864 << dendl;
1865
1866 etag_sum.Update((const unsigned char *)entry.etag.c_str(),
1867 entry.etag.length());
1868
1869 slo_parts[total_len] = part;
1870 total_len += part.size;
1871 } /* foreach entry */
1872
1873 complete_etag(etag_sum, &lo_etag);
1874
1875 s->obj_size = slo_info.total_size;
1876 ldpp_dout(this, 20) << "s->obj_size=" << s->obj_size << dendl;
1877
1878 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
1879 if (r < 0) {
1880 return r;
1881 }
1882
1883 total_len = end - ofs + 1;
1884
1885 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
1886 get_obj_user_manifest_iterate_cb, (void *)this);
1887 if (r < 0) {
1888 return r;
1889 }
1890
1891 return 0;
1892 }
1893
1894 int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
1895 {
1896 /* garbage collection related handling */
1897 utime_t start_time = ceph_clock_now();
1898 if (start_time > gc_invalidate_time) {
1899 int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
1900 if (r < 0) {
1901 ldpp_dout(this, 0) << "WARNING: could not defer gc entry for obj" << dendl;
1902 }
1903 gc_invalidate_time = start_time;
1904 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1905 }
1906 return send_response_data(bl, bl_ofs, bl_len);
1907 }
1908
1909 bool RGWGetObj::prefetch_data()
1910 {
1911 /* HEAD request, stop prefetch*/
1912 if (!get_data) {
1913 return false;
1914 }
1915
1916 bool prefetch_first_chunk = true;
1917 range_str = s->info.env->get("HTTP_RANGE");
1918
1919 if (range_str) {
1920 int r = parse_range();
1921 /* error on parsing the range, stop prefetch and will fail in execute() */
1922 if (r < 0) {
1923 return false; /* range_parsed==false */
1924 }
1925 /* range get goes to shadow objects, stop prefetch */
1926 if (ofs >= s->cct->_conf->rgw_max_chunk_size) {
1927 prefetch_first_chunk = false;
1928 }
1929 }
1930
1931 return get_data && prefetch_first_chunk;
1932 }
1933
// Standard object-op prologue shared by bucket/object operations.
void RGWGetObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
1938
1939 static bool object_is_expired(map<string, bufferlist>& attrs) {
1940 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
1941 if (iter != attrs.end()) {
1942 utime_t delete_at;
1943 try {
1944 decode(delete_at, iter->second);
1945 } catch (buffer::error& err) {
1946 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
1947 return false;
1948 }
1949
1950 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
1951 return true;
1952 }
1953 }
1954
1955 return false;
1956 }
1957
// Main GetObj flow: prepare the conditional read, then branch on object
// flavor -- stat-only, torrent file, DLO manifest, SLO manifest, or plain
// object -- and stream data through the decompress/decrypt filter chain.
// Errors funnel through done_err, which emits the error response.
void RGWGetObj::execute()
{
  bufferlist bl;
  gc_invalidate_time = ceph_clock_now();
  gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);

  bool need_decompress;
  int64_t ofs_x, end_x;

  RGWGetObj_CB cb(this);
  RGWGetObj_Filter* filter = (RGWGetObj_Filter *)&cb;
  boost::optional<RGWGetObj_Decompress> decompress;
  std::unique_ptr<RGWGetObj_Filter> decrypt;
  map<string, bufferlist>::iterator attr_iter;

  perfcounter->inc(l_rgw_get);

  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
  RGWRados::Object::Read read_op(&op_target);

  op_ret = get_params();
  if (op_ret < 0)
    goto done_err;

  op_ret = init_common();
  if (op_ret < 0)
    goto done_err;

  // wire up the HTTP conditional headers and output params for the read
  read_op.conds.mod_ptr = mod_ptr;
  read_op.conds.unmod_ptr = unmod_ptr;
  read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
  read_op.conds.mod_zone_id = mod_zone_id;
  read_op.conds.mod_pg_ver = mod_pg_ver;
  read_op.conds.if_match = if_match;
  read_op.conds.if_nomatch = if_nomatch;
  read_op.params.attrs = &attrs;
  read_op.params.lastmod = &lastmod;
  read_op.params.obj_size = &s->obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    goto done_err;
  version_id = read_op.state.obj.key.instance;

  /* STAT ops don't need data, and do no i/o */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  /* start gettorrent */
  if (torrent.get_flag())
  {
    attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
    if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
      ldpp_dout(this, 0) << "ERROR: torrents are not supported for objects "
          "encrypted with SSE-C" << dendl;
      op_ret = -EINVAL;
      goto done_err;
    }
    torrent.init(s, store);
    op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj);
    if (op_ret < 0)
    {
      ldpp_dout(this, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
          << dendl;
      goto done_err;
    }
    op_ret = send_response_data(bl, 0, total_len);
    if (op_ret < 0)
    {
      ldpp_dout(this, 0) << "ERROR: failed to send_response_data ret= " << op_ret << dendl;
      goto done_err;
    }
    return;
  }
  /* end gettorrent */

  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    ldpp_dout(s, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    goto done_err;
  }
  if (need_decompress) {
    // report the logical (uncompressed) size to the client
    s->obj_size = cs_info.orig_size;
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }

  // DLO: the object body is replaced by the parts named by the manifest
  attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    op_ret = handle_user_manifest(attr_iter->second.c_str());
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "ERROR: failed to handle user manifest ret="
          << op_ret << dendl;
      goto done_err;
    }
    return;
  }

  // SLO: likewise, but with an explicit segment list
  attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    is_slo = true;
    op_ret = handle_slo_manifest(attr_iter->second);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
          << dendl;
      goto done_err;
    }
    return;
  }

  // for range requests with obj size 0
  if (range_str && !(s->obj_size)) {
    total_len = 0;
    op_ret = -ERANGE;
    goto done_err;
  }

  op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
  if (op_ret < 0)
    goto done_err;
  total_len = (ofs <= end ? end + 1 - ofs : 0);

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(attrs)) {
    op_ret = -ENOENT;
    goto done_err;
  }

  start = ofs;

  // install the decrypt filter (if the object is encrypted) ahead of the
  // decompress filter / client callback
  attr_iter = attrs.find(RGW_ATTR_MANIFEST);
  op_ret = this->get_decrypt_filter(&decrypt, filter,
                                    attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
  if (decrypt != nullptr) {
    filter = decrypt.get();
  }
  if (op_ret < 0) {
    goto done_err;
  }

  if (!get_data || ofs > end) {
    // HEAD request or empty range: headers only
    send_response_data(bl, 0, 0);
    return;
  }

  perfcounter->inc(l_rgw_get_b, end - ofs);

  ofs_x = ofs;
  end_x = end;
  filter->fixup_range(ofs_x, end_x);
  op_ret = read_op.iterate(ofs_x, end_x, filter);

  if (op_ret >= 0)
    op_ret = filter->flush();

  perfcounter->tinc(l_rgw_get_lat, s->time_elapsed());
  if (op_ret < 0) {
    goto done_err;
  }

  op_ret = send_response_data(bl, 0, 0);
  if (op_ret < 0) {
    goto done_err;
  }
  return;

done_err:
  send_response_data_error();
}
2129
2130 int RGWGetObj::init_common()
2131 {
2132 if (range_str) {
2133 /* range parsed error when prefetch */
2134 if (!range_parsed) {
2135 int r = parse_range();
2136 if (r < 0)
2137 return r;
2138 }
2139 }
2140 if (if_mod) {
2141 if (parse_time(if_mod, &mod_time) < 0)
2142 return -EINVAL;
2143 mod_ptr = &mod_time;
2144 }
2145
2146 if (if_unmod) {
2147 if (parse_time(if_unmod, &unmod_time) < 0)
2148 return -EINVAL;
2149 unmod_ptr = &unmod_time;
2150 }
2151
2152 return 0;
2153 }
2154
2155 int RGWListBuckets::verify_permission()
2156 {
2157 rgw::IAM::Partition partition = rgw::IAM::Partition::aws;
2158 rgw::IAM::Service service = rgw::IAM::Service::s3;
2159
2160 if (!verify_user_permission(this, s, ARN(partition, service, "", s->user->user_id.tenant, "*"), rgw::IAM::s3ListAllMyBuckets)) {
2161 return -EACCES;
2162 }
2163
2164 return 0;
2165 }
2166
2167 int RGWGetUsage::verify_permission()
2168 {
2169 if (s->auth.identity->is_anonymous()) {
2170 return -EACCES;
2171 }
2172
2173 return 0;
2174 }
2175
/* Stream the account's bucket list to the client in chunks of at most
 * rgw_list_buckets_max_chunk entries, accumulating global and per-placement
 * usage stats along the way. The response is emitted incrementally:
 * send_response_begin() once, then one handle_listing_chunk() per chunk,
 * then send_response_end(). */
void RGWListBuckets::execute()
{
  bool done;                  // set each iteration before the loop condition reads it
  bool started = false;       // whether send_response_begin() has been emitted
  uint64_t total_count = 0;   // buckets returned so far, for the 'limit' cap

  const uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  op_ret = get_params();
  if (op_ret < 0) {
    goto send_end;
  }

  /* Swift account-metadata support: load the user's attrs up front. */
  if (supports_account_metadata()) {
    op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
    if (op_ret < 0) {
      goto send_end;
    }
  }

  is_truncated = false;
  do {
    RGWUserBuckets buckets;
    uint64_t read_count;
    /* limit < 0 means "no client-imposed limit"; otherwise never read more
     * than the remaining budget in one chunk. */
    if (limit >= 0) {
      read_count = min(limit - total_count, max_buckets);
    } else {
      read_count = max_buckets;
    }

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, end_marker, read_count,
                                   should_get_stats(), &is_truncated,
                                   get_default_max());
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
                          << s->user->user_id << dendl;
      break;
    }

    /* We need to have stats for all our policies - even if a given policy
     * isn't actually used in a given account. In such situation its usage
     * stats would be simply full of zeros. */
    for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
      policies_stats.emplace(policy.second.name,
                             decltype(policies_stats)::mapped_type());
    }

    std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
    for (const auto& kv : m) {
      const auto& bucket = kv.second;

      global_stats.bytes_used += bucket.size;
      global_stats.bytes_used_rounded += bucket.size_rounded;
      global_stats.objects_count += bucket.count;

      /* operator[] still can create a new entry for storage policy seen
       * for first time. */
      auto& policy_stats = policies_stats[bucket.placement_rule.to_str()];
      policy_stats.bytes_used += bucket.size;
      policy_stats.bytes_used_rounded += bucket.size_rounded;
      policy_stats.buckets_count++;
      policy_stats.objects_count += bucket.count;
    }
    global_stats.buckets_count += m.size();
    total_count += m.size();

    /* Done when the backend returned a short chunk or the client limit is hit. */
    done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));

    if (!started) {
      send_response_begin(buckets.count() > 0);
      started = true;
    }

    if (!m.empty()) {
      /* Advance the pagination marker to the last (lexicographically
       * greatest) bucket name in this chunk. */
      map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
      marker = riter->first;

      handle_listing_chunk(std::move(buckets));
    }
  } while (is_truncated && !done);

send_end:
  /* Even on early failure the response framing must be emitted. */
  if (!started) {
    send_response_begin(false);
  }
  send_response_end();
}
2266
/* Collect usage information for the authenticated user: raw usage log
 * entries within [start_date, end_date], per-bucket stats, and the user
 * header. Results are left in the 'usage', 'buckets_usage' and 'header'
 * members for the response formatter; op_ret carries any error. */
void RGWGetUsage::execute()
{
  uint64_t start_epoch = 0;
  uint64_t end_epoch = (uint64_t)-1;   // default: unbounded upper end
  op_ret = get_params();
  if (op_ret < 0)
    return;
    
  if (!start_date.empty()) {
    op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "ERROR: failed to parse start date" << dendl;
      return;
    }
  }
    
  if (!end_date.empty()) {
    op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "ERROR: failed to parse end date" << dendl;
      return;
    }
  }
     
  uint32_t max_entries = 1000;   // page size for the usage-log iteration

  bool is_truncated = true;

  RGWUsageIter usage_iter;

  /* Page through the usage log until the backend reports no more entries. */
  while (is_truncated) {
    op_ret = store->read_usage(s->user->user_id, s->bucket_name, start_epoch, end_epoch, max_entries,
                               &is_truncated, usage_iter, usage);

    /* No usage log at all is not an error for this op — report empty usage. */
    if (op_ret == -ENOENT) {
      op_ret = 0;
      is_truncated = false;
    }

    if (op_ret < 0) {
      return;
    }
  }

  /* Refresh the user's stats before reading them, so the numbers below
   * reflect recent activity. */
  op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl;
    return;
  }

  op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl;
    return;
  }

  string user_str = s->user->user_id.to_str();
  op_ret = store->cls_user_get_header(user_str, &header);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl;
    return;
  }
  
  return;
}
2332
2333 int RGWStatAccount::verify_permission()
2334 {
2335 if (!verify_user_permission_no_policy(this, s, RGW_PERM_READ)) {
2336 return -EACCES;
2337 }
2338
2339 return 0;
2340 }
2341
/* Aggregate account-wide usage (global and per-placement-policy) by paging
 * through all of the user's buckets. Unlike RGWListBuckets::execute this
 * produces stats only; no listing is streamed to the client. */
void RGWStatAccount::execute()
{
  string marker;               // pagination cursor, advanced by the read call
  bool is_truncated = false;
  uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  do {
    RGWUserBuckets buckets;

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker,
				   string(), max_buckets, true, &is_truncated);
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
			  << s->user->user_id << dendl;
      break;
    } else {
      /* We need to have stats for all our policies - even if a given policy
       * isn't actually used in a given account. In such situation its usage
       * stats would be simply full of zeros. */
      for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
        policies_stats.emplace(policy.second.name,
                               decltype(policies_stats)::mapped_type());
      }

      std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
      for (const auto& kv : m) {
        const auto& bucket = kv.second;

        global_stats.bytes_used += bucket.size;
        global_stats.bytes_used_rounded += bucket.size_rounded;
        global_stats.objects_count += bucket.count;

        /* operator[] still can create a new entry for storage policy seen
         * for first time. */
        auto& policy_stats = policies_stats[bucket.placement_rule.to_str()];
        policy_stats.bytes_used += bucket.size;
        policy_stats.bytes_used_rounded += bucket.size_rounded;
        policy_stats.buckets_count++;
        policy_stats.objects_count += bucket.count;
      }
      global_stats.buckets_count += m.size();

    }
  } while (is_truncated);
}
2389
2390 int RGWGetBucketVersioning::verify_permission()
2391 {
2392 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
2393 }
2394
/* Standard per-request bucket/object pre-execution hook. */
void RGWGetBucketVersioning::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2399
/* Snapshot the versioning-related flags from the cached bucket info;
 * the response formatter reads these members. */
void RGWGetBucketVersioning::execute()
{
  versioned = s->bucket_info.versioned();
  versioning_enabled = s->bucket_info.versioning_enabled();
  mfa_enabled = s->bucket_info.mfa_enabled();
}
2406
2407 int RGWSetBucketVersioning::verify_permission()
2408 {
2409 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
2410 }
2411
/* Standard per-request bucket/object pre-execution hook. */
void RGWSetBucketVersioning::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2416
/* Apply a PutBucketVersioning request: optionally toggle the MFA-delete
 * flag (requires a verified MFA token) and set the bucket's versioning
 * state, retrying on racing writers via retry_raced_bucket_write(). On a
 * non-master zone the request is first forwarded to the metadata master. */
void RGWSetBucketVersioning::execute()
{
  op_ret = get_params();
  if (op_ret < 0)
    return;

  bool cur_mfa_status = (s->bucket_info.flags & BUCKET_MFA_ENABLED) != 0;

  /* Only treat this as an MFA change if the requested state actually
   * differs from the current one. */
  mfa_set_status &= (mfa_status != cur_mfa_status);

  /* Changing MFA-delete demands a verified MFA token on the request. */
  if (mfa_set_status &&
      !s->mfa_verified) {
    op_ret = -ERR_MFA_REQUIRED;
    return;
  }

  if (!store->svc.zone->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  bool modified = mfa_set_status;

  /* The lambda re-runs on stale-bucket-info races; each attempt recomputes
   * the flags from the freshly loaded s->bucket_info. */
  op_ret = retry_raced_bucket_write(store, s, [&] {
      if (mfa_set_status) {
        if (mfa_status) {
          s->bucket_info.flags |= BUCKET_MFA_ENABLED;
        } else {
          s->bucket_info.flags &= ~BUCKET_MFA_ENABLED;
        }
      }

      if (versioning_status == VersioningEnabled) {
	s->bucket_info.flags |= BUCKET_VERSIONED;
	s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
        modified = true;
      } else if (versioning_status == VersioningSuspended) {
	s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
        modified = true;
      } else {
	return op_ret;  // no versioning change requested: skip the write
      }
      return store->put_bucket_instance_info(s->bucket_info, false, real_time(),
					     &s->bucket_attrs);
    });

  /* Nothing was (or needed to be) written. */
  if (!modified) {
    return;
  }

  if (op_ret < 0) {
    ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
		       << " returned err=" << op_ret << dendl;
    return;
  }
}
2476
2477 int RGWGetBucketWebsite::verify_permission()
2478 {
2479 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
2480 }
2481
/* Standard per-request bucket/object pre-execution hook. */
void RGWGetBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2486
/* Error out when the bucket has no website configuration; otherwise
 * op_ret stays 0 and the existing config is used for the response. */
void RGWGetBucketWebsite::execute()
{
  if (!s->bucket_info.has_website) {
    op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION;
  }
}
2493
2494 int RGWSetBucketWebsite::verify_permission()
2495 {
2496 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
2497 }
2498
/* Standard per-request bucket/object pre-execution hook. */
void RGWSetBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2503
/* Store a new website configuration on the bucket. On a non-master zone
 * the request is first forwarded to the metadata master; the local write
 * is retried on racing bucket-info writers. */
void RGWSetBucketWebsite::execute()
{
  op_ret = get_params();

  if (op_ret < 0)
    return;

  if (!store->svc.zone->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  /* Re-applied on each retry against freshly loaded bucket info. */
  op_ret = retry_raced_bucket_write(store, s, [this] {
      s->bucket_info.has_website = true;
      s->bucket_info.website_conf = website_conf;
      op_ret = store->put_bucket_instance_info(s->bucket_info, false,
					       real_time(), &s->bucket_attrs);
      return op_ret;
    });

  if (op_ret < 0) {
    ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
		       << " returned err=" << op_ret << dendl;
    return;
  }
}
2533
2534 int RGWDeleteBucketWebsite::verify_permission()
2535 {
2536 return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
2537 }
2538
/* Standard per-request bucket/object pre-execution hook. */
void RGWDeleteBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2543
2544 void RGWDeleteBucketWebsite::execute()
2545 {
2546
2547 if (!store->svc.zone->is_meta_master()) {
2548 bufferlist in_data;
2549 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
2550 if (op_ret < 0) {
2551 ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket.name
2552 << "returned err=" << op_ret << dendl;
2553 return;
2554 }
2555 }
2556 op_ret = retry_raced_bucket_write(store, s, [this] {
2557 s->bucket_info.has_website = false;
2558 s->bucket_info.website_conf = RGWBucketWebsiteConf();
2559 op_ret = store->put_bucket_instance_info(s->bucket_info, false,
2560 real_time(), &s->bucket_attrs);
2561 return op_ret;
2562 });
2563 if (op_ret < 0) {
2564 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2565 << " returned err=" << op_ret << dendl;
2566 return;
2567 }
2568 }
2569
2570 int RGWStatBucket::verify_permission()
2571 {
2572 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2573 if (!verify_bucket_permission(this, s, rgw::IAM::s3ListBucket)) {
2574 return -EACCES;
2575 }
2576
2577 return 0;
2578 }
2579
/* Standard per-request bucket/object pre-execution hook. */
void RGWStatBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2584
/* Look up the stats for the single requested bucket by running it through
 * update_containers_stats() and copying the refreshed entry back into
 * 'bucket' for the response. */
void RGWStatBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  RGWUserBuckets buckets;
  bucket.bucket = s->bucket;
  buckets.add(bucket);
  map<string, RGWBucketEnt>& m = buckets.get_buckets();
  /* update_containers_stats() returns the number of entries it refreshed;
   * 0 is mapped to -EEXIST here (historical convention — presumably
   * "bucket not found in the index"; verify against callers before
   * changing). */
  op_ret = store->update_containers_stats(m);
  if (! op_ret)
    op_ret = -EEXIST;
  if (op_ret > 0) {
    op_ret = 0;
    map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
    if (iter != m.end()) {
      bucket = iter->second;
    } else {
      op_ret = -EINVAL;
    }
  }
}
2609
/* Permission check for bucket listing. get_params() must run *before* the
 * IAM evaluation: prefix, delimiter and max-keys are exported below as
 * policy condition variables (s3:prefix etc.). Versioned listing maps to
 * s3:ListBucketVersions instead of s3:ListBucket. */
int RGWListBucket::verify_permission()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }
  if (!prefix.empty())
    s->env.emplace("s3:prefix", prefix);

  if (!delimiter.empty())
    s->env.emplace("s3:delimiter", delimiter);

  s->env.emplace("s3:max-keys", std::to_string(max));

  if (!verify_bucket_permission(this,
                                s,
				list_versions ?
				rgw::IAM::s3ListBucketVersions :
				rgw::IAM::s3ListBucket)) {
    return -EACCES;
  }

  return 0;
}
2634
/* Parse the request's max-keys string into 'max', clamped to
 * [0, rgw_max_listing_results], falling back to default_max when the
 * parameter is absent. */
int RGWListBucket::parse_max_keys()
{
  // Bound max value of max-keys to configured value for security
  // Bound min value of max-keys to '0'
  // Some S3 clients explicitly send max-keys=0 to detect if the bucket is
  // empty without listing any items.
  return parse_value_and_bound(max_keys, max, 0,
			g_conf().get_val<uint64_t>("rgw_max_listing_results"),
			default_max);
}
2645
/* Standard per-request bucket/object pre-execution hook. */
void RGWListBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2650
/* List objects in the bucket according to the already-parsed parameters
 * (prefix, delimiter, marker, versions, unordered). Results land in
 * 'objs'/'common_prefixes'; 'next_marker' is set for pagination. */
void RGWListBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  /* Unordered listing cannot honor delimiter-based grouping; reject the
   * combination outright. */
  if (allow_unordered && !delimiter.empty()) {
    ldpp_dout(this, 0) <<
      "ERROR: unordered bucket listing requested with a delimiter" << dendl;
    op_ret = -EINVAL;
    return;
  }

  /* Swift-style container stats in the listing response, if requested. */
  if (need_container_stats()) {
    map<string, RGWBucketEnt> m;
    m[s->bucket.name] = RGWBucketEnt();
    m.begin()->second.bucket = s->bucket;
    op_ret = store->update_containers_stats(m);
    if (op_ret > 0) {
      bucket = m.begin()->second;
    }
  }

  RGWRados::Bucket target(store, s->bucket_info);
  if (shard_id >= 0) {
    target.set_shard_id(shard_id);
  }
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = prefix;
  list_op.params.delim = delimiter;
  list_op.params.marker = marker;
  list_op.params.end_marker = end_marker;
  list_op.params.list_versions = list_versions;
  list_op.params.allow_unordered = allow_unordered;

  op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
  if (op_ret >= 0) {
    next_marker = list_op.get_next_marker();
  }
}
2693
2694 int RGWGetBucketLogging::verify_permission()
2695 {
2696 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
2697 }
2698
2699 int RGWGetBucketLocation::verify_permission()
2700 {
2701 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
2702 }
2703
/* Permission and quota gate for bucket creation: rejects anonymous users,
 * checks s3:CreateBucket on the would-be bucket ARN, forbids cross-tenant
 * creation, and enforces the per-user bucket-count limit. */
int RGWCreateBucket::verify_permission()
{
  /* This check is mostly needed for S3 that doesn't support account ACL.
   * Swift doesn't allow to delegate any permission to an anonymous user,
   * so it will become an early exit in such case. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  rgw_bucket bucket;
  bucket.name = s->bucket_name;
  bucket.tenant = s->bucket_tenant;
  ARN arn = ARN(bucket);
  if (!verify_user_permission(this, s, arn, rgw::IAM::s3CreateBucket)) {
    return -EACCES;
  }

  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
                        << " (user_id.tenant=" << s->user->user_id.tenant
                        << " requested=" << s->bucket_tenant << ")"
                        << dendl;
    return -EACCES;
  }
  /* max_buckets < 0 means bucket creation is disabled for this user. */
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  /* max_buckets > 0: count the user's existing buckets and enforce the cap
   * (0 means unlimited — the block is skipped). */
  if (s->user->max_buckets) {
    RGWUserBuckets buckets;
    string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
				   marker, string(), s->user->max_buckets,
				   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if ((int)buckets.count() >= s->user->max_buckets) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
2750
/* Forward the current request to the metadata-master zonegroup over the
 * configured REST connection, optionally parsing the master's JSON reply
 * into @jp. @forward_info, when non-null, overrides the req_info that is
 * forwarded (otherwise s->info is used). Returns 0 on success or a
 * negative error. */
static int forward_request_to_master(struct req_state *s, obj_version *objv,
				     RGWRados *store, bufferlist& in_data,
				     JSONParser *jp, req_info *forward_info)
{
  if (!store->svc.zone->get_master_conn()) {
    ldpp_dout(s, 0) << "rest connection is invalid" << dendl;
    return -EINVAL;
  }
  ldpp_dout(s, 0) << "sending request to master zonegroup" << dendl;
  bufferlist response;
  string uid_str = s->user->user_id.to_str();
  /* Cap the buffered master response; anything we expect back is small. */
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
  int ret = store->svc.zone->get_master_conn()->forward(uid_str, (forward_info ? *forward_info : s->info),
                                                        objv, MAX_REST_RESPONSE, &in_data, &response);
  if (ret < 0)
    return ret;

  ldpp_dout(s, 20) << "response: " << response.c_str() << dendl;
  if (jp && !jp->parse(response.c_str(), response.length())) {
    ldpp_dout(s, 0) << "failed parsing response from master zonegroup" << dendl;
    return -EINVAL;
  }

  return 0;
}
2776
/* Standard per-request bucket/object pre-execution hook. */
void RGWCreateBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2781
/* Object-overwrite variant: partition @orig_attrs so that every existing
 * user-metadata attr (RGW_ATTR_META_PREFIX) is scheduled for removal in
 * @out_rmattrs, while any other attr is carried over into @out_attrs —
 * but only if the caller hasn't already supplied a value for that key. */
static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
                                  map<string, bufferlist>& out_attrs,
                                  map<string, bufferlist>& out_rmattrs)
{
  for (const auto& kv : orig_attrs) {
    const string& name = kv.first;

    /* Check if the attr is user-defined metadata item. */
    if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
                     RGW_ATTR_META_PREFIX) == 0) {
      /* For the objects all existing meta attrs have to be removed. */
      out_rmattrs[name] = kv.second;
    } else if (out_attrs.find(name) == std::end(out_attrs)) {
      out_attrs[name] = kv.second;
    }
  }
}
2799
2800 /* Fuse resource metadata basing on original attributes in @orig_attrs, set
2801 * of _custom_ attribute names to remove in @rmattr_names and attributes in
2802 * @out_attrs. Place results in @out_attrs.
2803 *
2804 * NOTE: it's supposed that all special attrs already present in @out_attrs
2805 * will be preserved without any change. Special attributes are those which
2806 * names start with RGW_ATTR_META_PREFIX. They're complement to custom ones
2807 * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta and so on. */
2808 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2809 const set<string>& rmattr_names,
2810 map<string, bufferlist>& out_attrs)
2811 {
2812 for (const auto& kv : orig_attrs) {
2813 const string& name = kv.first;
2814
2815 /* Check if the attr is user-defined metadata item. */
2816 if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
2817 RGW_ATTR_META_PREFIX) == 0) {
2818 /* For the buckets all existing meta attrs are preserved,
2819 except those that are listed in rmattr_names. */
2820 if (rmattr_names.find(name) != std::end(rmattr_names)) {
2821 const auto aiter = out_attrs.find(name);
2822
2823 if (aiter != std::end(out_attrs)) {
2824 out_attrs.erase(aiter);
2825 }
2826 } else {
2827 /* emplace() won't alter the map if the key is already present.
2828 * This behaviour is fully intensional here. */
2829 out_attrs.emplace(kv);
2830 }
2831 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2832 out_attrs[name] = kv.second;
2833 }
2834 }
2835 }
2836
2837
/* Copy every generic attr collected on the request state into @out_attrs,
 * overwriting any existing value for the same key. */
static void populate_with_generic_attrs(const req_state * const s,
                                        map<string, bufferlist>& out_attrs)
{
  for (const auto& kv : s->generic_attrs) {
    bufferlist& attrbl = out_attrs[kv.first];
    const string& val = kv.second;
    attrbl.clear();
    /* size() + 1 deliberately includes the terminating NUL in the stored
     * value. */
    attrbl.append(val.c_str(), val.size() + 1);
  }
}
2848
2849
/* Extract Swift quota settings (max objects / max size) from the request's
 * attr maps into @quota, removing the corresponding entries from
 * @add_attrs. Names listed in @rmattr_names reset the respective limit to
 * unlimited (-1). @quota_extracted, when non-null, reports whether any
 * quota-related attr was seen. Returns -EINVAL on an unparsable number. */
static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
                                 const std::set<std::string>& rmattr_names,
                                 RGWQuotaInfo& quota,
                                 bool * quota_extracted = nullptr)
{
  bool extracted = false;

  /* Put new limit on max objects. */
  auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
  std::string err;
  if (std::end(add_attrs) != iter) {
    quota.max_objects =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  /* Put new limit on bucket (container) size. */
  iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
  if (iter != add_attrs.end()) {
    quota.max_size =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  for (const auto& name : rmattr_names) {
    /* Remove limit on max objects. */
    if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
      quota.max_objects = -1;
      extracted = true;
    }

    /* Remove limit on max bucket size. */
    if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
      quota.max_size = -1;
      extracted = true;
    }
  }

  /* Swift requires checking on raw usage instead of the 4 KiB rounded one. */
  quota.check_on_raw = true;
  /* A non-positive limit on both axes means the quota is disabled. */
  quota.enabled = quota.max_size > 0 || quota.max_objects > 0;

  if (quota_extracted) {
    *quota_extracted = extracted;
  }

  return 0;
}
2906
2907
/* Move Swift static-website attrs out of @add_attrs into the matching
 * fields of @ws_conf; names present in @rmattr_names clear the field.
 * The listings flag is transported via a temporary string and converted
 * to a bool at the end. */
static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
                               const std::set<std::string>& rmattr_names,
                               RGWBucketWebsiteConf& ws_conf)
{
  std::string lstval;

  /* Let's define a mapping between each custom attribute and the memory where
   * attribute's value should be stored. The memory location is expressed by
   * a non-const reference. */
  const auto mapping  = {
    std::make_pair(RGW_ATTR_WEB_INDEX,     std::ref(ws_conf.index_doc_suffix)),
    std::make_pair(RGW_ATTR_WEB_ERROR,     std::ref(ws_conf.error_doc)),
    std::make_pair(RGW_ATTR_WEB_LISTINGS,  std::ref(lstval)),
    std::make_pair(RGW_ATTR_WEB_LIST_CSS,  std::ref(ws_conf.listing_css_doc)),
    std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
  };

  for (const auto& kv : mapping) {
    const char * const key = kv.first;
    auto& target = kv.second;

    auto iter = add_attrs.find(key);

    if (std::end(add_attrs) != iter) {
      /* The "target" is a reference to ws_conf. */
      target = iter->second.c_str();
      add_attrs.erase(iter);
    }

    if (rmattr_names.count(key)) {
      target = std::string();
    }
  }

  /* Only a literal (case-insensitive) "true" enables listings. */
  if (! lstval.empty()) {
    ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
  }
}
2946
2947
2948 void RGWCreateBucket::execute()
2949 {
2950 RGWAccessControlPolicy old_policy(s->cct);
2951 buffer::list aclbl;
2952 buffer::list corsbl;
2953 bool existed;
2954 string bucket_name;
2955 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
2956 rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root, bucket_name);
2957 obj_version objv, *pobjv = NULL;
2958
2959 op_ret = get_params();
2960 if (op_ret < 0)
2961 return;
2962
2963 if (!relaxed_region_enforcement &&
2964 !location_constraint.empty() &&
2965 !store->svc.zone->has_zonegroup_api(location_constraint)) {
2966 ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
2967 << " can't be found." << dendl;
2968 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2969 s->err.message = "The specified location-constraint is not valid";
2970 return;
2971 }
2972
2973 if (!relaxed_region_enforcement && !store->svc.zone->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
2974 store->svc.zone->get_zonegroup().api_name != location_constraint) {
2975 ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
2976 << " doesn't match zonegroup" << " (" << store->svc.zone->get_zonegroup().api_name << ")"
2977 << dendl;
2978 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2979 s->err.message = "The specified location-constraint is not valid";
2980 return;
2981 }
2982
2983 const auto& zonegroup = store->svc.zone->get_zonegroup();
2984 if (!placement_rule.name.empty() &&
2985 !zonegroup.placement_targets.count(placement_rule.name)) {
2986 ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")"
2987 << " doesn't exist in the placement targets of zonegroup"
2988 << " (" << store->svc.zone->get_zonegroup().api_name << ")" << dendl;
2989 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2990 s->err.message = "The specified placement target does not exist";
2991 return;
2992 }
2993
2994 /* we need to make sure we read bucket info, it's not read before for this
2995 * specific request */
2996 op_ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
2997 s->bucket_info, nullptr, &s->bucket_attrs);
2998 if (op_ret < 0 && op_ret != -ENOENT)
2999 return;
3000 s->bucket_exists = (op_ret != -ENOENT);
3001
3002 s->bucket_owner.set_id(s->user->user_id);
3003 s->bucket_owner.set_name(s->user->display_name);
3004 if (s->bucket_exists) {
3005 int r = rgw_op_get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
3006 s->bucket_attrs, &old_policy);
3007 if (r >= 0) {
3008 if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
3009 op_ret = -EEXIST;
3010 return;
3011 }
3012 }
3013 }
3014
3015 RGWBucketInfo master_info;
3016 rgw_bucket *pmaster_bucket;
3017 uint32_t *pmaster_num_shards;
3018 real_time creation_time;
3019
3020 if (!store->svc.zone->is_meta_master()) {
3021 JSONParser jp;
3022 op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
3023 if (op_ret < 0) {
3024 return;
3025 }
3026
3027 JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
3028 JSONDecoder::decode_json("object_ver", objv, &jp);
3029 JSONDecoder::decode_json("bucket_info", master_info, &jp);
3030 ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
3031 ldpp_dout(this, 20) << "got creation time: << " << master_info.creation_time << dendl;
3032 pmaster_bucket= &master_info.bucket;
3033 creation_time = master_info.creation_time;
3034 pmaster_num_shards = &master_info.num_shards;
3035 pobjv = &objv;
3036 } else {
3037 pmaster_bucket = NULL;
3038 pmaster_num_shards = NULL;
3039 }
3040
3041 string zonegroup_id;
3042
3043 if (s->system_request) {
3044 zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
3045 if (zonegroup_id.empty()) {
3046 zonegroup_id = store->svc.zone->get_zonegroup().get_id();
3047 }
3048 } else {
3049 zonegroup_id = store->svc.zone->get_zonegroup().get_id();
3050 }
3051
3052 if (s->bucket_exists) {
3053 rgw_placement_rule selected_placement_rule;
3054 rgw_bucket bucket;
3055 bucket.tenant = s->bucket_tenant;
3056 bucket.name = s->bucket_name;
3057 op_ret = store->svc.zone->select_bucket_placement(*(s->user), zonegroup_id,
3058 placement_rule,
3059 &selected_placement_rule, nullptr);
3060 if (selected_placement_rule != s->bucket_info.placement_rule) {
3061 op_ret = -EEXIST;
3062 return;
3063 }
3064 }
3065
3066 /* Encode special metadata first as we're using std::map::emplace under
3067 * the hood. This method will add the new items only if the map doesn't
3068 * contain such keys yet. */
3069 policy.encode(aclbl);
3070 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3071
3072 if (has_cors) {
3073 cors_config.encode(corsbl);
3074 emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
3075 }
3076
3077 RGWQuotaInfo quota_info;
3078 const RGWQuotaInfo * pquota_info = nullptr;
3079 if (need_metadata_upload()) {
3080 /* It's supposed that following functions WILL NOT change any special
3081 * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
3082 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
3083 if (op_ret < 0) {
3084 return;
3085 }
3086 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
3087 populate_with_generic_attrs(s, attrs);
3088
3089 op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
3090 if (op_ret < 0) {
3091 return;
3092 } else {
3093 pquota_info = &quota_info;
3094 }
3095
3096 /* Web site of Swift API. */
3097 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
3098 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
3099 }
3100
3101 s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
3102 s->bucket.name = s->bucket_name;
3103
3104 /* Handle updates of the metadata for Swift's object versioning. */
3105 if (swift_ver_location) {
3106 s->bucket_info.swift_ver_location = *swift_ver_location;
3107 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
3108 }
3109
3110 op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
3111 placement_rule, s->bucket_info.swift_ver_location,
3112 pquota_info, attrs,
3113 info, pobjv, &ep_objv, creation_time,
3114 pmaster_bucket, pmaster_num_shards, true);
3115 /* continue if EEXIST and create_bucket will fail below. this way we can
3116 * recover from a partial create by retrying it. */
3117 ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;
3118
3119 if (op_ret && op_ret != -EEXIST)
3120 return;
3121
3122 existed = (op_ret == -EEXIST);
3123
3124 if (existed) {
3125 /* bucket already existed, might have raced with another bucket creation, or
3126 * might be partial bucket creation that never completed. Read existing bucket
3127 * info, verify that the reported bucket owner is the current user.
3128 * If all is ok then update the user's list of buckets.
3129 * Otherwise inform client about a name conflict.
3130 */
3131 if (info.owner.compare(s->user->user_id) != 0) {
3132 op_ret = -EEXIST;
3133 return;
3134 }
3135 s->bucket = info.bucket;
3136 }
3137
3138 op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket,
3139 info.creation_time, false);
3140 if (op_ret && !existed && op_ret != -EEXIST) {
3141 /* if it exists (or previously existed), don't remove it! */
3142 op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
3143 s->bucket.name);
3144 if (op_ret < 0) {
3145 ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
3146 << dendl;
3147 }
3148 } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
3149 op_ret = -ERR_BUCKET_EXISTS;
3150 }
3151
3152 if (need_metadata_upload() && existed) {
3153 /* OK, it looks we lost race with another request. As it's required to
3154 * handle metadata fusion and upload, the whole operation becomes very
3155 * similar in nature to PutMetadataBucket. However, as the attrs may
3156 * changed in the meantime, we have to refresh. */
3157 short tries = 0;
3158 do {
3159 RGWBucketInfo binfo;
3160 map<string, bufferlist> battrs;
3161
3162 op_ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
3163 binfo, nullptr, &battrs);
3164 if (op_ret < 0) {
3165 return;
3166 } else if (binfo.owner.compare(s->user->user_id) != 0) {
3167 /* New bucket doesn't belong to the account we're operating on. */
3168 op_ret = -EEXIST;
3169 return;
3170 } else {
3171 s->bucket_info = binfo;
3172 s->bucket_attrs = battrs;
3173 }
3174
3175 attrs.clear();
3176
3177 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
3178 if (op_ret < 0) {
3179 return;
3180 }
3181 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
3182 populate_with_generic_attrs(s, attrs);
3183 op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
3184 if (op_ret < 0) {
3185 return;
3186 }
3187
3188 /* Handle updates of the metadata for Swift's object versioning. */
3189 if (swift_ver_location) {
3190 s->bucket_info.swift_ver_location = *swift_ver_location;
3191 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
3192 }
3193
3194 /* Web site of Swift API. */
3195 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
3196 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
3197
3198 /* This will also set the quota on the bucket. */
3199 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
3200 &s->bucket_info.objv_tracker);
3201 } while (op_ret == -ECANCELED && tries++ < 20);
3202
3203 /* Restore the proper return code. */
3204 if (op_ret >= 0) {
3205 op_ret = -ERR_BUCKET_EXISTS;
3206 }
3207 }
3208 }
3209
3210 int RGWDeleteBucket::verify_permission()
3211 {
3212 if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucket)) {
3213 return -EACCES;
3214 }
3215
3216 return 0;
3217 }
3218
/* Pre-execution hook: delegates to the shared bucket/object pre-exec
 * helper used by the other bucket- and object-level ops in this file. */
void RGWDeleteBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3223
/* Delete a bucket. Sequence: validate the request, optionally honor a
 * system-request version override, sync user stats (best effort), verify
 * the bucket is empty, forward to the metadata master if we are not it,
 * abort any in-flight multipart uploads, delete the bucket entrypoint,
 * and finally unlink the bucket from its owner. Result is reported via
 * op_ret. */
void RGWDeleteBucket::execute()
{
  if (s->bucket_name.empty()) {
    op_ret = -EINVAL;
    return;
  }

  if (!s->bucket_exists) {
    ldpp_dout(this, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }
  /* Track the entrypoint object version so the delete below only succeeds
   * against the version we read (optimistic concurrency). */
  RGWObjVersionTracker ot;
  ot.read_version = s->bucket_info.ep_objv;

  /* System requests (e.g. from another zone) may pin the exact
   * tag/version of the entrypoint they intend to delete. */
  if (s->system_request) {
    string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
    string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
    if (!tag.empty()) {
      ot.read_version.tag = tag;
      uint64_t ver;
      string err;
      ver = strict_strtol(ver_str.c_str(), 10, &err);
      if (!err.empty()) {
        ldpp_dout(this, 0) << "failed to parse ver param" << dendl;
        op_ret = -EINVAL;
        return;
      }
      ot.read_version.ver = ver;
    }
  }

  /* Best effort: a failure to sync stats is logged but does not abort
   * the delete. */
  op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
  if ( op_ret < 0) {
     ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
  }

  op_ret = store->check_bucket_empty(s->bucket_info);
  if (op_ret < 0) {
    return;
  }

  /* Bucket metadata is owned by the meta master zone; forward the delete
   * there first when this zone is not the master. */
  if (!store->svc.zone->is_meta_master()) {
    bufferlist in_data;
    op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       NULL);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        /* adjust error, we want to return with NoSuchBucket and not
         * NoSuchKey */
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return;
    }
  }

  string prefix, delimiter;

  /* Swift "path" argument narrows which multipart uploads get aborted. */
  if (s->prot_flags & RGW_REST_SWIFT) {
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      /* NOTE(review): prefix/delimiter were default-constructed just above,
       * so this emptiness check can never fire here — looks like dead code
       * carried over from the listing path; confirm before removing. */
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }

  /* Abort in-flight multipart uploads so their parts don't leak after
   * the bucket is gone. */
  op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);

  if (op_ret < 0) {
    return;
  }

  op_ret = store->delete_bucket(s->bucket_info, ot, false);

  if (op_ret == -ECANCELED) {
    // lost a race, either with mdlog sync or another delete bucket operation.
    // in either case, we've already called rgw_unlink_bucket()
    op_ret = 0;
    return;
  }

  /* Remove the bucket from the owner's bucket list; failure here is only
   * logged since the bucket itself is already gone. */
  if (op_ret == 0) {
    op_ret = rgw_unlink_bucket(store, s->bucket_info.owner, s->bucket.tenant,
                               s->bucket.name, false);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                         << dendl;
    }
  }
}
3319
/* Authorize a PUT object request.
 *
 * Two phases:
 *  1. If this is a server-side copy (copy_source set), check READ access
 *     on the source object: admin override, then user IAM policies, then
 *     the bucket/object policy, then the source ACL as a fallback.
 *  2. Check WRITE access on the destination via s3:PutObject in the user
 *     and bucket policies, falling back to a plain ACL permission check.
 *
 * Returns 0 on success, -EACCES on denial, or a negative error from
 * get_params(). */
int RGWPutObj::verify_permission()
{
  if (! copy_source.empty()) {

    RGWAccessControlPolicy cs_acl(s->cct);
    boost::optional<Policy> policy;
    map<string, bufferlist> cs_attrs;
    rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
    rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);

    rgw_obj obj(cs_bucket, cs_object);
    store->set_atomic(s->obj_ctx, obj);
    store->set_prefetch_data(s->obj_ctx, obj);

    /* check source object permissions */
    if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr,
                        policy, cs_bucket, cs_object) < 0) {
      return -EACCES;
    }

    /* admin request overrides permission checks */
    if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
      if (policy || ! s->iam_user_policies.empty()) {
        /* Evaluate identity (user) policies first: an explicit Deny wins
         * immediately; an Allow short-circuits the remaining user policies
         * but the bucket policy is still consulted for a Deny below.
         * s3GetObjectVersion is used when a specific version is requested. */
        auto usr_policy_res = Effect::Pass;
        for (auto& user_policy : s->iam_user_policies) {
          /* NB: assignment inside the if-init is intentional (C++17
           * init-statement); the condition tested is the Deny check. */
          if (usr_policy_res = user_policy.eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj)); usr_policy_res == Effect::Deny)
            return -EACCES;
          else if (usr_policy_res == Effect::Allow)
            break;
        }
        rgw::IAM::Effect e = Effect::Pass;
        if (policy) {
          e = policy->eval(s->env, *s->auth.identity,
                           cs_object.instance.empty() ?
                           rgw::IAM::s3GetObject :
                           rgw::IAM::s3GetObjectVersion,
                           rgw::IAM::ARN(obj));
        }
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (usr_policy_res == Effect::Pass && e == Effect::Pass &&
                   !cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
                                             RGW_PERM_READ)) {
          /* Neither policy had an opinion — fall back to the source ACL. */
          return -EACCES;
        }
      } else if (!cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
                                           RGW_PERM_READ)) {
        /* No policies at all — the ACL alone decides. */
        return -EACCES;
      }
    }
  }

  auto op_ret = get_params();
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "get_params() returned ret=" << op_ret << dendl;
    return op_ret;
  }

  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    /* Populate the IAM environment with request facts that policy
     * conditions may reference (grants, canned ACL, encryption headers,
     * object tags). */
    rgw_add_grant_to_iam_environment(s->env, s);

    rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);

    if (obj_tags != nullptr && obj_tags->count() > 0){
      auto tags = obj_tags->get_tags();
      for (const auto& kv: tags){
        rgw_add_to_iam_environment(s->env, "s3:RequestObjectTag/"+kv.first, kv.second);
      }
    }

    constexpr auto encrypt_attr = "x-amz-server-side-encryption";
    constexpr auto s3_encrypt_attr = "s3:x-amz-server-side-encryption";
    auto enc_header = s->info.x_meta_map.find(encrypt_attr);
    if (enc_header != s->info.x_meta_map.end()){
      rgw_add_to_iam_environment(s->env, s3_encrypt_attr, enc_header->second);
    }

    constexpr auto kms_attr = "x-amz-server-side-encryption-aws-kms-key-id";
    constexpr auto s3_kms_attr = "s3:x-amz-server-side-encryption-aws-kms-key-id";
    auto kms_header = s->info.x_meta_map.find(kms_attr);
    if (kms_header != s->info.x_meta_map.end()){
      rgw_add_to_iam_environment(s->env, s3_kms_attr, kms_header->second);
    }

    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                             boost::none,
                                             rgw::IAM::s3PutObject,
                                             rgw_obj(s->bucket, s->object));
    if (usr_policy_res == Effect::Deny)
      return -EACCES;

    rgw::IAM::Effect e = Effect::Pass;
    if (s->iam_policy) {
      e = s->iam_policy->eval(s->env, *s->auth.identity,
                              rgw::IAM::s3PutObject,
                              rgw_obj(s->bucket, s->object));
    }
    /* Bucket-policy Allow or Deny is final; a user-policy Allow also
     * suffices. Pass/Pass falls through to the ACL check below. */
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    } else if (usr_policy_res == Effect::Allow) {
      return 0;
    }
  }

  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
3436
3437
/* Pre-execution hook: delegates to the shared bucket/object pre-exec
 * helper used by the other ops in this file. */
void RGWPutObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3442
/* GetObj filter adapter used by the server-side-copy path of PutObj:
 * each chunk produced by the source read is handed straight to
 * RGWPutObj::get_data_cb(), which buffers it for the write side. */
class RGWPutObj_CB : public RGWGetObj_Filter
{
  RGWPutObj *op;  // back-pointer to the owning op; not owned
public:
  explicit RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
  ~RGWPutObj_CB() override {}

  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
    return op->get_data_cb(bl, bl_ofs, bl_len);
  }
};
3454
3455 int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3456 {
3457 bufferlist bl_tmp;
3458 bl.copy(bl_ofs, bl_len, bl_tmp);
3459
3460 bl_aux.append(bl_tmp);
3461
3462 return bl_len;
3463 }
3464
3465 int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3466 {
3467 RGWPutObj_CB cb(this);
3468 RGWGetObj_Filter* filter = &cb;
3469 boost::optional<RGWGetObj_Decompress> decompress;
3470 std::unique_ptr<RGWGetObj_Filter> decrypt;
3471 RGWCompressionInfo cs_info;
3472 map<string, bufferlist> attrs;
3473 map<string, bufferlist>::iterator attr_iter;
3474 int ret = 0;
3475
3476 uint64_t obj_size;
3477 int64_t new_ofs, new_end;
3478
3479 new_ofs = fst;
3480 new_end = lst;
3481
3482 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3483 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3484
3485 RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3486 RGWRados::Object::Read read_op(&op_target);
3487 read_op.params.obj_size = &obj_size;
3488 read_op.params.attrs = &attrs;
3489
3490 ret = read_op.prepare();
3491 if (ret < 0)
3492 return ret;
3493
3494 bool need_decompress;
3495 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3496 if (op_ret < 0) {
3497 ldpp_dout(s, 0) << "ERROR: failed to decode compression info" << dendl;
3498 return -EIO;
3499 }
3500
3501 bool partial_content = true;
3502 if (need_decompress)
3503 {
3504 obj_size = cs_info.orig_size;
3505 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3506 filter = &*decompress;
3507 }
3508
3509 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3510 op_ret = this->get_decrypt_filter(&decrypt,
3511 filter,
3512 attrs,
3513 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3514 if (decrypt != nullptr) {
3515 filter = decrypt.get();
3516 }
3517 if (op_ret < 0) {
3518 return ret;
3519 }
3520
3521 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3522 if (ret < 0)
3523 return ret;
3524
3525 filter->fixup_range(new_ofs, new_end);
3526 ret = read_op.iterate(new_ofs, new_end, filter);
3527
3528 if (ret >= 0)
3529 ret = filter->flush();
3530
3531 bl.claim_append(bl_aux);
3532
3533 return ret;
3534 }
3535
3536 // special handling for compression type = "random" with multipart uploads
3537 static CompressorRef get_compressor_plugin(const req_state *s,
3538 const std::string& compression_type)
3539 {
3540 if (compression_type != "random") {
3541 return Compressor::create(s->cct, compression_type);
3542 }
3543
3544 bool is_multipart{false};
3545 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3546
3547 if (!is_multipart) {
3548 return Compressor::create(s->cct, compression_type);
3549 }
3550
3551 // use a hash of the multipart upload id so all parts use the same plugin
3552 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3553 if (alg == Compressor::COMP_ALG_NONE) {
3554 return nullptr;
3555 }
3556 return Compressor::create(s->cct, alg);
3557 }
3558
3559 void RGWPutObj::execute()
3560 {
3561 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3562 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3563 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3564 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3565 MD5 hash;
3566 bufferlist bl, aclbl, bs;
3567 int len;
3568
3569 off_t fst;
3570 off_t lst;
3571
3572 bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
3573 perfcounter->inc(l_rgw_put);
3574 // report latency on return
3575 auto put_lat = make_scope_guard([&] {
3576 perfcounter->tinc(l_rgw_put_lat, s->time_elapsed());
3577 });
3578
3579 op_ret = -EINVAL;
3580 if (s->object.empty()) {
3581 return;
3582 }
3583
3584 if (!s->bucket_exists) {
3585 op_ret = -ERR_NO_SUCH_BUCKET;
3586 return;
3587 }
3588
3589
3590 op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
3591 if (op_ret < 0) {
3592 ldpp_dout(this, 20) << "get_system_versioning_params() returned ret="
3593 << op_ret << dendl;
3594 return;
3595 }
3596
3597 if (supplied_md5_b64) {
3598 need_calc_md5 = true;
3599
3600 ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3601 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3602 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3603 ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl;
3604 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3605 op_ret = -ERR_INVALID_DIGEST;
3606 return;
3607 }
3608
3609 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
3610 ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
3611 }
3612
3613 if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
3614 we also check sizes at the end anyway */
3615 op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
3616 user_quota, bucket_quota, s->content_length);
3617 if (op_ret < 0) {
3618 ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl;
3619 return;
3620 }
3621 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3622 if (op_ret < 0) {
3623 ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
3624 return;
3625 }
3626 }
3627
3628 if (supplied_etag) {
3629 strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
3630 supplied_md5[sizeof(supplied_md5) - 1] = '\0';
3631 }
3632
3633 const bool multipart = !multipart_upload_id.empty();
3634 auto& obj_ctx = *static_cast<RGWObjectCtx*>(s->obj_ctx);
3635 rgw_obj obj{s->bucket, s->object};
3636
3637 /* Handle object versioning of Swift API. */
3638 if (! multipart) {
3639 op_ret = store->swift_versioning_copy(obj_ctx,
3640 s->bucket_owner.get_id(),
3641 s->bucket_info,
3642 obj);
3643 if (op_ret < 0) {
3644 return;
3645 }
3646 }
3647
3648 // create the object processor
3649 rgw::AioThrottle aio(store->ctx()->_conf->rgw_put_obj_min_window_size);
3650 using namespace rgw::putobj;
3651 constexpr auto max_processor_size = std::max({sizeof(MultipartObjectProcessor),
3652 sizeof(AtomicObjectProcessor),
3653 sizeof(AppendObjectProcessor)});
3654 ceph::static_ptr<ObjectProcessor, max_processor_size> processor;
3655
3656 rgw_placement_rule *pdest_placement;
3657
3658 if (multipart) {
3659 RGWMPObj mp(s->object.name, multipart_upload_id);
3660
3661 multipart_upload_info upload_info;
3662 op_ret = get_multipart_info(store, s, mp.get_meta(), nullptr, nullptr, &upload_info);
3663 if (op_ret < 0) {
3664 if (op_ret != -ENOENT) {
3665 ldpp_dout(this, 0) << "ERROR: get_multipart_info returned " << op_ret << ": " << cpp_strerror(-op_ret) << dendl;
3666 } else {// -ENOENT: raced with upload complete/cancel, no need to spam log
3667 ldpp_dout(this, 20) << "failed to get multipart info (returned " << op_ret << ": " << cpp_strerror(-op_ret) << "): probably raced with upload complete / cancel" << dendl;
3668 }
3669 return;
3670 }
3671 pdest_placement = &upload_info.dest_placement;
3672 ldpp_dout(this, 20) << "dest_placement for part=" << upload_info.dest_placement << dendl;
3673 processor.emplace<MultipartObjectProcessor>(
3674 &aio, store, s->bucket_info, pdest_placement,
3675 s->owner.get_id(), obj_ctx, obj,
3676 multipart_upload_id, multipart_part_num, multipart_part_str);
3677 } else if(append) {
3678 if (s->bucket_info.versioned()) {
3679 op_ret = -ERR_INVALID_BUCKET_STATE;
3680 return;
3681 }
3682 pdest_placement = &s->dest_placement;
3683 processor.emplace<AppendObjectProcessor>(
3684 &aio, store, s->bucket_info, pdest_placement, s->bucket_owner.get_id(),obj_ctx, obj,
3685 s->req_id, position, &cur_accounted_size);
3686 } else {
3687 if (s->bucket_info.versioning_enabled()) {
3688 if (!version_id.empty()) {
3689 obj.key.set_instance(version_id);
3690 } else {
3691 store->gen_rand_obj_instance_name(&obj);
3692 version_id = obj.key.instance;
3693 }
3694 }
3695 pdest_placement = &s->dest_placement;
3696 processor.emplace<AtomicObjectProcessor>(
3697 &aio, store, s->bucket_info, pdest_placement,
3698 s->bucket_owner.get_id(), obj_ctx, obj, olh_epoch, s->req_id);
3699 }
3700
3701 op_ret = processor->prepare();
3702 if (op_ret < 0) {
3703 ldpp_dout(this, 20) << "processor->prepare() returned ret=" << op_ret
3704 << dendl;
3705 return;
3706 }
3707
3708 if ((! copy_source.empty()) && !copy_source_range) {
3709 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3710 rgw_obj obj(copy_source_bucket_info.bucket, obj_key.name);
3711
3712 RGWObjState *astate;
3713 op_ret = store->get_obj_state(&obj_ctx, copy_source_bucket_info, obj,
3714 &astate, true, false);
3715 if (op_ret < 0) {
3716 ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
3717 return;
3718 }
3719 if (!astate->exists){
3720 op_ret = -ENOENT;
3721 return;
3722 }
3723 lst = astate->accounted_size - 1;
3724 } else {
3725 lst = copy_source_range_lst;
3726 }
3727
3728 fst = copy_source_range_fst;
3729
3730 // no filters by default
3731 DataProcessor *filter = processor.get();
3732
3733 const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(*pdest_placement);
3734 CompressorRef plugin;
3735 boost::optional<RGWPutObj_Compress> compressor;
3736
3737 std::unique_ptr<DataProcessor> encrypt;
3738 op_ret = get_encrypt_filter(&encrypt, filter);
3739 if (op_ret < 0) {
3740 return;
3741 }
3742 if (encrypt != nullptr) {
3743 filter = &*encrypt;
3744 } else if (compression_type != "none") {
3745 plugin = get_compressor_plugin(s, compression_type);
3746 if (!plugin) {
3747 ldpp_dout(this, 1) << "Cannot load plugin for compression type "
3748 << compression_type << dendl;
3749 } else {
3750 compressor.emplace(s->cct, plugin, filter);
3751 filter = &*compressor;
3752 }
3753 }
3754 tracepoint(rgw_op, before_data_transfer, s->req_id.c_str());
3755 do {
3756 bufferlist data;
3757 if (fst > lst)
3758 break;
3759 if (copy_source.empty()) {
3760 len = get_data(data);
3761 } else {
3762 uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
3763 op_ret = get_data(fst, cur_lst, data);
3764 if (op_ret < 0)
3765 return;
3766 len = data.length();
3767 s->content_length += len;
3768 fst += len;
3769 }
3770 if (len < 0) {
3771 op_ret = len;
3772 ldpp_dout(this, 20) << "get_data() returned ret=" << op_ret << dendl;
3773 return;
3774 } else if (len == 0) {
3775 break;
3776 }
3777
3778 if (need_calc_md5) {
3779 hash.Update((const unsigned char *)data.c_str(), data.length());
3780 }
3781
3782 /* update torrrent */
3783 torrent.update(data);
3784
3785 op_ret = filter->process(std::move(data), ofs);
3786 if (op_ret < 0) {
3787 ldpp_dout(this, 20) << "processor->process() returned ret="
3788 << op_ret << dendl;
3789 return;
3790 }
3791
3792 ofs += len;
3793 } while (len > 0);
3794 tracepoint(rgw_op, after_data_transfer, s->req_id.c_str(), ofs);
3795
3796 // flush any data in filters
3797 op_ret = filter->process({}, ofs);
3798 if (op_ret < 0) {
3799 return;
3800 }
3801
3802 if (!chunked_upload && ofs != s->content_length) {
3803 op_ret = -ERR_REQUEST_TIMEOUT;
3804 return;
3805 }
3806 s->obj_size = ofs;
3807
3808 perfcounter->inc(l_rgw_put_b, s->obj_size);
3809
3810 op_ret = do_aws4_auth_completion();
3811 if (op_ret < 0) {
3812 return;
3813 }
3814
3815 op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
3816 user_quota, bucket_quota, s->obj_size);
3817 if (op_ret < 0) {
3818 ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
3819 return;
3820 }
3821
3822 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3823 if (op_ret < 0) {
3824 ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
3825 return;
3826 }
3827
3828 hash.Final(m);
3829
3830 if (compressor && compressor->is_compressed()) {
3831 bufferlist tmp;
3832 RGWCompressionInfo cs_info;
3833 cs_info.compression_type = plugin->get_type_name();
3834 cs_info.orig_size = s->obj_size;
3835 cs_info.blocks = move(compressor->get_compression_blocks());
3836 encode(cs_info, tmp);
3837 attrs[RGW_ATTR_COMPRESSION] = tmp;
3838 ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION
3839 << " with type=" << cs_info.compression_type
3840 << ", orig_size=" << cs_info.orig_size
3841 << ", blocks=" << cs_info.blocks.size() << dendl;
3842 }
3843
3844 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
3845
3846 etag = calc_md5;
3847
3848 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
3849 op_ret = -ERR_BAD_DIGEST;
3850 return;
3851 }
3852
3853 policy.encode(aclbl);
3854 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3855
3856 if (dlo_manifest) {
3857 op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
3858 if (op_ret < 0) {
3859 ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
3860 return;
3861 }
3862 }
3863
3864 if (slo_info) {
3865 bufferlist manifest_bl;
3866 encode(*slo_info, manifest_bl);
3867 emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));
3868 }
3869
3870 if (supplied_etag && etag.compare(supplied_etag) != 0) {
3871 op_ret = -ERR_UNPROCESSABLE_ENTITY;
3872 return;
3873 }
3874 bl.append(etag.c_str(), etag.size());
3875 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
3876
3877 populate_with_generic_attrs(s, attrs);
3878 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
3879 if (op_ret < 0) {
3880 return;
3881 }
3882 encode_delete_at_attr(delete_at, attrs);
3883 encode_obj_tags_attr(obj_tags.get(), attrs);
3884
3885 /* Add a custom metadata to expose the information whether an object
3886 * is an SLO or not. Appending the attribute must be performed AFTER
3887 * processing any input from user in order to prohibit overwriting. */
3888 if (slo_info) {
3889 bufferlist slo_userindicator_bl;
3890 slo_userindicator_bl.append("True", 4);
3891 emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
3892 }
3893
3894 tracepoint(rgw_op, processor_complete_enter, s->req_id.c_str());
3895 op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
3896 (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
3897 (user_data.empty() ? nullptr : &user_data), nullptr, nullptr);
3898 tracepoint(rgw_op, processor_complete_exit, s->req_id.c_str());
3899
3900 /* produce torrent */
3901 if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
3902 {
3903 torrent.init(s, store);
3904 torrent.set_create_date(mtime);
3905 op_ret = torrent.complete();
3906 if (0 != op_ret)
3907 {
3908 ldpp_dout(this, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
3909 return;
3910 }
3911 }
3912 }
3913
/* Permission checking for POST object is deferred: execute() evaluates
 * the IAM user policies, the bucket policy and the ACL itself (the POST
 * form must be parsed first), so nothing is checked here. */
int RGWPostObj::verify_permission()
{
  return 0;
}
3918
/* Pre-execution hook: delegates to the shared bucket/object pre-exec
 * helper used by the other ops in this file. */
void RGWPostObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3923
3924 void RGWPostObj::execute()
3925 {
3926 boost::optional<RGWPutObj_Compress> compressor;
3927 CompressorRef plugin;
3928 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3929
3930 /* Read in the data from the POST form. */
3931 op_ret = get_params();
3932 if (op_ret < 0) {
3933 return;
3934 }
3935
3936 op_ret = verify_params();
3937 if (op_ret < 0) {
3938 return;
3939 }
3940
3941 if (s->iam_policy || ! s->iam_user_policies.empty()) {
3942 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
3943 boost::none,
3944 rgw::IAM::s3PutObject,
3945 rgw_obj(s->bucket, s->object));
3946 if (usr_policy_res == Effect::Deny) {
3947 op_ret = -EACCES;
3948 return;
3949 }
3950
3951 rgw::IAM::Effect e = Effect::Pass;
3952 if (s->iam_policy) {
3953 e = s->iam_policy->eval(s->env, *s->auth.identity,
3954 rgw::IAM::s3PutObject,
3955 rgw_obj(s->bucket, s->object));
3956 }
3957 if (e == Effect::Deny) {
3958 op_ret = -EACCES;
3959 return;
3960 } else if (usr_policy_res == Effect::Pass && e == Effect::Pass && !verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
3961 op_ret = -EACCES;
3962 return;
3963 }
3964 } else if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
3965 op_ret = -EACCES;
3966 return;
3967 }
3968
3969 /* Start iteration over data fields. It's necessary as Swift's FormPost
3970 * is capable to handle multiple files in single form. */
3971 do {
3972 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3973 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3974 MD5 hash;
3975 ceph::buffer::list bl, aclbl;
3976 int len = 0;
3977
3978 op_ret = store->check_quota(s->bucket_owner.get_id(),
3979 s->bucket,
3980 user_quota,
3981 bucket_quota,
3982 s->content_length);
3983 if (op_ret < 0) {
3984 return;
3985 }
3986
3987 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3988 if (op_ret < 0) {
3989 return;
3990 }
3991
3992 if (supplied_md5_b64) {
3993 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3994 ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3995 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3996 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3997 ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl;
3998 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3999 op_ret = -ERR_INVALID_DIGEST;
4000 return;
4001 }
4002
4003 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
4004 ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
4005 }
4006
4007 rgw_obj obj(s->bucket, get_current_filename());
4008 if (s->bucket_info.versioning_enabled()) {
4009 store->gen_rand_obj_instance_name(&obj);
4010 }
4011
4012 rgw::AioThrottle aio(s->cct->_conf->rgw_put_obj_min_window_size);
4013
4014 using namespace rgw::putobj;
4015 AtomicObjectProcessor processor(&aio, store, s->bucket_info,
4016 &s->dest_placement,
4017 s->bucket_owner.get_id(),
4018 *static_cast<RGWObjectCtx*>(s->obj_ctx),
4019 obj, 0, s->req_id);
4020 op_ret = processor.prepare();
4021 if (op_ret < 0) {
4022 return;
4023 }
4024
4025 /* No filters by default. */
4026 DataProcessor *filter = &processor;
4027
4028 std::unique_ptr<DataProcessor> encrypt;
4029 op_ret = get_encrypt_filter(&encrypt, filter);
4030 if (op_ret < 0) {
4031 return;
4032 }
4033 if (encrypt != nullptr) {
4034 filter = encrypt.get();
4035 } else {
4036 const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
4037 s->dest_placement);
4038 if (compression_type != "none") {
4039 plugin = Compressor::create(s->cct, compression_type);
4040 if (!plugin) {
4041 ldpp_dout(this, 1) << "Cannot load plugin for compression type "
4042 << compression_type << dendl;
4043 } else {
4044 compressor.emplace(s->cct, plugin, filter);
4045 filter = &*compressor;
4046 }
4047 }
4048 }
4049
4050 bool again;
4051 do {
4052 ceph::bufferlist data;
4053 len = get_data(data, again);
4054
4055 if (len < 0) {
4056 op_ret = len;
4057 return;
4058 }
4059
4060 if (!len) {
4061 break;
4062 }
4063
4064 hash.Update((const unsigned char *)data.c_str(), data.length());
4065 op_ret = filter->process(std::move(data), ofs);
4066
4067 ofs += len;
4068
4069 if (ofs > max_len) {
4070 op_ret = -ERR_TOO_LARGE;
4071 return;
4072 }
4073 } while (again);
4074
4075 // flush
4076 op_ret = filter->process({}, ofs);
4077 if (op_ret < 0) {
4078 return;
4079 }
4080
4081 if (len < min_len) {
4082 op_ret = -ERR_TOO_SMALL;
4083 return;
4084 }
4085
4086 s->obj_size = ofs;
4087
4088
4089 op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
4090 user_quota, bucket_quota, s->obj_size);
4091 if (op_ret < 0) {
4092 return;
4093 }
4094
4095 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
4096 if (op_ret < 0) {
4097 return;
4098 }
4099
4100 hash.Final(m);
4101 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
4102
4103 etag = calc_md5;
4104
4105 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
4106 op_ret = -ERR_BAD_DIGEST;
4107 return;
4108 }
4109
4110 bl.append(etag.c_str(), etag.size());
4111 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
4112
4113 policy.encode(aclbl);
4114 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
4115
4116 const std::string content_type = get_current_content_type();
4117 if (! content_type.empty()) {
4118 ceph::bufferlist ct_bl;
4119 ct_bl.append(content_type.c_str(), content_type.size() + 1);
4120 emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
4121 }
4122
4123 if (compressor && compressor->is_compressed()) {
4124 ceph::bufferlist tmp;
4125 RGWCompressionInfo cs_info;
4126 cs_info.compression_type = plugin->get_type_name();
4127 cs_info.orig_size = s->obj_size;
4128 cs_info.blocks = move(compressor->get_compression_blocks());
4129 encode(cs_info, tmp);
4130 emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
4131 }
4132
4133 op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(), attrs,
4134 (delete_at ? *delete_at : real_time()),
4135 nullptr, nullptr, nullptr, nullptr, nullptr);
4136 if (op_ret < 0) {
4137 return;
4138 }
4139 } while (is_next_file_to_upload());
4140 }
4141
4142
4143 void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
4144 const set<string>& rmattr_names,
4145 map<int, string>& temp_url_keys)
4146 {
4147 map<string, bufferlist>::iterator iter;
4148
4149 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
4150 if (iter != add_attrs.end()) {
4151 temp_url_keys[0] = iter->second.c_str();
4152 add_attrs.erase(iter);
4153 }
4154
4155 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
4156 if (iter != add_attrs.end()) {
4157 temp_url_keys[1] = iter->second.c_str();
4158 add_attrs.erase(iter);
4159 }
4160
4161 for (const string& name : rmattr_names) {
4162 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
4163 temp_url_keys[0] = string();
4164 }
4165 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
4166 temp_url_keys[1] = string();
4167 }
4168 }
4169 }
4170
/* Gather everything verify_permission() and execute() will need: request
 * params, the account's current attributes, the merged attribute update
 * (ACL, request metadata, generic attrs), and the TempURL/quota pieces
 * split out of it. Returns 0 on success or a negative error code. */
int RGWPutMetadataAccount::init_processing()
{
  /* First, go to the base class. At the time of writing the method was
   * responsible only for initializing the quota. This isn't necessary
   * here as we are touching metadata only. I'm putting this call only
   * for the future. */
  op_ret = RGWOp::init_processing();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }

  /* Current attrs are needed as the base that prepare_add_del_attrs()
   * merges the requested additions/removals into. */
  op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
                                     &acct_op_tracker);
  if (op_ret < 0) {
    return op_ret;
  }

  if (has_policy) {
    bufferlist acl_bl;
    policy.encode(acl_bl);
    attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return op_ret;
  }
  prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* Try extract the TempURL-related stuff now to allow verify_permission
   * evaluate whether we need FULL_CONTROL or not. */
  filter_out_temp_url(attrs, rmattr_names, temp_url_keys);

  /* The same with quota except a client needs to be reseller admin. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
                                 &new_quota_extracted);
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4219
int RGWPutMetadataAccount::verify_permission()
{
  /* Account metadata updates require an authenticated identity holding
   * WRITE permission. TempURL key changes additionally require
   * FULL_CONTROL; quota changes are deliberately rejected here (see the
   * note below). The check order matters for which error code callers
   * observe. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (!verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Altering TempURL keys requires FULL_CONTROL. */
  if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
    return -EPERM;
  }

  /* We are failing this intensionally to allow system user/reseller admin
   * override in rgw_process.cc. This is the way to specify a given RGWOp
   * expect extra privileges. */
  if (new_quota_extracted) {
    return -EACCES;
  }

  return 0;
}
4244
void RGWPutMetadataAccount::execute()
{
  /* Persist the user-info changes prepared by init_processing():
   * TempURL keys and (when permitted) the new quota, together with the
   * already-merged attrs. Result is reported via op_ret. */
  /* Params have been extracted earlier. See init_processing(). */
  RGWUserInfo new_uinfo;
  op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
                                    &acct_op_tracker);
  if (op_ret < 0) {
    return;
  }

  /* Handle the TempURL-related stuff. */
  if (!temp_url_keys.empty()) {
    for (auto& pair : temp_url_keys) {
      /* pair.second may be an empty string, which clears the key. */
      new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
    }
  }

  /* Handle the quota extracted at the verify_permission step. */
  if (new_quota_extracted) {
    new_uinfo.user_quota = std::move(new_quota);
  }

  /* We are passing here the current (old) user info to allow the function
   * optimize-out some operations. */
  op_ret = rgw_store_user_info(store, new_uinfo, s->user,
                               &acct_op_tracker, real_time(), false, &attrs);
}
4272
4273 int RGWPutMetadataBucket::verify_permission()
4274 {
4275 if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4276 return -EACCES;
4277 }
4278
4279 return 0;
4280 }
4281
void RGWPutMetadataBucket::pre_exec()
{
  /* Delegate to the shared bucket/object pre-execution hook. */
  rgw_bucket_object_pre_exec(s);
}
4286
void RGWPutMetadataBucket::execute()
{
  /* Swift "POST container": apply ACL, CORS, custom attributes, bucket
   * quota, versioning location and static-website settings, then persist
   * the attrs together with the (possibly updated) bucket info. */
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return;
  }

  /* The placement rule of an existing bucket cannot be changed. */
  if (!placement_rule.empty() &&
      placement_rule != s->bucket_info.placement_rule) {
    op_ret = -EEXIST;
    return;
  }

  /* Retry on raced bucket-info writes; the lambda re-runs from scratch on
   * each attempt. */
  op_ret = retry_raced_bucket_write(store, s, [this] {
      /* Encode special metadata first as we're using std::map::emplace under
       * the hood. This method will add the new items only if the map doesn't
       * contain such keys yet. */
      if (has_policy) {
	if (s->dialect.compare("swift") == 0) {
	  /* Swift merges the new ACL with the existing one, honoring the
	   * read/write mask of the request. */
	  auto old_policy = \
	    static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
	  auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
	  new_policy->filter_merge(policy_rw_mask, old_policy);
	  policy = *new_policy;
	}
	buffer::list bl;
	policy.encode(bl);
	emplace_attr(RGW_ATTR_ACL, std::move(bl));
      }

      if (has_cors) {
	buffer::list bl;
	cors_config.encode(bl);
	emplace_attr(RGW_ATTR_CORS, std::move(bl));
      }

      /* It's supposed that following functions WILL NOT change any
       * special attributes (like RGW_ATTR_ACL) if they are already
       * present in attrs. */
      prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
      populate_with_generic_attrs(s, attrs);

      /* According to the Swift's behaviour and its container_quota
       * WSGI middleware implementation: anyone with write permissions
       * is able to set the bucket quota. This stays in contrast to
       * account quotas that can be set only by clients holding
       * reseller admin privileges. */
      op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
      if (op_ret < 0) {
	return op_ret;
      }

      if (swift_ver_location) {
	/* An empty location disables Swift object versioning. */
	s->bucket_info.swift_ver_location = *swift_ver_location;
	s->bucket_info.swift_versioning = (!swift_ver_location->empty());
      }

      /* Web site of Swift API. */
      filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
      s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

      /* Setting attributes also stores the provided bucket info. Due
       * to this fact, the new quota settings can be serialized with
       * the same call. */
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
				    &s->bucket_info.objv_tracker);
      return op_ret;
    });
}
4361
4362 int RGWPutMetadataObject::verify_permission()
4363 {
4364 // This looks to be something specific to Swift. We could add
4365 // operations like swift:PutMetadataObject to the Policy Engine.
4366 if (!verify_object_permission_no_policy(this, s, RGW_PERM_WRITE)) {
4367 return -EACCES;
4368 }
4369
4370 return 0;
4371 }
4372
void RGWPutMetadataObject::pre_exec()
{
  /* Delegate to the shared bucket/object pre-execution hook. */
  rgw_bucket_object_pre_exec(s);
}
4377
void RGWPutMetadataObject::execute()
{
  /* Swift "POST object": replace the object's user metadata with the set
   * supplied in the request, preserving special attributes and handling
   * delete-at and DLO manifest headers. Result is reported via op_ret. */
  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs, orig_attrs, rmattrs;

  store->set_atomic(s->obj_ctx, obj);

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  /* check if obj exists, read orig attrs */
  op_ret = get_obj_attrs(store, s, obj, orig_attrs);
  if (op_ret < 0) {
    return;
  }

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(orig_attrs)) {
    op_ret = -ENOENT;
    return;
  }

  /* Filter currently existing attributes. */
  prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
  populate_with_generic_attrs(s, attrs);
  encode_delete_at_attr(delete_at, attrs);

  if (dlo_manifest) {
    /* X-Object-Manifest header: validate and store the DLO manifest. */
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
}
4423
4424 int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
4425 {
4426 RGWSLOInfo slo_info;
4427 auto bliter = bl.cbegin();
4428 try {
4429 decode(slo_info, bliter);
4430 } catch (buffer::error& err) {
4431 ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
4432 return -EIO;
4433 }
4434
4435 try {
4436 deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
4437 new RGWBulkDelete::Deleter(this, store, s));
4438 } catch (const std::bad_alloc&) {
4439 return -ENOMEM;
4440 }
4441
4442 list<RGWBulkDelete::acct_path_t> items;
4443 for (const auto& iter : slo_info.entries) {
4444 const string& path_str = iter.path;
4445
4446 const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
4447 if (boost::string_view::npos == sep_pos) {
4448 return -EINVAL;
4449 }
4450
4451 RGWBulkDelete::acct_path_t path;
4452
4453 path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
4454 path.obj_key = url_decode(path_str.substr(sep_pos + 1));
4455
4456 items.push_back(path);
4457 }
4458
4459 /* Request removal of the manifest object itself. */
4460 RGWBulkDelete::acct_path_t path;
4461 path.bucket_name = s->bucket_name;
4462 path.obj_key = s->object;
4463 items.push_back(path);
4464
4465 int ret = deleter->delete_chunk(items);
4466 if (ret < 0) {
4467 return ret;
4468 }
4469
4470 return 0;
4471 }
4472
int RGWDeleteObj::verify_permission()
{
  /* Evaluate IAM user policies and the bucket policy for
   * s3:DeleteObject / s3:DeleteObjectVersion (chosen by whether a version
   * instance is addressed); fall back to ACL WRITE when policies neither
   * allow nor deny. Versioned deletes on MFA-enabled buckets additionally
   * require MFA. */
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                             boost::none,
                                             s->object.instance.empty() ?
                                             rgw::IAM::s3DeleteObject :
                                             rgw::IAM::s3DeleteObjectVersion,
                                             ARN(s->bucket, s->object.name));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    rgw::IAM::Effect r = Effect::Pass;
    if (s->iam_policy) {
      r = s->iam_policy->eval(s->env, *s->auth.identity,
				 s->object.instance.empty() ?
				 rgw::IAM::s3DeleteObject :
				 rgw::IAM::s3DeleteObjectVersion,
				 ARN(s->bucket, s->object.name));
    }
    /* Bucket-policy verdict takes precedence over the user-policy Allow;
     * only a Pass from both falls through to the ACL check below. */
    if (r == Effect::Allow)
      return 0;
    else if (r == Effect::Deny)
      return -EACCES;
    else if (usr_policy_res == Effect::Allow)
      return 0;
  }

  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Deleting a specific version on an MFA-enabled bucket needs a verified
   * MFA token. */
  if (s->bucket_info.mfa_enabled() &&
      !s->object.instance.empty() &&
      !s->mfa_verified) {
    ldpp_dout(this, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl;
    return -ERR_MFA_REQUIRED;
  }

  return 0;
}
4515
void RGWDeleteObj::pre_exec()
{
  /* Delegate to the shared bucket/object pre-execution hook. */
  rgw_bucket_object_pre_exec(s);
}
4520
void RGWDeleteObj::execute()
{
  /* Delete an object, handling three special flows before the regular
   * delete: SLO multipart-delete (delete manifest + segments), Swift
   * object-versioning restore (promote the previous version instead of
   * deleting), and Swift object expiration (404 for expired objects).
   * Result is reported via op_ret. */
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs;


  if (!s->object.empty()) {
    if (need_object_expiration() || multipart_delete) {
      /* check if obj exists, read orig attrs */
      op_ret = get_obj_attrs(store, s, obj, attrs);
      if (op_ret < 0) {
        return;
      }
    }

    if (multipart_delete) {
      /* Swift ?multipart-manifest=delete: only valid on SLO manifests. */
      const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);

      if (slo_attr != attrs.end()) {
        op_ret = handle_slo_manifest(slo_attr->second);
        if (op_ret < 0) {
          ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
        }
      } else {
        op_ret = -ERR_NOT_SLO_MANIFEST;
      }

      return;
    }

    RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
    obj_ctx->set_atomic(obj);

    bool ver_restored = false;
    op_ret = store->swift_versioning_restore(*s->sysobj_ctx, *obj_ctx, s->bucket_owner.get_id(),
                                             s->bucket_info, obj, ver_restored);
    if (op_ret < 0) {
      return;
    }

    if (!ver_restored) {
      /* Swift's versioning mechanism hasn't found any previous version of
       * the object that could be restored. This means we should proceed
       * with the regular delete path. */
      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
      RGWRados::Object::Delete del_op(&del_target);

      op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
                                            &del_op.params.marker_version_id);
      if (op_ret < 0) {
        return;
      }

      del_op.params.bucket_owner = s->bucket_owner.get_id();
      del_op.params.versioning_status = s->bucket_info.versioning_status();
      del_op.params.obj_owner = s->owner;
      del_op.params.unmod_since = unmod_since;
      del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */

      op_ret = del_op.delete_obj();
      if (op_ret >= 0) {
        /* Propagate version info for the response headers. */
        delete_marker = del_op.result.delete_marker;
        version_id = del_op.result.version_id;
      }

      /* Check whether the object has expired. Swift API documentation
       * stands that we should return 404 Not Found in such case. */
      if (need_object_expiration() && object_is_expired(attrs)) {
        op_ret = -ENOENT;
        return;
      }
    }

    /* Losing the write race is fine - the object is gone either way. */
    if (op_ret == -ECANCELED) {
      op_ret = 0;
    }
    if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
      op_ret = 0;
    }
  } else {
    /* A DELETE without an object name is invalid. */
    op_ret = -EINVAL;
  }
}
4614
4615 bool RGWCopyObj::parse_copy_location(const boost::string_view& url_src,
4616 string& bucket_name,
4617 rgw_obj_key& key)
4618 {
4619 boost::string_view name_str;
4620 boost::string_view params_str;
4621
4622 // search for ? before url-decoding so we don't accidentally match %3F
4623 size_t pos = url_src.find('?');
4624 if (pos == string::npos) {
4625 name_str = url_src;
4626 } else {
4627 name_str = url_src.substr(0, pos);
4628 params_str = url_src.substr(pos + 1);
4629 }
4630
4631 boost::string_view dec_src{name_str};
4632 if (dec_src[0] == '/')
4633 dec_src.remove_prefix(1);
4634
4635 pos = dec_src.find('/');
4636 if (pos == string::npos)
4637 return false;
4638
4639 bucket_name = url_decode(dec_src.substr(0, pos));
4640 key.name = url_decode(dec_src.substr(pos + 1));
4641
4642 if (key.name.empty()) {
4643 return false;
4644 }
4645
4646 if (! params_str.empty()) {
4647 RGWHTTPArgs args;
4648 args.set(params_str.to_string());
4649 args.parse();
4650
4651 key.instance = args.get("versionId", NULL);
4652 }
4653
4654 return true;
4655 }
4656
int RGWCopyObj::verify_permission()
{
  /* Authorize a server-side copy: read access on the source object
   * (bucket policy or ACL) and write access on the destination bucket
   * (bucket policy or ACL), loading bucket info for both sides along the
   * way. Admin identities bypass the respective checks. */
  RGWAccessControlPolicy src_acl(s->cct);
  boost::optional<Policy> src_policy;
  op_ret = get_params();
  if (op_ret < 0)
    return op_ret;

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return op_ret;
  }
  map<string, bufferlist> src_attrs;

  if (s->bucket_instance_id.empty()) {
    op_ret = store->get_bucket_info(*s->sysobj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
  } else {
    /* will only happen in intra region sync where the source and dest bucket is the same */
    op_ret = store->get_bucket_instance_info(*s->sysobj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
  }
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_BUCKET;
    }
    return op_ret;
  }

  src_bucket = src_bucket_info.bucket;

  /* get buckets info (source and dest) */
  if (s->local_source && source_zone.empty()) {
    rgw_obj src_obj(src_bucket, src_object);
    store->set_atomic(s->obj_ctx, src_obj);
    store->set_prefetch_data(s->obj_ctx, src_obj);

    rgw_placement_rule src_placement;

    /* check source object permissions */
    op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl, &src_placement.storage_class,
                             src_policy, src_bucket, src_object);
    if (op_ret < 0) {
      return op_ret;
    }

    /* follow up on previous checks that required reading source object head */
    if (need_to_check_storage_class) {
      src_placement.inherit_from(src_bucket_info.placement_rule);

      op_ret = check_storage_class(src_placement);
      if (op_ret < 0) {
        return op_ret;
      }
    }

    /* admin request overrides permission checks */
    if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
      if (src_policy) {
        /* Source bucket policy first; only a Pass falls through to the
         * source object ACL. */
        auto e = src_policy->eval(s->env, *s->auth.identity,
                                  src_object.instance.empty() ?
                                  rgw::IAM::s3GetObject :
                                  rgw::IAM::s3GetObjectVersion,
                                  ARN(src_obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !src_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
                                              RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!src_acl.verify_permission(this, *s->auth.identity,
                                            s->perm_mask,
                                            RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  RGWAccessControlPolicy dest_bucket_policy(s->cct);
  map<string, bufferlist> dest_attrs;

  if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
                                                           or intra region sync */
    dest_bucket_info = src_bucket_info;
    dest_attrs = src_attrs;
  } else {
    op_ret = store->get_bucket_info(*s->sysobj_ctx, dest_tenant_name, dest_bucket_name,
                                    dest_bucket_info, nullptr, &dest_attrs);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return op_ret;
    }
  }

  dest_bucket = dest_bucket_info.bucket;

  rgw_obj dest_obj(dest_bucket, dest_object);
  store->set_atomic(s->obj_ctx, dest_obj);

  /* check dest bucket permissions */
  op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
                              &dest_bucket_policy, dest_bucket);
  if (op_ret < 0) {
    return op_ret;
  }
  auto dest_iam_policy = get_iam_policy_from_attr(s->cct, store, dest_attrs, dest_bucket.tenant);
  /* admin request overrides permission checks */
  /* NOTE(review): the admin check here consults dest_policy (the ACL to
   * be written) rather than dest_bucket_policy (the bucket's current
   * ACL) - looks intentional upstream but worth confirming. */
  if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id())){
    if (dest_iam_policy != boost::none) {
      /* Expose copy-source and metadata-directive to policy conditions. */
      rgw_add_to_iam_environment(s->env, "s3:x-amz-copy-source", copy_source);
      if (md_directive)
	rgw_add_to_iam_environment(s->env, "s3:x-amz-metadata-directive",
                                   *md_directive);

      auto e = dest_iam_policy->eval(s->env, *s->auth.identity,
                                     rgw::IAM::s3PutObject,
                                     ARN(dest_obj));
      if (e == Effect::Deny) {
        return -EACCES;
      } else if (e == Effect::Pass &&
                 ! dest_bucket_policy.verify_permission(this,
                                                        *s->auth.identity,
                                                        s->perm_mask,
                                                        RGW_PERM_WRITE)){
        return -EACCES;
      }
    }
  } else if (! dest_bucket_policy.verify_permission(this, *s->auth.identity, s->perm_mask,
                                                    RGW_PERM_WRITE)) {
    return -EACCES;
  }

  op_ret = init_dest_policy();
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4797
4798
int RGWCopyObj::init_common()
{
  /* Shared setup for the copy: parse the If-(Un)Modified-Since
   * conditions, serialize the destination ACL into attrs, and collect
   * request/generic metadata. Returns 0 on success, negative errno on
   * failure (also stored in op_ret). */
  if (if_mod) {
    if (parse_time(if_mod, &mod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    mod_ptr = &mod_time;
  }

  if (if_unmod) {
    if (parse_time(if_unmod, &unmod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    unmod_ptr = &unmod_time;
  }

  /* The dest policy was initialized by verify_permission(). */
  bufferlist aclbl;
  dest_policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return op_ret;
  }
  populate_with_generic_attrs(s, attrs);

  return 0;
}
4829
4830 static void copy_obj_progress_cb(off_t ofs, void *param)
4831 {
4832 RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
4833 op->progress_cb(ofs);
4834 }
4835
4836 void RGWCopyObj::progress_cb(off_t ofs)
4837 {
4838 if (!s->cct->_conf->rgw_copy_obj_progress)
4839 return;
4840
4841 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
4842 return;
4843
4844 send_partial_response(ofs);
4845
4846 last_ofs = ofs;
4847 }
4848
void RGWCopyObj::pre_exec()
{
  /* Delegate to the shared bucket/object pre-execution hook. */
  rgw_bucket_object_pre_exec(s);
}
4853
void RGWCopyObj::execute()
{
  /* Perform the server-side copy: resolve source/destination objects,
   * pick or generate the destination version id, handle Swift versioning
   * of the destination, then hand off to RGWRados::copy_obj. The result
   * lands in op_ret. */
  if (init_common() < 0)
    return;

  rgw_obj src_obj(src_bucket, src_object);
  rgw_obj dst_obj(dest_bucket, dest_object);

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  if ( ! version_id.empty()) {
    /* Caller pinned an explicit destination version. */
    dst_obj.key.set_instance(version_id);
  } else if (dest_bucket_info.versioning_enabled()) {
    /* Versioned bucket: mint a fresh instance id for the new version. */
    store->gen_rand_obj_instance_name(&dst_obj);
  }

  obj_ctx.set_atomic(src_obj);
  obj_ctx.set_atomic(dst_obj);

  encode_delete_at_attr(delete_at, attrs);

  bool high_precision_time = (s->system_request);

  /* Handle object versioning of Swift API. In case of copying to remote this
   * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
  op_ret = store->swift_versioning_copy(obj_ctx,
                                        dest_bucket_info.owner,
                                        dest_bucket_info,
                                        dst_obj);
  if (op_ret < 0) {
    return;
  }

  op_ret = store->copy_obj(obj_ctx,
                           s->user->user_id,
                           &s->info,
                           source_zone,
                           dst_obj,
                           src_obj,
                           dest_bucket_info,
                           src_bucket_info,
                           s->dest_placement,
                           &src_mtime,
                           &mtime,
                           mod_ptr,
                           unmod_ptr,
                           high_precision_time,
                           if_match,
                           if_nomatch,
                           attrs_mod,
                           copy_if_newer,
                           attrs, RGWObjCategory::Main,
                           olh_epoch,
                           (delete_at ? *delete_at : real_time()),
                           (version_id.empty() ? NULL : &version_id),
                           &s->req_id, /* use req_id as tag */
                           &etag,
                           copy_obj_progress_cb, (void *)this
    );
}
4913
int RGWGetACLs::verify_permission()
{
  /* Authorize reading the ACL of the addressed object (GetObjectAcl /
   * GetObjectVersionAcl) or bucket (GetBucketAcl). Object-tag conditions
   * referenced by IAM policies are loaded into the environment before
   * evaluation. */
  bool perm;
  if (!s->object.empty()) {
    auto iam_action = s->object.instance.empty() ?
      rgw::IAM::s3GetObjectAcl :
      rgw::IAM::s3GetObjectVersionAcl;

    if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){
      /* Policy references existing object tags - fetch them so the
       * condition can be evaluated. */
      rgw_obj obj = rgw_obj(s->bucket, s->object);
      rgw_iam_add_existing_objtags(store, s, obj, iam_action);
    }
    if (! s->iam_user_policies.empty()) {
      for (auto& user_policy : s->iam_user_policies) {
        if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) {
          rgw_obj obj = rgw_obj(s->bucket, s->object);
          rgw_iam_add_existing_objtags(store, s, obj, iam_action);
        }
      }
    }
    perm = verify_object_permission(this, s, iam_action);
  } else {
    if (!s->bucket_exists) {
      return -ERR_NO_SUCH_BUCKET;
    }
    perm = verify_bucket_permission(this, s, rgw::IAM::s3GetBucketAcl);
  }
  if (!perm)
    return -EACCES;

  return 0;
}
4946
void RGWGetACLs::pre_exec()
{
  /* Delegate to the shared bucket/object pre-execution hook. */
  rgw_bucket_object_pre_exec(s);
}
4951
4952 void RGWGetACLs::execute()
4953 {
4954 stringstream ss;
4955 RGWAccessControlPolicy* const acl = \
4956 (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get());
4957 RGWAccessControlPolicy_S3* const s3policy = \
4958 static_cast<RGWAccessControlPolicy_S3*>(acl);
4959 s3policy->to_xml(ss);
4960 acls = ss.str();
4961 }
4962
4963
4964
int RGWPutACLs::verify_permission()
{
  /* Authorize writing the ACL of the addressed object (PutObjectAcl /
   * PutObjectVersionAcl) or bucket (PutBucketAcl). The canned ACL and
   * grant headers are exposed to the IAM condition environment first. */
  bool perm;

  rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);

  rgw_add_grant_to_iam_environment(s->env, s);
  if (!s->object.empty()) {
    auto iam_action = s->object.instance.empty() ? rgw::IAM::s3PutObjectAcl : rgw::IAM::s3PutObjectVersionAcl;
    auto obj = rgw_obj(s->bucket, s->object);
    /* NOTE(review): the return value stored here is immediately
     * overwritten below and never checked - presumably a best-effort
     * tag load; confirm before relying on op_ret at this point. */
    op_ret = rgw_iam_add_existing_objtags(store, s, obj, iam_action);
    perm = verify_object_permission(this, s, iam_action);
  } else {
    perm = verify_bucket_permission(this, s, rgw::IAM::s3PutBucketAcl);
  }
  if (!perm)
    return -EACCES;

  return 0;
}
4985
4986 int RGWGetLC::verify_permission()
4987 {
4988 bool perm;
4989 perm = verify_bucket_permission(this, s, rgw::IAM::s3GetLifecycleConfiguration);
4990 if (!perm)
4991 return -EACCES;
4992
4993 return 0;
4994 }
4995
4996 int RGWPutLC::verify_permission()
4997 {
4998 bool perm;
4999 perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
5000 if (!perm)
5001 return -EACCES;
5002
5003 return 0;
5004 }
5005
int RGWDeleteLC::verify_permission()
{
  /* Deleting the lifecycle configuration is gated behind the *Put*
   * lifecycle action, matching AWS S3, which has no separate delete
   * permission for lifecycle. */
  bool perm;
  perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration);
  if (!perm)
    return -EACCES;

  return 0;
}
5015
void RGWPutACLs::pre_exec()
{
  /* Delegate to the shared bucket/object pre-execution hook. */
  rgw_bucket_object_pre_exec(s);
}
5020
void RGWGetLC::pre_exec()
{
  /* Delegate to the shared bucket/object pre-execution hook. */
  rgw_bucket_object_pre_exec(s);
}
5025
void RGWPutLC::pre_exec()
{
  /* Delegate to the shared bucket/object pre-execution hook. */
  rgw_bucket_object_pre_exec(s);
}
5030
void RGWDeleteLC::pre_exec()
{
  /* Delegate to the shared bucket/object pre-execution hook. */
  rgw_bucket_object_pre_exec(s);
}
5035
void RGWPutACLs::execute()
{
  /* Parse the request ACL (either an XML body or one synthesized from a
   * canned-ACL / grant headers), validate it, forward bucket-ACL changes
   * to the metadata master zone, and persist the rebuilt policy as the
   * RGW_ATTR_ACL attribute of the object or bucket. */
  bufferlist bl;

  RGWAccessControlPolicy_S3 *policy = NULL;
  RGWACLXMLParser_S3 parser(s->cct);
  RGWAccessControlPolicy_S3 new_policy(s->cct);
  stringstream ss;
  rgw_obj obj;

  op_ret = 0; /* XXX redundant? */

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }


  /* The owner of the existing ACL is preserved through the rebuild. */
  RGWAccessControlPolicy* const existing_policy = \
    (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());

  owner = existing_policy->get_owner();

  op_ret = get_params();
  if (op_ret < 0) {
    if (op_ret == -ERANGE) {
      ldpp_dout(this, 4) << "The size of request xml data is larger than the max limitation, data size = "
                         << s->length << dendl;
      op_ret = -ERR_MALFORMED_XML;
      s->err.message = "The XML you provided was larger than the maximum " +
                       std::to_string(s->cct->_conf->rgw_max_put_param_size) +
                       " bytes allowed.";
    }
    return;
  }

  char* buf = data.c_str();
  ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;

  /* A canned ACL and an XML body are mutually exclusive. */
  if (!s->canned_acl.empty() && data.length() > 0) {
    op_ret = -EINVAL;
    return;
  }

  if (!s->canned_acl.empty() || s->has_acl_header) {
    /* Synthesize the ACL XML from the canned ACL / grant headers so the
     * common parse path below can be used. */
    op_ret = get_policy_from_state(store, s, ss);
    if (op_ret < 0)
      return;

    data.clear();
    data.append(ss.str());
  }

  if (!parser.parse(data.c_str(), data.length(), 1)) {
    op_ret = -EINVAL;
    return;
  }
  policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
  if (!policy) {
    op_ret = -EINVAL;
    return;
  }

  /* Enforce the configurable cap on the number of grants in one ACL. */
  const RGWAccessControlList& req_acl = policy->get_acl();
  const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
#define ACL_GRANTS_MAX_NUM      100
  int max_num = s->cct->_conf->rgw_acl_grants_max_num;
  if (max_num < 0) {
    max_num = ACL_GRANTS_MAX_NUM;
  }

  int grants_num = req_grant_map.size();
  if (grants_num > max_num) {
    ldpp_dout(this, 4) << "An acl can have up to " << max_num
                       << " grants, request acl grants num: " << grants_num << dendl;
    op_ret = -ERR_MALFORMED_ACL_ERROR;
    s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
                     + std::to_string(max_num)
                     + " grants allowed in an acl.";
    return;
  }

  // forward bucket acl requests to meta master zone
  if (s->object.empty() && !store->svc.zone->is_meta_master()) {
    bufferlist in_data;
    // include acl data unless it was generated from a canned_acl
    if (s->canned_acl.empty()) {
      in_data.append(data);
    }
    op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
    ldpp_dout(this, 15) << "Old AccessControlPolicy";
    policy->to_xml(*_dout);
    *_dout << dendl;
  }

  /* Resolve grantees (e-mail, canonical id, ...) and re-attach the owner. */
  op_ret = policy->rebuild(store, &owner, new_policy);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
    ldpp_dout(this, 15) << "New AccessControlPolicy:";
    new_policy.to_xml(*_dout);
    *_dout << dendl;
  }

  new_policy.encode(bl);
  map<string, bufferlist> attrs;

  if (!s->object.empty()) {
    obj = rgw_obj(s->bucket, s->object);
    store->set_atomic(s->obj_ctx, obj);
    //if instance is empty, we should modify the latest object
    op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
  } else {
    attrs = s->bucket_attrs;
    attrs[RGW_ATTR_ACL] = bl;
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  }
  if (op_ret == -ECANCELED) {
    op_ret = 0; /* lost a race, but it's ok because acls are immutable */
  }
}
5165
void RGWPutLC::execute()
{
  /* Store a bucket lifecycle configuration: verify the mandatory
   * Content-MD5 against the body, parse and validate the XML, forward to
   * the metadata master zone when needed, and persist via the LC
   * subsystem. */
  bufferlist bl;

  RGWLifecycleConfiguration_S3 config(s->cct);
  RGWXMLParser parser;
  RGWLifecycleConfiguration_S3 new_config(s->cct);

  /* S3 requires Content-MD5 for lifecycle PUTs. */
  content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
  if (content_md5 == nullptr) {
    op_ret = -ERR_INVALID_REQUEST;
    s->err.message = "Missing required header for this request: Content-MD5";
    ldpp_dout(this, 5) << s->err.message << dendl;
    return;
  }

  std::string content_md5_bin;
  try {
    content_md5_bin = rgw::from_base64(boost::string_view(content_md5));
  } catch (...) {
    s->err.message = "Request header Content-MD5 contains character "
                     "that is not base64 encoded.";
    ldpp_dout(this, 5) << s->err.message << dendl;
    op_ret = -ERR_BAD_DIGEST;
    return;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0)
    return;

  char* buf = data.c_str();
  ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl;

  /* Recompute the body digest and compare with the client-provided one. */
  MD5 data_hash;
  unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
  data_hash.Update(reinterpret_cast<const unsigned char*>(buf), data.length());
  data_hash.Final(data_hash_res);

  if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
    op_ret = -ERR_BAD_DIGEST;
    s->err.message = "The Content-MD5 you specified did not match what we received.";
    ldpp_dout(this, 5) << s->err.message
                       << " Specified content md5: " << content_md5
                       << ", calculated content md5: " << data_hash_res
                       << dendl;
    return;
  }

  if (!parser.parse(buf, data.length(), 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  try {
    RGWXMLDecoder::decode_xml("LifecycleConfiguration", config, &parser);
  } catch (RGWXMLDecoder::err& err) {
    ldpp_dout(this, 5) << "Bad lifecycle configuration: " << err << dendl;
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  /* Validate / normalize the decoded rules into new_config. */
  op_ret = config.rebuild(store, new_config);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
    XMLFormatter xf;
    new_config.dump_xml(&xf);
    stringstream ss;
    xf.flush(ss);
    ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
  }

  /* Bucket metadata changes are made on the metadata master zone. */
  if (!store->svc.zone->is_meta_master()) {
    op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  op_ret = store->get_lc()->set_bucket_config(s->bucket_info, s->bucket_attrs, &new_config);
  if (op_ret < 0) {
    return;
  }
  return;
}
5259
void RGWDeleteLC::execute()
{
  /* Remove the bucket's lifecycle configuration: forward to the metadata
   * master zone when needed, strip the RGW_ATTR_LC attribute from the
   * bucket, then unregister the bucket from the LC subsystem. */
  if (!store->svc.zone->is_meta_master()) {
    bufferlist data;
    op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }
  map<string, bufferlist> attrs = s->bucket_attrs;
  attrs.erase(RGW_ATTR_LC);
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                &s->bucket_info.objv_tracker);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket="
                       << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }

  op_ret = store->get_lc()->remove_bucket_config(s->bucket_info, s->bucket_attrs);
  if (op_ret < 0) {
    return;
  }
  return;
}
5286
int RGWGetCORS::verify_permission()
{
  /* Bucket owner, or anyone the bucket policy grants s3:GetBucketCORS. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
}
5291
void RGWGetCORS::execute()
{
  /* Load the bucket's CORS configuration (read_bucket_cors() fills the
   * members and sets cors_exist); report ERR_NO_CORS_FOUND when none is
   * configured. */
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return ;

  if (!cors_exist) {
    ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ERR_NO_CORS_FOUND;
    return;
  }
}
5304
5305 int RGWPutCORS::verify_permission()
5306 {
5307 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
5308 }
5309
5310 void RGWPutCORS::execute()
5311 {
5312 rgw_raw_obj obj;
5313
5314 op_ret = get_params();
5315 if (op_ret < 0)
5316 return;
5317
5318 if (!store->svc.zone->is_meta_master()) {
5319 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
5320 if (op_ret < 0) {
5321 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5322 return;
5323 }
5324 }
5325
5326 op_ret = retry_raced_bucket_write(store, s, [this] {
5327 map<string, bufferlist> attrs = s->bucket_attrs;
5328 attrs[RGW_ATTR_CORS] = cors_bl;
5329 return rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
5330 });
5331 }
5332
5333 int RGWDeleteCORS::verify_permission()
5334 {
5335 // No separate delete permission
5336 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
5337 }
5338
void RGWDeleteCORS::execute()
{
  // CORS changes must be applied on the metadata master zone first.
  if (!store->svc.zone->is_meta_master()) {
    bufferlist data;
    op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  // Remove the CORS xattr under retry_raced_bucket_write, which re-runs the
  // lambda if a concurrent bucket-metadata write is detected.
  op_ret = retry_raced_bucket_write(store, s, [this] {
      // Re-read inside the retry loop so cors_exist reflects current state.
      op_ret = read_bucket_cors();
      if (op_ret < 0)
        return op_ret;

      if (!cors_exist) {
        ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
        op_ret = -ENOENT;
        return op_ret;
      }

      // Persist a copy of the bucket attrs without RGW_ATTR_CORS.
      map<string, bufferlist> attrs = s->bucket_attrs;
      attrs.erase(RGW_ATTR_CORS);
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                    &s->bucket_info.objv_tracker);
      if (op_ret < 0) {
        ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket.name
                           << " returned err=" << op_ret << dendl;
      }
      return op_ret;
    });
}
5372
// Fill in the CORS response headers (allowed headers, exposed headers and
// max-age) from the rule matched earlier by validate_cors_request().
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
  get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
5376
5377 int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
5378 rule = cc->host_name_rule(origin);
5379 if (!rule) {
5380 ldpp_dout(this, 10) << "There is no cors rule present for " << origin << dendl;
5381 return -ENOENT;
5382 }
5383
5384 if (!validate_cors_rule_method(rule, req_meth)) {
5385 return -ENOENT;
5386 }
5387
5388 if (!validate_cors_rule_header(rule, req_hdrs)) {
5389 return -ENOENT;
5390 }
5391
5392 return 0;
5393 }
5394
void RGWOptionsCORS::execute()
{
  // Load the bucket's CORS configuration (sets cors_exist / bucket_cors).
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  // A CORS preflight must carry both the Origin and the
  // Access-Control-Request-Method headers.
  origin = s->info.env->get("HTTP_ORIGIN");
  if (!origin) {
    ldpp_dout(this, 0) << "Missing mandatory Origin header" << dendl;
    op_ret = -EINVAL;
    return;
  }
  req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    ldpp_dout(this, 0) << "Missing mandatory Access-control-request-method header" << dendl;
    op_ret = -EINVAL;
    return;
  }
  if (!cors_exist) {
    ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  // The requested-headers header is optional; validate the whole triple
  // against the configuration.
  req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
  op_ret = validate_cors_request(&bucket_cors);
  if (!rule) {
    // No rule matched: clear the echo values so the response is built
    // without CORS headers.
    origin = req_meth = NULL;
    return;
  }
  return;
}
5426
5427 int RGWGetRequestPayment::verify_permission()
5428 {
5429 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
5430 }
5431
void RGWGetRequestPayment::pre_exec()
{
  // Common bucket/object pre-exec hook shared by bucket-level ops.
  rgw_bucket_object_pre_exec(s);
}
5436
void RGWGetRequestPayment::execute()
{
  // Report the bucket's current requester-pays flag; serialized by the
  // protocol-specific send_response().
  requester_pays = s->bucket_info.requester_pays;
}
5441
5442 int RGWSetRequestPayment::verify_permission()
5443 {
5444 return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
5445 }
5446
void RGWSetRequestPayment::pre_exec()
{
  // Common bucket/object pre-exec hook shared by bucket-level ops.
  rgw_bucket_object_pre_exec(s);
}
5451
5452 void RGWSetRequestPayment::execute()
5453 {
5454
5455 if (!store->svc.zone->is_meta_master()) {
5456 op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
5457 if (op_ret < 0) {
5458 ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
5459 return;
5460 }
5461 }
5462
5463 op_ret = get_params();
5464
5465 if (op_ret < 0)
5466 return;
5467
5468 s->bucket_info.requester_pays = requester_pays;
5469 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
5470 &s->bucket_attrs);
5471 if (op_ret < 0) {
5472 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
5473 << " returned err=" << op_ret << dendl;
5474 return;
5475 }
5476 }
5477
int RGWInitMultipart::verify_permission()
{
  // When a bucket policy or user IAM policies exist, evaluate them for
  // s3:PutObject on the target object. Precedence: an explicit Deny from
  // either source wins; otherwise an Allow from either source grants access;
  // if both evaluate to Pass (no statement matched), fall through to the
  // legacy ACL check below.
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                             boost::none,
                                             rgw::IAM::s3PutObject,
                                             rgw_obj(s->bucket, s->object));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    rgw::IAM::Effect e = Effect::Pass;
    if (s->iam_policy) {
      e = s->iam_policy->eval(s->env, *s->auth.identity,
                              rgw::IAM::s3PutObject,
                              rgw_obj(s->bucket, s->object));
    }
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    } else if (usr_policy_res == Effect::Allow) {
      return 0;
    }
  }

  // Legacy ACL fallback: WRITE permission on the bucket.
  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
5510
void RGWInitMultipart::pre_exec()
{
  // Common bucket/object pre-exec hook shared by object ops.
  rgw_bucket_object_pre_exec(s);
}
5515
5516 void RGWInitMultipart::execute()
5517 {
5518 bufferlist aclbl;
5519 map<string, bufferlist> attrs;
5520 rgw_obj obj;
5521
5522 if (get_params() < 0)
5523 return;
5524
5525 if (s->object.empty())
5526 return;
5527
5528 policy.encode(aclbl);
5529 attrs[RGW_ATTR_ACL] = aclbl;
5530
5531 populate_with_generic_attrs(s, attrs);
5532
5533 /* select encryption mode */
5534 op_ret = prepare_encryption(attrs);
5535 if (op_ret != 0)
5536 return;
5537
5538 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
5539 if (op_ret < 0) {
5540 return;
5541 }
5542
5543 do {
5544 char buf[33];
5545 gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
5546 upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
5547 upload_id.append(buf);
5548
5549 string tmp_obj_name;
5550 RGWMPObj mp(s->object.name, upload_id);
5551 tmp_obj_name = mp.get_meta();
5552
5553 obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
5554 // the meta object will be indexed with 0 size, we c
5555 obj.set_in_extra_data(true);
5556 obj.index_hash_source = s->object.name;
5557
5558 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
5559 op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
5560
5561 RGWRados::Object::Write obj_op(&op_target);
5562
5563 obj_op.meta.owner = s->owner.get_id();
5564 obj_op.meta.category = RGWObjCategory::MultiMeta;
5565 obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;
5566
5567 multipart_upload_info upload_info;
5568 upload_info.dest_placement = s->dest_placement;
5569
5570 bufferlist bl;
5571 encode(upload_info, bl);
5572 obj_op.meta.data = &bl;
5573
5574 op_ret = obj_op.write_meta(bl.length(), 0, attrs);
5575 } while (op_ret == -EEXIST);
5576 }
5577
int RGWCompleteMultipart::verify_permission()
{
  // Completing a multipart upload is authorized as s3:PutObject on the
  // final object. Precedence: explicit Deny from user or bucket policy
  // wins; an Allow from either grants; double Pass falls through to the
  // legacy ACL check.
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                             boost::none,
                                             rgw::IAM::s3PutObject,
                                             rgw_obj(s->bucket, s->object));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    rgw::IAM::Effect e = Effect::Pass;
    if (s->iam_policy) {
      e = s->iam_policy->eval(s->env, *s->auth.identity,
                              rgw::IAM::s3PutObject,
                              rgw_obj(s->bucket, s->object));
    }
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    } else if (usr_policy_res == Effect::Allow) {
      return 0;
    }
  }

  // Legacy ACL fallback: WRITE permission on the bucket.
  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
5610
void RGWCompleteMultipart::pre_exec()
{
  // Common bucket/object pre-exec hook shared by object ops.
  rgw_bucket_object_pre_exec(s);
}
5615
// Complete a multipart upload: validate the client-supplied part list
// against the stored parts, stitch the part manifests into one object
// manifest, compute the S3 "etag-NN" aggregate ETag, write the head object,
// and remove the upload's meta object. A cls exclusive lock on the meta
// object serializes racing completions/retries.
void RGWCompleteMultipart::execute()
{
  RGWMultiCompleteUpload *parts;
  map<int, string>::iterator iter;
  RGWMultiXMLParser parser;
  string meta_oid;
  map<uint32_t, RGWUploadPartInfo> obj_parts;
  map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
  map<string, bufferlist> attrs;
  off_t ofs = 0;
  MD5 hash;
  char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
  char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
  bufferlist etag_bl;
  rgw_obj meta_obj;
  rgw_obj target_obj;
  RGWMPObj mp;
  RGWObjManifest manifest;
  uint64_t olh_epoch = 0;

  op_ret = get_params();
  if (op_ret < 0)
    return;
  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return;
  }

  // The request body must contain a well-formed CompleteMultipartUpload
  // XML document with at least one part.
  if (!data.length()) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (!parser.init()) {
    op_ret = -EIO;
    return;
  }

  if (!parser.parse(data.c_str(), data.length(), 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
  if (!parts || parts->parts.empty()) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  // Enforce the configured cap on the number of parts per upload.
  if ((int)parts->parts.size() >
      s->cct->_conf->rgw_multipart_part_upload_limit) {
    op_ret = -ERANGE;
    return;
  }

  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  int total_parts = 0;
  int handled_parts = 0;
  int max_parts = 1000;
  int marker = 0;
  bool truncated;
  RGWCompressionInfo cs_info;
  bool compressed = false;
  uint64_t accounted_size = 0;

  uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;

  list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */

  bool versioned_object = s->bucket_info.versioning_enabled();

  iter = parts->parts.begin();

  meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
  meta_obj.set_in_extra_data(true);
  meta_obj.index_hash_source = s->object.name;

  /*take a cls lock on meta_obj to prevent racing completions (or retries)
    from deleting the parts*/
  rgw_pool meta_pool;
  rgw_raw_obj raw_obj;
  int max_lock_secs_mp =
    s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
  utime_t dur(max_lock_secs_mp, 0);

  store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
  store->get_obj_data_pool((s->bucket_info).placement_rule,
                           meta_obj,&meta_pool);
  // NOTE(review): the return value of open_pool_ctx() is ignored here; a
  // failure would surface as an error from try_lock() below — confirm.
  store->open_pool_ctx(meta_pool, serializer.ioctx, true);

  op_ret = serializer.try_lock(raw_obj.oid, dur);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "failed to acquire lock" << dendl;
    op_ret = -ERR_INTERNAL_ERROR;
    s->err.message = "This multipart completion is already in progress";
    return;
  }

  // The meta object's xattrs (ACL, user metadata, ...) become the attrs of
  // the completed object.
  op_ret = get_obj_attrs(store, s, meta_obj, attrs);

  if (op_ret < 0) {
    ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
                       << " ret=" << op_ret << dendl;
    return;
  }

  // Walk the stored parts in pages of max_parts, matching them pairwise
  // against the client's requested part list.
  do {
    op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
                                  marker, obj_parts, &marker, &truncated);
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_UPLOAD;
    }
    if (op_ret < 0)
      return;

    total_parts += obj_parts.size();
    if (!truncated && total_parts != (int)parts->parts.size()) {
      ldpp_dout(this, 0) << "NOTICE: total parts mismatch: have: " << total_parts
                         << " expected: " << parts->parts.size() << dendl;
      op_ret = -ERR_INVALID_PART;
      return;
    }

    for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
      // Every part except the last must meet the minimum part size.
      uint64_t part_size = obj_iter->second.accounted_size;
      if (handled_parts < (int)parts->parts.size() - 1 &&
          part_size < min_part_size) {
        op_ret = -ERR_TOO_SMALL;
        return;
      }

      // Part numbers and ETags must match the stored parts exactly.
      char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
      if (iter->first != (int)obj_iter->first) {
        ldpp_dout(this, 0) << "NOTICE: parts num mismatch: next requested: "
                           << iter->first << " next uploaded: "
                           << obj_iter->first << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }
      string part_etag = rgw_string_unquote(iter->second);
      if (part_etag.compare(obj_iter->second.etag) != 0) {
        ldpp_dout(this, 0) << "NOTICE: etag mismatch: part: " << iter->first
                           << " etag: " << iter->second << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }

      // Aggregate ETag = MD5 over the binary MD5s of all parts.
      hex_to_buf(obj_iter->second.etag.c_str(), petag,
                 CEPH_CRYPTO_MD5_DIGESTSIZE);
      hash.Update((const unsigned char *)petag, sizeof(petag));

      RGWUploadPartInfo& obj_part = obj_iter->second;

      /* update manifest for part */
      string oid = mp.get_part(obj_iter->second.num);
      rgw_obj src_obj;
      src_obj.init_ns(s->bucket, oid, mp_ns);

      if (obj_part.manifest.empty()) {
        ldpp_dout(this, 0) << "ERROR: empty manifest for object part: obj="
                           << src_obj << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      } else {
        manifest.append(obj_part.manifest, store->svc.zone);
      }

      // All parts must share one compression type; splice each part's
      // compression block map into the whole-object map, shifting offsets.
      bool part_compressed = (obj_part.cs_info.compression_type != "none");
      if ((obj_iter != obj_parts.begin()) &&
          ((part_compressed != compressed) ||
            (cs_info.compression_type != obj_part.cs_info.compression_type))) {
          ldpp_dout(this, 0) << "ERROR: compression type was changed during multipart upload ("
                             << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
          op_ret = -ERR_INVALID_PART;
          return;
      }

      if (part_compressed) {
        int64_t new_ofs; // offset in compression data for new part
        if (cs_info.blocks.size() > 0)
          new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
        else
          new_ofs = 0;
        for (const auto& block : obj_part.cs_info.blocks) {
          compression_block cb;
          cb.old_ofs = block.old_ofs + cs_info.orig_size;
          cb.new_ofs = new_ofs;
          cb.len = block.len;
          cs_info.blocks.push_back(cb);
          new_ofs = cb.new_ofs + cb.len;
        }
        if (!compressed)
          cs_info.compression_type = obj_part.cs_info.compression_type;
        cs_info.orig_size += obj_part.cs_info.orig_size;
        compressed = true;
      }

      // Queue the part's index entry for removal once the head is written.
      rgw_obj_index_key remove_key;
      src_obj.key.get_index_key(&remove_key);

      remove_objs.push_back(remove_key);

      ofs += obj_part.size;
      accounted_size += obj_part.accounted_size;
    }
  } while (truncated);
  hash.Final((unsigned char *)final_etag);

  // Final ETag is "<md5hex>-<#parts>", the S3 multipart convention.
  buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
  snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
           "-%lld", (long long)parts->parts.size());
  etag = final_etag_str;
  ldpp_dout(this, 10) << "calculated etag: " << final_etag_str << dendl;

  etag_bl.append(final_etag_str, strlen(final_etag_str));

  attrs[RGW_ATTR_ETAG] = etag_bl;

  if (compressed) {
    // write compression attribute to full object
    bufferlist tmp;
    encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
  }

  // On versioned buckets, honor a caller-supplied version id or mint one.
  target_obj.init(s->bucket, s->object.name);
  if (versioned_object) {
    if (!version_id.empty()) {
      target_obj.key.set_instance(version_id);
    } else {
      store->gen_rand_obj_instance_name(&target_obj);
      version_id = target_obj.key.get_instance();
    }
  }

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  obj_ctx.set_atomic(target_obj);

  // Write the head object: zero-length head with the stitched manifest;
  // remove_objs drops the parts' bucket-index entries atomically.
  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
  RGWRados::Object::Write obj_op(&op_target);

  obj_op.meta.manifest = &manifest;
  obj_op.meta.remove_objs = &remove_objs;

  obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
  obj_op.meta.owner = s->owner.get_id();
  obj_op.meta.flags = PUT_OBJ_CREATE;
  obj_op.meta.modify_tail = true;
  obj_op.meta.completeMultipart = true;
  obj_op.meta.olh_epoch = olh_epoch;
  op_ret = obj_op.write_meta(ofs, accounted_size, attrs);
  if (op_ret < 0)
    return;

  // remove the upload obj
  int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                            s->bucket_info, meta_obj, 0);
  if (r >= 0)  {
    /* serializer's exclusive lock is released */
    serializer.clear_locked();
  } else {
    // Lock stays marked held; complete() will release it explicitly.
    ldpp_dout(this, 0) << "WARNING: failed to remove object " << meta_obj << dendl;
  }
}
5883
5884 int RGWCompleteMultipart::MPSerializer::try_lock(
5885 const std::string& _oid,
5886 utime_t dur)
5887 {
5888 oid = _oid;
5889 op.assert_exists();
5890 lock.set_duration(dur);
5891 lock.lock_exclusive(&op);
5892 int ret = ioctx.operate(oid, &op);
5893 if (! ret) {
5894 locked = true;
5895 }
5896 return ret;
5897 }
5898
5899 void RGWCompleteMultipart::complete()
5900 {
5901 /* release exclusive lock iff not already */
5902 if (unlikely(serializer.locked)) {
5903 int r = serializer.unlock();
5904 if (r < 0) {
5905 ldpp_dout(this, 0) << "WARNING: failed to unlock " << serializer.oid << dendl;
5906 }
5907 }
5908 send_response();
5909 }
5910
int RGWAbortMultipart::verify_permission()
{
  // Aborting uses the dedicated s3:AbortMultipartUpload action. Precedence:
  // explicit Deny from user or bucket policy wins; an Allow from either
  // grants; double Pass falls through to the legacy ACL check.
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                             boost::none,
                                             rgw::IAM::s3AbortMultipartUpload,
                                             rgw_obj(s->bucket, s->object));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    rgw::IAM::Effect e = Effect::Pass;
    if (s->iam_policy) {
      e = s->iam_policy->eval(s->env, *s->auth.identity,
                              rgw::IAM::s3AbortMultipartUpload,
                              rgw_obj(s->bucket, s->object));
    }
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    } else if (usr_policy_res == Effect::Allow)
      return 0;
  }

  // Legacy ACL fallback: WRITE permission on the bucket.
  if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
5942
void RGWAbortMultipart::pre_exec()
{
  // Common bucket/object pre-exec hook shared by object ops.
  rgw_bucket_object_pre_exec(s);
}
5947
5948 void RGWAbortMultipart::execute()
5949 {
5950 op_ret = -EINVAL;
5951 string upload_id;
5952 string meta_oid;
5953 upload_id = s->info.args.get("uploadId");
5954 rgw_obj meta_obj;
5955 RGWMPObj mp;
5956
5957 if (upload_id.empty() || s->object.empty())
5958 return;
5959
5960 mp.init(s->object.name, upload_id);
5961 meta_oid = mp.get_meta();
5962
5963 op_ret = get_multipart_info(store, s, meta_oid, nullptr, nullptr, nullptr);
5964 if (op_ret < 0)
5965 return;
5966
5967 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
5968 op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
5969 }
5970
5971 int RGWListMultipart::verify_permission()
5972 {
5973 if (!verify_object_permission(this, s, rgw::IAM::s3ListMultipartUploadParts))
5974 return -EACCES;
5975
5976 return 0;
5977 }
5978
void RGWListMultipart::pre_exec()
{
  // Common bucket/object pre-exec hook shared by object ops.
  rgw_bucket_object_pre_exec(s);
}
5983
5984 void RGWListMultipart::execute()
5985 {
5986 string meta_oid;
5987 RGWMPObj mp;
5988
5989 op_ret = get_params();
5990 if (op_ret < 0)
5991 return;
5992
5993 mp.init(s->object.name, upload_id);
5994 meta_oid = mp.get_meta();
5995
5996 op_ret = get_multipart_info(store, s, meta_oid, &policy, nullptr, nullptr);
5997 if (op_ret < 0)
5998 return;
5999
6000 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
6001 marker, parts, NULL, &truncated);
6002 }
6003
6004 int RGWListBucketMultiparts::verify_permission()
6005 {
6006 if (!verify_bucket_permission(this,
6007 s,
6008 rgw::IAM::s3ListBucketMultipartUploads))
6009 return -EACCES;
6010
6011 return 0;
6012 }
6013
void RGWListBucketMultiparts::pre_exec()
{
  // Common bucket/object pre-exec hook shared by bucket-level ops.
  rgw_bucket_object_pre_exec(s);
}
6018
void RGWListBucketMultiparts::execute()
{
  vector<rgw_bucket_dir_entry> objs;
  string marker_meta;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  // Swift compatibility: a "path" query argument acts as prefix + "/"
  // delimiter, and may not be combined with explicit prefix/delimiter.
  if (s->prot_flags & RGW_REST_SWIFT) {
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }
  marker_meta = marker.get_meta();

  // List multipart meta objects in the bucket's multipart namespace.
  op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
                                  max_uploads, &objs, &common_prefixes, &is_truncated);
  if (op_ret < 0) {
    return;
  }

  if (!objs.empty()) {
    vector<rgw_bucket_dir_entry>::iterator iter;
    RGWMultipartUploadEntry entry;
    for (iter = objs.begin(); iter != objs.end(); ++iter) {
      // Skip index entries whose names do not decode as multipart meta.
      rgw_obj_key key(iter->key);
      if (!entry.mp.from_meta(key.name))
        continue;
      entry.obj = *iter;
      uploads.push_back(entry);
    }
    // NOTE(review): next_marker is the last entry that decoded via
    // from_meta(); if every returned entry failed to decode, this is a
    // default-constructed entry — confirm the truncation/continuation
    // behavior in that case.
    next_marker = entry;
  }
}
6061
6062 void RGWGetHealthCheck::execute()
6063 {
6064 if (!g_conf()->rgw_healthcheck_disabling_path.empty() &&
6065 (::access(g_conf()->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
6066 /* Disabling path specified & existent in the filesystem. */
6067 op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
6068 } else {
6069 op_ret = 0; /* 200 OK */
6070 }
6071 }
6072
int RGWDeleteMultiObj::verify_permission()
{
  // Coarse, bucket-level policy check (ARN of the bucket); per-object policy
  // evaluation happens again inside execute() for each listed key. An
  // explicit Deny here fails the whole request; Allow short-circuits.
  if (s->iam_policy || ! s->iam_user_policies.empty()) {
    auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                             boost::none,
                                             s->object.instance.empty() ?
                                             rgw::IAM::s3DeleteObject :
                                             rgw::IAM::s3DeleteObjectVersion,
                                             ARN(s->bucket));
    if (usr_policy_res == Effect::Deny) {
      return -EACCES;
    }

    rgw::IAM::Effect r = Effect::Pass;
    if (s->iam_policy) {
      r = s->iam_policy->eval(s->env, *s->auth.identity,
                              s->object.instance.empty() ?
                              rgw::IAM::s3DeleteObject :
                              rgw::IAM::s3DeleteObjectVersion,
                              ARN(s->bucket));
    }
    if (r == Effect::Allow)
      return 0;
    else if (r == Effect::Deny)
      return -EACCES;
    else if (usr_policy_res == Effect::Allow)
      return 0;
  }

  // Remember the ACL verdict: execute() consults acl_allowed for objects
  // where the policies evaluate to Pass.
  acl_allowed = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
  if (!acl_allowed)
    return -EACCES;

  return 0;
}
6108
void RGWDeleteMultiObj::pre_exec()
{
  // Common bucket/object pre-exec hook shared by bucket-level ops.
  rgw_bucket_object_pre_exec(s);
}
6113
// S3 multi-object delete: parse the <Delete> XML body, authorize and delete
// each listed key, and stream a per-object result. Uses goto for the two
// exits: `error` (before the response has started) and `done` (after).
void RGWDeleteMultiObj::execute()
{
  RGWMultiDelDelete *multi_delete;
  vector<rgw_obj_key>::iterator iter;
  RGWMultiDelXMLParser parser;
  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  char* buf;

  op_ret = get_params();
  if (op_ret < 0) {
    goto error;
  }

  buf = data.c_str();
  if (!buf) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.parse(buf, data.length(), 1)) {
    op_ret = -EINVAL;
    goto error;
  }

  multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
  if (!multi_delete) {
    op_ret = -EINVAL;
    goto error;
  } else {
#define DELETE_MULTI_OBJ_MAX_NUM      1000
    // Cap the number of keys per request; a negative config value falls
    // back to the compiled-in default of 1000.
    int max_num = s->cct->_conf->rgw_delete_multi_obj_max_num;
    if (max_num < 0) {
      max_num = DELETE_MULTI_OBJ_MAX_NUM;
    }
    int multi_delete_object_num = multi_delete->objects.size();
    if (multi_delete_object_num > max_num) {
      op_ret = -ERR_MALFORMED_XML;
      goto error;
    }
  }

  if (multi_delete->is_quiet())
    quiet = true;

  // MFA-protected buckets: any versioned delete in the batch requires a
  // verified MFA token; reject the whole request otherwise.
  if (s->bucket_info.mfa_enabled()) {
    bool has_versioned = false;
    for (auto i : multi_delete->objects) {
      if (!i.instance.empty()) {
        has_versioned = true;
        break;
      }
    }
    if (has_versioned && !s->mfa_verified) {
      ldpp_dout(this, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl;
      op_ret = -ERR_MFA_REQUIRED;
      goto error;
    }
  }

  begin_response();
  if (multi_delete->objects.empty()) {
    goto done;
  }

  for (iter = multi_delete->objects.begin();
        iter != multi_delete->objects.end();
        ++iter) {
    rgw_obj obj(bucket, *iter);
    // Per-object policy re-evaluation; an object that fails authorization
    // gets an EACCES entry in the response instead of failing the batch.
    if (s->iam_policy || ! s->iam_user_policies.empty()) {
      auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
                                               boost::none,
                                               iter->instance.empty() ?
                                               rgw::IAM::s3DeleteObject :
                                               rgw::IAM::s3DeleteObjectVersion,
                                               ARN(obj));
      if (usr_policy_res == Effect::Deny) {
        send_partial_response(*iter, false, "", -EACCES);
        continue;
      }

      rgw::IAM::Effect e = Effect::Pass;
      if (s->iam_policy) {
        e = s->iam_policy->eval(s->env,
                                *s->auth.identity,
                                iter->instance.empty() ?
                                rgw::IAM::s3DeleteObject :
                                rgw::IAM::s3DeleteObjectVersion,
                                ARN(obj));
      }
      // Deny wins; double Pass falls back to the ACL verdict cached in
      // acl_allowed by verify_permission().
      if ((e == Effect::Deny) ||
          (usr_policy_res == Effect::Pass && e == Effect::Pass && !acl_allowed)) {
        send_partial_response(*iter, false, "", -EACCES);
        continue;
      }
    }

    obj_ctx->set_atomic(obj);

    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = s->bucket_owner.get_id();
    del_op.params.versioning_status = s->bucket_info.versioning_status();
    del_op.params.obj_owner = s->owner;

    op_ret = del_op.delete_obj();
    // A missing object is reported as success, matching S3 semantics.
    if (op_ret == -ENOENT) {
      op_ret = 0;
    }

    send_partial_response(*iter, del_op.result.delete_marker,
                          del_op.result.version_id, op_ret);
  }

  /*  set the return code to zero, errors at this point will be
  dumped to the response */
  op_ret = 0;

done:
  // will likely segfault if begin_response() has not been called
  end_response();
  return;

error:
  send_status();
  return;

}
6247
// Authorize one bulk-delete path against the target bucket's ACL and policy.
// On success, the bucket owner's ACLOwner is returned via bucket_owner.
bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
                                               map<string, bufferlist>& battrs,
                                               ACLOwner& bucket_owner /* out */)
{
  RGWAccessControlPolicy bacl(store->ctx());
  int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
  if (ret < 0) {
    return false;
  }

  auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);

  bucket_owner = bacl.get_owner();

  /* We can use global user_acl because each BulkDelete request is allowed
   * to work on entities from a single account only. */
  // NOTE(review): s3DeleteBucket is checked even when the path targets an
  // object inside the bucket — confirm this matches the intended Swift
  // bulk-delete authorization semantics.
  return verify_bucket_permission(dpp, s, binfo.bucket, s->user_acl.get(),
                                  &bacl, policy, s->iam_user_policies, rgw::IAM::s3DeleteBucket);
}
6267
// Delete a single bulk-delete path: an object ("bucket/key") or an empty
// bucket ("bucket"). Failures are tallied per category (num_unfound /
// failures list) via the goto labels at the end; returns true on success.
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
{
  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  RGWBucketInfo binfo;
  map<string, bufferlist> battrs;
  ACLOwner bowner;

  int ret = store->get_bucket_info(*s->sysobj_ctx, s->user->user_id.tenant,
                                   path.bucket_name, binfo, nullptr,
                                   &battrs);
  if (ret < 0) {
    goto binfo_fail;
  }

  if (!verify_permission(binfo, battrs, bowner)) {
    ret = -EACCES;
    goto auth_fail;
  }

  if (!path.obj_key.empty()) {
    // Path names an object: delete it through the regular object path.
    rgw_obj obj(binfo.bucket, path.obj_key);
    obj_ctx.set_atomic(obj);

    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = binfo.owner;
    del_op.params.versioning_status = binfo.versioning_status();
    del_op.params.obj_owner = bowner;

    ret = del_op.delete_obj();
    if (ret < 0) {
      goto delop_fail;
    }
  } else {
    // Path names a bucket: delete the bucket, then unlink it from its
    // owner; the unlink failure is logged but not treated as fatal.
    RGWObjVersionTracker ot;
    ot.read_version = binfo.ep_objv;

    ret = store->delete_bucket(binfo, ot);
    if (0 == ret) {
      ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
                              binfo.bucket.name, false);
      if (ret < 0) {
        ldpp_dout(s, 0) << "WARNING: failed to unlink bucket: ret=" << ret << dendl;
      }
    }
    if (ret < 0) {
      goto delop_fail;
    }

    // Bucket deletions must also be applied on the metadata master zone.
    if (!store->svc.zone->is_meta_master()) {
      bufferlist in_data;
      ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                      nullptr);
      if (ret < 0) {
        if (ret == -ENOENT) {
          /* adjust error, we want to return with NoSuchBucket and not
           * NoSuchKey */
          ret = -ERR_NO_SUCH_BUCKET;
        }
        goto delop_fail;
      }
    }
  }

  num_deleted++;
  return true;


binfo_fail:
    // Bucket lookup failed: a missing bucket only bumps num_unfound; other
    // errors are recorded with their path for the final report.
    if (-ENOENT == ret) {
      ldpp_dout(s, 20) << "cannot find bucket = " << path.bucket_name << dendl;
      num_unfound++;
    } else {
      ldpp_dout(s, 20) << "cannot get bucket info, ret = " << ret << dendl;

      fail_desc_t failed_item = {
        .err  = ret,
        .path = path
      };
      failures.push_back(failed_item);
    }
    return false;

auth_fail:
    ldpp_dout(s, 20) << "wrong auth for " << path << dendl;
    {
      fail_desc_t failed_item = {
        .err  = ret,
        .path = path
      };
      failures.push_back(failed_item);
    }
    return false;

delop_fail:
    // Delete failed: ENOENT counts as "not found", anything else is a
    // recorded failure.
    if (-ENOENT == ret) {
      ldpp_dout(s, 20) << "cannot find entry " << path << dendl;
      num_unfound++;
    } else {
      fail_desc_t failed_item = {
        .err  = ret,
        .path = path
      };
      failures.push_back(failed_item);
    }
    return false;
}
6377
6378 bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
6379 {
6380 ldpp_dout(s, 20) << "in delete_chunk" << dendl;
6381 for (auto path : paths) {
6382 ldpp_dout(s, 20) << "bulk deleting path: " << path << dendl;
6383 delete_single(path);
6384 }
6385
6386 return true;
6387 }
6388
int RGWBulkDelete::verify_permission()
{
  // Intentionally permissive: authorization is performed per path by
  // Deleter::verify_permission() during execute().
  return 0;
}
6393
void RGWBulkDelete::pre_exec()
{
  // Common bucket/object pre-exec hook shared by bucket-level ops.
  rgw_bucket_object_pre_exec(s);
}
6398
6399 void RGWBulkDelete::execute()
6400 {
6401 deleter = std::unique_ptr<Deleter>(new Deleter(this, store, s));
6402
6403 bool is_truncated = false;
6404 do {
6405 list<RGWBulkDelete::acct_path_t> items;
6406
6407 int ret = get_data(items, &is_truncated);
6408 if (ret < 0) {
6409 return;
6410 }
6411
6412 ret = deleter->delete_chunk(items);
6413 } while (!op_ret && is_truncated);
6414
6415 return;
6416 }
6417
6418
// Out-of-line definition for the static constexpr member declared in the
// class (required for ODR-use before C++17 inline variables).
constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
6420
int RGWBulkUploadOp::verify_permission()
{
  // Bulk upload may create buckets, so it is denied to anonymous users.
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (! verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  // Cross-tenant bucket creation is not allowed.
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
                        << " (user_id.tenant=" << s->user->user_id.tenant
                        << " requested=" << s->bucket_tenant << ")" << dendl;
    return -EACCES;
  }

  // A negative max_buckets means bucket creation is disabled for this user.
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  return 0;
}
6444
// Standard pre-execution hook: shared bucket/object setup.
void RGWBulkUploadOp::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6449
6450 boost::optional<std::pair<std::string, rgw_obj_key>>
6451 RGWBulkUploadOp::parse_path(const boost::string_ref& path)
6452 {
6453 /* We need to skip all slashes at the beginning in order to preserve
6454 * compliance with Swift. */
6455 const size_t start_pos = path.find_first_not_of('/');
6456
6457 if (boost::string_ref::npos != start_pos) {
6458 /* Seperator is the first slash after the leading ones. */
6459 const size_t sep_pos = path.substr(start_pos).find('/');
6460
6461 if (boost::string_ref::npos != sep_pos) {
6462 const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
6463 const auto obj_name = path.substr(sep_pos + 1);
6464
6465 return std::make_pair(bucket_name.to_string(),
6466 rgw_obj_key(obj_name.to_string()));
6467 } else {
6468 /* It's guaranteed here that bucket name is at least one character
6469 * long and is different than slash. */
6470 return std::make_pair(path.substr(start_pos).to_string(),
6471 rgw_obj_key());
6472 }
6473 }
6474
6475 return none;
6476 }
6477
6478 std::pair<std::string, std::string>
6479 RGWBulkUploadOp::handle_upload_path(struct req_state *s)
6480 {
6481 std::string bucket_path, file_prefix;
6482 if (! s->init_state.url_bucket.empty()) {
6483 file_prefix = bucket_path = s->init_state.url_bucket + "/";
6484 if (! s->object.empty()) {
6485 std::string& object_name = s->object.name;
6486
6487 /* As rgw_obj_key::empty() already verified emptiness of s->object.name,
6488 * we can safely examine its last element. */
6489 if (object_name.back() == '/') {
6490 file_prefix.append(object_name);
6491 } else {
6492 file_prefix.append(object_name).append("/");
6493 }
6494 }
6495 }
6496 return std::make_pair(bucket_path, file_prefix);
6497 }
6498
6499 int RGWBulkUploadOp::handle_dir_verify_permission()
6500 {
6501 if (s->user->max_buckets > 0) {
6502 RGWUserBuckets buckets;
6503 std::string marker;
6504 bool is_truncated = false;
6505 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
6506 marker, std::string(), s->user->max_buckets,
6507 false, &is_truncated);
6508 if (op_ret < 0) {
6509 return op_ret;
6510 }
6511
6512 if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) {
6513 return -ERR_TOO_MANY_BUCKETS;
6514 }
6515 }
6516
6517 return 0;
6518 }
6519
6520 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
6521 {
6522 /* the request of container or object level will contain bucket name.
6523 * only at account level need to append the bucket name */
6524 if (info.script_uri.find(bucket_name) != std::string::npos) {
6525 return;
6526 }
6527
6528 ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
6529 info.script_uri.append("/").append(bucket_name);
6530 info.request_uri_aws4 = info.request_uri = info.script_uri;
6531 info.effective_uri = "/" + bucket_name;
6532 }
6533
// Initialize the op and create a dedicated sysobj context; handle_dir()
// uses it for its get_bucket_info() lookups.
void RGWBulkUploadOp::init(RGWRados* const store,
                           struct req_state* const s,
                           RGWHandler* const h)
{
  RGWOp::init(store, s, h);
  dir_ctx.emplace(store->svc.sysobj->init_obj_ctx());
}
6541
// Handle a directory entry from the uploaded TAR: create (or validate) the
// corresponding bucket.  Mirrors the ordinary bucket-creation path: quota
// check, existing-bucket ownership/placement validation, optional forward
// to the metadata master, create_bucket, and finally linking the bucket to
// the user.  Returns 0, -EEXIST on ownership/placement conflict,
// -ERR_BUCKET_EXISTS when the bucket already belongs to the user, or a
// negative error code.
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
{
  ldpp_dout(this, 20) << "got directory=" << path << dendl;

  op_ret = handle_dir_verify_permission();
  if (op_ret < 0) {
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object_junk;
  std::tie(bucket_name, object_junk) = *parse_path(path);

  // NOTE(review): `obj` is constructed but never used below -- looks like
  // dead code; confirm before removing.
  rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root,
                  rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
                                  binfo, nullptr, &battrs);
  if (op_ret < 0 && op_ret != -ENOENT) {
    return op_ret;
  }
  const bool bucket_exists = (op_ret != -ENOENT);

  if (bucket_exists) {
    /* Bucket already present: it may only be "re-created" by its owner. */
    RGWAccessControlPolicy old_policy(s->cct);
    int r = rgw_op_get_bucket_policy_from_attr(s->cct, store, binfo,
                                               battrs, &old_policy);
    if (r >= 0) {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return op_ret;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket = nullptr;
  uint32_t *pmaster_num_shards = nullptr;
  real_time creation_time;
  obj_version objv, ep_objv, *pobjv = nullptr;

  if (! store->svc.zone->is_meta_master()) {
    /* Not the metadata master: forward the creation there and adopt the
     * bucket/versioning info it returns. */
    JSONParser jp;
    ceph::bufferlist in_data;
    req_info info = s->info;
    forward_req_info(s->cct, info, bucket_name);
    op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
    if (op_ret < 0) {
      return op_ret;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);

    ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
    ldpp_dout(this, 20) << "got creation_time="<< master_info.creation_time << dendl;

    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = nullptr;
    pmaster_num_shards = nullptr;
  }

  rgw_placement_rule placement_rule(binfo.placement_rule, s->info.storage_class);

  if (bucket_exists) {
    /* Re-creating an existing bucket must not change its placement. */
    rgw_placement_rule selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    // NOTE(review): op_ret from select_bucket_placement() is never checked
    // before selected_placement_rule is compared below -- on failure the
    // comparison runs against a default-constructed rule; confirm intended.
    op_ret = store->svc.zone->select_bucket_placement(*(s->user),
                                                      store->svc.zone->get_zonegroup().get_id(),
                                                      placement_rule,
                                                      &selected_placement_rule,
                                                      nullptr);
    if (selected_placement_rule != binfo.placement_rule) {
      op_ret = -EEXIST;
      ldpp_dout(this, 20) << "non-coherent placement rule" << dendl;
      return op_ret;
    }
  }

  /* Create metadata: ACLs. */
  std::map<std::string, ceph::bufferlist> attrs;
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;

  rgw_bucket bucket;
  bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  bucket.name = bucket_name;


  RGWBucketInfo out_info;
  op_ret = store->create_bucket(*(s->user),
                                bucket,
                                store->svc.zone->get_zonegroup().get_id(),
                                placement_rule, binfo.swift_ver_location,
                                pquota_info, attrs,
                                out_info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret
                      << ", bucket=" << bucket << dendl;

  if (op_ret && op_ret != -EEXIST) {
    return op_ret;
  }

  const bool existed = (op_ret == -EEXIST);
  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (out_info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      ldpp_dout(this, 20) << "conflicting bucket name" << dendl;
      return op_ret;
    }
    bucket = out_info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
                           out_info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id,
                               bucket.tenant, bucket.name);
    if (op_ret < 0) {
      ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    ldpp_dout(this, 20) << "containers already exists" << dendl;
    op_ret = -ERR_BUCKET_EXISTS;
  }

  return op_ret;
}
6697
6698
6699 bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
6700 const rgw_obj& obj,
6701 std::map<std::string, ceph::bufferlist>& battrs,
6702 ACLOwner& bucket_owner /* out */)
6703 {
6704 RGWAccessControlPolicy bacl(store->ctx());
6705 op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
6706 if (op_ret < 0) {
6707 ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl;
6708 return false;
6709 }
6710
6711 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
6712
6713 bucket_owner = bacl.get_owner();
6714 if (policy || ! s->iam_user_policies.empty()) {
6715 auto usr_policy_res = eval_user_policies(s->iam_user_policies, s->env,
6716 boost::none,
6717 rgw::IAM::s3PutObject, obj);
6718 if (usr_policy_res == Effect::Deny) {
6719 return false;
6720 }
6721 auto e = policy->eval(s->env, *s->auth.identity,
6722 rgw::IAM::s3PutObject, obj);
6723 if (e == Effect::Allow) {
6724 return true;
6725 } else if (e == Effect::Deny) {
6726 return false;
6727 } else if (usr_policy_res == Effect::Allow) {
6728 return true;
6729 }
6730 }
6731
6732 return verify_bucket_permission_no_policy(this, s, s->user_acl.get(),
6733 &bacl, RGW_PERM_WRITE);
6734 }
6735
// Handle a regular-file entry from the uploaded TAR: authorize, check
// quotas, stream the body through the (optionally compressing) putobj
// processor, then attach ETag/ACL/compression metadata and commit.
// `size` is the length declared in the TAR header; a mismatch with the
// actual stream length fails with -EINVAL.
int RGWBulkUploadOp::handle_file(const boost::string_ref path,
                                 const size_t size,
                                 AlignedStreamGetter& body)
{

  ldpp_dout(this, 20) << "got file=" << path << ", size=" << size << dendl;

  if (size > static_cast<size_t>(s->cct->_conf->rgw_max_put_size)) {
    op_ret = -ERR_TOO_LARGE;
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object;
  std::tie(bucket_name, object) = *parse_path(path);

  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  ACLOwner bowner;
  op_ret = store->get_bucket_info(*s->sysobj_ctx, s->user->user_id.tenant,
                                  bucket_name, binfo, nullptr, &battrs);
  if (op_ret == -ENOENT) {
    ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl;
  } else if (op_ret < 0) {
    return op_ret;
  }

  if (! handle_file_verify_permission(binfo,
                                      rgw_obj(binfo.bucket, object),
                                      battrs, bowner)) {
    ldpp_dout(this, 20) << "object creation unauthorized" << dendl;
    op_ret = -EACCES;
    return op_ret;
  }

  /* Pre-upload quota check against the declared size. */
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
                              user_quota, bucket_quota, size);
  if (op_ret < 0) {
    return op_ret;
  }

  // NOTE(review): shard check uses s->bucket_info/s->bucket (the request's
  // bucket) while the upload targets binfo.bucket -- confirm intended.
  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  rgw_obj obj(binfo.bucket, object);
  if (s->bucket_info.versioning_enabled()) {
    store->gen_rand_obj_instance_name(&obj);
  }

  rgw_placement_rule dest_placement = s->dest_placement;
  dest_placement.inherit_from(binfo.placement_rule);

  rgw::AioThrottle aio(store->ctx()->_conf->rgw_put_obj_min_window_size);

  using namespace rgw::putobj;

  // NOTE(review): the processor is given &s->dest_placement even though
  // dest_placement (inherited from binfo above) is what the compression
  // lookup below uses -- the two may differ; confirm which is intended.
  AtomicObjectProcessor processor(&aio, store, binfo, &s->dest_placement, bowner.get_id(),
                                  obj_ctx, obj, 0, s->req_id);

  op_ret = processor.prepare();
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "cannot prepare processor due to ret=" << op_ret << dendl;
    return op_ret;
  }

  /* No filters by default. */
  DataProcessor *filter = &processor;

  const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
      dest_placement);
  CompressorRef plugin;
  boost::optional<RGWPutObj_Compress> compressor;
  if (compression_type != "none") {
    plugin = Compressor::create(s->cct, compression_type);
    if (! plugin) {
      /* Missing plugin is non-fatal: the object is stored uncompressed. */
      ldpp_dout(this, 1) << "Cannot load plugin for rgw_compression_type "
                         << compression_type << dendl;
    } else {
      compressor.emplace(s->cct, plugin, filter);
      filter = &*compressor;
    }
  }

  /* Upload file content. */
  ssize_t len = 0;
  size_t ofs = 0;
  MD5 hash;
  do {
    ceph::bufferlist data;
    len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);

    // NOTE(review): this logs the raw (possibly binary, not NUL-terminated)
    // body at debug level 20 -- noisy and potentially unsafe; confirm.
    ldpp_dout(this, 20) << "body=" << data.c_str() << dendl;
    if (len < 0) {
      op_ret = len;
      return op_ret;
    } else if (len > 0) {
      hash.Update((const unsigned char *)data.c_str(), data.length());
      op_ret = filter->process(std::move(data), ofs);
      if (op_ret < 0) {
        ldpp_dout(this, 20) << "filter->process() returned ret=" << op_ret << dendl;
        return op_ret;
      }

      ofs += len;
    }

  } while (len > 0);

  // flush
  op_ret = filter->process({}, ofs);
  if (op_ret < 0) {
    return op_ret;
  }

  /* The TAR header promised `size` bytes; anything else is an error. */
  if (ofs != size) {
    ldpp_dout(this, 10) << "real file size different from declared" << dendl;
    op_ret = -EINVAL;
    return op_ret;
  }

  /* Re-check quotas now that the actual size is known. */
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
                              user_quota, bucket_quota, size);
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl;
    return op_ret;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  hash.Final(m);
  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  /* Create metadata: ETAG. */
  std::map<std::string, ceph::bufferlist> attrs;
  std::string etag = calc_md5;
  ceph::bufferlist etag_bl;
  etag_bl.append(etag.c_str(), etag.size() + 1);
  attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));

  /* Create metadata: ACLs. */
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  /* Create metadata: compression info. */
  if (compressor && compressor->is_compressed()) {
    ceph::bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    // NOTE(review): orig_size is taken from s->obj_size, but this function
    // tracked the entry's byte count in `ofs`/`size` -- confirm s->obj_size
    // is set for bulk-upload sub-requests.
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = std::move(compressor->get_compression_blocks());
    encode(cs_info, tmp);
    attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
  }

  /* Complete the transaction. */
  op_ret = processor.complete(size, etag, nullptr, ceph::real_time(),
                              attrs, ceph::real_time() /* delete_at */,
                              nullptr, nullptr, nullptr, nullptr, nullptr);
  if (op_ret < 0) {
    ldpp_dout(this, 20) << "processor::complete returned op_ret=" << op_ret << dendl;
  }

  return op_ret;
}
6911
6912 void RGWBulkUploadOp::execute()
6913 {
6914 ceph::bufferlist buffer(64 * 1024);
6915
6916 ldpp_dout(this, 20) << "start" << dendl;
6917
6918 /* Create an instance of stream-abstracting class. Having this indirection
6919 * allows for easy introduction of decompressors like gzip and bzip2. */
6920 auto stream = create_stream();
6921 if (! stream) {
6922 return;
6923 }
6924
6925 /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See:
6926 * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
6927 std::string bucket_path, file_prefix;
6928 std::tie(bucket_path, file_prefix) = handle_upload_path(s);
6929
6930 auto status = rgw::tar::StatusIndicator::create();
6931 do {
6932 op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
6933 if (op_ret < 0) {
6934 ldpp_dout(this, 2) << "cannot read header" << dendl;
6935 return;
6936 }
6937
6938 /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
6939 * must be tracked to detect out end-of-archive. It occurs when both of
6940 * them are empty (zeroed). Tracing this particular inter-block dependency
6941 * is responsibility of the rgw::tar::StatusIndicator class. */
6942 boost::optional<rgw::tar::HeaderView> header;
6943 std::tie(status, header) = rgw::tar::interpret_block(status, buffer);
6944
6945 if (! status.empty() && header) {
6946 /* This specific block isn't empty (entirely zeroed), so we can parse
6947 * it as a TAR header and dispatch. At the moment we do support only
6948 * regular files and directories. Everything else (symlinks, devices)
6949 * will be ignored but won't cease the whole upload. */
6950 switch (header->get_filetype()) {
6951 case rgw::tar::FileType::NORMAL_FILE: {
6952 ldpp_dout(this, 2) << "handling regular file" << dendl;
6953
6954 boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
6955 file_prefix + header->get_filename().to_string();
6956 auto body = AlignedStreamGetter(0, header->get_filesize(),
6957 rgw::tar::BLOCK_SIZE, *stream);
6958 op_ret = handle_file(filename,
6959 header->get_filesize(),
6960 body);
6961 if (! op_ret) {
6962 /* Only regular files counts. */
6963 num_created++;
6964 } else {
6965 failures.emplace_back(op_ret, filename.to_string());
6966 }
6967 break;
6968 }
6969 case rgw::tar::FileType::DIRECTORY: {
6970 ldpp_dout(this, 2) << "handling regular directory" << dendl;
6971
6972 boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
6973 op_ret = handle_dir(dirname);
6974 if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
6975 failures.emplace_back(op_ret, dirname.to_string());
6976 }
6977 break;
6978 }
6979 default: {
6980 /* Not recognized. Skip. */
6981 op_ret = 0;
6982 break;
6983 }
6984 }
6985
6986 /* In case of any problems with sub-request authorization Swift simply
6987 * terminates whole upload immediately. */
6988 if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
6989 terminal_errors)) {
6990 ldpp_dout(this, 2) << "terminating due to ret=" << op_ret << dendl;
6991 break;
6992 }
6993 } else {
6994 ldpp_dout(this, 2) << "an empty block" << dendl;
6995 op_ret = 0;
6996 }
6997
6998 buffer.clear();
6999 } while (! status.eof());
7000
7001 return;
7002 }
7003
7004 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
7005 {
7006 const size_t aligned_legnth = length + (-length % alignment);
7007 ceph::bufferlist junk;
7008
7009 DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
7010 }
7011
7012 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
7013 ceph::bufferlist& dst)
7014 {
7015 const size_t max_to_read = std::min(want, length - position);
7016 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
7017 if (len > 0) {
7018 position += len;
7019 }
7020 return len;
7021 }
7022
// Read exactly `want` bytes from the decorated stream, advancing the
// position only on success (a negative return is an error code).
ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
                                                          ceph::bufferlist& dst)
{
  const auto len = DecoratedStreamGetter::get_exactly(want, dst);
  if (len > 0) {
    position += len;
  }
  return len;
}
7032
7033 int RGWSetAttrs::verify_permission()
7034 {
7035 // This looks to be part of the RGW-NFS machinery and has no S3 or
7036 // Swift equivalent.
7037 bool perm;
7038 if (!s->object.empty()) {
7039 perm = verify_object_permission_no_policy(this, s, RGW_PERM_WRITE);
7040 } else {
7041 perm = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
7042 }
7043 if (!perm)
7044 return -EACCES;
7045
7046 return 0;
7047 }
7048
// Standard pre-execution hook: shared bucket/object setup.
void RGWSetAttrs::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
7053
7054 void RGWSetAttrs::execute()
7055 {
7056 op_ret = get_params();
7057 if (op_ret < 0)
7058 return;
7059
7060 rgw_obj obj(s->bucket, s->object);
7061
7062 if (!s->object.empty()) {
7063 store->set_atomic(s->obj_ctx, obj);
7064 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr);
7065 } else {
7066 for (auto& iter : attrs) {
7067 s->bucket_attrs[iter.first] = std::move(iter.second);
7068 }
7069 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs,
7070 &s->bucket_info.objv_tracker);
7071 }
7072 }
7073
// Standard pre-execution hook: shared bucket/object setup.
void RGWGetObjLayout::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
7078
7079 void RGWGetObjLayout::execute()
7080 {
7081 rgw_obj obj(s->bucket, s->object);
7082 RGWRados::Object target(store,
7083 s->bucket_info,
7084 *static_cast<RGWObjectCtx *>(s->obj_ctx),
7085 rgw_obj(s->bucket, s->object));
7086 RGWRados::Object::Read stat_op(&target);
7087
7088 op_ret = stat_op.prepare();
7089 if (op_ret < 0) {
7090 return;
7091 }
7092
7093 head_obj = stat_op.state.head_obj;
7094
7095 op_ret = target.get_manifest(&manifest);
7096 }
7097
7098
7099 int RGWConfigBucketMetaSearch::verify_permission()
7100 {
7101 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7102 return -EACCES;
7103 }
7104
7105 return 0;
7106 }
7107
// Standard pre-execution hook: shared bucket/object setup.
void RGWConfigBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
7112
7113 void RGWConfigBucketMetaSearch::execute()
7114 {
7115 op_ret = get_params();
7116 if (op_ret < 0) {
7117 ldpp_dout(this, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
7118 return;
7119 }
7120
7121 s->bucket_info.mdsearch_config = mdsearch_config;
7122
7123 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
7124 if (op_ret < 0) {
7125 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
7126 << " returned err=" << op_ret << dendl;
7127 return;
7128 }
7129 }
7130
7131 int RGWGetBucketMetaSearch::verify_permission()
7132 {
7133 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7134 return -EACCES;
7135 }
7136
7137 return 0;
7138 }
7139
// Standard pre-execution hook: shared bucket/object setup.
void RGWGetBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
7144
7145 int RGWDelBucketMetaSearch::verify_permission()
7146 {
7147 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
7148 return -EACCES;
7149 }
7150
7151 return 0;
7152 }
7153
// Standard pre-execution hook: shared bucket/object setup.
void RGWDelBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
7158
7159 void RGWDelBucketMetaSearch::execute()
7160 {
7161 s->bucket_info.mdsearch_config.clear();
7162
7163 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
7164 if (op_ret < 0) {
7165 ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
7166 << " returned err=" << op_ret << dendl;
7167 return;
7168 }
7169 }
7170
7171
// Out-of-line (empty) destructor; RGWHandler owns no resources here.
RGWHandler::~RGWHandler()
{
}
7175
// Bind the handler to a store and request state.  Note: `cio` is accepted
// for interface compatibility but is not used by this base implementation.
int RGWHandler::init(RGWRados *_store,
                     struct req_state *_s,
                     rgw::io::BasicClient *cio)
{
  store = _store;
  s = _s;

  return 0;
}
7185
7186 int RGWHandler::do_init_permissions()
7187 {
7188 int ret = rgw_build_bucket_policies(store, s);
7189 if (ret < 0) {
7190 ldpp_dout(s, 10) << "init_permissions on " << s->bucket
7191 << " failed, ret=" << ret << dendl;
7192 return ret==-ENODATA ? -EACCES : ret;
7193 }
7194
7195 rgw_build_iam_environment(store, s);
7196 return ret;
7197 }
7198
7199 int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
7200 {
7201 if (only_bucket) {
7202 /* already read bucket info */
7203 return 0;
7204 }
7205 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
7206
7207 if (ret < 0) {
7208 ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":"
7209 << s->object << " only_bucket=" << only_bucket
7210 << " ret=" << ret << dendl;
7211 if (ret == -ENODATA)
7212 ret = -EACCES;
7213 }
7214
7215 return ret;
7216 }
7217
// Delegate error translation to the protocol (S3/Swift) handler.
int RGWOp::error_handler(int err_no, string *error_content) {
  return dialect_handler->error_handler(err_no, error_content);
}
7221
// Base-class error hook: passes the error through unchanged.
int RGWHandler::error_handler(int err_no, string *error_content) {
  // This is the do-nothing error handler
  return err_no;
}
7226
// Log-line prefix for this op: the req_state prefix plus "<dialect>:<op> ".
std::ostream& RGWOp::gen_prefix(std::ostream& out) const
{
  // append <dialect>:<op name> to the prefix
  return s->gen_prefix(out) << s->dialect << ':' << name() << ' ';
}
7232
// Minimal response: status line from op_ret plus standard headers, no body.
void RGWDefaultResponseOp::send_response() {
  if (op_ret) {
    set_req_state_err(s, op_ret);
  }
  dump_errno(s);
  end_header(s);
}
7240
// Body-less response: status from op_ret plus standard headers.
void RGWPutBucketPolicy::send_response()
{
  if (op_ret) {
    set_req_state_err(s, op_ret);
  }
  dump_errno(s);
  end_header(s);
}
7249
7250 int RGWPutBucketPolicy::verify_permission()
7251 {
7252 if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPolicy)) {
7253 return -EACCES;
7254 }
7255
7256 return 0;
7257 }
7258
// Slurp the policy document from the request body (bounded by
// rgw_max_put_param_size) into the `data` member; op_ret carries the result.
int RGWPutBucketPolicy::get_params()
{
  const auto max_size = s->cct->_conf->rgw_max_put_param_size;
  // At some point when I have more time I want to make a version of
  // rgw_rest_read_all_input that doesn't use malloc.
  std::tie(op_ret, data) = rgw_rest_read_all_input(s, max_size, false);

  // And throws exceptions.
  return op_ret;
}
7269
7270 void RGWPutBucketPolicy::execute()
7271 {
7272 op_ret = get_params();
7273 if (op_ret < 0) {
7274 return;
7275 }
7276
7277 if (!store->svc.zone->is_meta_master()) {
7278 op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
7279 if (op_ret < 0) {
7280 ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
7281 return;
7282 }
7283 }
7284
7285 try {
7286 const Policy p(s->cct, s->bucket_tenant, data);
7287 op_ret = retry_raced_bucket_write(store, s, [&p, this] {
7288 auto attrs = s->bucket_attrs;
7289 attrs[RGW_ATTR_IAM_POLICY].clear();
7290 attrs[RGW_ATTR_IAM_POLICY].append(p.text);
7291 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
7292 &s->bucket_info.objv_tracker);
7293 return op_ret;
7294 });
7295 } catch (rgw::IAM::PolicyParseException& e) {
7296 ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl;
7297 op_ret = -EINVAL;
7298 }
7299 }
7300
// Emit the stored policy document as an application/json body.
void RGWGetBucketPolicy::send_response()
{
  if (op_ret) {
    set_req_state_err(s, op_ret);
  }
  dump_errno(s);
  end_header(s, this, "application/json");
  dump_body(s, policy);
}
7310
7311 int RGWGetBucketPolicy::verify_permission()
7312 {
7313 if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) {
7314 return -EACCES;
7315 }
7316
7317 return 0;
7318 }
7319
7320 void RGWGetBucketPolicy::execute()
7321 {
7322 auto attrs = s->bucket_attrs;
7323 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
7324 if (aiter == attrs.end()) {
7325 ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
7326 << s->bucket_name << dendl;
7327 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
7328 s->err.message = "The bucket policy does not exist";
7329 return;
7330 } else {
7331 policy = attrs[RGW_ATTR_IAM_POLICY];
7332
7333 if (policy.length() == 0) {
7334 ldpp_dout(this, 10) << "The bucket policy does not exist, bucket: "
7335 << s->bucket_name << dendl;
7336 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
7337 s->err.message = "The bucket policy does not exist";
7338 return;
7339 }
7340 }
7341 }
7342
// Body-less response: status from op_ret plus standard headers.
void RGWDeleteBucketPolicy::send_response()
{
  if (op_ret) {
    set_req_state_err(s, op_ret);
  }
  dump_errno(s);
  end_header(s);
}
7351
7352 int RGWDeleteBucketPolicy::verify_permission()
7353 {
7354 if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucketPolicy)) {
7355 return -EACCES;
7356 }
7357
7358 return 0;
7359 }
7360
// Remove the bucket's IAM policy xattr and rewrite the remaining bucket
// attrs, retrying if a concurrent bucket-metadata write races with us.
void RGWDeleteBucketPolicy::execute()
{
  op_ret = retry_raced_bucket_write(store, s, [this] {
    auto attrs = s->bucket_attrs;
    attrs.erase(RGW_ATTR_IAM_POLICY);
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                  &s->bucket_info.objv_tracker);
    return op_ret;
  });
}
7371
// Proxy straight to librados cluster_stat(); the result lands in stats_op.
void RGWGetClusterStat::execute()
{
  op_ret = this->store->get_rados_handle()->cluster_stat(stats_op);
}
7376
7377