// ceph/src/rgw/rgw_op.cc — from ceph.git (v12.2.1 sources)
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <system_error>
7 #include <unistd.h>
8
9 #include <sstream>
10
11 #include <boost/algorithm/string/predicate.hpp>
12 #include <boost/bind.hpp>
13 #include <boost/optional.hpp>
14 #include <boost/utility/in_place_factory.hpp>
15 #include <boost/utility/string_view.hpp>
16
17 #include "common/Clock.h"
18 #include "common/armor.h"
19 #include "common/backport14.h"
20 #include "common/errno.h"
21 #include "common/mime.h"
22 #include "common/utf8.h"
23 #include "common/ceph_json.h"
24
25 #include "rgw_rados.h"
26 #include "rgw_op.h"
27 #include "rgw_rest.h"
28 #include "rgw_acl.h"
29 #include "rgw_acl_s3.h"
30 #include "rgw_acl_swift.h"
31 #include "rgw_user.h"
32 #include "rgw_bucket.h"
33 #include "rgw_log.h"
34 #include "rgw_multi.h"
35 #include "rgw_multi_del.h"
36 #include "rgw_cors.h"
37 #include "rgw_cors_s3.h"
38 #include "rgw_rest_conn.h"
39 #include "rgw_rest_s3.h"
40 #include "rgw_tar.h"
41 #include "rgw_client_io.h"
42 #include "rgw_compression.h"
43 #include "rgw_role.h"
44 #include "rgw_tag_s3.h"
45 #include "cls/lock/cls_lock_client.h"
46 #include "cls/rgw/cls_rgw_client.h"
47
48
49 #include "include/assert.h"
50
51 #include "compressor/Compressor.h"
52
53 #include "rgw_acl_swift.h"
54
55 #define dout_context g_ceph_context
56 #define dout_subsys ceph_subsys_rgw
57
58 using namespace std;
59 using namespace librados;
60 using ceph::crypto::MD5;
61 using boost::optional;
62 using boost::none;
63
64 using rgw::IAM::ARN;
65 using rgw::IAM::Effect;
66 using rgw::IAM::Policy;
67
68 using rgw::IAM::Policy;
69
70 static string mp_ns = RGW_OBJ_NS_MULTIPART;
71 static string shadow_ns = RGW_OBJ_NS_SHADOW;
72
73 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
74 static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
75 bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
76
77 static MultipartMetaFilter mp_filter;
78
/*
 * Parse an HTTP "Range" header value ("bytes=ofs-end").
 *
 * range: raw header value, e.g. "bytes=0-99", "bytes = 0-99", "bytes=-500".
 * ofs/end: parsed byte offsets (out). A suffix spec "-N" yields ofs=-N, end=-1.
 *          NOTE: for an open-ended spec "N-" `end` is left untouched, so the
 *          caller must pre-initialize it (existing callers pass -1).
 * partial_content: set to true once a '-' separated spec is recognized, even
 *          if the spec later turns out to be invalid.
 *
 * Returns 0 when the header carries no usable range (unit is not "bytes") or
 * when a valid range was parsed; -ERANGE for a malformed/inverted range.
 */
static int parse_range(const char *range, off_t& ofs, off_t& end, bool *partial_content)
{
  string s(range);

  *partial_content = false;

  size_t pos = s.find("bytes=");
  if (pos == string::npos) {
    /* Tolerate forms like "bytes = 0-99": skip whitespace, scan the unit
     * token, then require '='. Cast to unsigned char before the ctype
     * calls -- passing a negative plain char is undefined behaviour. */
    pos = 0;
    while (isspace(static_cast<unsigned char>(s[pos])))
      pos++;
    /* renamed from `end` to avoid shadowing the off_t& out-parameter */
    int unit_end = pos;
    while (isalpha(static_cast<unsigned char>(s[unit_end])))
      unit_end++;
    if (strncasecmp(s.c_str(), "bytes", unit_end - pos) != 0)
      return 0;  /* not a byte range -- ignore, not an error */
    while (isspace(static_cast<unsigned char>(s[unit_end])))
      unit_end++;
    if (s[unit_end] != '=')
      return 0;
    s = s.substr(unit_end + 1);
  } else {
    s = s.substr(pos + 6); /* size of("bytes=") */
  }

  pos = s.find('-');
  if (pos == string::npos)
    return -ERANGE;

  *partial_content = true;

  string ofs_str = s.substr(0, pos);
  string end_str = s.substr(pos + 1);
  if (end_str.length()) {
    end = atoll(end_str.c_str());
    if (end < 0)
      return -ERANGE;
  }

  if (ofs_str.length()) {
    ofs = atoll(ofs_str.c_str());
  } else { /* RFC2616 suffix-byte-range-spec */
    ofs = -end;
    end = -1;
  }

  if (end >= 0 && end < ofs)
    return -ERANGE;

  return 0;
}
134
135 static int decode_policy(CephContext *cct,
136 bufferlist& bl,
137 RGWAccessControlPolicy *policy)
138 {
139 bufferlist::iterator iter = bl.begin();
140 try {
141 policy->decode(iter);
142 } catch (buffer::error& err) {
143 ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
144 return -EIO;
145 }
146 if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
147 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
148 ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
149 s3policy->to_xml(*_dout);
150 *_dout << dendl;
151 }
152 return 0;
153 }
154
155
156 static int get_user_policy_from_attr(CephContext * const cct,
157 RGWRados * const store,
158 map<string, bufferlist>& attrs,
159 RGWAccessControlPolicy& policy /* out */)
160 {
161 auto aiter = attrs.find(RGW_ATTR_ACL);
162 if (aiter != attrs.end()) {
163 int ret = decode_policy(cct, aiter->second, &policy);
164 if (ret < 0) {
165 return ret;
166 }
167 } else {
168 return -ENOENT;
169 }
170
171 return 0;
172 }
173
174 static int get_bucket_instance_policy_from_attr(CephContext *cct,
175 RGWRados *store,
176 RGWBucketInfo& bucket_info,
177 map<string, bufferlist>& bucket_attrs,
178 RGWAccessControlPolicy *policy,
179 rgw_raw_obj& obj)
180 {
181 map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
182
183 if (aiter != bucket_attrs.end()) {
184 int ret = decode_policy(cct, aiter->second, policy);
185 if (ret < 0)
186 return ret;
187 } else {
188 ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
189 RGWUserInfo uinfo;
190 /* object exists, but policy is broken */
191 int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
192 if (r < 0)
193 return r;
194
195 policy->create_default(bucket_info.owner, uinfo.display_name);
196 }
197 return 0;
198 }
199
200 static int get_obj_policy_from_attr(CephContext *cct,
201 RGWRados *store,
202 RGWObjectCtx& obj_ctx,
203 RGWBucketInfo& bucket_info,
204 map<string, bufferlist>& bucket_attrs,
205 RGWAccessControlPolicy *policy,
206 rgw_obj& obj)
207 {
208 bufferlist bl;
209 int ret = 0;
210
211 RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
212 RGWRados::Object::Read rop(&op_target);
213
214 ret = rop.get_attr(RGW_ATTR_ACL, bl);
215 if (ret >= 0) {
216 ret = decode_policy(cct, bl, policy);
217 if (ret < 0)
218 return ret;
219 } else if (ret == -ENODATA) {
220 /* object exists, but policy is broken */
221 ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
222 RGWUserInfo uinfo;
223 ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
224 if (ret < 0)
225 return ret;
226
227 policy->create_default(bucket_info.owner, uinfo.display_name);
228 }
229 return ret;
230 }
231
232
/**
 * Get the AccessControlPolicy for a bucket from its stored attributes.
 * cct: context for logging.
 * store: rados store handle (used to resolve the bucket instance object).
 * bucket_info: bucket whose ACL is requested; also names the fallback owner.
 * bucket_attrs: attribute map the ACL is decoded from.
 * policy: must point to a valid policy object; filled upon return.
 * Returns: 0 on success, -ERR# otherwise.
 */
static int get_bucket_policy_from_attr(CephContext *cct,
				       RGWRados *store,
				       RGWBucketInfo& bucket_info,
				       map<string, bufferlist>& bucket_attrs,
				       RGWAccessControlPolicy *policy)
{
  /* resolve the raw bucket-instance object, then delegate */
  rgw_raw_obj instance_obj;
  store->get_bucket_instance_obj(bucket_info.bucket, instance_obj);
  return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs,
                                              policy, instance_obj);
}
251
252 static optional<Policy> get_iam_policy_from_attr(CephContext* cct,
253 RGWRados* store,
254 map<string, bufferlist>& attrs,
255 const string& tenant) {
256 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
257 if (i != attrs.end()) {
258 return Policy(cct, tenant, i->second);
259 } else {
260 return none;
261 }
262 }
263
264 static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
265 {
266 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
267 RGWRados::Object::Read read_op(&op_target);
268
269 read_op.params.attrs = &attrs;
270
271 return read_op.prepare();
272 }
273
274 static int modify_obj_attr(RGWRados *store, struct req_state *s, rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
275 {
276 map<string, bufferlist> attrs;
277 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
278 RGWRados::Object::Read read_op(&op_target);
279
280 read_op.params.attrs = &attrs;
281
282 int r = read_op.prepare();
283 if (r < 0) {
284 return r;
285 }
286 store->set_atomic(s->obj_ctx, read_op.state.obj);
287 attrs[attr_name] = attr_val;
288 return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL);
289 }
290
291 static int get_system_obj_attrs(RGWRados *store, struct req_state *s, rgw_raw_obj& obj, map<string, bufferlist>& attrs,
292 uint64_t *obj_size, RGWObjVersionTracker *objv_tracker)
293 {
294 RGWRados::SystemObject src(store, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
295 RGWRados::SystemObject::Read rop(&src);
296
297 rop.stat_params.attrs = &attrs;
298 rop.stat_params.obj_size = obj_size;
299
300 int ret = rop.stat(objv_tracker);
301 return ret;
302 }
303
304 static int read_bucket_policy(RGWRados *store,
305 struct req_state *s,
306 RGWBucketInfo& bucket_info,
307 map<string, bufferlist>& bucket_attrs,
308 RGWAccessControlPolicy *policy,
309 rgw_bucket& bucket)
310 {
311 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
312 ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
313 return -ERR_USER_SUSPENDED;
314 }
315
316 if (bucket.name.empty()) {
317 return 0;
318 }
319
320 int ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
321 if (ret == -ENOENT) {
322 ret = -ERR_NO_SUCH_BUCKET;
323 }
324
325 return ret;
326 }
327
/* Load the ACL (and bucket IAM policy) governing the requested object.
 * acl:    filled with the object's ACL.
 * policy: filled with the bucket's IAM policy, if one is stored.
 * For multipart-upload requests (uploadId query param present) the ACL is
 * read from the upload's meta object rather than the target object.
 * When the object doesn't exist, the bucket ACL decides whether the caller
 * gets -ENOENT or -EACCES, so we don't leak existence information. */
static int read_obj_policy(RGWRados *store,
                           struct req_state *s,
                           RGWBucketInfo& bucket_info,
                           map<string, bufferlist>& bucket_attrs,
                           RGWAccessControlPolicy* acl,
                           optional<Policy>& policy,
                           rgw_bucket& bucket,
                           rgw_obj_key& object)
{
  string upload_id;
  upload_id = s->info.args.get("uploadId");
  rgw_obj obj;

  /* suspended buckets are unreadable except by system requests */
  if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
    ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
    return -ERR_USER_SUSPENDED;
  }

  if (!upload_id.empty()) {
    /* multipart upload: the ACL lives on the upload meta object */
    RGWMPObj mp(object.name, upload_id);
    string oid = mp.get_meta();
    obj.init_ns(bucket, oid, mp_ns);
    obj.set_in_extra_data(true);
  } else {
    obj = rgw_obj(bucket, object);
  }
  policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
                                     bucket_info, bucket_attrs, acl, obj);
  if (ret == -ENOENT) {
    /* object does not exist checking the bucket's ACL to make sure
       that we send a proper error code */
    RGWAccessControlPolicy bucket_policy(s->cct);
    ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
    if (ret < 0) {
      return ret;
    }

    /* only reveal -ENOENT to callers who could list the bucket anyway:
     * the owner, an admin, or anyone the bucket ACL grants READ */
    const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
    if (bucket_owner.compare(s->user->user_id) != 0 &&
        ! s->auth.identity->is_admin_of(bucket_owner) &&
        ! bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                          RGW_PERM_READ)) {
      ret = -EACCES;
    } else {
      ret = -ENOENT;
    }
  }

  return ret;
}
382
383 /**
384 * Get the AccessControlPolicy for an user, bucket or object off of disk.
385 * s: The req_state to draw information from.
386 * only_bucket: If true, reads the user and bucket ACLs rather than the object ACL.
387 * Returns: 0 on success, -ERR# otherwise.
388 */
389 int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
390 {
391 int ret = 0;
392 rgw_obj_key obj;
393 RGWUserInfo bucket_owner_info;
394 RGWObjectCtx obj_ctx(store);
395
396 string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
397 if (!bi.empty()) {
398 ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id);
399 if (ret < 0) {
400 return ret;
401 }
402 }
403
404 if(s->dialect.compare("s3") == 0) {
405 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_S3>(s->cct);
406 } else if(s->dialect.compare("swift") == 0) {
407 /* We aren't allocating the account policy for those operations using
408 * the Swift's infrastructure that don't really need req_state::user.
409 * Typical example here is the implementation of /info. */
410 if (!s->user->user_id.empty()) {
411 s->user_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
412 }
413 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
414 } else {
415 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
416 }
417
418 /* check if copy source is within the current domain */
419 if (!s->src_bucket_name.empty()) {
420 RGWBucketInfo source_info;
421
422 if (s->bucket_instance_id.empty()) {
423 ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL);
424 } else {
425 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL);
426 }
427 if (ret == 0) {
428 string& zonegroup = source_info.zonegroup;
429 s->local_source = store->get_zonegroup().equals(zonegroup);
430 }
431 }
432
433 struct {
434 rgw_user uid;
435 std::string display_name;
436 } acct_acl_user = {
437 s->user->user_id,
438 s->user->display_name,
439 };
440
441 if (!s->bucket_name.empty()) {
442 s->bucket_exists = true;
443 if (s->bucket_instance_id.empty()) {
444 ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, s->bucket_info, NULL, &s->bucket_attrs);
445 } else {
446 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, s->bucket_info, NULL, &s->bucket_attrs);
447 }
448 if (ret < 0) {
449 if (ret != -ENOENT) {
450 string bucket_log;
451 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
452 ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl;
453 return ret;
454 }
455 s->bucket_exists = false;
456 }
457 s->bucket = s->bucket_info.bucket;
458
459 if (s->bucket_exists) {
460 ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs,
461 s->bucket_acl.get(), s->bucket);
462 acct_acl_user = {
463 s->bucket_info.owner,
464 s->bucket_acl->get_owner().get_display_name(),
465 };
466 } else {
467 s->bucket_acl->create_default(s->user->user_id, s->user->display_name);
468 ret = -ERR_NO_SUCH_BUCKET;
469 }
470
471 s->bucket_owner = s->bucket_acl->get_owner();
472
473 RGWZoneGroup zonegroup;
474 int r = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
475 if (!r) {
476 if (!zonegroup.endpoints.empty()) {
477 s->zonegroup_endpoint = zonegroup.endpoints.front();
478 } else {
479 // use zonegroup's master zone endpoints
480 auto z = zonegroup.zones.find(zonegroup.master_zone);
481 if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
482 s->zonegroup_endpoint = z->second.endpoints.front();
483 }
484 }
485 s->zonegroup_name = zonegroup.get_name();
486 }
487 if (r < 0 && ret == 0) {
488 ret = r;
489 }
490
491 if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) {
492 ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl;
493 /* we now need to make sure that the operation actually requires copy source, that is
494 * it's a copy operation
495 */
496 if (store->get_zonegroup().is_master_zonegroup() && s->system_request) {
497 /*If this is the master, don't redirect*/
498 } else if (!s->local_source ||
499 (s->op != OP_PUT && s->op != OP_COPY) ||
500 s->object.empty()) {
501 return -ERR_PERMANENT_REDIRECT;
502 }
503 }
504 }
505
506 /* handle user ACL only for those APIs which support it */
507 if (s->user_acl) {
508 map<string, bufferlist> uattrs;
509
510 ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs);
511 if (!ret) {
512 ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
513 }
514 if (-ENOENT == ret) {
515 /* In already existing clusters users won't have ACL. In such case
516 * assuming that only account owner has the rights seems to be
517 * reasonable. That allows to have only one verification logic.
518 * NOTE: there is small compatibility kludge for global, empty tenant:
519 * 1. if we try to reach an existing bucket, its owner is considered
520 * as account owner.
521 * 2. otherwise account owner is identity stored in s->user->user_id. */
522 s->user_acl->create_default(acct_acl_user.uid,
523 acct_acl_user.display_name);
524 ret = 0;
525 } else {
526 ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id="
527 << s->user->user_id
528 << ", ret="
529 << ret
530 << ")" << dendl;
531 return ret;
532 }
533 }
534
535 try {
536 s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
537 s->bucket_tenant);
538 } catch (const std::exception& e) {
539 // Really this is a can't happen condition. We parse the policy
540 // when it's given to us, so perhaps we should abort or otherwise
541 // raise bloody murder.
542 lderr(s->cct) << "Error reading IAM Policy: " << e.what() << dendl;
543 ret = -EACCES;
544 }
545
546 return ret;
547 }
548
549 /**
550 * Get the AccessControlPolicy for a bucket or object off of disk.
551 * s: The req_state to draw information from.
552 * only_bucket: If true, reads the bucket ACL rather than the object ACL.
553 * Returns: 0 on success, -ERR# otherwise.
554 */
555 int rgw_build_object_policies(RGWRados *store, struct req_state *s,
556 bool prefetch_data)
557 {
558 int ret = 0;
559
560 if (!s->object.empty()) {
561 if (!s->bucket_exists) {
562 return -ERR_NO_SUCH_BUCKET;
563 }
564 s->object_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
565
566 rgw_obj obj(s->bucket, s->object);
567
568 store->set_atomic(s->obj_ctx, obj);
569 if (prefetch_data) {
570 store->set_prefetch_data(s->obj_ctx, obj);
571 }
572 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
573 s->object_acl.get(), s->iam_policy, s->bucket,
574 s->object);
575 }
576
577 return ret;
578 }
579
580 rgw::IAM::Environment rgw_build_iam_environment(RGWRados* store,
581 struct req_state* s)
582 {
583 rgw::IAM::Environment e;
584 const auto& m = s->info.env->get_map();
585 auto t = ceph::real_clock::now();
586 e.emplace(std::piecewise_construct,
587 std::forward_as_tuple("aws:CurrentTime"),
588 std::forward_as_tuple(std::to_string(
589 ceph::real_clock::to_time_t(t))));
590 e.emplace(std::piecewise_construct,
591 std::forward_as_tuple("aws:EpochTime"),
592 std::forward_as_tuple(ceph::to_iso_8601(t)));
593 // TODO: This is fine for now, but once we have STS we'll need to
594 // look and see. Also this won't work with the IdentityApplier
595 // model, since we need to know the actual credential.
596 e.emplace(std::piecewise_construct,
597 std::forward_as_tuple("aws:PrincipalType"),
598 std::forward_as_tuple("User"));
599
600 auto i = m.find("HTTP_REFERER");
601 if (i != m.end()) {
602 e.emplace(std::piecewise_construct,
603 std::forward_as_tuple("aws:Referer"),
604 std::forward_as_tuple(i->second));
605 }
606
607 // These seem to be the semantics, judging from rest_rgw_s3.cc
608 i = m.find("SERVER_PORT_SECURE");
609 if (i != m.end()) {
610 e.emplace(std::piecewise_construct,
611 std::forward_as_tuple("aws:SecureTransport"),
612 std::forward_as_tuple("true"));
613 }
614
615 i = m.find("HTTP_HOST");
616 if (i != m.end()) {
617 e.emplace(std::piecewise_construct,
618 std::forward_as_tuple("aws:SourceIp"),
619 std::forward_as_tuple(i->second));
620 }
621
622 i = m.find("HTTP_USER_AGENT"); {
623 if (i != m.end())
624 e.emplace(std::piecewise_construct,
625 std::forward_as_tuple("aws:UserAgent"),
626 std::forward_as_tuple(i->second));
627 }
628
629 if (s->user) {
630 // What to do about aws::userid? One can have multiple access
631 // keys so that isn't really suitable. Do we have a durable
632 // identifier that can persist through name changes?
633 e.emplace(std::piecewise_construct,
634 std::forward_as_tuple("aws:username"),
635 std::forward_as_tuple(s->user->user_id.id));
636 }
637 return e;
638 }
639
/* Common pre-exec step for bucket/object ops: answer an Expect: 100-continue
 * if the client asked for one, then emit the bucket header from req_state. */
void rgw_bucket_object_pre_exec(struct req_state *s)
{
  if (s->expect_cont)
    dump_continue(s);

  dump_bucket_from_state(s);
}
647
648 int RGWGetObj::verify_permission()
649 {
650 obj = rgw_obj(s->bucket, s->object);
651 store->set_atomic(s->obj_ctx, obj);
652 if (get_data) {
653 store->set_prefetch_data(s->obj_ctx, obj);
654 }
655
656 if (torrent.get_flag()) {
657 if (obj.key.instance.empty()) {
658 action = rgw::IAM::s3GetObjectTorrent;
659 } else {
660 action = rgw::IAM::s3GetObjectVersionTorrent;
661 }
662 } else {
663 if (obj.key.instance.empty()) {
664 action = rgw::IAM::s3GetObject;
665 } else {
666 action = rgw::IAM::s3GetObjectVersion;
667 }
668 }
669
670 if (!verify_object_permission(s, action)) {
671 return -EACCES;
672 }
673
674 return 0;
675 }
676
677
678 int RGWOp::verify_op_mask()
679 {
680 uint32_t required_mask = op_mask();
681
682 ldout(s->cct, 20) << "required_mask= " << required_mask
683 << " user.op_mask=" << s->user->op_mask << dendl;
684
685 if ((s->user->op_mask & required_mask) != required_mask) {
686 return -EPERM;
687 }
688
689 if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) {
690 ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl;
691 return -EPERM;
692 }
693
694 return 0;
695 }
696
697 int RGWGetObjTags::verify_permission()
698 {
699 if (!verify_object_permission(s,
700 s->object.instance.empty() ?
701 rgw::IAM::s3GetObjectTagging:
702 rgw::IAM::s3GetObjectVersionTagging))
703 return -EACCES;
704
705 return 0;
706 }
707
/* Standard bucket/object pre-exec (100-continue + bucket header). */
void RGWGetObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
712
713 void RGWGetObjTags::execute()
714 {
715 rgw_obj obj;
716 map<string,bufferlist> attrs;
717
718 obj = rgw_obj(s->bucket, s->object);
719
720 store->set_atomic(s->obj_ctx, obj);
721
722 op_ret = get_obj_attrs(store, s, obj, attrs);
723 if (op_ret < 0) {
724 ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << obj
725 << " ret=" << op_ret << dendl;
726 return;
727 }
728
729 auto tags = attrs.find(RGW_ATTR_TAGS);
730 if(tags != attrs.end()){
731 has_tags = true;
732 tags_bl.append(tags->second);
733 }
734 send_response_data(tags_bl);
735 }
736
737 int RGWPutObjTags::verify_permission()
738 {
739 if (!verify_object_permission(s,
740 s->object.instance.empty() ?
741 rgw::IAM::s3PutObjectTagging:
742 rgw::IAM::s3PutObjectVersionTagging))
743 return -EACCES;
744 return 0;
745 }
746
747 void RGWPutObjTags::execute()
748 {
749 op_ret = get_params();
750 if (op_ret < 0)
751 return;
752
753 if (s->object.empty()){
754 op_ret= -EINVAL; // we only support tagging on existing objects
755 return;
756 }
757
758 rgw_obj obj;
759 obj = rgw_obj(s->bucket, s->object);
760 store->set_atomic(s->obj_ctx, obj);
761 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
762 if (op_ret == -ECANCELED){
763 op_ret = -ERR_TAG_CONFLICT;
764 }
765 }
766
/* Standard bucket/object pre-exec (100-continue + bucket header). */
void RGWDeleteObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
771
772
773 int RGWDeleteObjTags::verify_permission()
774 {
775 if (!s->object.empty()) {
776 if (!verify_object_permission(s,
777 s->object.instance.empty() ?
778 rgw::IAM::s3DeleteObjectTagging:
779 rgw::IAM::s3DeleteObjectVersionTagging))
780 return -EACCES;
781 }
782 return 0;
783 }
784
785 void RGWDeleteObjTags::execute()
786 {
787 if (s->object.empty())
788 return;
789
790 rgw_obj obj;
791 obj = rgw_obj(s->bucket, s->object);
792 store->set_atomic(s->obj_ctx, obj);
793 map <string, bufferlist> attrs;
794 map <string, bufferlist> rmattr;
795 bufferlist bl;
796 rmattr[RGW_ATTR_TAGS] = bl;
797 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr);
798 }
799
800 int RGWOp::do_aws4_auth_completion()
801 {
802 ldout(s->cct, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
803 if (s->auth.completer) {
804 if (!s->auth.completer->complete()) {
805 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
806 } else {
807 dout(10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
808 }
809
810 /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
811 * call passes, so we disable second one. This is old behaviour, sorry!
812 * Plan for tomorrow: seek and destroy. */
813 s->auth.completer = nullptr;
814 }
815
816 return 0;
817 }
818
819 int RGWOp::init_quota()
820 {
821 /* no quota enforcement for system requests */
822 if (s->system_request)
823 return 0;
824
825 /* init quota related stuff */
826 if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
827 return 0;
828 }
829
830 /* only interested in object related ops */
831 if (s->object.empty()) {
832 return 0;
833 }
834
835 RGWUserInfo owner_info;
836 RGWUserInfo *uinfo;
837
838 if (s->user->user_id == s->bucket_owner.get_id()) {
839 uinfo = s->user;
840 } else {
841 int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
842 if (r < 0)
843 return r;
844 uinfo = &owner_info;
845 }
846
847 if (s->bucket_info.quota.enabled) {
848 bucket_quota = s->bucket_info.quota;
849 } else if (uinfo->bucket_quota.enabled) {
850 bucket_quota = uinfo->bucket_quota;
851 } else {
852 bucket_quota = store->get_bucket_quota();
853 }
854
855 if (uinfo->user_quota.enabled) {
856 user_quota = uinfo->user_quota;
857 } else {
858 user_quota = store->get_user_quota();
859 }
860
861 return 0;
862 }
863
864 static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
865 uint8_t flags = 0;
866
867 if (!req_meth) {
868 dout(5) << "req_meth is null" << dendl;
869 return false;
870 }
871
872 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
873 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
874 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
875 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
876 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
877
878 if ((rule->get_allowed_methods() & flags) == flags) {
879 dout(10) << "Method " << req_meth << " is supported" << dendl;
880 } else {
881 dout(5) << "Method " << req_meth << " is not supported" << dendl;
882 return false;
883 }
884
885 return true;
886 }
887
888 int RGWOp::read_bucket_cors()
889 {
890 bufferlist bl;
891
892 map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
893 if (aiter == s->bucket_attrs.end()) {
894 ldout(s->cct, 20) << "no CORS configuration attr found" << dendl;
895 cors_exist = false;
896 return 0; /* no CORS configuration found */
897 }
898
899 cors_exist = true;
900
901 bl = aiter->second;
902
903 bufferlist::iterator iter = bl.begin();
904 try {
905 bucket_cors.decode(iter);
906 } catch (buffer::error& err) {
907 ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
908 return -EIO;
909 }
910 if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
911 RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
912 ldout(s->cct, 15) << "Read RGWCORSConfiguration";
913 s3cors->to_xml(*_dout);
914 *_dout << dendl;
915 }
916 return 0;
917 }
918
919 /** CORS 6.2.6.
920 * If any of the header field-names is not a ASCII case-insensitive match for
921 * any of the values in list of headers do not set any additional headers and
922 * terminate this set of steps.
923 * */
924 static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
925 if (req_hdrs) {
926 list<string> hl;
927 get_str_list(req_hdrs, hl);
928 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
929 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
930 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
931 } else {
932 if (hdrs.length() > 0) hdrs.append(",");
933 hdrs.append((*it));
934 }
935 }
936 }
937 rule->format_exp_headers(exp_hdrs);
938 *max_age = rule->get_max_age();
939 }
940
/**
 * Generate the CORS header response for the current request.
 *
 * Follows the CORS standard, section 6.2 (resource processing model):
 * validate the Origin against the bucket's stored CORS rules, then fill
 * origin/method/headers/exp_headers/max_age for the caller to emit.
 * Returns false when no CORS headers should be sent at all.
 */
bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
{
  /* CORS 6.2.1. -- no Origin header means this is not a CORS request */
  const char *orig = s->info.env->get("HTTP_ORIGIN");
  if (!orig) {
    return false;
  }

  /* Custom: */
  origin = orig;
  op_ret = read_bucket_cors();
  if (op_ret < 0) {
    return false;
  }

  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    return false;
  }

  /* CORS 6.2.2. -- find a rule whose origin list matches */
  RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
  if (!rule)
    return false;

  /*
   * Set the Allowed-Origin header to a asterisk if this is allowed in the rule
   * and no Authorization was send by the client
   *
   * The origin parameter specifies a URI that may access the resource.  The browser must enforce this.
   * For requests without credentials, the server may specify "*" as a wildcard,
   * thereby allowing any origin to access the resource.
   */
  const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
  if (!authorization && rule->has_wildcard_origin())
    origin = "*";

  /* CORS 6.2.3. -- for actual (non-preflight) requests there is no
   * Access-Control-Request-Method header; fall back to the request method */
  const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    req_meth = s->info.method;
  }

  if (req_meth) {
    method = req_meth;
    /* CORS 6.2.5. -- the method must be in the rule's allowed set */
    if (!validate_cors_rule_method(rule, req_meth)) {
     return false;
    }
  }

  /* CORS 6.2.4. -- requested header names (may be null) */
  const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");

  /* CORS 6.2.6. -- filter them against the rule */
  get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);

  return true;
}
1005
/* Read one segment of a user-manifest (SLO/DLO-style) object and stream
 * the byte range [start_ofs, end_ofs] through the response filter chain.
 *
 * ent:           directory entry describing the segment object.
 * bucket_acl /
 * bucket_policy: access control of the bucket holding the segment; each
 *                segment is permission-checked individually.
 *
 * The segment is read with an if-match on its recorded etag so a segment
 * replaced mid-stream fails the read instead of corrupting the response.
 * Compressed segments are routed through a decompression filter.
 * Returns 0 on success, negative error otherwise (sets op_ret too). */
int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       const off_t start_ofs,
                                       const off_t end_ofs)
{
  ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl;
  RGWGetObj_CB cb(this);
  RGWGetDataCB* filter = &cb;
  boost::optional<RGWGetObj_Decompress> decompress;

  int64_t cur_ofs = start_ofs;
  int64_t cur_end = end_ofs;

  rgw_obj part(bucket, ent.key);

  map<string, bufferlist> attrs;

  uint64_t obj_size;
  RGWObjectCtx obj_ctx(store);
  RGWAccessControlPolicy obj_policy(s->cct);

  ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl;

  obj_ctx.obj.set_atomic(part);
  store->set_prefetch_data(&obj_ctx, part);

  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
  RGWRados::Object::Read read_op(&op_target);

  /* guard against the segment being replaced since the manifest was read */
  read_op.conds.if_match = ent.meta.etag.c_str();
  read_op.params.attrs = &attrs;
  read_op.params.obj_size = &obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    return op_ret;
  op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
  if (op_ret < 0)
    return op_ret;
  bool need_decompress;
  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    return -EIO;
  }

  if (need_decompress)
  {
    /* sanity: the uncompressed size must match what the manifest recorded */
    if (cs_info.orig_size != ent.meta.accounted_size) {
      // hmm.. something wrong, object not as expected, abort!
      /* NOTE(review): this log prints ent.meta.size although the check above
       * compares against ent.meta.accounted_size -- confirm intent */
      ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size <<
          ", actual read size=" << ent.meta.size << dendl;
      return -EIO;
    }
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }
  else
  {
    if (obj_size != ent.meta.size) {
      // hmm.. something wrong, object not as expected, abort!
      ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl;
      return -EIO;
    }
  }

  op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
  if (op_ret < 0)
    return op_ret;

  /* We can use global user_acl because LOs cannot have segments
   * stored inside different accounts. */
  if (s->system_request) {
    ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl;
  } else if (s->auth.identity->is_admin_of(s->user->user_id)) {
    ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl;
  } else if (!verify_object_permission(s, part, s->user_acl.get(), bucket_acl,
				       &obj_policy, bucket_policy, action)) {
    return -EPERM;
  }

  /* zero-length segment: nothing to stream */
  if (ent.meta.size == 0) {
    return 0;
  }

  perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
  filter->fixup_range(cur_ofs, cur_end);
  op_ret = read_op.iterate(cur_ofs, cur_end, filter);
  if (op_ret >= 0)
    op_ret = filter->flush();
  return op_ret;
}
1100
/* Visit every object in *pbucket_info whose name starts with obj_prefix and
 * treat the concatenation of those objects as one logical object (Swift
 * DLO / user manifest).  The byte range [ofs, end] (end inclusive) refers
 * to that concatenation.  Depending on which output pointers are non-null:
 *  - *ptotal_len: bytes of [ofs, end] actually covered by listed parts,
 *  - *pobj_size:  total size of the concatenated content,
 *  - *pobj_sum:   MD5 over the concatenated part ETags (Swift DLO ETag),
 * and, when cb is non-null, cb is invoked for every part overlapping the
 * range with that part's clipped sub-range [start_ofs, end_ofs).
 * Returns 0 on success or a negative error code. */
static int iterate_user_manifest_parts(CephContext * const cct,
                                       RGWRados * const store,
                                       const off_t ofs,
                                       const off_t end,
                                       RGWBucketInfo *pbucket_info,
                                       const string& obj_prefix,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       uint64_t * const ptotal_len,
                                       uint64_t * const pobj_size,
                                       string * const pobj_sum,
                                       int (*cb)(rgw_bucket& bucket,
                                                 const rgw_bucket_dir_entry& ent,
                                                 RGWAccessControlPolicy * const bucket_acl,
                                                 const optional<Policy>& bucket_policy,
                                                 off_t start_ofs,
                                                 off_t end_ofs,
                                                 void *param),
                                       void * const cb_param)
{
  rgw_bucket& bucket = pbucket_info->bucket;
  uint64_t obj_ofs = 0, len_count = 0;
  bool found_start = false, found_end = false, handled_end = false;
  string delim;
  bool is_truncated;
  vector<rgw_bucket_dir_entry> objs;

  utime_t start_time = ceph_clock_now();

  RGWRados::Bucket target(store, *pbucket_info);
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = obj_prefix;
  list_op.params.delim = delim;

  MD5 etag_sum;
  do {
#define MAX_LIST_OBJS 100
    int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
    if (r < 0) {
      return r;
    }

    for (rgw_bucket_dir_entry& ent : objs) {
      /* offset of this part within the logical (concatenated) object */
      const uint64_t cur_total_len = obj_ofs;
      const uint64_t obj_size = ent.meta.accounted_size;
      uint64_t start_ofs = 0, end_ofs = obj_size;

      /* first part overlapping ofs: clip its head */
      if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
        start_ofs = ofs - obj_ofs;
        found_start = true;
      }

      obj_ofs += obj_size;
      if (pobj_sum) {
        /* Swift DLO ETag: MD5 of the concatenation of part ETag strings */
        etag_sum.Update((const byte *)ent.meta.etag.c_str(),
                        ent.meta.etag.length());
      }

      /* part containing the (inclusive) end: clip its tail */
      if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
        end_ofs = end - cur_total_len + 1;
        found_end = true;
      }

      perfcounter->tinc(l_rgw_get_lat,
                        (ceph_clock_now() - start_time));

      /* handled_end trails found_end by one iteration so the part that
       * contains `end` is still counted/emitted before we stop */
      if (found_start && !handled_end) {
        len_count += end_ofs - start_ofs;

        if (cb) {
          r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, cb_param);
          if (r < 0) {
            return r;
          }
        }
      }

      handled_end = found_end;
      start_time = ceph_clock_now();
    }
  } while (is_truncated);

  if (ptotal_len) {
    *ptotal_len = len_count;
  }
  if (pobj_size) {
    *pobj_size = obj_ofs;
  }
  if (pobj_sum) {
    complete_etag(etag_sum, pobj_sum);
  }

  return 0;
}
1196
/* One segment of a Swift Static Large Object (SLO) manifest together with
 * the access-control context of the bucket holding the segment. */
struct rgw_slo_part {
  RGWAccessControlPolicy *bucket_acl = nullptr; // borrowed, not owned
  Policy* bucket_policy = nullptr;              // borrowed; may be null (no IAM policy)
  rgw_bucket bucket;
  string obj_name;   // segment object name within `bucket`
  uint64_t size = 0; // segment size in bytes (from the manifest entry)
  string etag;       // segment ETag (from the manifest entry)
};
1205
/* Map the byte range [ofs, end] (end inclusive) of a Swift SLO onto its
 * segments.  slo_parts is keyed by each part's starting offset within the
 * logical object; cb is invoked for every part overlapping the range with
 * that part's clipped sub-range [start_ofs, end_ofs).
 * Returns 0, or the first negative value returned by cb. */
static int iterate_slo_parts(CephContext *cct,
                             RGWRados *store,
                             off_t ofs,
                             off_t end,
                             map<uint64_t, rgw_slo_part>& slo_parts,
                             int (*cb)(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy *bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       off_t start_ofs,
                                       off_t end_ofs,
                                       void *param),
                             void *cb_param)
{
  bool found_start = false, found_end = false;

  if (slo_parts.empty()) {
    return 0;
  }

  utime_t start_time = ceph_clock_now();

  /* Jump to the part containing ofs: upper_bound() gives the first part
   * starting strictly after ofs, so step back one entry when possible. */
  map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
  if (iter != slo_parts.begin()) {
    --iter;
  }

  uint64_t obj_ofs = iter->first;

  for (; iter != slo_parts.end() && !found_end; ++iter) {
    rgw_slo_part& part = iter->second;
    /* synthesize a dir entry so the DLO callback signature can be reused */
    rgw_bucket_dir_entry ent;

    ent.key.name = part.obj_name;
    ent.meta.accounted_size = ent.meta.size = part.size;
    ent.meta.etag = part.etag;

    uint64_t cur_total_len = obj_ofs;
    uint64_t start_ofs = 0, end_ofs = ent.meta.size;

    /* first part overlapping ofs: clip its head */
    if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
      start_ofs = ofs - obj_ofs;
      found_start = true;
    }

    obj_ofs += ent.meta.size;

    /* part containing the (inclusive) end: clip its tail */
    if (!found_end && obj_ofs > (uint64_t)end) {
      end_ofs = end - cur_total_len + 1;
      found_end = true;
    }

    perfcounter->tinc(l_rgw_get_lat,
                      (ceph_clock_now() - start_time));

    if (found_start) {
      if (cb) {
        // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
        int r = cb(part.bucket, ent, part.bucket_acl,
                   (part.bucket_policy ?
                    optional<Policy>(*part.bucket_policy) : none),
                   start_ofs, end_ofs, cb_param);
        if (r < 0)
          return r;
      }
    }

    start_time = ceph_clock_now();
  }

  return 0;
}
1278
1279 static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
1280 const rgw_bucket_dir_entry& ent,
1281 RGWAccessControlPolicy * const bucket_acl,
1282 const optional<Policy>& bucket_policy,
1283 const off_t start_ofs,
1284 const off_t end_ofs,
1285 void * const param)
1286 {
1287 RGWGetObj *op = static_cast<RGWGetObj *>(param);
1288 return op->read_user_manifest_part(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs);
1289 }
1290
/* RGWGetObj::handle_user_manifest()
 * Serve a GET/HEAD on a Swift Dynamic Large Object (DLO).  prefix has the
 * form "<bucket>/<obj_prefix>"; the logical content is the concatenation
 * of every object matching obj_prefix in that bucket.  Three listing
 * passes compute (1) total size plus the logical ETag, (2) the response
 * length after range clamping, (3) the actual data send.
 * Returns 0 or a negative error code. */
int RGWGetObj::handle_user_manifest(const char *prefix)
{
  const boost::string_view prefix_view(prefix);
  ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix="
                   << prefix_view << dendl;

  const size_t pos = prefix_view.find('/');
  if (pos == string::npos) {
    return -EINVAL;
  }

  const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
  const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));

  rgw_bucket bucket;

  RGWAccessControlPolicy _bucket_acl(s->cct);
  RGWAccessControlPolicy *bucket_acl;
  optional<Policy> _bucket_policy;
  optional<Policy>* bucket_policy;
  RGWBucketInfo bucket_info;
  RGWBucketInfo *pbucket_info;

  /* The manifest may reference a bucket other than the manifest object's
   * own: load that bucket's info, ACL and IAM policy in that case,
   * otherwise reuse what is already cached on the request state. */
  if (bucket_name.compare(s->bucket.name) != 0) {
    map<string, bufferlist> bucket_attrs;
    RGWObjectCtx obj_ctx(store);
    int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   bucket_name, bucket_info, NULL,
                                   &bucket_attrs);
    if (r < 0) {
      ldout(s->cct, 0) << "could not get bucket info for bucket="
                       << bucket_name << dendl;
      return r;
    }
    bucket = bucket_info.bucket;
    pbucket_info = &bucket_info;
    bucket_acl = &_bucket_acl;
    r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
    if (r < 0) {
      ldout(s->cct, 0) << "failed to read bucket policy" << dendl;
      return r;
    }
    _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
                                              bucket_info.bucket.tenant);
    bucket_policy = &_bucket_policy;
  } else {
    bucket = s->bucket;
    pbucket_info = &s->bucket_info;
    bucket_acl = s->bucket_acl.get();
    bucket_policy = &s->iam_policy;
  }

  /* dry run to find out:
   * - total length (of the parts we are going to send to client),
   * - overall DLO's content size,
   * - md5 sum of overall DLO's content (for etag of Swift API). */
  int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, &s->obj_size, &lo_etag,
        nullptr /* cb */, nullptr /* cb arg */);
  if (r < 0) {
    return r;
  }

  /* clamp the requested range against the now-known logical size */
  r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
  if (r < 0) {
    return r;
  }

  /* second pass: total_len = bytes of [ofs, end] covered by parts */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        &total_len, nullptr, nullptr,
        nullptr, nullptr);
  if (r < 0) {
    return r;
  }

  /* HEAD request: headers only, no body */
  if (!get_data) {
    bufferlist bl;
    send_response_data(bl, 0, 0);
    return 0;
  }

  /* third pass: stream the overlapping parts to the client */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, nullptr, nullptr,
        get_obj_user_manifest_iterate_cb, (void *)this);
  if (r < 0) {
    return r;
  }

  /* a zero-length result still needs an (empty) response */
  if (!total_len) {
    bufferlist bl;
    send_response_data(bl, 0, 0);
  }

  return 0;
}
1389
1390 int RGWGetObj::handle_slo_manifest(bufferlist& bl)
1391 {
1392 RGWSLOInfo slo_info;
1393 bufferlist::iterator bliter = bl.begin();
1394 try {
1395 ::decode(slo_info, bliter);
1396 } catch (buffer::error& err) {
1397 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
1398 return -EIO;
1399 }
1400 ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
1401
1402 vector<RGWAccessControlPolicy> allocated_acls;
1403 map<string, pair<RGWAccessControlPolicy *, optional<Policy>>> policies;
1404 map<string, rgw_bucket> buckets;
1405
1406 map<uint64_t, rgw_slo_part> slo_parts;
1407
1408 MD5 etag_sum;
1409 total_len = 0;
1410
1411 for (const auto& entry : slo_info.entries) {
1412 const string& path = entry.path;
1413
1414 /* If the path starts with slashes, strip them all. */
1415 const size_t pos_init = path.find_first_not_of('/');
1416 /* According to the documentation of std::string::find following check
1417 * is not necessary as we should get the std::string::npos propagation
1418 * here. This might be true with the accuracy to implementation's bugs.
1419 * See following question on SO:
1420 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
1421 */
1422 if (pos_init == string::npos) {
1423 return -EINVAL;
1424 }
1425
1426 const size_t pos_sep = path.find('/', pos_init);
1427 if (pos_sep == string::npos) {
1428 return -EINVAL;
1429 }
1430
1431 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
1432 string obj_name = path.substr(pos_sep + 1);
1433
1434 rgw_bucket bucket;
1435 RGWAccessControlPolicy *bucket_acl;
1436 Policy* bucket_policy;
1437
1438 if (bucket_name.compare(s->bucket.name) != 0) {
1439 const auto& piter = policies.find(bucket_name);
1440 if (piter != policies.end()) {
1441 bucket_acl = piter->second.first;
1442 bucket_policy = piter->second.second.get_ptr();
1443 bucket = buckets[bucket_name];
1444 } else {
1445 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
1446 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
1447
1448 RGWBucketInfo bucket_info;
1449 map<string, bufferlist> bucket_attrs;
1450 RGWObjectCtx obj_ctx(store);
1451 int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
1452 bucket_name, bucket_info, nullptr,
1453 &bucket_attrs);
1454 if (r < 0) {
1455 ldout(s->cct, 0) << "could not get bucket info for bucket="
1456 << bucket_name << dendl;
1457 return r;
1458 }
1459 bucket = bucket_info.bucket;
1460 bucket_acl = &_bucket_acl;
1461 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
1462 bucket);
1463 if (r < 0) {
1464 ldout(s->cct, 0) << "failed to read bucket ACL for bucket "
1465 << bucket << dendl;
1466 return r;
1467 }
1468 auto _bucket_policy = get_iam_policy_from_attr(
1469 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
1470 bucket_policy = _bucket_policy.get_ptr();
1471 buckets[bucket_name] = bucket;
1472 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
1473 }
1474 } else {
1475 bucket = s->bucket;
1476 bucket_acl = s->bucket_acl.get();
1477 bucket_policy = s->iam_policy.get_ptr();
1478 }
1479
1480 rgw_slo_part part;
1481 part.bucket_acl = bucket_acl;
1482 part.bucket_policy = bucket_policy;
1483 part.bucket = bucket;
1484 part.obj_name = obj_name;
1485 part.size = entry.size_bytes;
1486 part.etag = entry.etag;
1487 ldout(s->cct, 20) << "slo_part: ofs=" << ofs
1488 << " bucket=" << part.bucket
1489 << " obj=" << part.obj_name
1490 << " size=" << part.size
1491 << " etag=" << part.etag
1492 << dendl;
1493
1494 etag_sum.Update((const byte *)entry.etag.c_str(),
1495 entry.etag.length());
1496
1497 slo_parts[total_len] = part;
1498 total_len += part.size;
1499 }
1500
1501 complete_etag(etag_sum, &lo_etag);
1502
1503 s->obj_size = slo_info.total_size;
1504 ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl;
1505
1506 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
1507 if (r < 0) {
1508 return r;
1509 }
1510
1511 total_len = end - ofs + 1;
1512
1513 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
1514 get_obj_user_manifest_iterate_cb, (void *)this);
1515 if (r < 0) {
1516 return r;
1517 }
1518
1519 return 0;
1520 }
1521
1522 int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
1523 {
1524 /* garbage collection related handling */
1525 utime_t start_time = ceph_clock_now();
1526 if (start_time > gc_invalidate_time) {
1527 int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
1528 if (r < 0) {
1529 dout(0) << "WARNING: could not defer gc entry for obj" << dendl;
1530 }
1531 gc_invalidate_time = start_time;
1532 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1533 }
1534 return send_response_data(bl, bl_ofs, bl_len);
1535 }
1536
1537 bool RGWGetObj::prefetch_data()
1538 {
1539 /* HEAD request, stop prefetch*/
1540 if (!get_data) {
1541 return false;
1542 }
1543
1544 bool prefetch_first_chunk = true;
1545 range_str = s->info.env->get("HTTP_RANGE");
1546
1547 if(range_str) {
1548 int r = parse_range(range_str, ofs, end, &partial_content);
1549 /* error on parsing the range, stop prefetch and will fail in execte() */
1550 if (r < 0) {
1551 range_parsed = false;
1552 return false;
1553 } else {
1554 range_parsed = true;
1555 }
1556 /* range get goes to shadown objects, stop prefetch */
1557 if (ofs >= s->cct->_conf->rgw_max_chunk_size) {
1558 prefetch_first_chunk = false;
1559 }
1560 }
1561
1562 return get_data && prefetch_first_chunk;
1563 }
/* Log the standard bucket/object request context before execute(). */
void RGWGetObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
1568
1569 static bool object_is_expired(map<string, bufferlist>& attrs) {
1570 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
1571 if (iter != attrs.end()) {
1572 utime_t delete_at;
1573 try {
1574 ::decode(delete_at, iter->second);
1575 } catch (buffer::error& err) {
1576 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
1577 return false;
1578 }
1579
1580 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
1581 return true;
1582 }
1583 }
1584
1585 return false;
1586 }
1587
/* RGWGetObj::execute()
 * Top-level GET/HEAD object flow: read preconditions and attrs, handle
 * the special cases (STAT, torrent, DLO/SLO manifests, compression,
 * encryption, expiration), clamp the byte range, then stream the data
 * through the filter chain to the client.  Errors funnel through
 * done_err so an error response is always sent. */
void RGWGetObj::execute()
{
  utime_t start_time = s->time;
  bufferlist bl;
  /* arm the GC-defer timer consumed by get_data_cb() */
  gc_invalidate_time = ceph_clock_now();
  gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);

  bool need_decompress;
  int64_t ofs_x, end_x;

  /* filter chain: rados read -> [decompress] -> [decrypt] -> client cb */
  RGWGetObj_CB cb(this);
  RGWGetDataCB* filter = (RGWGetDataCB*)&cb;
  boost::optional<RGWGetObj_Decompress> decompress;
  std::unique_ptr<RGWGetDataCB> decrypt;
  map<string, bufferlist>::iterator attr_iter;

  perfcounter->inc(l_rgw_get);

  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
  RGWRados::Object::Read read_op(&op_target);

  op_ret = get_params();
  if (op_ret < 0)
    goto done_err;

  op_ret = init_common();
  if (op_ret < 0)
    goto done_err;

  /* conditional-GET preconditions */
  read_op.conds.mod_ptr = mod_ptr;
  read_op.conds.unmod_ptr = unmod_ptr;
  read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
  read_op.conds.mod_zone_id = mod_zone_id;
  read_op.conds.mod_pg_ver = mod_pg_ver;
  read_op.conds.if_match = if_match;
  read_op.conds.if_nomatch = if_nomatch;
  read_op.params.attrs = &attrs;
  read_op.params.lastmod = &lastmod;
  read_op.params.obj_size = &s->obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    goto done_err;
  version_id = read_op.state.obj.key.instance;

  /* STAT ops don't need data, and do no i/o */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  /* start gettorrent */
  if (torrent.get_flag())
  {
    torrent.init(s, store);
    torrent.get_torrent_file(op_ret, read_op, total_len, bl, obj);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    op_ret = send_response_data(bl, 0, total_len);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }
  /* end gettorrent */

  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    goto done_err;
  }
  if (need_decompress) {
    /* report the uncompressed size to the client */
    s->obj_size = cs_info.orig_size;
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }

  /* Swift DLO: delegate the whole response to the manifest handler */
  attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    op_ret = handle_user_manifest(attr_iter->second.c_str());
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret="
                       << op_ret << dendl;
      goto done_err;
    }
    return;
  }

  /* Swift SLO: likewise */
  attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    is_slo = true;
    op_ret = handle_slo_manifest(attr_iter->second);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }

  // for range requests with obj size 0
  if (range_str && !(s->obj_size)) {
    total_len = 0;
    op_ret = -ERANGE;
    goto done_err;
  }

  op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
  if (op_ret < 0)
    goto done_err;
  total_len = (ofs <= end ? end + 1 - ofs : 0);

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(attrs)) {
    op_ret = -ENOENT;
    goto done_err;
  }

  start = ofs;

  /* STAT ops don't need data, and do no i/o */
  /* NOTE(review): unreachable -- the identical check above already
   * returned for RGW_OP_STAT_OBJ before any range handling */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  /* install the SSE decryption filter when the object is encrypted */
  attr_iter = attrs.find(RGW_ATTR_MANIFEST);
  op_ret = this->get_decrypt_filter(&decrypt, filter,
                                    attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
  if (decrypt != nullptr) {
    filter = decrypt.get();
  }
  if (op_ret < 0) {
    goto done_err;
  }

  if (!get_data || ofs > end) {
    send_response_data(bl, 0, 0);
    return;
  }

  perfcounter->inc(l_rgw_get_b, end - ofs);

  /* let compression/encryption filters widen the range they need to read */
  ofs_x = ofs;
  end_x = end;
  filter->fixup_range(ofs_x, end_x);
  op_ret = read_op.iterate(ofs_x, end_x, filter);

  if (op_ret >= 0)
    op_ret = filter->flush();

  perfcounter->tinc(l_rgw_get_lat,
                    (ceph_clock_now() - start_time));
  if (op_ret < 0) {
    goto done_err;
  }

  /* signal end-of-data to the frontend */
  op_ret = send_response_data(bl, 0, 0);
  if (op_ret < 0) {
    goto done_err;
  }
  return;

done_err:
  send_response_data_error();
}
1760
1761 int RGWGetObj::init_common()
1762 {
1763 if (range_str) {
1764 /* range parsed error when prefetch*/
1765 if (!range_parsed) {
1766 int r = parse_range(range_str, ofs, end, &partial_content);
1767 if (r < 0)
1768 return r;
1769 }
1770 }
1771 if (if_mod) {
1772 if (parse_time(if_mod, &mod_time) < 0)
1773 return -EINVAL;
1774 mod_ptr = &mod_time;
1775 }
1776
1777 if (if_unmod) {
1778 if (parse_time(if_unmod, &unmod_time) < 0)
1779 return -EINVAL;
1780 unmod_ptr = &unmod_time;
1781 }
1782
1783 return 0;
1784 }
1785
1786 int RGWListBuckets::verify_permission()
1787 {
1788 if (!verify_user_permission(s, RGW_PERM_READ)) {
1789 return -EACCES;
1790 }
1791
1792 return 0;
1793 }
1794
1795 int RGWGetUsage::verify_permission()
1796 {
1797 if (s->auth.identity->is_anonymous()) {
1798 return -EACCES;
1799 }
1800
1801 return 0;
1802 }
1803
/* RGWListBuckets::execute()
 * List the authenticated user's buckets, paging through them in chunks
 * of rgw_list_buckets_max_chunk and streaming each chunk to the client
 * via send_response_data().  Also accumulates account-wide size/object
 * totals into the buckets_* members. */
void RGWListBuckets::execute()
{
  bool done;
  bool started = false;
  uint64_t total_count = 0;

  uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  op_ret = get_params();
  if (op_ret < 0) {
    goto send_end;
  }

  if (supports_account_metadata()) {
    op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
    if (op_ret < 0) {
      goto send_end;
    }
  }

  is_truncated = false;
  do {
    RGWUserBuckets buckets;
    uint64_t read_count;
    /* honor the client-provided limit across chunks */
    if (limit >= 0) {
      read_count = min(limit - total_count, (uint64_t)max_buckets);
    } else {
      read_count = max_buckets;
    }

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, end_marker, read_count,
                                   should_get_stats(), &is_truncated,
                                   get_default_max());
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
                        << s->user->user_id << dendl;
      break;
    }
    map<string, RGWBucketEnt>& m = buckets.get_buckets();
    map<string, RGWBucketEnt>::iterator iter;
    for (iter = m.begin(); iter != m.end(); ++iter) {
      RGWBucketEnt& bucket = iter->second;
      buckets_size += bucket.size;
      buckets_size_rounded += bucket.size_rounded;
      buckets_objcount += bucket.count;
    }
    buckets_count += m.size();
    total_count += m.size();

    done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));

    /* open the response envelope lazily, on the first chunk */
    if (!started) {
      send_response_begin(buckets.count() > 0);
      started = true;
    }

    if (!m.empty()) {
      send_response_data(buckets);

      /* resume the next chunk after the last bucket seen */
      map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
      marker = riter->first;
    }
  } while (is_truncated && !done);

send_end:
  if (!started) {
    send_response_begin(false);
  }
  send_response_end();
}
1877
1878 void RGWGetUsage::execute()
1879 {
1880 uint64_t start_epoch = 0;
1881 uint64_t end_epoch = (uint64_t)-1;
1882 op_ret = get_params();
1883 if (op_ret < 0)
1884 return;
1885
1886 if (!start_date.empty()) {
1887 op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
1888 if (op_ret < 0) {
1889 ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl;
1890 return;
1891 }
1892 }
1893
1894 if (!end_date.empty()) {
1895 op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
1896 if (op_ret < 0) {
1897 ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl;
1898 return;
1899 }
1900 }
1901
1902 uint32_t max_entries = 1000;
1903
1904 bool is_truncated = true;
1905
1906 RGWUsageIter usage_iter;
1907
1908 while (is_truncated) {
1909 op_ret = store->read_usage(s->user->user_id, start_epoch, end_epoch, max_entries,
1910 &is_truncated, usage_iter, usage);
1911
1912 if (op_ret == -ENOENT) {
1913 op_ret = 0;
1914 is_truncated = false;
1915 }
1916
1917 if (op_ret < 0) {
1918 return;
1919 }
1920 }
1921
1922 op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
1923 if (op_ret < 0) {
1924 ldout(store->ctx(), 0) << "ERROR: failed to sync user stats: " << dendl;
1925 return;
1926 }
1927
1928 op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage);
1929 if (op_ret < 0) {
1930 cerr << "ERROR: failed to sync user stats: " << std::endl;
1931 return ;
1932 }
1933
1934 string user_str = s->user->user_id.to_str();
1935 op_ret = store->cls_user_get_header(user_str, &header);
1936 if (op_ret < 0) {
1937 ldout(store->ctx(), 0) << "ERROR: can't read user header: " << dendl;
1938 return;
1939 }
1940
1941 return;
1942 }
1943
1944 int RGWStatAccount::verify_permission()
1945 {
1946 if (!verify_user_permission(s, RGW_PERM_READ)) {
1947 return -EACCES;
1948 }
1949
1950 return 0;
1951 }
1952
1953 void RGWStatAccount::execute()
1954 {
1955 string marker;
1956 bool is_truncated = false;
1957 uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
1958
1959 do {
1960 RGWUserBuckets buckets;
1961
1962 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker,
1963 string(), max_buckets, true, &is_truncated);
1964 if (op_ret < 0) {
1965 /* hmm.. something wrong here.. the user was authenticated, so it
1966 should exist */
1967 ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
1968 << s->user->user_id << dendl;
1969 break;
1970 } else {
1971 map<string, RGWBucketEnt>& m = buckets.get_buckets();
1972 map<string, RGWBucketEnt>::iterator iter;
1973 for (iter = m.begin(); iter != m.end(); ++iter) {
1974 RGWBucketEnt& bucket = iter->second;
1975 buckets_size += bucket.size;
1976 buckets_size_rounded += bucket.size_rounded;
1977 buckets_objcount += bucket.count;
1978
1979 marker = iter->first;
1980 }
1981 buckets_count += m.size();
1982
1983 }
1984 } while (is_truncated);
1985 }
1986
1987 int RGWGetBucketVersioning::verify_permission()
1988 {
1989 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
1990 return -EACCES;
1991 }
1992
1993 return 0;
1994 }
1995
/* Log the standard bucket/object request context before execute(). */
void RGWGetBucketVersioning::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2000
/* Report the bucket's versioning state straight from the cached
 * bucket_info; no store round-trip is needed. */
void RGWGetBucketVersioning::execute()
{
  versioned = s->bucket_info.versioned();
  versioning_enabled = s->bucket_info.versioning_enabled();
}
2006
2007 int RGWSetBucketVersioning::verify_permission()
2008 {
2009 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
2010 return -EACCES;
2011 }
2012
2013 return 0;
2014 }
2015
/* Log the standard bucket/object request context before execute(). */
void RGWSetBucketVersioning::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2020
/* RGWSetBucketVersioning::execute()
 * Apply a PUT ?versioning request: forward to the metadata master zone
 * when we are not it, flip the versioning flags on bucket_info, and
 * persist the bucket instance info. */
void RGWSetBucketVersioning::execute()
{
  op_ret = get_params();
  if (op_ret < 0)
    return;

  /* metadata changes must originate at the metadata master zone; other
   * zones forward the request and pick the change up via metadata sync */
  if (!store->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (enable_versioning) {
    s->bucket_info.flags |= BUCKET_VERSIONED;
    s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
  } else {
    /* suspension keeps BUCKET_VERSIONED set and adds the SUSPENDED bit:
     * a bucket that has ever been versioned stays flagged as versioned,
     * it merely stops minting new version ids (S3 semantics) */
    s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
  }

  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
                                           &s->bucket_attrs);
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
                     << " returned err=" << op_ret << dendl;
    return;
  }
}
2050
2051 int RGWGetBucketWebsite::verify_permission()
2052 {
2053 if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
2054 return -EACCES;
2055
2056 return 0;
2057 }
2058
/* Log the standard bucket/object request context before execute(). */
void RGWGetBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2063
/* GET ?website: the configuration itself is already cached on
 * s->bucket_info; only signal 404 when none is configured. */
void RGWGetBucketWebsite::execute()
{
  if (!s->bucket_info.has_website) {
    op_ret = -ENOENT;
  }
}
2070
2071 int RGWSetBucketWebsite::verify_permission()
2072 {
2073 if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
2074 return -EACCES;
2075
2076 return 0;
2077 }
2078
/* Log the standard bucket/object request context before execute(). */
void RGWSetBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2083
/* RGWSetBucketWebsite::execute()
 * Apply a PUT ?website request: forward to the metadata master zone when
 * we are not it, install the parsed website_conf on bucket_info, and
 * persist the bucket instance info. */
void RGWSetBucketWebsite::execute()
{
  op_ret = get_params();

  if (op_ret < 0)
    return;

  /* metadata changes must originate at the metadata master zone */
  if (!store->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  s->bucket_info.has_website = true;
  s->bucket_info.website_conf = website_conf;

  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }
}
2108
2109 int RGWDeleteBucketWebsite::verify_permission()
2110 {
2111 if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
2112 return -EACCES;
2113
2114 return 0;
2115 }
2116
/* Log the standard bucket/object request context before execute(). */
void RGWDeleteBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2121
/* DELETE ?website: clear the website flag/config and persist the bucket
 * instance info.
 * NOTE(review): unlike RGWSetBucketWebsite::execute(), this does not
 * forward the change to the metadata master zone -- confirm whether that
 * is intentional for multisite deployments. */
void RGWDeleteBucketWebsite::execute()
{
  s->bucket_info.has_website = false;
  s->bucket_info.website_conf = RGWBucketWebsiteConf();

  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }
}
2133
2134 int RGWStatBucket::verify_permission()
2135 {
2136 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2137 if (!verify_bucket_permission(s, rgw::IAM::s3ListBucket)) {
2138 return -EACCES;
2139 }
2140
2141 return 0;
2142 }
2143
/* Log the standard bucket/object request context before execute(). */
void RGWStatBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2148
/* RGWStatBucket::execute()
 * HEAD on a bucket: fetch the bucket's stats entry into `bucket`. */
void RGWStatBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  RGWUserBuckets buckets;
  bucket.bucket = s->bucket;
  buckets.add(bucket);
  map<string, RGWBucketEnt>& m = buckets.get_buckets();
  /* update_containers_stats() fills stats in place; a return of 0 here
   * means the bucket's stats entry was not found.
   * NOTE(review): -EEXIST looks odd as the "not found" error code --
   * presumably chosen for a specific client-facing mapping; confirm. */
  op_ret = store->update_containers_stats(m);
  if (! op_ret)
    op_ret = -EEXIST;
  if (op_ret > 0) {
    op_ret = 0;
    map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
    if (iter != m.end()) {
      bucket = iter->second;
    } else {
      op_ret = -EINVAL;
    }
  }
}
2173
2174 int RGWListBucket::verify_permission()
2175 {
2176 op_ret = get_params();
2177 if (op_ret < 0) {
2178 return op_ret;
2179 }
2180
2181 if (!verify_bucket_permission(s,
2182 list_versions ?
2183 rgw::IAM::s3ListBucketVersions :
2184 rgw::IAM::s3ListBucket)) {
2185 return -EACCES;
2186 }
2187
2188 return 0;
2189 }
2190
2191 int RGWListBucket::parse_max_keys()
2192 {
2193 if (!max_keys.empty()) {
2194 char *endptr;
2195 max = strtol(max_keys.c_str(), &endptr, 10);
2196 if (endptr) {
2197 while (*endptr && isspace(*endptr)) // ignore white space
2198 endptr++;
2199 if (*endptr) {
2200 return -EINVAL;
2201 }
2202 }
2203 } else {
2204 max = default_max;
2205 }
2206
2207 return 0;
2208 }
2209
/* Log the standard bucket/object request context before execute(). */
void RGWListBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2214
/* RGWListBucket::execute()
 * List objects (or object versions) in the bucket using the parameters
 * parsed during verify_permission()/get_params(); optionally refreshes
 * container stats first for Swift-style listings. */
void RGWListBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  if (need_container_stats()) {
    map<string, RGWBucketEnt> m;
    m[s->bucket.name] = RGWBucketEnt();
    m.begin()->second.bucket = s->bucket;
    /* update_containers_stats() returns > 0 when the entry was filled */
    op_ret = store->update_containers_stats(m);
    if (op_ret > 0) {
      bucket = m.begin()->second;
    }
  }

  RGWRados::Bucket target(store, s->bucket_info);
  if (shard_id >= 0) {
    target.set_shard_id(shard_id);
  }
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = prefix;
  list_op.params.delim = delimiter;
  list_op.params.marker = marker;
  list_op.params.end_marker = end_marker;
  list_op.params.list_versions = list_versions;

  op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
  if (op_ret >= 0) {
    next_marker = list_op.get_next_marker();
  }
}
2249
2250 int RGWGetBucketLogging::verify_permission()
2251 {
2252 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
2253 return -EACCES;
2254 }
2255
2256 return 0;
2257 }
2258
2259 int RGWGetBucketLocation::verify_permission()
2260 {
2261 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
2262 return -EACCES;
2263 }
2264
2265 return 0;
2266 }
2267
int RGWCreateBucket::verify_permission()
{
  /* This check is mostly needed for S3 that doesn't support account ACL.
   * Swift doesn't allow to delegate any permission to an anonymous user,
   * so it will become an early exit in such case. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (!verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  // Cross-tenant bucket creation is rejected.
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
                      << " (user_id.tenant=" << s->user->user_id.tenant
                      << " requested=" << s->bucket_tenant << ")"
                      << dendl;
    return -EACCES;
  }
  // A negative max_buckets disallows bucket creation entirely.
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  if (s->user->max_buckets) {
    // Enforce the per-user bucket quota. Listing is capped at max_buckets
    // entries, which is enough to tell whether the limit is reached.
    RGWUserBuckets buckets;
    string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, string(), s->user->max_buckets,
                                   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if ((int)buckets.count() >= s->user->max_buckets) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
2310
/* Relay a metadata-mutating request to the master zonegroup over the REST
 * master connection, and (when @jp is non-null) parse the JSON reply into
 * it. @forward_info, when supplied, replaces the current request's info. */
static int forward_request_to_master(struct req_state *s, obj_version *objv,
                                     RGWRados *store, bufferlist& in_data,
                                     JSONParser *jp, req_info *forward_info)
{
  if (!store->rest_master_conn) {
    ldout(s->cct, 0) << "rest connection is invalid" << dendl;
    return -EINVAL;
  }
  ldout(s->cct, 0) << "sending request to master zonegroup" << dendl;
  bufferlist response;
  string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
  int ret = store->rest_master_conn->forward(uid_str, (forward_info ? *forward_info : s->info),
                                             objv, MAX_REST_RESPONSE, &in_data, &response);
  if (ret < 0)
    return ret;

  ldout(s->cct, 20) << "response: " << response.c_str() << dendl;
  if (jp && !jp->parse(response.c_str(), response.length())) {
    ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl;
    return -EINVAL;
  }

  return 0;
}
2336
void RGWCreateBucket::pre_exec()
{
  // Shared pre-execution hook for bucket/object ops; runs before execute().
  rgw_bucket_object_pre_exec(s);
}
2341
2342 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2343 map<string, bufferlist>& out_attrs,
2344 map<string, bufferlist>& out_rmattrs)
2345 {
2346 for (const auto& kv : orig_attrs) {
2347 const string& name = kv.first;
2348
2349 /* Check if the attr is user-defined metadata item. */
2350 if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
2351 RGW_ATTR_META_PREFIX) == 0) {
2352 /* For the objects all existing meta attrs have to be removed. */
2353 out_rmattrs[name] = kv.second;
2354 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2355 out_attrs[name] = kv.second;
2356 }
2357 }
2358 }
2359
/* Fuse resource metadata based on the original attributes in @orig_attrs,
 * the set of _custom_ attribute names to remove in @rmattr_names and the
 * attributes in @out_attrs. Place the result in @out_attrs.
 *
 * NOTE: it's assumed that all special attrs already present in @out_attrs
 * will be preserved without any change. Special attributes are those whose
 * names start with RGW_ATTR_META_PREFIX. They're complementary to the
 * custom ones used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta
 * and so on. */
2368 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2369 const set<string>& rmattr_names,
2370 map<string, bufferlist>& out_attrs)
2371 {
2372 for (const auto& kv : orig_attrs) {
2373 const string& name = kv.first;
2374
2375 /* Check if the attr is user-defined metadata item. */
2376 if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
2377 RGW_ATTR_META_PREFIX) == 0) {
2378 /* For the buckets all existing meta attrs are preserved,
2379 except those that are listed in rmattr_names. */
2380 if (rmattr_names.find(name) != std::end(rmattr_names)) {
2381 const auto aiter = out_attrs.find(name);
2382
2383 if (aiter != std::end(out_attrs)) {
2384 out_attrs.erase(aiter);
2385 }
2386 } else {
2387 /* emplace() won't alter the map if the key is already present.
2388 * This behaviour is fully intensional here. */
2389 out_attrs.emplace(kv);
2390 }
2391 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2392 out_attrs[name] = kv.second;
2393 }
2394 }
2395 }
2396
2397
/* Copy the request's generic attributes into the xattr map, one bufferlist
 * per attribute. Note the `size() + 1`: the string's terminating NUL byte
 * is deliberately stored along with the value. */
static void populate_with_generic_attrs(const req_state * const s,
                                        map<string, bufferlist>& out_attrs)
{
  for (const auto& kv : s->generic_attrs) {
    bufferlist& attrbl = out_attrs[kv.first];
    const string& val = kv.second;
    attrbl.clear();  // overwrite, don't append to, a pre-existing entry
    attrbl.append(val.c_str(), val.size() + 1);
  }
}
2408
2409
/* Extract Swift quota attributes (max objects / max size) out of
 * @add_attrs and @rmattr_names into @quota, removing them from the attr
 * map so they are not stored as plain xattrs. Sets *quota_extracted when
 * any quota-related attribute was seen. Returns 0 or -EINVAL on a
 * malformed numeric value. */
static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
                                 const std::set<std::string>& rmattr_names,
                                 RGWQuotaInfo& quota,
                                 bool * quota_extracted = nullptr)
{
  bool extracted = false;

  /* Put new limit on max objects. */
  auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
  std::string err;
  if (std::end(add_attrs) != iter) {
    quota.max_objects =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  /* Put new limit on bucket (container) size. */
  iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
  if (iter != add_attrs.end()) {
    quota.max_size =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  // Removal requests reset the corresponding limit to "unlimited" (-1).
  for (const auto& name : rmattr_names) {
    /* Remove limit on max objects. */
    if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
      quota.max_objects = -1;
      extracted = true;
    }

    /* Remove limit on max bucket size. */
    if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
      quota.max_size = -1;
      extracted = true;
    }
  }

  /* Swift requires checking on raw usage instead of the 4 KiB rounded one. */
  quota.check_on_raw = true;
  // Quota is active only while at least one positive limit is set.
  quota.enabled = quota.max_size > 0 || quota.max_objects > 0;

  if (quota_extracted) {
    *quota_extracted = extracted;
  }

  return 0;
}
2466
2467
/* Extract Swift static-website attributes out of @add_attrs / @rmattr_names
 * into @ws_conf, removing them from the attr map so they are not stored as
 * plain xattrs. */
static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
                               const std::set<std::string>& rmattr_names,
                               RGWBucketWebsiteConf& ws_conf)
{
  std::string lstval;

  /* Let's define a mapping between each custom attribute and the memory where
   * attribute's value should be stored. The memory location is expressed by
   * a non-const reference. */
  const auto mapping = {
    std::make_pair(RGW_ATTR_WEB_INDEX,     std::ref(ws_conf.index_doc_suffix)),
    std::make_pair(RGW_ATTR_WEB_ERROR,     std::ref(ws_conf.error_doc)),
    std::make_pair(RGW_ATTR_WEB_LISTINGS,  std::ref(lstval)),
    std::make_pair(RGW_ATTR_WEB_LIST_CSS,  std::ref(ws_conf.listing_css_doc)),
    std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
  };

  for (const auto& kv : mapping) {
    const char * const key = kv.first;
    auto& target = kv.second;

    auto iter = add_attrs.find(key);

    if (std::end(add_attrs) != iter) {
      /* The "target" is a reference to ws_conf. */
      target = iter->second.c_str();
      add_attrs.erase(iter);
    }

    // A removal request clears the corresponding setting.
    if (rmattr_names.count(key)) {
      target = std::string();
    }
  }

  // The listings flag is a string attribute; only a (case-insensitive)
  // "true" enables listings.
  if (! lstval.empty()) {
    ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
  }
}
2506
2507
void RGWCreateBucket::execute()
{
  RGWAccessControlPolicy old_policy(s->cct);
  buffer::list aclbl;
  buffer::list corsbl;
  bool existed;
  string bucket_name;
  rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
  // NOTE(review): `obj` is constructed but never used below.
  rgw_raw_obj obj(store->get_zone_params().domain_root, bucket_name);
  obj_version objv, *pobjv = NULL;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  // Reject a location constraint that no zonegroup advertises.
  if (!location_constraint.empty() &&
      !store->has_zonegroup_api(location_constraint)) {
      ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
                       << " can't be found." << dendl;
      op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
      s->err.message = "The specified location-constraint is not valid";
      return;
  }

  // On a non-master zonegroup, the constraint must match the local API name.
  if (!store->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
      store->get_zonegroup().api_name != location_constraint) {
    ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
                     << " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")"
                     << dendl;
    op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
    s->err.message = "The specified location-constraint is not valid";
    return;
  }

  // The requested placement rule must exist in this zonegroup.
  const auto& zonegroup = store->get_zonegroup();
  if (!placement_rule.empty() &&
      !zonegroup.placement_targets.count(placement_rule)) {
    ldout(s->cct, 0) << "placement target (" << placement_rule << ")"
                     << " doesn't exist in the placement targets of zonegroup"
                     << " (" << store->get_zonegroup().api_name << ")" << dendl;
    op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
    s->err.message = "The specified placement target does not exist";
    return;
  }

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                  s->bucket_info, NULL, &s->bucket_attrs);
  if (op_ret < 0 && op_ret != -ENOENT)
    return;
  s->bucket_exists = (op_ret != -ENOENT);

  s->bucket_owner.set_id(s->user->user_id);
  s->bucket_owner.set_name(s->user->display_name);
  if (s->bucket_exists) {
    // Re-creating an existing bucket is only allowed for its owner.
    int r = get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
                                        s->bucket_attrs, &old_policy);
    if (r >= 0)  {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket;
  uint32_t *pmaster_num_shards;
  real_time creation_time;

  if (!store->is_meta_master()) {
    // Multisite: the master zone creates the bucket metadata; adopt the
    // versions/creation time it returns so all zones agree.
    JSONParser jp;
    op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
    if (op_ret < 0) {
      return;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);
    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl;
    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = NULL;
    pmaster_num_shards = NULL;
  }

  // System requests may pin an explicit zonegroup; otherwise use the local one.
  string zonegroup_id;

  if (s->system_request) {
    zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
    if (zonegroup_id.empty()) {
      zonegroup_id = store->get_zonegroup().get_id();
    }
  } else {
    zonegroup_id = store->get_zonegroup().get_id();
  }

  if (s->bucket_exists) {
    // An existing bucket may only be "re-created" with the same placement.
    string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user), zonegroup_id,
                                            placement_rule,
                                            &selected_placement_rule, nullptr);
    if (selected_placement_rule != s->bucket_info.placement_rule) {
      op_ret = -EEXIST;
      return;
    }
  }

  /* Encode special metadata first as we're using std::map::emplace under
   * the hood. This method will add the new items only if the map doesn't
   * contain such keys yet. */
  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  if (has_cors) {
    cors_config.encode(corsbl);
    emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
  }

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;
  if (need_metadata_upload()) {
    /* It's supposed that following functions WILL NOT change any special
     * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
    rgw_get_request_metadata(s->cct, s->info, attrs, false);
    prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
    populate_with_generic_attrs(s, attrs);

    op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
    if (op_ret < 0) {
      return;
    } else {
      pquota_info = &quota_info;
    }

    /* Web site of Swift API. */
    filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
    s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
  }

  s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  s->bucket.name = s->bucket_name;

  /* Handle updates of the metadata for Swift's object versioning. */
  if (swift_ver_location) {
    s->bucket_info.swift_ver_location = *swift_ver_location;
    s->bucket_info.swift_versioning = (! swift_ver_location->empty());
  }

  op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
                                placement_rule, s->bucket_info.swift_ver_location,
                                pquota_info, attrs,
                                info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below.  this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;

  if (op_ret && op_ret != -EEXIST)
    return;

  existed = (op_ret == -EEXIST);

  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      return;
    }
    s->bucket = info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket,
                           info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
                               s->bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    op_ret = -ERR_BUCKET_EXISTS;
  }

  if (need_metadata_upload() && existed) {
    /* OK, it looks we lost race with another request. As it's required to
     * handle metadata fusion and upload, the whole operation becomes very
     * similar in nature to PutMetadataBucket. However, as the attrs may
     * changed in the meantime, we have to refresh. */
    short tries = 0;
    do {
      // Note: this obj_ctx shadows the one declared earlier in this function.
      RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
      RGWBucketInfo binfo;
      map<string, bufferlist> battrs;

      op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                      binfo, nullptr, &battrs);
      if (op_ret < 0) {
        return;
      } else if (binfo.owner.compare(s->user->user_id) != 0) {
        /* New bucket doesn't belong to the account we're operating on. */
        op_ret = -EEXIST;
        return;
      } else {
        s->bucket_info = binfo;
        s->bucket_attrs = battrs;
      }

      attrs.clear();

      rgw_get_request_metadata(s->cct, s->info, attrs, false);
      prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
      populate_with_generic_attrs(s, attrs);
      op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
      if (op_ret < 0) {
        return;
      }

      /* Handle updates of the metadata for Swift's object versioning. */
      if (swift_ver_location) {
        s->bucket_info.swift_ver_location = *swift_ver_location;
        s->bucket_info.swift_versioning = (! swift_ver_location->empty());
      }

      /* Web site of Swift API. */
      filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
      s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

      /* This will also set the quota on the bucket. */
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                    &s->bucket_info.objv_tracker);
    } while (op_ret == -ECANCELED && tries++ < 20);  // retry on racing updates

    /* Restore the proper return code. */
    if (op_ret >= 0) {
      op_ret = -ERR_BUCKET_EXISTS;
    }
  }
}
2764
2765 int RGWDeleteBucket::verify_permission()
2766 {
2767 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucket)) {
2768 return -EACCES;
2769 }
2770
2771 return 0;
2772 }
2773
void RGWDeleteBucket::pre_exec()
{
  // Shared pre-execution hook for bucket/object ops; runs before execute().
  rgw_bucket_object_pre_exec(s);
}
2778
2779 void RGWDeleteBucket::execute()
2780 {
2781 op_ret = -EINVAL;
2782
2783 if (s->bucket_name.empty())
2784 return;
2785
2786 if (!s->bucket_exists) {
2787 ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
2788 op_ret = -ERR_NO_SUCH_BUCKET;
2789 return;
2790 }
2791 RGWObjVersionTracker ot;
2792 ot.read_version = s->bucket_info.ep_objv;
2793
2794 if (s->system_request) {
2795 string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
2796 string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
2797 if (!tag.empty()) {
2798 ot.read_version.tag = tag;
2799 uint64_t ver;
2800 string err;
2801 ver = strict_strtol(ver_str.c_str(), 10, &err);
2802 if (!err.empty()) {
2803 ldout(s->cct, 0) << "failed to parse ver param" << dendl;
2804 op_ret = -EINVAL;
2805 return;
2806 }
2807 ot.read_version.ver = ver;
2808 }
2809 }
2810
2811 op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
2812 if ( op_ret < 0) {
2813 ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
2814 }
2815
2816 op_ret = store->check_bucket_empty(s->bucket_info);
2817 if (op_ret < 0) {
2818 return;
2819 }
2820
2821 if (!store->is_meta_master()) {
2822 bufferlist in_data;
2823 op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
2824 NULL);
2825 if (op_ret < 0) {
2826 if (op_ret == -ENOENT) {
2827 /* adjust error, we want to return with NoSuchBucket and not
2828 * NoSuchKey */
2829 op_ret = -ERR_NO_SUCH_BUCKET;
2830 }
2831 return;
2832 }
2833 }
2834
2835 string prefix, delimiter;
2836
2837 if (s->prot_flags & RGW_REST_SWIFT) {
2838 string path_args;
2839 path_args = s->info.args.get("path");
2840 if (!path_args.empty()) {
2841 if (!delimiter.empty() || !prefix.empty()) {
2842 op_ret = -EINVAL;
2843 return;
2844 }
2845 prefix = path_args;
2846 delimiter="/";
2847 }
2848 }
2849
2850 op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);
2851
2852 if (op_ret < 0) {
2853 return;
2854 }
2855
2856 op_ret = store->delete_bucket(s->bucket_info, ot, false);
2857
2858 if (op_ret == -ECANCELED) {
2859 // lost a race, either with mdlog sync or another delete bucket operation.
2860 // in either case, we've already called rgw_unlink_bucket()
2861 op_ret = 0;
2862 return;
2863 }
2864
2865 if (op_ret == 0) {
2866 op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
2867 s->bucket.name, false);
2868 if (op_ret < 0) {
2869 ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
2870 << dendl;
2871 }
2872 }
2873
2874 if (op_ret < 0) {
2875 return;
2876 }
2877
2878
2879 }
2880
int RGWPutObj::verify_permission()
{
  if (copy_source) {
    // This PUT is a server-side copy: the caller must also be permitted to
    // read the copy source object.
    RGWAccessControlPolicy cs_acl(s->cct);
    optional<Policy> policy;
    map<string, bufferlist> cs_attrs;
    rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
    rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);

    rgw_obj obj(cs_bucket, cs_object);
    store->set_atomic(s->obj_ctx, obj);
    store->set_prefetch_data(s->obj_ctx, obj);

    /* check source object permissions */
    if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, policy,
                        cs_bucket, cs_object) < 0) {
      return -EACCES;
    }

    /* admin request overrides permission checks */
    if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
      if (policy) {
        // Bucket policy is evaluated first; the ACL is consulted only when
        // the policy neither allows nor denies (Effect::Pass).
        auto e = policy->eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                           RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  // Destination check: an explicit policy Allow short-circuits the ACL
  // check; an explicit Deny rejects; Pass falls through to the bucket ACL.
  if (s->iam_policy) {
    auto e = s->iam_policy->eval(s->env, *s->auth.identity,
                                 rgw::IAM::s3PutObject,
                                 rgw_obj(s->bucket, s->object));
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    }
  }

  if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
2940
// Expose a pointer to the processor's internal multipart descriptor.
void RGWPutObjProcessor_Multipart::get_mp(RGWMPObj** _mp){
  *_mp = &mp;
}
2944
// Prepare the processor for uploading one part of a multipart upload:
// validates uploadId/partNumber from the request args, configures the
// manifest's part/stripe rule and naming prefix, and initializes the
// head/target objects. Returns 0 on success or a negative error code.
int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand)
{
  string oid = obj_str;
  upload_id = s->info.args.get("uploadId");
  if (!oid_rand) {
    mp.init(oid, upload_id);
  } else {
    // When supplied, oid_rand replaces the upload id in the shadow-object
    // naming scheme.
    mp.init(oid, upload_id, *oid_rand);
  }

  part_num = s->info.args.get("partNumber");
  if (part_num.empty()) {
    ldout(s->cct, 10) << "part number is empty" << dendl;
    return -EINVAL;
  }

  // Strictly validate the part number (strict_strtol reports any junk via err).
  string err;
  uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);

  if (!err.empty()) {
    ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
    return -EINVAL;
  }

  // Part objects live under "<oid>.<upload_id or oid_rand>".
  string upload_prefix = oid + ".";

  if (!oid_rand) {
    upload_prefix.append(upload_id);
  } else {
    upload_prefix.append(*oid_rand);
  }

  rgw_obj target_obj;
  target_obj.init(bucket, oid);

  manifest.set_prefix(upload_prefix);

  manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, num);

  int r = manifest_gen.create_begin(store->ctx(), &manifest, s->bucket_info.placement_rule, bucket, target_obj);
  if (r < 0) {
    return r;
  }

  cur_obj = manifest_gen.get_cur_obj(store);
  rgw_raw_obj_to_obj(bucket, cur_obj, &head_obj);
  // Keep bucket-index sharding keyed on the final object name.
  head_obj.index_hash_source = obj_str;

  r = prepare_init(store, NULL);
  if (r < 0) {
    return r;
  }

  return 0;
}
3000
// Finalize one multipart part: flush pending data, write the part's head
// object metadata, and record the part's info (etag, sizes, manifest,
// compression) in the multipart meta object's omap under a "part.N" key.
int RGWPutObjProcessor_Multipart::do_complete(size_t accounted_size,
                                              const string& etag,
                                              real_time *mtime, real_time set_mtime,
                                              map<string, bufferlist>& attrs,
                                              real_time delete_at,
                                              const char *if_match,
                                              const char *if_nomatch, const string *user_data, rgw_zone_set *zones_trace)
{
  complete_writing_data();

  // Parts are never versioned themselves; versioning applies to the final
  // assembled object.
  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, head_obj);
  op_target.set_versioning_disabled(true);
  RGWRados::Object::Write head_obj_op(&op_target);

  head_obj_op.meta.set_mtime = set_mtime;
  head_obj_op.meta.mtime = mtime;
  head_obj_op.meta.owner = s->owner.get_id();
  head_obj_op.meta.delete_at = delete_at;
  head_obj_op.meta.zones_trace = zones_trace;
  head_obj_op.meta.modify_tail = true;

  int r = head_obj_op.write_meta(obj_len, accounted_size, attrs);
  if (r < 0)
    return r;

  bufferlist bl;
  RGWUploadPartInfo info;
  string p = "part.";
  bool sorted_omap = is_v2_upload_id(upload_id);

  if (sorted_omap) {
    // v2 upload ids use zero-padded part keys ("part.00000001") so the
    // omap keys sort in numeric order.
    string err;
    int part_num_int = strict_strtol(part_num.c_str(), 10, &err);
    if (!err.empty()) {
      dout(10) << "bad part number specified: " << part_num << dendl;
      return -EINVAL;
    }
    char buf[32];
    snprintf(buf, sizeof(buf), "%08d", part_num_int);
    p.append(buf);
  } else {
    p.append(part_num);
  }
  // NOTE(review): atoi() performs no error checking; part_num was
  // presumably validated in prepare() — confirm before relying on it here.
  info.num = atoi(part_num.c_str());
  info.etag = etag;
  info.size = obj_len;
  info.accounted_size = accounted_size;
  info.modified = real_clock::now();
  info.manifest = manifest;

  bool compressed;
  r = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
  if (r < 0) {
    dout(1) << "cannot get compression info" << dendl;
    return r;
  }

  ::encode(info, bl);

  // The part record goes into the multipart meta object's omap.
  string multipart_meta_obj = mp.get_meta();

  rgw_obj meta_obj;
  meta_obj.init_ns(bucket, multipart_meta_obj, mp_ns);
  meta_obj.set_in_extra_data(true);

  rgw_raw_obj raw_meta_obj;

  store->obj_to_raw(s->bucket_info.placement_rule, meta_obj, &raw_meta_obj);

  r = store->omap_set(raw_meta_obj, p, bl);

  return r;
}
3074
3075 RGWPutObjProcessor *RGWPutObj::select_processor(RGWObjectCtx& obj_ctx, bool *is_multipart)
3076 {
3077 RGWPutObjProcessor *processor;
3078
3079 bool multipart = s->info.args.exists("uploadId");
3080
3081 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3082
3083 if (!multipart) {
3084 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3085 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_olh_epoch(olh_epoch);
3086 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_version_id(version_id);
3087 } else {
3088 processor = new RGWPutObjProcessor_Multipart(obj_ctx, s->bucket_info, part_size, s);
3089 }
3090
3091 if (is_multipart) {
3092 *is_multipart = multipart;
3093 }
3094
3095 return processor;
3096 }
3097
void RGWPutObj::dispose_processor(RGWPutObjDataProcessor *processor)
{
  // Processors are heap-allocated by select_processor(); release them here.
  delete processor;
}
3102
void RGWPutObj::pre_exec()
{
  // Shared pre-execution hook for bucket/object ops; runs before execute().
  rgw_bucket_object_pre_exec(s);
}
3107
// Adapter callback: routes data produced by a RGWGetDataCB-driven read
// back into the owning RGWPutObj (see RGWPutObj::get_data / get_data_cb).
class RGWPutObj_CB : public RGWGetDataCB
{
  RGWPutObj *op;  // non-owning back-pointer to the operation
public:
  RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
  ~RGWPutObj_CB() override {}

  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
    return op->get_data_cb(bl, bl_ofs, bl_len);
  }
};
3119
3120 int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3121 {
3122 bufferlist bl_tmp;
3123 bl.copy(bl_ofs, bl_len, bl_tmp);
3124
3125 bl_aux.append(bl_tmp);
3126
3127 return bl_len;
3128 }
3129
3130 int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3131 {
3132 RGWPutObj_CB cb(this);
3133 RGWGetDataCB* filter = &cb;
3134 boost::optional<RGWGetObj_Decompress> decompress;
3135 std::unique_ptr<RGWGetDataCB> decrypt;
3136 RGWCompressionInfo cs_info;
3137 map<string, bufferlist> attrs;
3138 map<string, bufferlist>::iterator attr_iter;
3139 int ret = 0;
3140
3141 uint64_t obj_size;
3142 int64_t new_ofs, new_end;
3143
3144 new_ofs = fst;
3145 new_end = lst;
3146
3147 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3148 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3149
3150 RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3151 RGWRados::Object::Read read_op(&op_target);
3152 read_op.params.obj_size = &obj_size;
3153 read_op.params.attrs = &attrs;
3154
3155 ret = read_op.prepare();
3156 if (ret < 0)
3157 return ret;
3158
3159 bool need_decompress;
3160 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3161 if (op_ret < 0) {
3162 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
3163 return -EIO;
3164 }
3165
3166 bool partial_content = true;
3167 if (need_decompress)
3168 {
3169 obj_size = cs_info.orig_size;
3170 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3171 filter = &*decompress;
3172 }
3173
3174 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3175 op_ret = this->get_decrypt_filter(&decrypt,
3176 filter,
3177 attrs,
3178 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3179 if (decrypt != nullptr) {
3180 filter = decrypt.get();
3181 }
3182 if (op_ret < 0) {
3183 return ret;
3184 }
3185
3186 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3187 if (ret < 0)
3188 return ret;
3189
3190 filter->fixup_range(new_ofs, new_end);
3191 ret = read_op.iterate(new_ofs, new_end, filter);
3192
3193 if (ret >= 0)
3194 ret = filter->flush();
3195
3196 bl.claim_append(bl_aux);
3197
3198 return ret;
3199 }
3200
3201 // special handling for compression type = "random" with multipart uploads
3202 static CompressorRef get_compressor_plugin(const req_state *s,
3203 const std::string& compression_type)
3204 {
3205 if (compression_type != "random") {
3206 return Compressor::create(s->cct, compression_type);
3207 }
3208
3209 bool is_multipart{false};
3210 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3211
3212 if (!is_multipart) {
3213 return Compressor::create(s->cct, compression_type);
3214 }
3215
3216 // use a hash of the multipart upload id so all parts use the same plugin
3217 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3218 if (alg == Compressor::COMP_ALG_NONE) {
3219 return nullptr;
3220 }
3221 return Compressor::create(s->cct, alg);
3222 }
3223
/* Main data path for PUT object (S3 and Swift).
 *
 * Reads the request body (or a copy-source range) in chunks, feeds it through
 * an optional encrypt-or-compress filter stack into a put-obj processor, then
 * finalizes ETag/MD5, ACL, metadata and compression attrs before completing
 * the write. Errors set op_ret and jump to done: for cleanup. */
void RGWPutObj::execute()
{
  RGWPutObjProcessor *processor = NULL;
  RGWPutObjDataProcessor *filter = nullptr;
  std::unique_ptr<RGWPutObjDataProcessor> encrypt;
  char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
  char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  MD5 hash;
  bufferlist bl, aclbl, bs;
  int len;
  map<string, string>::iterator iter;
  bool multipart;

  off_t fst;
  off_t lst;
  const auto& compression_type = store->get_zone_params().get_compression_type(
      s->bucket_info.placement_rule);
  CompressorRef plugin;
  boost::optional<RGWPutObj_Compress> compressor;

  /* DLO/SLO manifests get their ETag computed differently (see below), so we
   * skip per-chunk hashing unless a Content-MD5 was supplied. */
  bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
  perfcounter->inc(l_rgw_put);
  op_ret = -EINVAL;
  if (s->object.empty()) {
    goto done;
  }

  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_system_versioning_params() returned ret="
		      << op_ret << dendl;
    goto done;
  }

  /* Decode the client-supplied base64 Content-MD5 to hex so it can be
   * compared against our computed digest after the upload. */
  if (supplied_md5_b64) {
    need_calc_md5 = true;

    ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
    op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
			  supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
    ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
    if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
      op_ret = -ERR_INVALID_DIGEST;
      goto done;
    }

    buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
    ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
  }

  if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
                            we also check sizes at the end anyway */
    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
				user_quota, bucket_quota, s->content_length);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl;
      goto done;
    }
    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
      goto done;
    }
  }

  /* NOTE: a supplied ETag (copy-part path) reuses the supplied_md5 buffer;
   * the matching comparison happens after the ETag is finalized. */
  if (supplied_etag) {
    strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
    supplied_md5[sizeof(supplied_md5) - 1] = '\0';
  }

  processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);

  // no filters by default
  filter = processor;

  /* Handle object versioning of Swift API. */
  if (! multipart) {
    rgw_obj obj(s->bucket, s->object);
    op_ret = store->swift_versioning_copy(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                                          s->bucket_owner.get_id(),
                                          s->bucket_info,
                                          obj);
    if (op_ret < 0) {
      goto done;
    }
  }

  op_ret = processor->prepare(store, NULL);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret
		      << dendl;
    goto done;
  }

  fst = copy_source_range_fst;
  lst = copy_source_range_lst;

  /* Build the filter chain: encryption wins over compression — never both,
   * since compressing ciphertext is useless. */
  op_ret = get_encrypt_filter(&encrypt, filter);
  if (op_ret < 0) {
    goto done;
  }
  if (encrypt != nullptr) {
    filter = encrypt.get();
  } else {
    //no encryption, we can try compression
    if (compression_type != "none") {
      plugin = get_compressor_plugin(s, compression_type);
      if (!plugin) {
        ldout(s->cct, 1) << "Cannot load plugin for compression type "
            << compression_type << dendl;
      } else {
        compressor.emplace(s->cct, plugin, filter);
        filter = &*compressor;
      }
    }
  }

  do {
    bufferlist data;
    if (fst > lst)
      break;
    if (!copy_source) {
      /* regular upload: read next chunk from the client */
      len = get_data(data);
    } else {
      /* server-side copy: read the next chunk of the source range */
      uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
      op_ret = get_data(fst, cur_lst, data);
      if (op_ret < 0)
        goto done;
      len = data.length();
      s->content_length += len;
      fst += len;
    }
    if (len < 0) {
      op_ret = len;
      goto done;
    }

    if (need_calc_md5) {
      hash.Update((const byte *)data.c_str(), data.length());
    }

    /* update torrrent */
    torrent.update(data);

    /* do we need this operation to be synchronous? if we're dealing with an object with immutable
     * head, e.g., multipart object we need to make sure we're the first one writing to this object
     */
    bool need_to_wait = (ofs == 0) && multipart;

    bufferlist orig_data;

    /* keep a copy: put_data_and_throttle may consume 'data', and we need it
     * again if the write has to be restarted below */
    if (need_to_wait) {
      orig_data = data;
    }

    op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait);
    if (op_ret < 0) {
      if (!need_to_wait || op_ret != -EEXIST) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
			  << op_ret << dendl;
        goto done;
      }
      /* need_to_wait == true and op_ret == -EEXIST */
      ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl;

      /* restore original data */
      data.swap(orig_data);

      /* restart processing with different oid suffix */

      dispose_processor(processor);
      processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
      filter = processor;

      string oid_rand;
      char buf[33];
      gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
      oid_rand.append(buf);

      op_ret = processor->prepare(store, &oid_rand);
      if (op_ret < 0) {
        ldout(s->cct, 0) << "ERROR: processor->prepare() returned "
			 << op_ret << dendl;
        goto done;
      }

      /* rebuild the filter chain on top of the new processor */
      op_ret = get_encrypt_filter(&encrypt, filter);
      if (op_ret < 0) {
        goto done;
      }
      if (encrypt != nullptr) {
        filter = encrypt.get();
      } else {
        if (compressor) {
          /* re-emplace so the compressor feeds the new downstream filter */
          compressor.emplace(s->cct, plugin, filter);
          filter = &*compressor;
        }
      }
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        goto done;
      }
    }

    ofs += len;
  } while (len > 0);

  /* flush whatever the filters have buffered */
  {
    bufferlist flush;
    op_ret = put_data_and_throttle(filter, flush, ofs, false);
    if (op_ret < 0) {
      goto done;
    }
  }

  /* a short non-chunked body means the client hung up mid-upload */
  if (!chunked_upload && ofs != s->content_length) {
    op_ret = -ERR_REQUEST_TIMEOUT;
    goto done;
  }
  s->obj_size = ofs;

  perfcounter->inc(l_rgw_put_b, s->obj_size);

  op_ret = do_aws4_auth_completion();
  if (op_ret < 0) {
    goto done;
  }

  /* re-check quota with the actual size (required for chunked uploads) */
  op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
			      user_quota, bucket_quota, s->obj_size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
    goto done;
  }

  hash.Final(m);

  /* record compression metadata so reads can decompress */
  if (compressor && compressor->is_compressed()) {
    bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
    ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION
        << " with type=" << cs_info.compression_type
        << ", orig_size=" << cs_info.orig_size
        << ", blocks=" << cs_info.blocks.size() << dendl;
  }

  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  etag = calc_md5;

  if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
    op_ret = -ERR_BAD_DIGEST;
    goto done;
  }

  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  /* DLO: the ETag is recomputed over the manifest, not the (empty) body */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      goto done;
    }
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  /* SLO: likewise, hash the raw manifest data into the ETag */
  if (slo_info) {
    bufferlist manifest_bl;
    ::encode(*slo_info, manifest_bl);
    emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));

    hash.Update((byte *)slo_info->raw_data, slo_info->raw_data_len);
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  if (supplied_etag && etag.compare(supplied_etag) != 0) {
    op_ret = -ERR_UNPROCESSABLE_ENTITY;
    goto done;
  }
  bl.append(etag.c_str(), etag.size() + 1);
  emplace_attr(RGW_ATTR_ETAG, std::move(bl));

  populate_with_generic_attrs(s, attrs);
  rgw_get_request_metadata(s->cct, s->info, attrs);
  encode_delete_at_attr(delete_at, attrs);
  encode_obj_tags_attr(obj_tags.get(), attrs);

  /* Add a custom metadata to expose the information whether an object
   * is an SLO or not. Appending the attribute must be performed AFTER
   * processing any input from user in order to prohibit overwriting. */
  if (slo_info) {
    bufferlist slo_userindicator_bl;
    slo_userindicator_bl.append("True", 4);
    emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
  }

  op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
                               (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
                               (user_data.empty() ? nullptr : &user_data));

  /* produce torrent */
  if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
  {
    torrent.init(s, store);
    torrent.set_create_date(mtime);
    op_ret = torrent.complete();
    if (0 != op_ret)
    {
      ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
      goto done;
    }
  }

done:
  dispose_processor(processor);
  perfcounter->tinc(l_rgw_put_lat,
		    (ceph_clock_now() - s->time));
}
3569
/* POST uploads defer permission checking to execute(): the IAM policy /
 * bucket ACL evaluation there runs after the multipart form has been
 * parsed, so nothing can be decided at this stage. */
int RGWPostObj::verify_permission()
{
  return 0;
}
3574 /*
3575 RGWPutObjProcessor *RGWPostObj::select_processor(RGWObjectCtx& obj_ctx)
3576 {
3577 RGWPutObjProcessor *processor;
3578
3579 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3580
3581 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3582
3583 return processor;
3584 }
3585
3586 void RGWPostObj::dispose_processor(RGWPutObjDataProcessor *processor)
3587 {
3588 delete processor;
3589 }
3590 */
/* Standard pre-execution hook for bucket/object ops (request logging setup). */
void RGWPostObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3595
3596 void RGWPostObj::execute()
3597 {
3598 RGWPutObjDataProcessor *filter = nullptr;
3599 boost::optional<RGWPutObj_Compress> compressor;
3600 CompressorRef plugin;
3601 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3602
3603 /* Read in the data from the POST form. */
3604 op_ret = get_params();
3605 if (op_ret < 0) {
3606 return;
3607 }
3608
3609 op_ret = verify_params();
3610 if (op_ret < 0) {
3611 return;
3612 }
3613
3614 if (s->iam_policy) {
3615 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
3616 rgw::IAM::s3PutObject,
3617 rgw_obj(s->bucket, s->object));
3618 if (e == Effect::Deny) {
3619 op_ret = -EACCES;
3620 return;
3621 } else if (e == Effect::Pass && !verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3622 op_ret = -EACCES;
3623 return;
3624 }
3625 } else if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3626 op_ret = -EACCES;
3627 return;
3628 }
3629
3630 /* Start iteration over data fields. It's necessary as Swift's FormPost
3631 * is capable to handle multiple files in single form. */
3632 do {
3633 std::unique_ptr<RGWPutObjDataProcessor> encrypt;
3634 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3635 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3636 MD5 hash;
3637 ceph::buffer::list bl, aclbl;
3638 int len = 0;
3639
3640 op_ret = store->check_quota(s->bucket_owner.get_id(),
3641 s->bucket,
3642 user_quota,
3643 bucket_quota,
3644 s->content_length);
3645 if (op_ret < 0) {
3646 return;
3647 }
3648
3649 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3650 if (op_ret < 0) {
3651 return;
3652 }
3653
3654 if (supplied_md5_b64) {
3655 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3656 ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3657 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3658 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3659 ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
3660 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3661 op_ret = -ERR_INVALID_DIGEST;
3662 return;
3663 }
3664
3665 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
3666 ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
3667 }
3668
3669 RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx),
3670 s->bucket_info,
3671 s->bucket,
3672 get_current_filename(),
3673 /* part size */
3674 s->cct->_conf->rgw_obj_stripe_size,
3675 s->req_id,
3676 s->bucket_info.versioning_enabled());
3677 /* No filters by default. */
3678 filter = &processor;
3679
3680 op_ret = processor.prepare(store, nullptr);
3681 if (op_ret < 0) {
3682 return;
3683 }
3684
3685 op_ret = get_encrypt_filter(&encrypt, filter);
3686 if (op_ret < 0) {
3687 return;
3688 }
3689 if (encrypt != nullptr) {
3690 filter = encrypt.get();
3691 } else {
3692 const auto& compression_type = store->get_zone_params().get_compression_type(
3693 s->bucket_info.placement_rule);
3694 if (compression_type != "none") {
3695 plugin = Compressor::create(s->cct, compression_type);
3696 if (!plugin) {
3697 ldout(s->cct, 1) << "Cannot load plugin for compression type "
3698 << compression_type << dendl;
3699 } else {
3700 compressor.emplace(s->cct, plugin, filter);
3701 filter = &*compressor;
3702 }
3703 }
3704 }
3705
3706 bool again;
3707 do {
3708 ceph::bufferlist data;
3709 len = get_data(data, again);
3710
3711 if (len < 0) {
3712 op_ret = len;
3713 return;
3714 }
3715
3716 if (!len) {
3717 break;
3718 }
3719
3720 hash.Update((const byte *)data.c_str(), data.length());
3721 op_ret = put_data_and_throttle(filter, data, ofs, false);
3722
3723 ofs += len;
3724
3725 if (ofs > max_len) {
3726 op_ret = -ERR_TOO_LARGE;
3727 return;
3728 }
3729 } while (again);
3730
3731 {
3732 bufferlist flush;
3733 op_ret = put_data_and_throttle(filter, flush, ofs, false);
3734 }
3735
3736 if (len < min_len) {
3737 op_ret = -ERR_TOO_SMALL;
3738 return;
3739 }
3740
3741 s->obj_size = ofs;
3742
3743 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
3744 op_ret = -ERR_BAD_DIGEST;
3745 return;
3746 }
3747
3748 op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
3749 user_quota, bucket_quota, s->obj_size);
3750 if (op_ret < 0) {
3751 return;
3752 }
3753
3754 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3755 if (op_ret < 0) {
3756 return;
3757 }
3758
3759 hash.Final(m);
3760 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
3761
3762 etag = calc_md5;
3763 bl.append(etag.c_str(), etag.size() + 1);
3764 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
3765
3766 policy.encode(aclbl);
3767 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3768
3769 const std::string content_type = get_current_content_type();
3770 if (! content_type.empty()) {
3771 ceph::bufferlist ct_bl;
3772 ct_bl.append(content_type.c_str(), content_type.size() + 1);
3773 emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
3774 }
3775
3776 if (compressor && compressor->is_compressed()) {
3777 ceph::bufferlist tmp;
3778 RGWCompressionInfo cs_info;
3779 cs_info.compression_type = plugin->get_type_name();
3780 cs_info.orig_size = s->obj_size;
3781 cs_info.blocks = move(compressor->get_compression_blocks());
3782 ::encode(cs_info, tmp);
3783 emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
3784 }
3785
3786 op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(),
3787 attrs, (delete_at ? *delete_at : real_time()));
3788 } while (is_next_file_to_upload());
3789 }
3790
3791
3792 void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
3793 const set<string>& rmattr_names,
3794 map<int, string>& temp_url_keys)
3795 {
3796 map<string, bufferlist>::iterator iter;
3797
3798 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
3799 if (iter != add_attrs.end()) {
3800 temp_url_keys[0] = iter->second.c_str();
3801 add_attrs.erase(iter);
3802 }
3803
3804 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
3805 if (iter != add_attrs.end()) {
3806 temp_url_keys[1] = iter->second.c_str();
3807 add_attrs.erase(iter);
3808 }
3809
3810 for (const string& name : rmattr_names) {
3811 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
3812 temp_url_keys[0] = string();
3813 }
3814 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
3815 temp_url_keys[1] = string();
3816 }
3817 }
3818 }
3819
/* Gather and pre-filter everything needed for an account-metadata update.
 *
 * Runs before verify_permission(): it extracts TempURL keys and quota info
 * from the request so the permission check can decide whether FULL_CONTROL
 * or reseller-admin rights are required. Returns 0 or a negative error. */
int RGWPutMetadataAccount::init_processing()
{
  /* First, go to the base class. At the time of writing the method was
   * responsible only for initializing the quota. This isn't necessary
   * here as we are touching metadata only. I'm putting this call only
   * for the future. */
  op_ret = RGWOp::init_processing();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }

  /* load the user's current xattrs so add/remove can be merged against them */
  op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
                                     &acct_op_tracker);
  if (op_ret < 0) {
    return op_ret;
  }

  if (has_policy) {
    bufferlist acl_bl;
    policy.encode(acl_bl);
    attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
  }

  rgw_get_request_metadata(s->cct, s->info, attrs, false);
  prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* Try extract the TempURL-related stuff now to allow verify_permission
   * evaluate whether we need FULL_CONTROL or not. */
  filter_out_temp_url(attrs, rmattr_names, temp_url_keys);

  /* The same with quota except a client needs to be reseller admin. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
                                 &new_quota_extracted);
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
3865
/* Permission gate for account-metadata updates.
 *
 * The guard order is deliberate: anonymous and non-writers get -EACCES,
 * TempURL key changes additionally need FULL_CONTROL (-EPERM otherwise),
 * and quota changes always "fail" here so that only the system-user /
 * reseller-admin override path in rgw_process.cc can let them through. */
int RGWPutMetadataAccount::verify_permission()
{
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (!verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Altering TempURL keys requires FULL_CONTROL. */
  if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
    return -EPERM;
  }

  /* We are failing this intensionally to allow system user/reseller admin
   * override in rgw_process.cc. This is the way to specify a given RGWOp
   * expect extra privileges. */
  if (new_quota_extracted) {
    return -EACCES;
  }

  return 0;
}
3890
/* Apply the account-metadata update prepared in init_processing():
 * merge TempURL keys and any extracted quota into a freshly-loaded copy
 * of the user info, then persist it together with the filtered attrs. */
void RGWPutMetadataAccount::execute()
{
  /* Params have been extracted earlier. See init_processing(). */
  RGWUserInfo new_uinfo;
  op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
                                    &acct_op_tracker);
  if (op_ret < 0) {
    return;
  }

  /* Handle the TempURL-related stuff. */
  if (!temp_url_keys.empty()) {
    for (auto& pair : temp_url_keys) {
      new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
    }
  }

  /* Handle the quota extracted at the verify_permission step. */
  if (new_quota_extracted) {
    new_uinfo.user_quota = std::move(new_quota);
  }

  /* We are passing here the current (old) user info to allow the function
   * optimize-out some operations. */
  op_ret = rgw_store_user_info(store, new_uinfo, s->user,
                               &acct_op_tracker, real_time(), false, &attrs);
}
3918
3919 int RGWPutMetadataBucket::verify_permission()
3920 {
3921 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3922 return -EACCES;
3923 }
3924
3925 return 0;
3926 }
3927
/* Standard pre-execution hook for bucket/object ops (request logging setup). */
void RGWPutMetadataBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3932
/* Update bucket metadata (Swift POST container semantics): ACL, CORS,
 * generic/user attrs, container quota, versioning location and static
 * website config are merged and persisted in a single attr write. */
void RGWPutMetadataBucket::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_get_request_metadata(s->cct, s->info, attrs, false);

  /* a placement rule cannot be changed after bucket creation */
  if (!placement_rule.empty() &&
      placement_rule != s->bucket_info.placement_rule) {
    op_ret = -EEXIST;
    return;
  }

  /* Encode special metadata first as we're using std::map::emplace under
   * the hood. This method will add the new items only if the map doesn't
   * contain such keys yet. */
  if (has_policy) {
    if (s->dialect.compare("swift") == 0) {
      /* Swift merges the incoming ACL with the existing one instead of
       * replacing it wholesale. */
      auto old_policy = \
        static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
      auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
      new_policy->filter_merge(policy_rw_mask, old_policy);
      policy = *new_policy;
    }
    buffer::list bl;
    policy.encode(bl);
    emplace_attr(RGW_ATTR_ACL, std::move(bl));
  }

  if (has_cors) {
    buffer::list bl;
    cors_config.encode(bl);
    emplace_attr(RGW_ATTR_CORS, std::move(bl));
  }

  /* It's supposed that following functions WILL NOT change any special
   * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
  prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* According to the Swift's behaviour and its container_quota WSGI middleware
   * implementation: anyone with write permissions is able to set the bucket
   * quota. This stays in contrast to account quotas that can be set only by
   * clients holding reseller admin privileges. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
  if (op_ret < 0) {
    return;
  }

  if (swift_ver_location) {
    s->bucket_info.swift_ver_location = *swift_ver_location;
    s->bucket_info.swift_versioning = (! swift_ver_location->empty());
  }

  /* Web site of Swift API. */
  filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
  s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

  /* Setting attributes also stores the provided bucket info. Due to this
   * fact, the new quota settings can be serialized with the same call. */
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                &s->bucket_info.objv_tracker);
}
3998
3999 int RGWPutMetadataObject::verify_permission()
4000 {
4001 // This looks to be something specific to Swift. We could add
4002 // operations like swift:PutMetadataObject to the Policy Engine.
4003 if (!verify_object_permission_no_policy(s, RGW_PERM_WRITE)) {
4004 return -EACCES;
4005 }
4006
4007 return 0;
4008 }
4009
/* Standard pre-execution hook for bucket/object ops (request logging setup). */
void RGWPutMetadataObject::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4014
/* Replace an object's user metadata (Swift POST object): read the current
 * xattrs, compute the add/remove sets against the request, and write them
 * back without touching the object data. */
void RGWPutMetadataObject::execute()
{
  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs, orig_attrs, rmattrs;

  store->set_atomic(s->obj_ctx, obj);

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_get_request_metadata(s->cct, s->info, attrs);
  /* check if obj exists, read orig attrs */
  op_ret = get_obj_attrs(store, s, obj, orig_attrs);
  if (op_ret < 0) {
    return;
  }

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(orig_attrs)) {
    op_ret = -ENOENT;
    return;
  }

  /* Filter currently existing attributes. */
  prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
  populate_with_generic_attrs(s, attrs);
  encode_delete_at_attr(delete_at, attrs);

  /* re-encode the DLO manifest so it survives the metadata rewrite */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
}
4056
4057 int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
4058 {
4059 RGWSLOInfo slo_info;
4060 bufferlist::iterator bliter = bl.begin();
4061 try {
4062 ::decode(slo_info, bliter);
4063 } catch (buffer::error& err) {
4064 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
4065 return -EIO;
4066 }
4067
4068 try {
4069 deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
4070 new RGWBulkDelete::Deleter(store, s));
4071 } catch (std::bad_alloc) {
4072 return -ENOMEM;
4073 }
4074
4075 list<RGWBulkDelete::acct_path_t> items;
4076 for (const auto& iter : slo_info.entries) {
4077 const string& path_str = iter.path;
4078
4079 const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
4080 if (boost::string_view::npos == sep_pos) {
4081 return -EINVAL;
4082 }
4083
4084 RGWBulkDelete::acct_path_t path;
4085
4086 path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
4087 path.obj_key = url_decode(path_str.substr(sep_pos + 1));
4088
4089 items.push_back(path);
4090 }
4091
4092 /* Request removal of the manifest object itself. */
4093 RGWBulkDelete::acct_path_t path;
4094 path.bucket_name = s->bucket_name;
4095 path.obj_key = s->object;
4096 items.push_back(path);
4097
4098 int ret = deleter->delete_chunk(items);
4099 if (ret < 0) {
4100 return ret;
4101 }
4102
4103 return 0;
4104 }
4105
4106 int RGWDeleteObj::verify_permission()
4107 {
4108 if (s->iam_policy) {
4109 auto r = s->iam_policy->eval(s->env, *s->auth.identity,
4110 s->object.instance.empty() ?
4111 rgw::IAM::s3DeleteObject :
4112 rgw::IAM::s3DeleteObjectVersion,
4113 ARN(s->bucket, s->object.name));
4114 if (r == Effect::Allow)
4115 return true;
4116 else if (r == Effect::Deny)
4117 return false;
4118 }
4119
4120 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
4121 return -EACCES;
4122 }
4123
4124 return 0;
4125 }
4126
/* Standard pre-execution hook for bucket/object ops (request logging setup). */
void RGWDeleteObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4131
/* Delete an object, handling three special paths before the plain delete:
 * multipart/SLO manifest deletion, Swift object-versioning restore, and
 * Swift object expiration (expired objects report 404). */
void RGWDeleteObj::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs;


  if (!s->object.empty()) {
    if (need_object_expiration() || multipart_delete) {
      /* check if obj exists, read orig attrs */
      op_ret = get_obj_attrs(store, s, obj, attrs);
      if (op_ret < 0) {
        return;
      }
    }

    /* multipart_delete: caller asked to delete an SLO manifest together
     * with all of its segments */
    if (multipart_delete) {
      const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);

      if (slo_attr != attrs.end()) {
        op_ret = handle_slo_manifest(slo_attr->second);
        if (op_ret < 0) {
          ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
        }
      } else {
        op_ret = -ERR_NOT_SLO_MANIFEST;
      }

      return;
    }

    RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
    obj_ctx->obj.set_atomic(obj);

    /* Swift versioning: try to bring back the newest archived copy instead
     * of deleting; ver_restored tells us whether that happened. */
    bool ver_restored = false;
    op_ret = store->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
                                             s->bucket_info, obj, ver_restored);
    if (op_ret < 0) {
      return;
    }

    if (!ver_restored) {
      /* Swift's versioning mechanism hasn't found any previous version of
       * the object that could be restored. This means we should proceed
       * with the regular delete path. */
      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
      RGWRados::Object::Delete del_op(&del_target);

      op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
                                            &del_op.params.marker_version_id);
      if (op_ret < 0) {
        return;
      }

      del_op.params.bucket_owner = s->bucket_owner.get_id();
      del_op.params.versioning_status = s->bucket_info.versioning_status();
      del_op.params.obj_owner = s->owner;
      del_op.params.unmod_since = unmod_since;
      del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */

      op_ret = del_op.delete_obj();
      if (op_ret >= 0) {
        delete_marker = del_op.result.delete_marker;
        version_id = del_op.result.version_id;
      }

      /* Check whether the object has expired. Swift API documentation
       * stands that we should return 404 Not Found in such case. */
      if (need_object_expiration() && object_is_expired(attrs)) {
        op_ret = -ENOENT;
        return;
      }
    }

    /* caller may opt to treat a failed precondition as success */
    if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
      op_ret = 0;
    }
  } else {
    op_ret = -EINVAL;
  }
}
4222
4223
4224 bool RGWCopyObj::parse_copy_location(const string& url_src, string& bucket_name, rgw_obj_key& key)
4225 {
4226 string name_str;
4227 string params_str;
4228
4229 size_t pos = url_src.find('?');
4230 if (pos == string::npos) {
4231 name_str = url_src;
4232 } else {
4233 name_str = url_src.substr(0, pos);
4234 params_str = url_src.substr(pos + 1);
4235 }
4236
4237 std::string dec_src = url_decode(name_str);
4238 const char *src = dec_src.c_str();
4239
4240 if (*src == '/') ++src;
4241
4242 string str(src);
4243
4244 pos = str.find('/');
4245 if (pos ==string::npos)
4246 return false;
4247
4248 bucket_name = str.substr(0, pos);
4249 key.name = str.substr(pos + 1);
4250
4251 if (key.name.empty()) {
4252 return false;
4253 }
4254
4255 if (!params_str.empty()) {
4256 RGWHTTPArgs args;
4257 args.set(params_str);
4258 args.parse();
4259
4260 key.instance = args.get("versionId", NULL);
4261 }
4262
4263 return true;
4264 }
4265
/* Permission check for server-side copy: the caller needs READ on the
 * source object (via its bucket policy or ACL) and WRITE on the destination
 * bucket. Also loads both buckets' info/attrs as a side effect, which the
 * later copy stages rely on. */
int RGWCopyObj::verify_permission()
{
  RGWAccessControlPolicy src_acl(s->cct);
  optional<Policy> src_policy;
  op_ret = get_params();
  if (op_ret < 0)
    return op_ret;

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return op_ret;
  }
  map<string, bufferlist> src_attrs;

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  if (s->bucket_instance_id.empty()) {
    op_ret = store->get_bucket_info(obj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
  } else {
    /* will only happen in intra region sync where the source and dest bucket is the same */
    op_ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
  }
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_BUCKET;
    }
    return op_ret;
  }

  src_bucket = src_bucket_info.bucket;

  /* get buckets info (source and dest) */
  if (s->local_source && source_zone.empty()) {
    rgw_obj src_obj(src_bucket, src_object);
    store->set_atomic(s->obj_ctx, src_obj);
    store->set_prefetch_data(s->obj_ctx, src_obj);

    /* check source object permissions */
    op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl,
                             src_policy, src_bucket, src_object);
    if (op_ret < 0) {
      return op_ret;
    }

    /* admin request overrides permission checks */
    if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
      if (src_policy) {
        /* bucket policy first; on Pass fall through to the object ACL */
        auto e = src_policy->eval(s->env, *s->auth.identity,
                                  src_object.instance.empty() ?
                                  rgw::IAM::s3GetObject :
                                  rgw::IAM::s3GetObjectVersion,
                                  ARN(src_obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !src_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                              RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!src_acl.verify_permission(*s->auth.identity,
                                            s->perm_mask,
                                            RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  RGWAccessControlPolicy dest_bucket_policy(s->cct);
  map<string, bufferlist> dest_attrs;

  if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
                                                           or intra region sync */
    dest_bucket_info = src_bucket_info;
    dest_attrs = src_attrs;
  } else {
    op_ret = store->get_bucket_info(obj_ctx, dest_tenant_name, dest_bucket_name,
                                    dest_bucket_info, nullptr, &dest_attrs);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return op_ret;
    }
  }

  dest_bucket = dest_bucket_info.bucket;

  rgw_obj dest_obj(dest_bucket, dest_object);
  store->set_atomic(s->obj_ctx, dest_obj);

  /* check dest bucket permissions */
  op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
                              &dest_bucket_policy, dest_bucket);
  if (op_ret < 0) {
    return op_ret;
  }

  /* admin request overrides permission checks */
  if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id()) &&
      ! dest_bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_WRITE)) {
    return -EACCES;
  }

  op_ret = init_dest_policy();
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4377
4378
4379 int RGWCopyObj::init_common()
4380 {
4381 if (if_mod) {
4382 if (parse_time(if_mod, &mod_time) < 0) {
4383 op_ret = -EINVAL;
4384 return op_ret;
4385 }
4386 mod_ptr = &mod_time;
4387 }
4388
4389 if (if_unmod) {
4390 if (parse_time(if_unmod, &unmod_time) < 0) {
4391 op_ret = -EINVAL;
4392 return op_ret;
4393 }
4394 unmod_ptr = &unmod_time;
4395 }
4396
4397 bufferlist aclbl;
4398 dest_policy.encode(aclbl);
4399 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
4400
4401 rgw_get_request_metadata(s->cct, s->info, attrs);
4402 populate_with_generic_attrs(s, attrs);
4403
4404 return 0;
4405 }
4406
4407 static void copy_obj_progress_cb(off_t ofs, void *param)
4408 {
4409 RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
4410 op->progress_cb(ofs);
4411 }
4412
4413 void RGWCopyObj::progress_cb(off_t ofs)
4414 {
4415 if (!s->cct->_conf->rgw_copy_obj_progress)
4416 return;
4417
4418 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
4419 return;
4420
4421 send_partial_response(ofs);
4422
4423 last_ofs = ofs;
4424 }
4425
/* Shared pre-execution hook for bucket/object operations. */
void RGWCopyObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4430
/* Perform the copy: parse conditional headers, handle Swift versioning of
 * the destination, then delegate the actual data copy to
 * RGWRados::copy_obj(), reporting progress via copy_obj_progress_cb. */
void RGWCopyObj::execute()
{
  if (init_common() < 0)
    return;

  rgw_obj src_obj(src_bucket, src_object);
  rgw_obj dst_obj(dest_bucket, dest_object);

  /* Both ends of the copy are treated as atomic objects. */
  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  obj_ctx.obj.set_atomic(src_obj);
  obj_ctx.obj.set_atomic(dst_obj);

  encode_delete_at_attr(delete_at, attrs);

  /* System (e.g. sync) requests get high-precision mtimes. */
  bool high_precision_time = (s->system_request);

  /* Handle object versioning of Swift API. In case of copying to remote this
   * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
  op_ret = store->swift_versioning_copy(obj_ctx,
                                        dest_bucket_info.owner,
                                        dest_bucket_info,
                                        dst_obj);
  if (op_ret < 0) {
    return;
  }

  op_ret = store->copy_obj(obj_ctx,
                           s->user->user_id,
                           client_id,
                           op_id,
                           &s->info,
                           source_zone,
                           dst_obj,
                           src_obj,
                           dest_bucket_info,
                           src_bucket_info,
                           &src_mtime,
                           &mtime,
                           mod_ptr,
                           unmod_ptr,
                           high_precision_time,
                           if_match,
                           if_nomatch,
                           attrs_mod,
                           copy_if_newer,
                           attrs, RGW_OBJ_CATEGORY_MAIN,
                           olh_epoch,
                           (delete_at ? *delete_at : real_time()),
                           (version_id.empty() ? NULL : &version_id),
                           &s->req_id, /* use req_id as tag */
                           &etag,
                           copy_obj_progress_cb, (void *)this
                           );
}
4485
4486 int RGWGetACLs::verify_permission()
4487 {
4488 bool perm;
4489 if (!s->object.empty()) {
4490 perm = verify_object_permission(s,
4491 s->object.instance.empty() ?
4492 rgw::IAM::s3GetObjectAcl :
4493 rgw::IAM::s3GetObjectVersionAcl);
4494 } else {
4495 perm = verify_bucket_permission(s, rgw::IAM::s3GetBucketAcl);
4496 }
4497 if (!perm)
4498 return -EACCES;
4499
4500 return 0;
4501 }
4502
/* Shared pre-execution hook for bucket/object operations. */
void RGWGetACLs::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4507
4508 void RGWGetACLs::execute()
4509 {
4510 stringstream ss;
4511 RGWAccessControlPolicy* const acl = \
4512 (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get());
4513 RGWAccessControlPolicy_S3* const s3policy = \
4514 static_cast<RGWAccessControlPolicy_S3*>(acl);
4515 s3policy->to_xml(ss);
4516 acls = ss.str();
4517 }
4518
4519
4520
4521 int RGWPutACLs::verify_permission()
4522 {
4523 bool perm;
4524 if (!s->object.empty()) {
4525 perm = verify_object_permission(s,
4526 s->object.instance.empty() ?
4527 rgw::IAM::s3PutObjectAcl :
4528 rgw::IAM::s3PutObjectVersionAcl);
4529 } else {
4530 perm = verify_bucket_permission(s, rgw::IAM::s3PutBucketAcl);
4531 }
4532 if (!perm)
4533 return -EACCES;
4534
4535 return 0;
4536 }
4537
4538 int RGWGetLC::verify_permission()
4539 {
4540 bool perm;
4541 perm = verify_bucket_permission(s, rgw::IAM::s3GetLifecycleConfiguration);
4542 if (!perm)
4543 return -EACCES;
4544
4545 return 0;
4546 }
4547
4548 int RGWPutLC::verify_permission()
4549 {
4550 bool perm;
4551 perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
4552 if (!perm)
4553 return -EACCES;
4554
4555 return 0;
4556 }
4557
4558 int RGWDeleteLC::verify_permission()
4559 {
4560 bool perm;
4561 perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
4562 if (!perm)
4563 return -EACCES;
4564
4565 return 0;
4566 }
4567
/* Shared pre-execution hook for bucket/object operations. */
void RGWPutACLs::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4572
/* Shared pre-execution hook for bucket/object operations. */
void RGWGetLC::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4577
/* Shared pre-execution hook for bucket/object operations. */
void RGWPutLC::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4582
/* Shared pre-execution hook for bucket/object operations. */
void RGWDeleteLC::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4587
/* Apply a new ACL to the bucket (no object in the request) or to an
 * object. The ACL may arrive as an XML body, as a canned ACL, or as
 * x-amz-grant-* headers; canned/header forms are first expanded into XML
 * so a single parse path handles all three. */
void RGWPutACLs::execute()
{
  bufferlist bl;

  RGWAccessControlPolicy_S3 *policy = NULL;
  RGWACLXMLParser_S3 parser(s->cct);
  RGWAccessControlPolicy_S3 new_policy(s->cct);
  stringstream ss;
  char *new_data = NULL;
  rgw_obj obj;

  op_ret = 0; /* XXX redundant? */

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }


  /* The owner is always taken from the existing policy; a PutAcl cannot
   * transfer ownership. */
  RGWAccessControlPolicy* const existing_policy = \
    (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());

  owner = existing_policy->get_owner();

  op_ret = get_params();
  if (op_ret < 0) {
    if (op_ret == -ERANGE) {
      /* body exceeded rgw_max_put_param_size; report it as malformed XML
       * with an explanatory message */
      ldout(s->cct, 4) << "The size of request xml data is larger than the max limitation, data size = "
                       << s->length << dendl;
      op_ret = -ERR_MALFORMED_XML;
      s->err.message = "The XML you provided was larger than the maximum " +
                       std::to_string(s->cct->_conf->rgw_max_put_param_size) +
                       " bytes allowed.";
    }
    return;
  }

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  /* a canned ACL and an XML body are mutually exclusive */
  if (!s->canned_acl.empty() && len) {
    op_ret = -EINVAL;
    return;
  }

  /* Expand canned ACL / grant headers into an XML document, replacing the
   * (empty) request body; `data` ownership moves to the strdup'd copy. */
  if (!s->canned_acl.empty() || s->has_acl_header) {
    op_ret = get_policy_from_state(store, s, ss);
    if (op_ret < 0)
      return;

    new_data = strdup(ss.str().c_str());
    free(data);
    data = new_data;
    len = ss.str().size();
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    return;
  }
  policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
  if (!policy) {
    op_ret = -EINVAL;
    return;
  }

  /* Cap the number of grants (configurable; negative config falls back to
   * the built-in default). */
  const RGWAccessControlList& req_acl = policy->get_acl();
  const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
#define ACL_GRANTS_MAX_NUM      100
  int max_num = s->cct->_conf->rgw_acl_grants_max_num;
  if (max_num < 0) {
    max_num = ACL_GRANTS_MAX_NUM;
  }

  int grants_num = req_grant_map.size();
  if (grants_num > max_num) {
    ldout(s->cct, 4) << "An acl can have up to "
                     << max_num
                     << " grants, request acl grants num: "
                     << grants_num << dendl;
    op_ret = -ERR_MALFORMED_ACL_ERROR;
    s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
                     + std::to_string(max_num)
                     + " grants allowed in an acl.";
    return;
  }

  // forward bucket acl requests to meta master zone
  if (s->object.empty() && !store->is_meta_master()) {
    bufferlist in_data;
    // include acl data unless it was generated from a canned_acl
    if (s->canned_acl.empty()) {
      in_data.append(data, len);
    }
    op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old AccessControlPolicy";
    policy->to_xml(*_dout);
    *_dout << dendl;
  }

  /* Validate/canonicalize the requested policy against known users. */
  op_ret = policy->rebuild(store, &owner, new_policy);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New AccessControlPolicy:";
    new_policy.to_xml(*_dout);
    *_dout << dendl;
  }

  new_policy.encode(bl);
  map<string, bufferlist> attrs;

  if (!s->object.empty()) {
    obj = rgw_obj(s->bucket, s->object);
    store->set_atomic(s->obj_ctx, obj);
    //if instance is empty, we should modify the latest object
    op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
  } else {
    attrs = s->bucket_attrs;
    attrs[RGW_ATTR_ACL] = bl;
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  }
  if (op_ret == -ECANCELED) {
    op_ret = 0; /* lost a race, but it's ok because acls are immutable */
  }
}
4721
4722 static void get_lc_oid(struct req_state *s, string& oid)
4723 {
4724 string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
4725 int max_objs = (s->cct->_conf->rgw_lc_max_objs > HASH_PRIME)?HASH_PRIME:s->cct->_conf->rgw_lc_max_objs;
4726 int index = ceph_str_hash_linux(shard_id.c_str(), shard_id.size()) % HASH_PRIME % max_objs;
4727 oid = lc_oid_prefix;
4728 char buf[32];
4729 snprintf(buf, 32, ".%d", index);
4730 oid.append(buf);
4731 return;
4732 }
4733
/* Store a new lifecycle configuration: parse and validate the XML body,
 * persist it as a bucket-instance xattr, then register the bucket in its
 * lifecycle shard (under a cls lock) so the LC daemon will process it. */
void RGWPutLC::execute()
{
  bufferlist bl;

  RGWLifecycleConfiguration_S3 *config = NULL;
  RGWLCXMLParser_S3 parser(s->cct);
  RGWLifecycleConfiguration_S3 new_config(s->cct);

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0)
    return;

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }
  config = static_cast<RGWLifecycleConfiguration_S3 *>(parser.find_first("LifecycleConfiguration"));
  if (!config) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old LifecycleConfiguration:";
    config->to_xml(*_dout);
    *_dout << dendl;
  }

  /* validate/canonicalize the parsed configuration */
  op_ret = config->rebuild(store, new_config);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New LifecycleConfiguration:";
    new_config.to_xml(*_dout);
    *_dout << dendl;
  }

  new_config.encode(bl);
  map<string, bufferlist> attrs;
  attrs = s->bucket_attrs;
  attrs[RGW_ATTR_LC] = bl;
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  if (op_ret < 0)
    return;

  /* Register "<tenant>:<bucket>:<instance-id>" in the LC shard, guarded
   * by an exclusive cls lock; EBUSY retries forever with a 5s backoff. */
  string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
  string oid;
  get_lc_oid(s, oid);
  pair<string, int> entry(shard_id, lc_uninitial);
  int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
  rados::cls::lock::Lock l(lc_index_lock_name);
  utime_t time(max_lock_secs, 0);
  l.set_duration(time);
  l.set_cookie(cookie);
  librados::IoCtx *ctx = store->get_lc_pool_ctx();
  do {
    op_ret = l.lock_exclusive(ctx, oid);
    if (op_ret == -EBUSY) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
      sleep(5);
      continue;
    }
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock " << oid << op_ret << dendl;
      break;
    }
    op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry);
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to set entry " << oid << op_ret << dendl;
    }
    break;
  }while(1);
  /* unlock is a no-op error if the lock was never acquired */
  l.unlock(ctx, oid);
  return;
}
4816
4817 void RGWDeleteLC::execute()
4818 {
4819 bufferlist bl;
4820 map<string, bufferlist> orig_attrs, attrs;
4821 map<string, bufferlist>::iterator iter;
4822 rgw_raw_obj obj;
4823 store->get_bucket_instance_obj(s->bucket, obj);
4824 store->set_prefetch_data(s->obj_ctx, obj);
4825 op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
4826 if (op_ret < 0)
4827 return;
4828
4829 for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
4830 const string& name = iter->first;
4831 dout(10) << "DeleteLC : attr: " << name << dendl;
4832 if (name.compare(0, (sizeof(RGW_ATTR_LC) - 1), RGW_ATTR_LC) != 0) {
4833 if (attrs.find(name) == attrs.end()) {
4834 attrs[name] = iter->second;
4835 }
4836 }
4837 }
4838 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4839 string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
4840 pair<string, int> entry(shard_id, lc_uninitial);
4841 string oid;
4842 get_lc_oid(s, oid);
4843 int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
4844 librados::IoCtx *ctx = store->get_lc_pool_ctx();
4845 rados::cls::lock::Lock l(lc_index_lock_name);
4846 utime_t time(max_lock_secs, 0);
4847 l.set_duration(time);
4848 do {
4849 op_ret = l.lock_exclusive(ctx, oid);
4850 if (op_ret == -EBUSY) {
4851 dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
4852 sleep(5);
4853 continue;
4854 }
4855 if (op_ret < 0) {
4856 dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock " << oid << op_ret << dendl;
4857 break;
4858 }
4859 op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry);
4860 if (op_ret < 0) {
4861 dout(0) << "RGWLC::RGWDeleteLC() failed to set entry " << oid << op_ret << dendl;
4862 }
4863 break;
4864 }while(1);
4865 l.unlock(ctx, oid);
4866 return;
4867 }
4868
4869 int RGWGetCORS::verify_permission()
4870 {
4871 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4872 return -EACCES;
4873 }
4874
4875 return 0;
4876 }
4877
4878 void RGWGetCORS::execute()
4879 {
4880 op_ret = read_bucket_cors();
4881 if (op_ret < 0)
4882 return ;
4883
4884 if (!cors_exist) {
4885 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
4886 op_ret = -ENOENT;
4887 return;
4888 }
4889 }
4890
4891 int RGWPutCORS::verify_permission()
4892 {
4893 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4894 return -EACCES;
4895 }
4896
4897 return 0;
4898 }
4899
4900 void RGWPutCORS::execute()
4901 {
4902 rgw_raw_obj obj;
4903
4904 op_ret = get_params();
4905 if (op_ret < 0)
4906 return;
4907
4908 if (!store->is_meta_master()) {
4909 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
4910 if (op_ret < 0) {
4911 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
4912 return;
4913 }
4914 }
4915
4916 map<string, bufferlist> attrs = s->bucket_attrs;
4917 attrs[RGW_ATTR_CORS] = cors_bl;
4918 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4919 }
4920
4921 int RGWDeleteCORS::verify_permission()
4922 {
4923 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4924 return -EACCES;
4925 }
4926
4927 return 0;
4928 }
4929
4930 void RGWDeleteCORS::execute()
4931 {
4932 op_ret = read_bucket_cors();
4933 if (op_ret < 0)
4934 return;
4935
4936 bufferlist bl;
4937 rgw_raw_obj obj;
4938 if (!cors_exist) {
4939 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
4940 op_ret = -ENOENT;
4941 return;
4942 }
4943 store->get_bucket_instance_obj(s->bucket, obj);
4944 store->set_prefetch_data(s->obj_ctx, obj);
4945 map<string, bufferlist> orig_attrs, attrs, rmattrs;
4946 map<string, bufferlist>::iterator iter;
4947
4948 op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
4949 if (op_ret < 0)
4950 return;
4951
4952 /* only remove meta attrs */
4953 for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
4954 const string& name = iter->first;
4955 dout(10) << "DeleteCORS : attr: " << name << dendl;
4956 if (name.compare(0, (sizeof(RGW_ATTR_CORS) - 1), RGW_ATTR_CORS) == 0) {
4957 rmattrs[name] = iter->second;
4958 } else if (attrs.find(name) == attrs.end()) {
4959 attrs[name] = iter->second;
4960 }
4961 }
4962 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4963 }
4964
/* Fill the preflight response headers from the matched CORS rule and the
 * client-requested headers. */
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
  get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
4968
4969 int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
4970 rule = cc->host_name_rule(origin);
4971 if (!rule) {
4972 dout(10) << "There is no cors rule present for " << origin << dendl;
4973 return -ENOENT;
4974 }
4975
4976 if (!validate_cors_rule_method(rule, req_meth)) {
4977 return -ENOENT;
4978 }
4979 return 0;
4980 }
4981
4982 void RGWOptionsCORS::execute()
4983 {
4984 op_ret = read_bucket_cors();
4985 if (op_ret < 0)
4986 return;
4987
4988 origin = s->info.env->get("HTTP_ORIGIN");
4989 if (!origin) {
4990 dout(0) <<
4991 "Preflight request without mandatory Origin header"
4992 << dendl;
4993 op_ret = -EINVAL;
4994 return;
4995 }
4996 req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
4997 if (!req_meth) {
4998 dout(0) <<
4999 "Preflight request without mandatory Access-control-request-method header"
5000 << dendl;
5001 op_ret = -EINVAL;
5002 return;
5003 }
5004 if (!cors_exist) {
5005 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
5006 op_ret = -ENOENT;
5007 return;
5008 }
5009 req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
5010 op_ret = validate_cors_request(&bucket_cors);
5011 if (!rule) {
5012 origin = req_meth = NULL;
5013 return;
5014 }
5015 return;
5016 }
5017
/* No permission check beyond request authentication: always allowed. */
int RGWGetRequestPayment::verify_permission()
{
  return 0;
}
5022
/* Shared pre-execution hook for bucket/object operations. */
void RGWGetRequestPayment::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5027
/* Report the requester-pays flag from the cached bucket info. */
void RGWGetRequestPayment::execute()
{
  requester_pays = s->bucket_info.requester_pays;
}
5032
5033 int RGWSetRequestPayment::verify_permission()
5034 {
5035 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
5036 return -EACCES;
5037 }
5038
5039 return 0;
5040 }
5041
/* Shared pre-execution hook for bucket/object operations. */
void RGWSetRequestPayment::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5046
5047 void RGWSetRequestPayment::execute()
5048 {
5049 op_ret = get_params();
5050
5051 if (op_ret < 0)
5052 return;
5053
5054 s->bucket_info.requester_pays = requester_pays;
5055 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
5056 &s->bucket_attrs);
5057 if (op_ret < 0) {
5058 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
5059 << " returned err=" << op_ret << dendl;
5060 return;
5061 }
5062 }
5063
5064 int RGWInitMultipart::verify_permission()
5065 {
5066 if (s->iam_policy) {
5067 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5068 rgw::IAM::s3PutObject,
5069 rgw_obj(s->bucket, s->object));
5070 if (e == Effect::Allow) {
5071 return 0;
5072 } else if (e == Effect::Deny) {
5073 return -EACCES;
5074 }
5075 }
5076
5077 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5078 return -EACCES;
5079 }
5080
5081 return 0;
5082 }
5083
/* Shared pre-execution hook for bucket/object operations. */
void RGWInitMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5088
/* Begin a multipart upload: generate a unique upload id and create the
 * upload's meta object (exclusively), retrying with a fresh id on
 * collision. */
void RGWInitMultipart::execute()
{
  bufferlist aclbl;
  map<string, bufferlist> attrs;
  rgw_obj obj;

  if (get_params() < 0)
    return;

  if (s->object.empty())
    return;

  /* the upload's ACL is stored on the meta object */
  policy.encode(aclbl);
  attrs[RGW_ATTR_ACL] = aclbl;

  populate_with_generic_attrs(s, attrs);

  /* select encryption mode */
  op_ret = prepare_encryption(attrs);
  if (op_ret != 0)
    return;

  rgw_get_request_metadata(s->cct, s->info, attrs);

  /* Loop until an upload id is found whose meta object does not already
   * exist (PUT_OBJ_CREATE_EXCL makes the create fail with EEXIST). */
  do {
    char buf[33];
    gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
    upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
    upload_id.append(buf);

    string tmp_obj_name;
    RGWMPObj mp(s->object.name, upload_id);
    tmp_obj_name = mp.get_meta();

    obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
    /* the meta object is written with 0 size into the extra-data pool;
     * it is hashed in the index by the target object's name */
    obj.set_in_extra_data(true);
    obj.index_hash_source = s->object.name;

    RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
    op_target.set_versioning_disabled(true); /* no versioning for multipart meta */

    RGWRados::Object::Write obj_op(&op_target);

    obj_op.meta.owner = s->owner.get_id();
    obj_op.meta.category = RGW_OBJ_CATEGORY_MULTIMETA;
    obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;

    op_ret = obj_op.write_meta(0, 0, attrs);
  } while (op_ret == -EEXIST);
}
5140
5141 static int get_multipart_info(RGWRados *store, struct req_state *s,
5142 string& meta_oid,
5143 RGWAccessControlPolicy *policy,
5144 map<string, bufferlist>& attrs)
5145 {
5146 map<string, bufferlist>::iterator iter;
5147 bufferlist header;
5148
5149 rgw_obj obj;
5150 obj.init_ns(s->bucket, meta_oid, mp_ns);
5151 obj.set_in_extra_data(true);
5152
5153 int op_ret = get_obj_attrs(store, s, obj, attrs);
5154 if (op_ret < 0) {
5155 if (op_ret == -ENOENT) {
5156 return -ERR_NO_SUCH_UPLOAD;
5157 }
5158 return op_ret;
5159 }
5160
5161 if (policy) {
5162 for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
5163 string name = iter->first;
5164 if (name.compare(RGW_ATTR_ACL) == 0) {
5165 bufferlist& bl = iter->second;
5166 bufferlist::iterator bli = bl.begin();
5167 try {
5168 ::decode(*policy, bli);
5169 } catch (buffer::error& err) {
5170 ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
5171 return -EIO;
5172 }
5173 break;
5174 }
5175 }
5176 }
5177
5178 return 0;
5179 }
5180
5181 int RGWCompleteMultipart::verify_permission()
5182 {
5183 if (s->iam_policy) {
5184 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5185 rgw::IAM::s3PutObject,
5186 rgw_obj(s->bucket, s->object));
5187 if (e == Effect::Allow) {
5188 return 0;
5189 } else if (e == Effect::Deny) {
5190 return -EACCES;
5191 }
5192 }
5193
5194 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5195 return -EACCES;
5196 }
5197
5198 return 0;
5199 }
5200
/* Shared pre-execution hook for bucket/object operations. */
void RGWCompleteMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5205
/* Complete a multipart upload: parse the CompleteMultipartUpload XML,
 * verify the listed parts against the uploaded ones (number, order,
 * etag, minimum size), stitch the part manifests (and compression maps)
 * into the final object, write its head with the S3
 * "md5-of-part-md5s-N" etag, and delete the upload meta object. A cls
 * lock on the meta object serializes racing completions. */
void RGWCompleteMultipart::execute()
{
  RGWMultiCompleteUpload *parts;
  map<int, string>::iterator iter;
  RGWMultiXMLParser parser;
  string meta_oid;
  map<uint32_t, RGWUploadPartInfo> obj_parts;
  map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
  map<string, bufferlist> attrs;
  off_t ofs = 0;
  MD5 hash;
  char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
  char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
  bufferlist etag_bl;
  rgw_obj meta_obj;
  rgw_obj target_obj;
  RGWMPObj mp;
  RGWObjManifest manifest;
  uint64_t olh_epoch = 0;
  string version_id;

  op_ret = get_params();
  if (op_ret < 0)
    return;
  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return;
  }

  /* an empty body is malformed XML, not an empty completion */
  if (!data || !len) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (!parser.init()) {
    op_ret = -EIO;
    return;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
  if (!parts || parts->parts.empty()) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if ((int)parts->parts.size() >
      s->cct->_conf->rgw_multipart_part_upload_limit) {
    op_ret = -ERANGE;
    return;
  }

  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  int total_parts = 0;
  int handled_parts = 0;
  int max_parts = 1000;
  int marker = 0;
  bool truncated;
  RGWCompressionInfo cs_info;
  bool compressed = false;
  uint64_t accounted_size = 0;

  uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;

  list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */

  bool versioned_object = s->bucket_info.versioning_enabled();

  iter = parts->parts.begin();

  meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
  meta_obj.set_in_extra_data(true);
  meta_obj.index_hash_source = s->object.name;

  /*take a cls lock on meta_obj to prevent racing completions (or retries)
    from deleting the parts*/
  rgw_pool meta_pool;
  rgw_raw_obj raw_obj;
  librados::ObjectWriteOperation op;
  librados::IoCtx ioctx;
  rados::cls::lock::Lock l("RGWCompleteMultipart");
  int max_lock_secs_mp = s->cct->_conf->get_val<int64_t>("rgw_mp_lock_max_time");

  /* assert_exists makes the lock fail if the meta obj is already gone,
   * i.e. the upload was already completed or aborted */
  op.assert_exists();
  store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
  store->get_obj_data_pool((s->bucket_info).placement_rule,meta_obj,&meta_pool);
  store->open_pool_ctx(meta_pool, ioctx);

  const string raw_meta_oid = raw_obj.oid;
  utime_t time(max_lock_secs_mp, 0);
  l.set_duration(time);
  l.lock_exclusive(&op);
  op_ret = ioctx.operate(raw_meta_oid, &op);

  if (op_ret < 0) {
    dout(0) << "RGWCompleteMultipart::execute() failed to acquire lock " << dendl;
    op_ret = -ERR_INTERNAL_ERROR;
    s->err.message = "This multipart completion is already in progress";
    return;
  }

  /* NOTE(review): every error return below this point leaves the cls
   * lock held until its duration expires — confirm whether an explicit
   * unlock is wanted on those paths. */
  op_ret = get_obj_attrs(store, s, meta_obj, attrs);

  if (op_ret < 0) {
    ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
                     << " ret=" << op_ret << dendl;
    return;
  }

  /* Walk the uploaded parts in pages of max_parts, matching them against
   * the client-supplied part list in order. */
  do {
    op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
                                  marker, obj_parts, &marker, &truncated);
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_UPLOAD;
    }
    if (op_ret < 0)
      return;

    total_parts += obj_parts.size();
    if (!truncated && total_parts != (int)parts->parts.size()) {
      ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts
                       << " expected: " << parts->parts.size() << dendl;
      op_ret = -ERR_INVALID_PART;
      return;
    }

    for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
      /* every part except the last must meet the minimum part size */
      uint64_t part_size = obj_iter->second.accounted_size;
      if (handled_parts < (int)parts->parts.size() - 1 &&
          part_size < min_part_size) {
        op_ret = -ERR_TOO_SMALL;
        return;
      }

      char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
      if (iter->first != (int)obj_iter->first) {
        ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: "
                         << iter->first << " next uploaded: "
                         << obj_iter->first << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }
      string part_etag = rgw_string_unquote(iter->second);
      if (part_etag.compare(obj_iter->second.etag) != 0) {
        ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first
                         << " etag: " << iter->second << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }

      /* the final etag is the MD5 over the binary part MD5s */
      hex_to_buf(obj_iter->second.etag.c_str(), petag,
                 CEPH_CRYPTO_MD5_DIGESTSIZE);
      hash.Update((const byte *)petag, sizeof(petag));

      RGWUploadPartInfo& obj_part = obj_iter->second;

      /* update manifest for part */
      string oid = mp.get_part(obj_iter->second.num);
      rgw_obj src_obj;
      src_obj.init_ns(s->bucket, oid, mp_ns);

      if (obj_part.manifest.empty()) {
        ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj="
                         << src_obj << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      } else {
        manifest.append(obj_part.manifest, store);
      }

      /* splice this part's compression block map onto the end of the
       * accumulated map, rebasing offsets; all parts must share one
       * compression type */
      if (obj_part.cs_info.compression_type != "none") {
        if (compressed && cs_info.compression_type != obj_part.cs_info.compression_type) {
          ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload ("
                           << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
          op_ret = -ERR_INVALID_PART;
          return;
        }
        int64_t new_ofs; // offset in compression data for new part
        if (cs_info.blocks.size() > 0)
          new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
        else
          new_ofs = 0;
        for (const auto& block : obj_part.cs_info.blocks) {
          compression_block cb;
          cb.old_ofs = block.old_ofs + cs_info.orig_size;
          cb.new_ofs = new_ofs;
          cb.len = block.len;
          cs_info.blocks.push_back(cb);
          new_ofs = cb.new_ofs + cb.len;
        }
        if (!compressed)
          cs_info.compression_type = obj_part.cs_info.compression_type;
        cs_info.orig_size += obj_part.cs_info.orig_size;
        compressed = true;
      }

      /* the part's index entry is removed once the head object lands */
      rgw_obj_index_key remove_key;
      src_obj.key.get_index_key(&remove_key);

      remove_objs.push_back(remove_key);

      ofs += obj_part.size;
      accounted_size += obj_part.accounted_size;
    }
  } while (truncated);
  hash.Final((byte *)final_etag);

  /* S3 multipart etag format: "<md5-of-part-md5s>-<num-parts>" */
  buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
  snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
           "-%lld", (long long)parts->parts.size());
  etag = final_etag_str;
  ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl;

  etag_bl.append(final_etag_str, strlen(final_etag_str) + 1);

  attrs[RGW_ATTR_ETAG] = etag_bl;

  if (compressed) {
    // write compression attribute to full object
    bufferlist tmp;
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
  }

  target_obj.init(s->bucket, s->object.name);
  if (versioned_object) {
    store->gen_rand_obj_instance_name(&target_obj);
  }

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  obj_ctx.obj.set_atomic(target_obj);

  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
  RGWRados::Object::Write obj_op(&op_target);

  obj_op.meta.manifest = &manifest;
  obj_op.meta.remove_objs = &remove_objs;

  obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
  obj_op.meta.owner = s->owner.get_id();
  obj_op.meta.flags = PUT_OBJ_CREATE;
  obj_op.meta.modify_tail = true;
  op_ret = obj_op.write_meta(ofs, accounted_size, attrs);
  if (op_ret < 0)
    return;

  // remove the upload obj
  /* deleting the meta object implicitly releases the lock held on it;
   * only when the delete fails do we unlock explicitly */
  int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                            s->bucket_info, meta_obj, 0);
  if (r < 0) {
    ldout(store->ctx(), 0) << "WARNING: failed to remove object " << meta_obj << dendl;
    r = l.unlock(&ioctx, raw_meta_oid);
    if (r < 0) {
      ldout(store->ctx(), 0) << "WARNING: failed to unlock " << raw_meta_oid << dendl;
    }
  }
}
5470
5471 int RGWAbortMultipart::verify_permission()
5472 {
5473 if (s->iam_policy) {
5474 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5475 rgw::IAM::s3AbortMultipartUpload,
5476 rgw_obj(s->bucket, s->object));
5477 if (e == Effect::Allow) {
5478 return 0;
5479 } else if (e == Effect::Deny) {
5480 return -EACCES;
5481 }
5482 }
5483
5484 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5485 return -EACCES;
5486 }
5487
5488 return 0;
5489 }
5490
void RGWAbortMultipart::pre_exec()
{
  // Standard per-op prelude shared by bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5495
void RGWAbortMultipart::execute()
{
  // Default to -EINVAL so the early returns below report a malformed
  // request (missing uploadId or missing object name).
  op_ret = -EINVAL;
  string upload_id;
  string meta_oid;
  upload_id = s->info.args.get("uploadId");
  map<string, bufferlist> attrs;
  rgw_obj meta_obj;
  RGWMPObj mp;

  if (upload_id.empty() || s->object.empty())
    return;

  // Derive the multipart meta object name from <object name, uploadId>.
  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  // Verify the upload actually exists before aborting it; the fetched
  // attrs are not used afterwards.
  op_ret = get_multipart_info(store, s, meta_oid, NULL, attrs);
  if (op_ret < 0)
    return;

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
}
5519
5520 int RGWListMultipart::verify_permission()
5521 {
5522 if (!verify_object_permission(s, rgw::IAM::s3ListMultipartUploadParts))
5523 return -EACCES;
5524
5525 return 0;
5526 }
5527
void RGWListMultipart::pre_exec()
{
  // Standard per-op prelude shared by bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5532
void RGWListMultipart::execute()
{
  map<string, bufferlist> xattrs;
  string meta_oid;
  RGWMPObj mp;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  // Locate the multipart meta object for <object name, uploadId>.
  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  // Fails (aborting the listing) when the upload does not exist; also
  // fills in the upload's ACL policy for the response.
  op_ret = get_multipart_info(store, s, meta_oid, &policy, xattrs);
  if (op_ret < 0)
    return;

  op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
				marker, parts, NULL, &truncated);
}
5553
5554 int RGWListBucketMultiparts::verify_permission()
5555 {
5556 if (!verify_bucket_permission(s,
5557 rgw::IAM::s3ListBucketMultiPartUploads))
5558 return -EACCES;
5559
5560 return 0;
5561 }
5562
void RGWListBucketMultiparts::pre_exec()
{
  // Standard per-op prelude shared by bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5567
void RGWListBucketMultiparts::execute()
{
  vector<rgw_bucket_dir_entry> objs;
  string marker_meta;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  if (s->prot_flags & RGW_REST_SWIFT) {
    // Swift's "path" query arg implies a directory-style listing and is
    // mutually exclusive with an explicit prefix/delimiter.
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }
  // Translate the client-visible marker into the meta-object namespace
  // used internally for multipart uploads.
  marker_meta = marker.get_meta();

  op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
                                  max_uploads, &objs, &common_prefixes, &is_truncated);
  if (op_ret < 0) {
    return;
  }

  if (!objs.empty()) {
    vector<rgw_bucket_dir_entry>::iterator iter;
    RGWMultipartUploadEntry entry;
    for (iter = objs.begin(); iter != objs.end(); ++iter) {
      rgw_obj_key key(iter->key);
      // Skip entries whose names are not valid multipart meta objects.
      if (!entry.mp.from_meta(key.name))
        continue;
      entry.obj = *iter;
      uploads.push_back(entry);
    }
    // NOTE(review): if no entry passed from_meta(), `entry` is still
    // default-constructed when assigned here — confirm consumers tolerate
    // an empty next_marker.
    next_marker = entry;
  }
}
5610
5611 void RGWGetHealthCheck::execute()
5612 {
5613 if (!g_conf->rgw_healthcheck_disabling_path.empty() &&
5614 (::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
5615 /* Disabling path specified & existent in the filesystem. */
5616 op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
5617 } else {
5618 op_ret = 0; /* 200 OK */
5619 }
5620 }
5621
5622 int RGWDeleteMultiObj::verify_permission()
5623 {
5624 acl_allowed = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
5625 if (!acl_allowed && !s->iam_policy)
5626 return -EACCES;
5627
5628 return 0;
5629 }
5630
void RGWDeleteMultiObj::pre_exec()
{
  // Standard per-op prelude shared by bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5635
void RGWDeleteMultiObj::execute()
{
  // S3 multi-object delete: parse the XML body, delete each listed key,
  // and stream a per-key result into the response.
  RGWMultiDelDelete *multi_delete;
  vector<rgw_obj_key>::iterator iter;
  RGWMultiDelXMLParser parser;
  int num_processed = 0;
  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);

  op_ret = get_params();
  if (op_ret < 0) {
    goto error;
  }

  // An empty request body is malformed.
  if (!data) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    goto error;
  }

  multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
  if (!multi_delete) {
    op_ret = -EINVAL;
    goto error;
  }

  // In quiet mode only failed deletions are reported back.
  if (multi_delete->is_quiet())
    quiet = true;

  begin_response();
  if (multi_delete->objects.empty()) {
    goto done;
  }

  // Process at most max_to_delete keys; surplus entries are ignored.
  for (iter = multi_delete->objects.begin();
        iter != multi_delete->objects.end() && num_processed < max_to_delete;
        ++iter, num_processed++) {
    rgw_obj obj(bucket, *iter);
    if (s->iam_policy) {
      // Per-object policy check: deleting a specific version is a
      // distinct IAM action from deleting the current version.
      auto e = s->iam_policy->eval(s->env,
				   *s->auth.identity,
				   iter->instance.empty() ?
				   rgw::IAM::s3DeleteObject :
				   rgw::IAM::s3DeleteObjectVersion,
				   obj);
      if ((e == Effect::Deny) ||
	  (e == Effect::Pass && !acl_allowed)) {
	send_partial_response(*iter, false, "", -EACCES);
	continue;
      }
    }

    obj_ctx->obj.set_atomic(obj);

    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = s->bucket_owner.get_id();
    del_op.params.versioning_status = s->bucket_info.versioning_status();
    del_op.params.obj_owner = s->owner;

    op_ret = del_op.delete_obj();
    // Deleting a non-existent key still counts as success (S3 semantics).
    if (op_ret == -ENOENT) {
      op_ret = 0;
    }

    send_partial_response(*iter, del_op.result.delete_marker,
			  del_op.result.version_id, op_ret);
  }

  /*  set the return code to zero, errors at this point will be
  dumped to the response */
  op_ret = 0;

done:
  // will likely segfault if begin_response() has not been called
  end_response();
  free(data);
  return;

error:
  send_status();
  free(data);
  return;

}
5730
bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
                                               map<string, bufferlist>& battrs,
                                               ACLOwner& bucket_owner /* out */)
{
  // Authorize one bulk-delete path against the target bucket's ACL and
  // (if present) its IAM policy; also reports the bucket owner.
  RGWAccessControlPolicy bacl(store->ctx());
  int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
  if (ret < 0) {
    return false;
  }

  auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);

  bucket_owner = bacl.get_owner();

  /* We can use global user_acl because each BulkDelete request is allowed
   * to work on entities from a single account only. */
  // NOTE(review): s3DeleteBucket is the action checked here even when the
  // path names an object (delete_single() calls this for both cases) —
  // confirm this is intended rather than s3DeleteObject for object paths.
  return verify_bucket_permission(s, binfo.bucket, s->user_acl.get(),
				  &bacl, policy, rgw::IAM::s3DeleteBucket);
}
5750
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
{
  // Delete one path from the bulk request: an object when obj_key is
  // present, otherwise the bucket itself. Failures are recorded in the
  // num_unfound/failures tallies instead of aborting the whole request.
  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  RGWBucketInfo binfo;
  map<string, bufferlist> battrs;
  ACLOwner bowner;

  int ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
				   path.bucket_name, binfo, nullptr,
				   &battrs);
  if (ret < 0) {
    goto binfo_fail;
  }

  if (!verify_permission(binfo, battrs, bowner)) {
    ret = -EACCES;
    goto auth_fail;
  }

  if (!path.obj_key.empty()) {
    // Object deletion path.
    rgw_obj obj(binfo.bucket, path.obj_key);
    obj_ctx.obj.set_atomic(obj);

    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = binfo.owner;
    del_op.params.versioning_status = binfo.versioning_status();
    del_op.params.obj_owner = bowner;

    ret = del_op.delete_obj();
    if (ret < 0) {
      goto delop_fail;
    }
  } else {
    // Bucket deletion path: remove the bucket, unlink it from its owner,
    // and (when not the metadata master) forward the deletion upstream.
    RGWObjVersionTracker ot;
    ot.read_version = binfo.ep_objv;

    ret = store->delete_bucket(binfo, ot);
    if (0 == ret) {
      ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
			      binfo.bucket.name, false);
      if (ret < 0) {
        // Unlink failure is logged but the delete itself succeeded.
	ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret
			 << dendl;
      }
    }
    if (ret < 0) {
      goto delop_fail;
    }

    if (!store->is_meta_master()) {
      bufferlist in_data;
      ret = forward_request_to_master(s, &ot.read_version, store, in_data,
				      nullptr);
      if (ret < 0) {
	if (ret == -ENOENT) {
	  /* adjust error, we want to return with NoSuchBucket and not
	   * NoSuchKey */
	  ret = -ERR_NO_SUCH_BUCKET;
	}
	goto delop_fail;
      }
    }
  }

  num_deleted++;
  return true;


binfo_fail:
  // Could not read bucket info: distinguish "not found" from other errors.
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl;
    num_unfound++;
  } else {
    ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret
			    << dendl;

    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

auth_fail:
  // Permission denied for this particular path.
  ldout(store->ctx(), 20) << "wrong auth for " << path << dendl;
  {
    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

delop_fail:
  // The delete operation itself failed.
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find entry " << path << dendl;
    num_unfound++;
  } else {
    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;
}
5862
5863 bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
5864 {
5865 ldout(store->ctx(), 20) << "in delete_chunk" << dendl;
5866 for (auto path : paths) {
5867 ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl;
5868 delete_single(path);
5869 }
5870
5871 return true;
5872 }
5873
int RGWBulkDelete::verify_permission()
{
  // Authorization is deferred: each path in the request is checked
  // individually by Deleter::verify_permission() during execute().
  return 0;
}
5878
void RGWBulkDelete::pre_exec()
{
  // Standard per-op prelude shared by bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5883
5884 void RGWBulkDelete::execute()
5885 {
5886 deleter = std::unique_ptr<Deleter>(new Deleter(store, s));
5887
5888 bool is_truncated = false;
5889 do {
5890 list<RGWBulkDelete::acct_path_t> items;
5891
5892 int ret = get_data(items, &is_truncated);
5893 if (ret < 0) {
5894 return;
5895 }
5896
5897 ret = deleter->delete_chunk(items);
5898 } while (!op_ret && is_truncated);
5899
5900 return;
5901 }
5902
5903
// Out-of-class definition for the static constexpr member declared in the
// header (required for ODR-use before C++17 inline variables).
constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
5905
int RGWBulkUploadOp::verify_permission()
{
  // Bulk (TAR archive) upload: only account-level checks happen here;
  // per-entry bucket/object permissions are verified while the archive
  // is being processed.
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (! verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  // Bucket creation is confined to the authenticated user's tenant.
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
                      << " (user_id.tenant=" << s->user->user_id.tenant
                      << " requested=" << s->bucket_tenant << ")"
                      << dendl;
    return -EACCES;
  }

  // A negative max_buckets forbids the user from owning any buckets.
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  return 0;
}
5930
void RGWBulkUploadOp::pre_exec()
{
  // Standard per-op prelude shared by bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5935
boost::optional<std::pair<std::string, rgw_obj_key>>
RGWBulkUploadOp::parse_path(const boost::string_ref& path)
{
  // Split an archive entry path into <bucket name, object key>.
  // Returns none for a path consisting solely of slashes (or empty).
  /* We need to skip all slashes at the beginning in order to preserve
   * compliance with Swift. */
  const size_t start_pos = path.find_first_not_of('/');

  if (boost::string_ref::npos != start_pos) {
    /* Separator is the first slash after the leading ones. */
    const size_t sep_pos = path.substr(start_pos).find('/');

    if (boost::string_ref::npos != sep_pos) {
      /* FIX: sep_pos is an offset within the trimmed substring, i.e.
       * relative to start_pos. The previous code mixed relative and
       * absolute offsets — substr(start_pos, sep_pos - start_pos) and
       * substr(sep_pos + 1) — which truncated the bucket name and
       * mis-sliced the object name whenever the path carried leading
       * slashes. */
      const auto bucket_name = path.substr(start_pos, sep_pos);
      const auto obj_name = path.substr(start_pos + sep_pos + 1);

      return std::make_pair(bucket_name.to_string(),
                            rgw_obj_key(obj_name.to_string()));
    } else {
      /* It's guaranteed here that bucket name is at least one character
       * long and is different than slash. */
      return std::make_pair(path.substr(start_pos).to_string(),
                            rgw_obj_key());
    }
  }

  return none;
}
5963
5964 std::pair<std::string, std::string>
5965 RGWBulkUploadOp::handle_upload_path(struct req_state *s)
5966 {
5967 std::string bucket_path, file_prefix;
5968 if (! s->init_state.url_bucket.empty()) {
5969 file_prefix = bucket_path = s->init_state.url_bucket + "/";
5970 if (! s->object.empty()) {
5971 std::string& object_name = s->object.name;
5972
5973 /* As rgw_obj_key::empty() already verified emptiness of s->object.name,
5974 * we can safely examine its last element. */
5975 if (object_name.back() == '/') {
5976 file_prefix.append(object_name);
5977 } else {
5978 file_prefix.append(object_name).append("/");
5979 }
5980 }
5981 }
5982 return std::make_pair(bucket_path, file_prefix);
5983 }
5984
int RGWBulkUploadOp::handle_dir_verify_permission()
{
  // Enforce the per-user bucket count limit before creating a container.
  // Only positive limits are checked here; the negative case was already
  // rejected in verify_permission().
  if (s->user->max_buckets > 0) {
    RGWUserBuckets buckets;
    std::string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, std::string(), s->user->max_buckets,
                                   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
6005
// Rewrite the request URIs so that a forwarded (multisite) request carries
// the bucket name, which is absent from account-level bulk-upload URLs.
static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
{
  /* the request of container or object level will contain bucket name.
   * only at account level need to append the bucket name */
  // NOTE(review): this is a plain substring test — a bucket name that
  // happens to appear elsewhere in script_uri would also suppress the
  // append; confirm this is acceptable for forwarded requests.
  if (info.script_uri.find(bucket_name) != std::string::npos) {
    return;
  }

  ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
  info.script_uri.append("/").append(bucket_name);
  info.request_uri_aws4 = info.request_uri = info.script_uri;
  info.effective_uri = "/" + bucket_name;
}
6019
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
{
  // Create (or validate) the bucket named by a directory entry of the
  // uploaded TAR archive, following Swift PUT-container semantics.
  ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl;

  op_ret = handle_dir_verify_permission();
  if (op_ret < 0) {
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object_junk;
  std::tie(bucket_name, object_junk) = *parse_path(path);

  rgw_raw_obj obj(store->get_zone_params().domain_root,
                  rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
                                  binfo, NULL, &battrs);
  if (op_ret < 0 && op_ret != -ENOENT) {
    return op_ret;
  }
  const bool bucket_exists = (op_ret != -ENOENT);

  if (bucket_exists) {
    // An already-existing bucket is only acceptable when owned by the
    // requester; otherwise report a name conflict.
    RGWAccessControlPolicy old_policy(s->cct);
    int r = get_bucket_policy_from_attr(s->cct, store, binfo,
                                        battrs, &old_policy);
    if (r >= 0)  {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return op_ret;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket = nullptr;
  uint32_t *pmaster_num_shards = nullptr;
  real_time creation_time;
  obj_version objv, ep_objv, *pobjv = nullptr;

  if (! store->is_meta_master()) {
    // Multisite: bucket creation must be performed by the metadata
    // master; forward the request and adopt the master's bucket identity,
    // version and creation time.
    JSONParser jp;
    ceph::bufferlist in_data;
    req_info info = s->info;
    forward_req_info(s->cct, info, bucket_name);
    op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
    if (op_ret < 0) {
      return op_ret;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);

    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver="
                      << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time
                      << dendl;

    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = nullptr;
    pmaster_num_shards = nullptr;
  }


  std::string placement_rule;
  if (bucket_exists) {
    // Re-resolve the placement that would be selected now and require it
    // to match the existing bucket's rule.
    std::string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user),
                                            store->get_zonegroup().get_id(),
                                            placement_rule,
                                            &selected_placement_rule,
                                            nullptr);
    // NOTE(review): op_ret from select_bucket_placement() is not checked
    // before comparing selected_placement_rule — confirm the comparison
    // is meaningful on failure.
    if (selected_placement_rule != binfo.placement_rule) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl;
      return op_ret;
    }
  }

  /* Create metadata: ACLs. */
  std::map<std::string, ceph::bufferlist> attrs;
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;

  rgw_bucket bucket;
  bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  bucket.name = bucket_name;


  RGWBucketInfo out_info;
  op_ret = store->create_bucket(*(s->user),
                                bucket,
                                store->get_zonegroup().get_id(),
                                placement_rule, binfo.swift_ver_location,
                                pquota_info, attrs,
                                out_info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret
                    << ", bucket=" << bucket << dendl;

  if (op_ret && op_ret != -EEXIST) {
    return op_ret;
  }

  const bool existed = (op_ret == -EEXIST);
  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (out_info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl;
      return op_ret;
    }
    bucket = out_info.bucket;
  }

  // Link the bucket to its owner; on a fresh-create failure, roll the
  // link back (never unlink a pre-existing bucket).
  op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
                           out_info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id,
                               bucket.tenant, bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret="
                       << op_ret << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    ldout(s->cct, 20) << "bulk upload: containers already exists"
                      << dendl;
    op_ret = -ERR_BUCKET_EXISTS;
  }

  return op_ret;
}
6179
6180
bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
						    const rgw_obj& obj,
						    std::map<std::string, ceph::bufferlist>& battrs,
						    ACLOwner& bucket_owner /* out */)
{
  // Authorize creation of one object from the archive: the bucket's IAM
  // policy (when present) decides Allow/Deny; a Pass verdict falls back
  // to the bucket ACL check below.
  RGWAccessControlPolicy bacl(store->ctx());
  op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket"
                      << dendl;
    return false;
  }

  auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);

  bucket_owner = bacl.get_owner();
  if (policy) {
    auto e = policy->eval(s->env, *s->auth.identity,
			  rgw::IAM::s3PutObject, obj);
    if (e == Effect::Allow) {
      return true;
    } else if (e == Effect::Deny) {
      return false;
    }
  }

  // No definitive policy verdict: require WRITE via ACLs.
  return verify_bucket_permission_no_policy(s, s->user_acl.get(),
					    &bacl, RGW_PERM_WRITE);
}
6210
int RGWBulkUploadOp::handle_file(const boost::string_ref path,
				 const size_t size,
				 AlignedStreamGetter& body)
{
  // Store one regular file from the TAR archive as an object in the
  // bucket named by the entry's leading path component. `size` is the
  // size declared in the TAR header; `body` yields the entry's payload.
  ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size
		    << dendl;

  RGWPutObjDataProcessor *filter = nullptr;
  boost::optional<RGWPutObj_Compress> compressor;

  if (size > static_cast<const size_t>(s->cct->_conf->rgw_max_put_size)) {
    op_ret = -ERR_TOO_LARGE;
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object;
  std::tie(bucket_name, object) = *parse_path(path);

  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  ACLOwner bowner;
  op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                  bucket_name, binfo, nullptr, &battrs);
  if (op_ret == -ENOENT) {
    // The target bucket is missing; permission check below will reject.
    ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name
		      << dendl;
  } else if (op_ret < 0) {
    return op_ret;
  }

  if (! handle_file_verify_permission(binfo,
				      rgw_obj(binfo.bucket, object),
				      battrs, bowner)) {
    ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl;
    op_ret = -EACCES;
    return op_ret;
  }

  // Pre-flight quota check using the declared size.
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
			      user_quota, bucket_quota, size);
  if (op_ret < 0) {
    return op_ret;
  }

  // NOTE(review): this consults s->bucket_info/s->bucket rather than the
  // per-entry binfo — confirm which bucket's shard limit is intended.
  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  RGWPutObjProcessor_Atomic processor(obj_ctx,
                                      binfo,
                                      binfo.bucket,
                                      object.name,
                                      /* part size */
                                      s->cct->_conf->rgw_obj_stripe_size,
                                      s->req_id,
                                      binfo.versioning_enabled());

  /* No filters by default. */
  filter = &processor;

  op_ret = processor.prepare(store, nullptr);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret="
                      << op_ret << dendl;
    return op_ret;
  }

  // Optionally chain a compression filter, per the placement's zone
  // configuration. A missing plugin is logged but not fatal.
  const auto& compression_type = store->get_zone_params().get_compression_type(
      binfo.placement_rule);
  CompressorRef plugin;
  if (compression_type != "none") {
    plugin = Compressor::create(s->cct, compression_type);
    if (! plugin) {
      ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type "
                       << compression_type << dendl;
    } else {
      compressor.emplace(s->cct, plugin, filter);
      filter = &*compressor;
    }
  }

  /* Upload file content. */
  ssize_t len = 0;
  size_t ofs = 0;
  MD5 hash;
  do {
    ceph::bufferlist data;
    len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);

    ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl;
    if (len < 0) {
      op_ret = len;
      return op_ret;
    } else if (len > 0) {
      hash.Update((const byte *)data.c_str(), data.length());
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
			  << op_ret << dendl;
        return op_ret;
      }

      ofs += len;
    }

  } while (len > 0);

  if (ofs != size) {
    ldout(s->cct, 10) << "bulk upload: real file size different from declared"
                      << dendl;
    op_ret = -EINVAL;
    /* FIX: previously execution fell through here, so the -EINVAL was
     * immediately overwritten by the quota check below and a truncated or
     * oversized entry was silently accepted. Report the mismatch. */
    return op_ret;
  }

  // Re-check quotas against the actual uploaded size.
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
			      user_quota, bucket_quota, size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path
                      << dendl;
    return op_ret;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  hash.Final(m);
  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  /* Create metadata: ETAG. */
  std::map<std::string, ceph::bufferlist> attrs;
  std::string etag = calc_md5;
  ceph::bufferlist etag_bl;
  etag_bl.append(etag.c_str(), etag.size() + 1);
  attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));

  /* Create metadata: ACLs. */
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  /* Create metadata: compression info. */
  if (compressor && compressor->is_compressed()) {
    ceph::bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    // NOTE(review): orig_size is taken from s->obj_size — confirm it is
    // kept in sync per archive entry rather than reflecting the request.
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = std::move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
  }

  /* Complete the transaction. */
  op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs,
                              ceph::real_time() /* delete_at */);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret="
                      << op_ret << dendl;
  }

  return op_ret;
}
6381
void RGWBulkUploadOp::execute()
{
  // Swift bulk upload: read the request body as a TAR archive, creating a
  // bucket for each directory entry and an object for each regular file.
  ceph::bufferlist buffer(64 * 1024);

  ldout(s->cct, 20) << "bulk upload: start" << dendl;

  /* Create an instance of stream-abstracting class. Having this indirection
   * allows for easy introduction of decompressors like gzip and bzip2. */
  auto stream = create_stream();
  if (! stream) {
    return;
  }

  /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See:
   * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
  std::string bucket_path, file_prefix;
  std::tie(bucket_path, file_prefix) = handle_upload_path(s);

  auto status = rgw::tar::StatusIndicator::create();
  do {
    // TAR archives are a sequence of fixed-size blocks; read one header
    // block per iteration.
    op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
    if (op_ret < 0) {
      ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl;
      return;
    }

    /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
     * must be tracked to detect out end-of-archive. It occurs when both of
     * them are empty (zeroed). Tracing this particular inter-block dependency
     * is responsibility of the rgw::tar::StatusIndicator class. */
    boost::optional<rgw::tar::HeaderView> header;
    std::tie(status, header) = rgw::tar::interpret_block(status, buffer);

    if (! status.empty() && header) {
      /* This specific block isn't empty (entirely zeroed), so we can parse
       * it as a TAR header and dispatch. At the moment we do support only
       * regular files and directories. Everything else (symlinks, devices)
       * will be ignored but won't cease the whole upload. */
      switch (header->get_filetype()) {
        case rgw::tar::FileType::NORMAL_FILE: {
          ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl;

          boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
                            file_prefix + header->get_filename().to_string();
          // AlignedStreamGetter drains to the next block boundary on
          // destruction, keeping the stream aligned for the next header.
          auto body = AlignedStreamGetter(0, header->get_filesize(),
                                          rgw::tar::BLOCK_SIZE, *stream);
          op_ret = handle_file(filename,
                               header->get_filesize(),
                               body);
          if (! op_ret) {
            /* Only regular files counts. */
            num_created++;
          } else {
            failures.emplace_back(op_ret, filename.to_string());
          }
          break;
        }
        case rgw::tar::FileType::DIRECTORY: {
          ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl;

          boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
          op_ret = handle_dir(dirname);
          // An already-existing bucket is not counted as a failure.
          if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
            failures.emplace_back(op_ret, dirname.to_string());
          }
          break;
        }
        default: {
          /* Not recognized. Skip. */
          op_ret = 0;
          break;
        }
      }

      /* In case of any problems with sub-request authorization Swift simply
       * terminates whole upload immediately. */
      if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
                                     terminal_errors)) {
        ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret
                         << dendl;
        break;
      }
    } else {
      ldout(s->cct, 2) << "bulk upload: an empty block" << dendl;
      op_ret = 0;
    }

    buffer.clear();
  } while (! status.eof());

  return;
}
6474
6475 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
6476 {
6477 const size_t aligned_legnth = length + (-length % alignment);
6478 ceph::bufferlist junk;
6479
6480 DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
6481 }
6482
6483 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
6484 ceph::bufferlist& dst)
6485 {
6486 const size_t max_to_read = std::min(want, length - position);
6487 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
6488 if (len > 0) {
6489 position += len;
6490 }
6491 return len;
6492 }
6493
6494 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
6495 ceph::bufferlist& dst)
6496 {
6497 const auto len = DecoratedStreamGetter::get_exactly(want, dst);
6498 if (len > 0) {
6499 position += len;
6500 }
6501 return len;
6502 }
6503
6504 int RGWSetAttrs::verify_permission()
6505 {
6506 // This looks to be part of the RGW-NFS machinery and has no S3 or
6507 // Swift equivalent.
6508 bool perm;
6509 if (!s->object.empty()) {
6510 perm = verify_object_permission_no_policy(s, RGW_PERM_WRITE);
6511 } else {
6512 perm = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
6513 }
6514 if (!perm)
6515 return -EACCES;
6516
6517 return 0;
6518 }
6519
void RGWSetAttrs::pre_exec()
{
  // Standard per-op prelude shared by bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
6524
void RGWSetAttrs::execute()
{
  op_ret = get_params();
  if (op_ret < 0)
    return;

  rgw_obj obj(s->bucket, s->object);

  if (!s->object.empty()) {
    // Object-level: write the attrs onto the object.
    store->set_atomic(s->obj_ctx, obj);
    op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr);
  } else {
    // Bucket-level: merge the new attrs into the cached bucket attrs and
    // persist them with the bucket's version tracker.
    for (auto& iter : attrs) {
      s->bucket_attrs[iter.first] = std::move(iter.second);
    }
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs,
				  &s->bucket_info.objv_tracker);
  }
}
6544
void RGWGetObjLayout::pre_exec()
{
  // Standard per-op prelude shared by bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
6549
6550 void RGWGetObjLayout::execute()
6551 {
6552 rgw_obj obj(s->bucket, s->object);
6553 RGWRados::Object target(store,
6554 s->bucket_info,
6555 *static_cast<RGWObjectCtx *>(s->obj_ctx),
6556 rgw_obj(s->bucket, s->object));
6557 RGWRados::Object::Read stat_op(&target);
6558
6559 op_ret = stat_op.prepare();
6560 if (op_ret < 0) {
6561 return;
6562 }
6563
6564 head_obj = stat_op.state.head_obj;
6565
6566 op_ret = target.get_manifest(&manifest);
6567 }
6568
6569
int RGWConfigBucketMetaSearch::verify_permission()
{
  // Only the bucket owner may configure metadata search.
  if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
    return -EACCES;
  }

  return 0;
}
6578
// Standard per-op hook run before execute(); records the bucket/object
// of this request for logging/diagnostics.
void RGWConfigBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6583
6584 void RGWConfigBucketMetaSearch::execute()
6585 {
6586 op_ret = get_params();
6587 if (op_ret < 0) {
6588 ldout(s->cct, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
6589 return;
6590 }
6591
6592 s->bucket_info.mdsearch_config = mdsearch_config;
6593
6594 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6595 if (op_ret < 0) {
6596 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6597 return;
6598 }
6599 }
6600
6601 int RGWGetBucketMetaSearch::verify_permission()
6602 {
6603 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6604 return -EACCES;
6605 }
6606
6607 return 0;
6608 }
6609
// Standard per-op hook run before execute(); records the bucket/object
// of this request for logging/diagnostics.
void RGWGetBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6614
6615 int RGWDelBucketMetaSearch::verify_permission()
6616 {
6617 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6618 return -EACCES;
6619 }
6620
6621 return 0;
6622 }
6623
// Standard per-op hook run before execute(); records the bucket/object
// of this request for logging/diagnostics.
void RGWDelBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6628
6629 void RGWDelBucketMetaSearch::execute()
6630 {
6631 s->bucket_info.mdsearch_config.clear();
6632
6633 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6634 if (op_ret < 0) {
6635 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6636 return;
6637 }
6638 }
6639
6640
// Out-of-line destructor; nothing to release (store/s are not owned).
RGWHandler::~RGWHandler()
{
}
6644
// Bind this handler to the store and the per-request state.
// NOTE(review): `cio` is accepted but unused here — presumably consumed
// by subclass overrides; confirm before removing the parameter.
// Always returns 0.
int RGWHandler::init(RGWRados *_store,
                     struct req_state *_s,
                     rgw::io::BasicClient *cio)
{
  store = _store;
  s = _s;

  return 0;
}
6654
6655 int RGWHandler::do_init_permissions()
6656 {
6657 int ret = rgw_build_bucket_policies(store, s);
6658 s->env = rgw_build_iam_environment(store, s);
6659
6660 if (ret < 0) {
6661 ldout(s->cct, 10) << "read_permissions on " << s->bucket << " ret=" << ret << dendl;
6662 if (ret == -ENODATA)
6663 ret = -EACCES;
6664 }
6665
6666 return ret;
6667 }
6668
6669 int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
6670 {
6671 if (only_bucket) {
6672 /* already read bucket info */
6673 return 0;
6674 }
6675 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
6676
6677 if (ret < 0) {
6678 ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":"
6679 << s->object << " only_bucket=" << only_bucket
6680 << " ret=" << ret << dendl;
6681 if (ret == -ENODATA)
6682 ret = -EACCES;
6683 }
6684
6685 return ret;
6686 }
6687
// Delegate error translation to the protocol (S3/Swift) handler so the
// dialect can map err_no to its own wire format / error body.
int RGWOp::error_handler(int err_no, string *error_content) {
  return dialect_handler->error_handler(err_no, error_content);
}
6691
// Base-class fallback: pass the error through unchanged.
int RGWHandler::error_handler(int err_no, string *error_content) {
  // This is the do-nothing error handler
  return err_no;
}
6696
6697
// Emit the HTTP status (error status only when op_ret is set) and
// headers; PutBucketPolicy has no response body.
void RGWPutBucketPolicy::send_response()
{
  if (op_ret) {
    set_req_state_err(s, op_ret);
  }
  dump_errno(s);
  end_header(s);
}
6706
6707 int RGWPutBucketPolicy::verify_permission()
6708 {
6709 if (!verify_bucket_permission(s, rgw::IAM::s3PutBucketPolicy)) {
6710 return -EACCES;
6711 }
6712
6713 return 0;
6714 }
6715
// Read the whole request body (the policy JSON) into the malloc'd
// `data` buffer, capped at rgw_max_put_param_size bytes; `len` gets the
// size read.  Returns 0 or a negative error, also mirrored in op_ret.
int RGWPutBucketPolicy::get_params()
{
  const auto max_size = s->cct->_conf->rgw_max_put_param_size;
  // At some point when I have more time I want to make a version of
  // rgw_rest_read_all_input that doesn't use malloc.
  op_ret = rgw_rest_read_all_input(s, &data, &len, max_size, false);
  // And throws exceptions.
  return op_ret;
}
6725
6726 void RGWPutBucketPolicy::execute()
6727 {
6728 op_ret = get_params();
6729 if (op_ret < 0) {
6730 return;
6731 }
6732
6733 bufferlist in_data = bufferlist::static_from_mem(data, len);
6734
6735 if (!store->is_meta_master()) {
6736 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
6737 if (op_ret < 0) {
6738 ldout(s->cct, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
6739 return;
6740 }
6741 }
6742
6743 try {
6744 Policy p(s->cct, s->bucket_tenant, in_data);
6745 auto attrs = s->bucket_attrs;
6746 attrs[RGW_ATTR_IAM_POLICY].clear();
6747 attrs[RGW_ATTR_IAM_POLICY].append(p.text);
6748 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
6749 &s->bucket_info.objv_tracker);
6750 if (op_ret == -ECANCELED) {
6751 op_ret = 0; /* lost a race, but it's ok because policies are immutable */
6752 }
6753 } catch (rgw::IAM::PolicyParseException& e) {
6754 ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
6755 op_ret = -EINVAL;
6756 }
6757 }
6758
// Emit status/headers and the stored policy document as the JSON body.
void RGWGetBucketPolicy::send_response()
{
  if (op_ret) {
    set_req_state_err(s, op_ret);
  }
  dump_errno(s);
  end_header(s, this, "application/json");
  dump_body(s, policy);
}
6768
6769 int RGWGetBucketPolicy::verify_permission()
6770 {
6771 if (!verify_bucket_permission(s, rgw::IAM::s3GetBucketPolicy)) {
6772 return -EACCES;
6773 }
6774
6775 return 0;
6776 }
6777
6778 void RGWGetBucketPolicy::execute()
6779 {
6780 auto attrs = s->bucket_attrs;
6781 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
6782 if (aiter == attrs.end()) {
6783 ldout(s->cct, 0) << __func__ << " can't find bucket IAM POLICY attr"
6784 << " bucket_name = " << s->bucket_name << dendl;
6785 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6786 s->err.message = "The bucket policy does not exist";
6787 return;
6788 } else {
6789 policy = attrs[RGW_ATTR_IAM_POLICY];
6790
6791 if (policy.length() == 0) {
6792 ldout(s->cct, 10) << "The bucket policy does not exist, bucket: " << s->bucket_name << dendl;
6793 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6794 s->err.message = "The bucket policy does not exist";
6795 return;
6796 }
6797 }
6798 }
6799
// Emit the HTTP status (error status only when op_ret is set) and
// headers; DeleteBucketPolicy has no response body.
void RGWDeleteBucketPolicy::send_response()
{
  if (op_ret) {
    set_req_state_err(s, op_ret);
  }
  dump_errno(s);
  end_header(s);
}
6808
6809 int RGWDeleteBucketPolicy::verify_permission()
6810 {
6811 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucketPolicy)) {
6812 return -EACCES;
6813 }
6814
6815 return 0;
6816 }
6817
6818 void RGWDeleteBucketPolicy::execute()
6819 {
6820 auto attrs = s->bucket_attrs;
6821 attrs.erase(RGW_ATTR_IAM_POLICY);
6822 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
6823 &s->bucket_info.objv_tracker);
6824 if (op_ret == -ECANCELED) {
6825 op_ret = 0; /* lost a race, but it's ok because policies are immutable */
6826 }
6827 }