]> git.proxmox.com Git - ceph.git/blob - ceph/src/rgw/rgw_op.cc
update sources to v12.2.0
[ceph.git] / ceph / src / rgw / rgw_op.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <system_error>
7 #include <unistd.h>
8
9 #include <sstream>
10
11 #include <boost/algorithm/string/predicate.hpp>
12 #include <boost/bind.hpp>
13 #include <boost/optional.hpp>
14 #include <boost/utility/in_place_factory.hpp>
15 #include <boost/utility/string_view.hpp>
16
17 #include "common/Clock.h"
18 #include "common/armor.h"
19 #include "common/backport14.h"
20 #include "common/errno.h"
21 #include "common/mime.h"
22 #include "common/utf8.h"
23 #include "common/ceph_json.h"
24
25 #include "rgw_rados.h"
26 #include "rgw_op.h"
27 #include "rgw_rest.h"
28 #include "rgw_acl.h"
29 #include "rgw_acl_s3.h"
30 #include "rgw_acl_swift.h"
31 #include "rgw_user.h"
32 #include "rgw_bucket.h"
33 #include "rgw_log.h"
34 #include "rgw_multi.h"
35 #include "rgw_multi_del.h"
36 #include "rgw_cors.h"
37 #include "rgw_cors_s3.h"
38 #include "rgw_rest_conn.h"
39 #include "rgw_rest_s3.h"
40 #include "rgw_tar.h"
41 #include "rgw_client_io.h"
42 #include "rgw_compression.h"
43 #include "rgw_role.h"
44 #include "rgw_tag_s3.h"
45 #include "cls/lock/cls_lock_client.h"
46 #include "cls/rgw/cls_rgw_client.h"
47
48
49 #include "include/assert.h"
50
51 #include "compressor/Compressor.h"
52
53 #include "rgw_acl_swift.h"
54
55 #define dout_context g_ceph_context
56 #define dout_subsys ceph_subsys_rgw
57
58 using namespace std;
59 using namespace librados;
60 using ceph::crypto::MD5;
61 using boost::optional;
62 using boost::none;
63
64 using rgw::IAM::ARN;
65 using rgw::IAM::Effect;
66 using rgw::IAM::Policy;
67
68 using rgw::IAM::Policy;
69
70 static string mp_ns = RGW_OBJ_NS_MULTIPART;
71 static string shadow_ns = RGW_OBJ_NS_SHADOW;
72
73 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
74 static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
75 bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
76
77 static MultipartMetaFilter mp_filter;
78
/*
 * Parse an HTTP Range header value of the form "bytes=<ofs>-<end>".
 *
 * Returns 0 with *partial_content == false when the unit is not "bytes"
 * (the header must then be ignored per RFC 7233), 0 with the range filled
 * in on success, and -ERANGE when a byte-range was recognized but is
 * invalid (caller answers 416).  A suffix-byte-range-spec ("bytes=-N",
 * i.e. the last N bytes) is encoded as ofs = -N, end = -1; an open-ended
 * range ("bytes=N-") as end = -1.
 */
static int parse_range(const char *range, off_t& ofs, off_t& end, bool *partial_content)
{
  std::string s(range);

  *partial_content = false;

  size_t pos = s.find("bytes=");
  if (pos == std::string::npos) {
    /* no literal "bytes=" substring; accept the laxer form
     * "<spaces>bytes<spaces>=<spec>" */
    pos = 0;
    while (isspace((unsigned char)s[pos]))
      pos++;
    size_t tok_end = pos; /* renamed: the old local `int end` shadowed the `end` parameter */
    while (isalpha((unsigned char)s[tok_end]))
      tok_end++;
    /* the unit token must be exactly "bytes"; the old code compared from
     * the start of the string (wrong after leading spaces) and with a
     * token-derived length (so "" or any prefix of "bytes" matched too) */
    if (tok_end - pos != 5 ||
        strncasecmp(s.c_str() + pos, "bytes", 5) != 0)
      return 0;
    while (isspace((unsigned char)s[tok_end]))
      tok_end++;
    if (s[tok_end] != '=')
      return 0;
    s = s.substr(tok_end + 1);
  } else {
    s = s.substr(pos + 6); /* strlen("bytes=") */
  }

  pos = s.find('-');
  if (pos == std::string::npos)
    return -ERANGE;

  *partial_content = true;

  const std::string ofs_str = s.substr(0, pos);
  const std::string end_str = s.substr(pos + 1);
  if (ofs_str.empty() && end_str.empty()) {
    /* "bytes=-": previously accepted with an uninitialized offset */
    return -ERANGE;
  }

  if (!end_str.empty()) {
    end = atoll(end_str.c_str());
    if (end < 0)
      return -ERANGE;
  } else {
    /* open-ended "bytes=N-"; don't rely on the caller pre-setting end */
    end = -1;
  }

  if (!ofs_str.empty()) {
    ofs = atoll(ofs_str.c_str());
  } else { // RFC 2616 suffix-byte-range-spec: the last `end` bytes
    ofs = -end;
    end = -1;
  }

  if (end >= 0 && end < ofs)
    return -ERANGE;

  return 0;
}
134
135 static int decode_policy(CephContext *cct,
136 bufferlist& bl,
137 RGWAccessControlPolicy *policy)
138 {
139 bufferlist::iterator iter = bl.begin();
140 try {
141 policy->decode(iter);
142 } catch (buffer::error& err) {
143 ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
144 return -EIO;
145 }
146 if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
147 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
148 ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
149 s3policy->to_xml(*_dout);
150 *_dout << dendl;
151 }
152 return 0;
153 }
154
155
156 static int get_user_policy_from_attr(CephContext * const cct,
157 RGWRados * const store,
158 map<string, bufferlist>& attrs,
159 RGWAccessControlPolicy& policy /* out */)
160 {
161 auto aiter = attrs.find(RGW_ATTR_ACL);
162 if (aiter != attrs.end()) {
163 int ret = decode_policy(cct, aiter->second, &policy);
164 if (ret < 0) {
165 return ret;
166 }
167 } else {
168 return -ENOENT;
169 }
170
171 return 0;
172 }
173
174 static int get_bucket_instance_policy_from_attr(CephContext *cct,
175 RGWRados *store,
176 RGWBucketInfo& bucket_info,
177 map<string, bufferlist>& bucket_attrs,
178 RGWAccessControlPolicy *policy,
179 rgw_raw_obj& obj)
180 {
181 map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
182
183 if (aiter != bucket_attrs.end()) {
184 int ret = decode_policy(cct, aiter->second, policy);
185 if (ret < 0)
186 return ret;
187 } else {
188 ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
189 RGWUserInfo uinfo;
190 /* object exists, but policy is broken */
191 int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
192 if (r < 0)
193 return r;
194
195 policy->create_default(bucket_info.owner, uinfo.display_name);
196 }
197 return 0;
198 }
199
/* Read the ACL xattr of @obj into @policy.
 *
 * -ENODATA (object exists but carries no ACL attr) is repaired by
 * synthesizing a default ACL owned by the bucket owner.  Any other read
 * error — notably -ENOENT when the object itself is missing — is
 * propagated unchanged to the caller via the final `return ret`. */
static int get_obj_policy_from_attr(CephContext *cct,
				    RGWRados *store,
				    RGWObjectCtx& obj_ctx,
				    RGWBucketInfo& bucket_info,
				    map<string, bufferlist>& bucket_attrs,
				    RGWAccessControlPolicy *policy,
				    rgw_obj& obj)
{
  bufferlist bl;
  int ret = 0;

  RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
  RGWRados::Object::Read rop(&op_target);

  ret = rop.get_attr(RGW_ATTR_ACL, bl);
  if (ret >= 0) {
    ret = decode_policy(cct, bl, policy);
    if (ret < 0)
      return ret;
  } else if (ret == -ENODATA) {
    /* object exists, but policy is broken */
    ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
    RGWUserInfo uinfo;
    /* need the owner's display name for the synthesized default ACL */
    ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
    if (ret < 0)
      return ret;

    policy->create_default(bucket_info.owner, uinfo.display_name);
  }
  /* ret may still hold a negative error from get_attr (e.g. -ENOENT) */
  return ret;
}
231
232
233 /**
234 * Get the AccessControlPolicy for an object off of disk.
235 * policy: must point to a valid RGWACL, and will be filled upon return.
236 * bucket: name of the bucket containing the object.
237 * object: name of the object to get the ACL for.
238 * Returns: 0 on success, -ERR# otherwise.
239 */
240 static int get_bucket_policy_from_attr(CephContext *cct,
241 RGWRados *store,
242 RGWBucketInfo& bucket_info,
243 map<string, bufferlist>& bucket_attrs,
244 RGWAccessControlPolicy *policy)
245 {
246 rgw_raw_obj instance_obj;
247 store->get_bucket_instance_obj(bucket_info.bucket, instance_obj);
248 return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs,
249 policy, instance_obj);
250 }
251
252 static optional<Policy> get_iam_policy_from_attr(CephContext* cct,
253 RGWRados* store,
254 map<string, bufferlist>& attrs,
255 const string& tenant) {
256 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
257 if (i != attrs.end()) {
258 return Policy(cct, tenant, i->second);
259 } else {
260 return none;
261 }
262 }
263
264 static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
265 {
266 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
267 RGWRados::Object::Read read_op(&op_target);
268
269 read_op.params.attrs = &attrs;
270
271 return read_op.prepare();
272 }
273
274 static int modify_obj_attr(RGWRados *store, struct req_state *s, rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
275 {
276 map<string, bufferlist> attrs;
277 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
278 RGWRados::Object::Read read_op(&op_target);
279
280 read_op.params.attrs = &attrs;
281
282 int r = read_op.prepare();
283 if (r < 0) {
284 return r;
285 }
286 store->set_atomic(s->obj_ctx, read_op.state.obj);
287 attrs[attr_name] = attr_val;
288 return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL);
289 }
290
291 static int get_system_obj_attrs(RGWRados *store, struct req_state *s, rgw_raw_obj& obj, map<string, bufferlist>& attrs,
292 uint64_t *obj_size, RGWObjVersionTracker *objv_tracker)
293 {
294 RGWRados::SystemObject src(store, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
295 RGWRados::SystemObject::Read rop(&src);
296
297 rop.stat_params.attrs = &attrs;
298 rop.stat_params.obj_size = obj_size;
299
300 int ret = rop.stat(objv_tracker);
301 return ret;
302 }
303
304 static int read_bucket_policy(RGWRados *store,
305 struct req_state *s,
306 RGWBucketInfo& bucket_info,
307 map<string, bufferlist>& bucket_attrs,
308 RGWAccessControlPolicy *policy,
309 rgw_bucket& bucket)
310 {
311 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
312 ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
313 return -ERR_USER_SUSPENDED;
314 }
315
316 if (bucket.name.empty()) {
317 return 0;
318 }
319
320 int ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
321 if (ret == -ENOENT) {
322 ret = -ERR_NO_SUCH_BUCKET;
323 }
324
325 return ret;
326 }
327
/* Load the ACL (into @acl) and bucket IAM policy (into @policy) governing
 * access to @object.
 *
 * When the request names an in-progress multipart upload (an "uploadId"
 * query arg is present) the ACL is read from the upload's meta object in
 * the multipart namespace instead of the target object itself.
 *
 * When the object does not exist, the bucket ACL decides whether the
 * caller sees -ENOENT or -EACCES, so object existence is not leaked to
 * principals without bucket read access. */
static int read_obj_policy(RGWRados *store,
                           struct req_state *s,
                           RGWBucketInfo& bucket_info,
                           map<string, bufferlist>& bucket_attrs,
                           RGWAccessControlPolicy* acl,
                           optional<Policy>& policy,
                           rgw_bucket& bucket,
                           rgw_obj_key& object)
{
  string upload_id;
  upload_id = s->info.args.get("uploadId");
  rgw_obj obj;

  if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
    ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
    return -ERR_USER_SUSPENDED;
  }

  if (!upload_id.empty()) {
    /* multipart upload: target the upload meta object in the mp namespace */
    RGWMPObj mp(object.name, upload_id);
    string oid = mp.get_meta();
    obj.init_ns(bucket, oid, mp_ns);
    obj.set_in_extra_data(true);
  } else {
    obj = rgw_obj(bucket, object);
  }
  policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
                                     bucket_info, bucket_attrs, acl, obj);
  if (ret == -ENOENT) {
    /* object does not exist checking the bucket's ACL to make sure
       that we send a proper error code */
    RGWAccessControlPolicy bucket_policy(s->cct);
    ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
    if (ret < 0) {
      return ret;
    }

    /* bucket owner, admins, and principals with bucket READ learn the
     * object is missing; everyone else just gets access denied */
    const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
    if (bucket_owner.compare(s->user->user_id) != 0 &&
        ! s->auth.identity->is_admin_of(bucket_owner) &&
        ! bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                          RGW_PERM_READ)) {
      ret = -EACCES;
    } else {
      ret = -ENOENT;
    }
  }

  return ret;
}
382
383 /**
384 * Get the AccessControlPolicy for an user, bucket or object off of disk.
385 * s: The req_state to draw information from.
386 * only_bucket: If true, reads the user and bucket ACLs rather than the object ACL.
387 * Returns: 0 on success, -ERR# otherwise.
388 */
389 int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
390 {
391 int ret = 0;
392 rgw_obj_key obj;
393 RGWUserInfo bucket_owner_info;
394 RGWObjectCtx obj_ctx(store);
395
396 string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
397 if (!bi.empty()) {
398 ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id);
399 if (ret < 0) {
400 return ret;
401 }
402 }
403
404 if(s->dialect.compare("s3") == 0) {
405 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_S3>(s->cct);
406 } else if(s->dialect.compare("swift") == 0) {
407 /* We aren't allocating the account policy for those operations using
408 * the Swift's infrastructure that don't really need req_state::user.
409 * Typical example here is the implementation of /info. */
410 if (!s->user->user_id.empty()) {
411 s->user_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
412 }
413 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
414 } else {
415 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
416 }
417
418 /* check if copy source is within the current domain */
419 if (!s->src_bucket_name.empty()) {
420 RGWBucketInfo source_info;
421
422 if (s->bucket_instance_id.empty()) {
423 ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL);
424 } else {
425 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL);
426 }
427 if (ret == 0) {
428 string& zonegroup = source_info.zonegroup;
429 s->local_source = store->get_zonegroup().equals(zonegroup);
430 }
431 }
432
433 struct {
434 rgw_user uid;
435 std::string display_name;
436 } acct_acl_user = {
437 s->user->user_id,
438 s->user->display_name,
439 };
440
441 if (!s->bucket_name.empty()) {
442 s->bucket_exists = true;
443 if (s->bucket_instance_id.empty()) {
444 ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, s->bucket_info, NULL, &s->bucket_attrs);
445 } else {
446 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, s->bucket_info, NULL, &s->bucket_attrs);
447 }
448 if (ret < 0) {
449 if (ret != -ENOENT) {
450 string bucket_log;
451 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
452 ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl;
453 return ret;
454 }
455 s->bucket_exists = false;
456 }
457 s->bucket = s->bucket_info.bucket;
458
459 if (s->bucket_exists) {
460 ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs,
461 s->bucket_acl.get(), s->bucket);
462 acct_acl_user = {
463 s->bucket_info.owner,
464 s->bucket_acl->get_owner().get_display_name(),
465 };
466 } else {
467 s->bucket_acl->create_default(s->user->user_id, s->user->display_name);
468 ret = -ERR_NO_SUCH_BUCKET;
469 }
470
471 s->bucket_owner = s->bucket_acl->get_owner();
472
473 RGWZoneGroup zonegroup;
474 int r = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
475 if (!r) {
476 if (!zonegroup.endpoints.empty()) {
477 s->zonegroup_endpoint = zonegroup.endpoints.front();
478 } else {
479 // use zonegroup's master zone endpoints
480 auto z = zonegroup.zones.find(zonegroup.master_zone);
481 if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
482 s->zonegroup_endpoint = z->second.endpoints.front();
483 }
484 }
485 s->zonegroup_name = zonegroup.get_name();
486 }
487 if (r < 0 && ret == 0) {
488 ret = r;
489 }
490
491 if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) {
492 ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl;
493 /* we now need to make sure that the operation actually requires copy source, that is
494 * it's a copy operation
495 */
496 if (store->get_zonegroup().is_master_zonegroup() && s->system_request) {
497 /*If this is the master, don't redirect*/
498 } else if (!s->local_source ||
499 (s->op != OP_PUT && s->op != OP_COPY) ||
500 s->object.empty()) {
501 return -ERR_PERMANENT_REDIRECT;
502 }
503 }
504 }
505
506 /* handle user ACL only for those APIs which support it */
507 if (s->user_acl) {
508 map<string, bufferlist> uattrs;
509
510 ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs);
511 if (!ret) {
512 ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
513 }
514 if (-ENOENT == ret) {
515 /* In already existing clusters users won't have ACL. In such case
516 * assuming that only account owner has the rights seems to be
517 * reasonable. That allows to have only one verification logic.
518 * NOTE: there is small compatibility kludge for global, empty tenant:
519 * 1. if we try to reach an existing bucket, its owner is considered
520 * as account owner.
521 * 2. otherwise account owner is identity stored in s->user->user_id. */
522 s->user_acl->create_default(acct_acl_user.uid,
523 acct_acl_user.display_name);
524 ret = 0;
525 } else {
526 ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id="
527 << s->user->user_id
528 << ", ret="
529 << ret
530 << ")" << dendl;
531 return ret;
532 }
533 }
534
535 try {
536 s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
537 s->bucket_tenant);
538 } catch (const std::exception& e) {
539 // Really this is a can't happen condition. We parse the policy
540 // when it's given to us, so perhaps we should abort or otherwise
541 // raise bloody murder.
542 lderr(s->cct) << "Error reading IAM Policy: " << e.what() << dendl;
543 ret = -EACCES;
544 }
545
546 return ret;
547 }
548
549 /**
550 * Get the AccessControlPolicy for a bucket or object off of disk.
551 * s: The req_state to draw information from.
552 * only_bucket: If true, reads the bucket ACL rather than the object ACL.
553 * Returns: 0 on success, -ERR# otherwise.
554 */
555 int rgw_build_object_policies(RGWRados *store, struct req_state *s,
556 bool prefetch_data)
557 {
558 int ret = 0;
559
560 if (!s->object.empty()) {
561 if (!s->bucket_exists) {
562 return -ERR_NO_SUCH_BUCKET;
563 }
564 s->object_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
565
566 rgw_obj obj(s->bucket, s->object);
567
568 store->set_atomic(s->obj_ctx, obj);
569 if (prefetch_data) {
570 store->set_prefetch_data(s->obj_ctx, obj);
571 }
572 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
573 s->object_acl.get(), s->iam_policy, s->bucket,
574 s->object);
575 }
576
577 return ret;
578 }
579
580 rgw::IAM::Environment rgw_build_iam_environment(RGWRados* store,
581 struct req_state* s)
582 {
583 rgw::IAM::Environment e;
584 const auto& m = s->info.env->get_map();
585 auto t = ceph::real_clock::now();
586 e.emplace(std::piecewise_construct,
587 std::forward_as_tuple("aws:CurrentTime"),
588 std::forward_as_tuple(std::to_string(
589 ceph::real_clock::to_time_t(t))));
590 e.emplace(std::piecewise_construct,
591 std::forward_as_tuple("aws:EpochTime"),
592 std::forward_as_tuple(ceph::to_iso_8601(t)));
593 // TODO: This is fine for now, but once we have STS we'll need to
594 // look and see. Also this won't work with the IdentityApplier
595 // model, since we need to know the actual credential.
596 e.emplace(std::piecewise_construct,
597 std::forward_as_tuple("aws:PrincipalType"),
598 std::forward_as_tuple("User"));
599
600 auto i = m.find("HTTP_REFERER");
601 if (i != m.end()) {
602 e.emplace(std::piecewise_construct,
603 std::forward_as_tuple("aws:Referer"),
604 std::forward_as_tuple(i->second));
605 }
606
607 // These seem to be the semantics, judging from rest_rgw_s3.cc
608 i = m.find("SERVER_PORT_SECURE");
609 if (i != m.end()) {
610 e.emplace(std::piecewise_construct,
611 std::forward_as_tuple("aws:SecureTransport"),
612 std::forward_as_tuple("true"));
613 }
614
615 i = m.find("HTTP_HOST");
616 if (i != m.end()) {
617 e.emplace(std::piecewise_construct,
618 std::forward_as_tuple("aws:SourceIp"),
619 std::forward_as_tuple(i->second));
620 }
621
622 i = m.find("HTTP_USER_AGENT"); {
623 if (i != m.end())
624 e.emplace(std::piecewise_construct,
625 std::forward_as_tuple("aws:UserAgent"),
626 std::forward_as_tuple(i->second));
627 }
628
629 if (s->user) {
630 // What to do about aws::userid? One can have multiple access
631 // keys so that isn't really suitable. Do we have a durable
632 // identifier that can persist through name changes?
633 e.emplace(std::piecewise_construct,
634 std::forward_as_tuple("aws:username"),
635 std::forward_as_tuple(s->user->user_id.id));
636 }
637 return e;
638 }
639
640 void rgw_bucket_object_pre_exec(struct req_state *s)
641 {
642 if (s->expect_cont)
643 dump_continue(s);
644
645 dump_bucket_from_state(s);
646 }
647
648 int RGWGetObj::verify_permission()
649 {
650 obj = rgw_obj(s->bucket, s->object);
651 store->set_atomic(s->obj_ctx, obj);
652 if (get_data) {
653 store->set_prefetch_data(s->obj_ctx, obj);
654 }
655
656 if (torrent.get_flag()) {
657 if (obj.key.instance.empty()) {
658 action = rgw::IAM::s3GetObjectTorrent;
659 } else {
660 action = rgw::IAM::s3GetObjectVersionTorrent;
661 }
662 } else {
663 if (obj.key.instance.empty()) {
664 action = rgw::IAM::s3GetObject;
665 } else {
666 action = rgw::IAM::s3GetObjectVersion;
667 }
668 }
669
670 if (!verify_object_permission(s, action)) {
671 return -EACCES;
672 }
673
674 return 0;
675 }
676
677
678 int RGWOp::verify_op_mask()
679 {
680 uint32_t required_mask = op_mask();
681
682 ldout(s->cct, 20) << "required_mask= " << required_mask
683 << " user.op_mask=" << s->user->op_mask << dendl;
684
685 if ((s->user->op_mask & required_mask) != required_mask) {
686 return -EPERM;
687 }
688
689 if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) {
690 ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl;
691 return -EPERM;
692 }
693
694 return 0;
695 }
696
697 int RGWGetObjTags::verify_permission()
698 {
699 if (!verify_object_permission(s,
700 s->object.instance.empty() ?
701 rgw::IAM::s3GetObjectTagging:
702 rgw::IAM::s3GetObjectVersionTagging))
703 return -EACCES;
704
705 return 0;
706 }
707
void RGWGetObjTags::pre_exec()
{
  // 100-continue (if requested) plus bucket headers, before execute()
  rgw_bucket_object_pre_exec(s);
}
712
713 void RGWGetObjTags::execute()
714 {
715 rgw_obj obj;
716 map<string,bufferlist> attrs;
717
718 obj = rgw_obj(s->bucket, s->object);
719
720 store->set_atomic(s->obj_ctx, obj);
721
722 op_ret = get_obj_attrs(store, s, obj, attrs);
723 if (op_ret < 0) {
724 ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << obj
725 << " ret=" << op_ret << dendl;
726 return;
727 }
728
729 auto tags = attrs.find(RGW_ATTR_TAGS);
730 if(tags != attrs.end()){
731 has_tags = true;
732 tags_bl.append(tags->second);
733 }
734 send_response_data(tags_bl);
735 }
736
737 int RGWPutObjTags::verify_permission()
738 {
739 if (!verify_object_permission(s,
740 s->object.instance.empty() ?
741 rgw::IAM::s3PutObjectTagging:
742 rgw::IAM::s3PutObjectVersionTagging))
743 return -EACCES;
744 return 0;
745 }
746
747 void RGWPutObjTags::execute()
748 {
749 op_ret = get_params();
750 if (op_ret < 0)
751 return;
752
753 if (s->object.empty()){
754 op_ret= -EINVAL; // we only support tagging on existing objects
755 return;
756 }
757
758 rgw_obj obj;
759 obj = rgw_obj(s->bucket, s->object);
760 store->set_atomic(s->obj_ctx, obj);
761 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
762 if (op_ret == -ECANCELED){
763 op_ret = -ERR_TAG_CONFLICT;
764 }
765 }
766
void RGWDeleteObjTags::pre_exec()
{
  // 100-continue (if requested) plus bucket headers, before execute()
  rgw_bucket_object_pre_exec(s);
}
771
772
773 int RGWDeleteObjTags::verify_permission()
774 {
775 if (!s->object.empty()) {
776 if (!verify_object_permission(s,
777 s->object.instance.empty() ?
778 rgw::IAM::s3DeleteObjectTagging:
779 rgw::IAM::s3DeleteObjectVersionTagging))
780 return -EACCES;
781 }
782 return 0;
783 }
784
785 void RGWDeleteObjTags::execute()
786 {
787 if (s->object.empty())
788 return;
789
790 rgw_obj obj;
791 obj = rgw_obj(s->bucket, s->object);
792 store->set_atomic(s->obj_ctx, obj);
793 map <string, bufferlist> attrs;
794 map <string, bufferlist> rmattr;
795 bufferlist bl;
796 rmattr[RGW_ATTR_TAGS] = bl;
797 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr);
798 }
799
800 int RGWOp::do_aws4_auth_completion()
801 {
802 ldout(s->cct, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
803 if (s->auth.completer) {
804 if (!s->auth.completer->complete()) {
805 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
806 } else {
807 dout(10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
808 }
809
810 /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
811 * call passes, so we disable second one. This is old behaviour, sorry!
812 * Plan for tomorrow: seek and destroy. */
813 s->auth.completer = nullptr;
814 }
815
816 return 0;
817 }
818
819 int RGWOp::init_quota()
820 {
821 /* no quota enforcement for system requests */
822 if (s->system_request)
823 return 0;
824
825 /* init quota related stuff */
826 if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
827 return 0;
828 }
829
830 /* only interested in object related ops */
831 if (s->object.empty()) {
832 return 0;
833 }
834
835 RGWUserInfo owner_info;
836 RGWUserInfo *uinfo;
837
838 if (s->user->user_id == s->bucket_owner.get_id()) {
839 uinfo = s->user;
840 } else {
841 int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
842 if (r < 0)
843 return r;
844 uinfo = &owner_info;
845 }
846
847 if (s->bucket_info.quota.enabled) {
848 bucket_quota = s->bucket_info.quota;
849 } else if (uinfo->bucket_quota.enabled) {
850 bucket_quota = uinfo->bucket_quota;
851 } else {
852 bucket_quota = store->get_bucket_quota();
853 }
854
855 if (uinfo->user_quota.enabled) {
856 user_quota = uinfo->user_quota;
857 } else {
858 user_quota = store->get_user_quota();
859 }
860
861 return 0;
862 }
863
864 static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
865 uint8_t flags = 0;
866
867 if (!req_meth) {
868 dout(5) << "req_meth is null" << dendl;
869 return false;
870 }
871
872 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
873 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
874 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
875 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
876 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
877
878 if ((rule->get_allowed_methods() & flags) == flags) {
879 dout(10) << "Method " << req_meth << " is supported" << dendl;
880 } else {
881 dout(5) << "Method " << req_meth << " is not supported" << dendl;
882 return false;
883 }
884
885 return true;
886 }
887
/* Load the bucket's CORS configuration (RGW_ATTR_CORS) into bucket_cors.
 * Sets cors_exist accordingly; a missing attr is not an error.
 * Returns 0 on success (including "no CORS configured"), -EIO when the
 * stored configuration cannot be decoded. */
int RGWOp::read_bucket_cors()
{
  bufferlist bl;

  map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
  if (aiter == s->bucket_attrs.end()) {
    ldout(s->cct, 20) << "no CORS configuration attr found" << dendl;
    cors_exist = false;
    return 0; /* no CORS configuration found */
  }

  cors_exist = true;

  bl = aiter->second;

  bufferlist::iterator iter = bl.begin();
  try {
    bucket_cors.decode(iter);
  } catch (buffer::error& err) {
    ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
    return -EIO;
  }
  /* at debug level 15, dump the configuration in its S3 XML form */
  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
    ldout(s->cct, 15) << "Read RGWCORSConfiguration";
    s3cors->to_xml(*_dout);
    *_dout << dendl;
  }
  return 0;
}
918
919 /** CORS 6.2.6.
920 * If any of the header field-names is not a ASCII case-insensitive match for
921 * any of the values in list of headers do not set any additional headers and
922 * terminate this set of steps.
923 * */
924 static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
925 if (req_hdrs) {
926 list<string> hl;
927 get_str_list(req_hdrs, hl);
928 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
929 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
930 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
931 } else {
932 if (hdrs.length() > 0) hdrs.append(",");
933 hdrs.append((*it));
934 }
935 }
936 }
937 rule->format_exp_headers(exp_hdrs);
938 *max_age = rule->get_max_age();
939 }
940
/**
 * Generate the CORS header response
 *
 * This is described in the CORS standard, section 6.2.  Each step below is
 * labeled with the spec section it implements.  Returns true when CORS
 * headers should be emitted (out-params filled in), false when the request
 * is not a CORS request or no rule matches.
 */
bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
{
  /* CORS 6.2.1. */
  const char *orig = s->info.env->get("HTTP_ORIGIN");
  if (!orig) {
    return false;
  }

  /* Custom: */
  origin = orig;
  op_ret = read_bucket_cors();
  if (op_ret < 0) {
    return false;
  }

  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    return false;
  }

  /* CORS 6.2.2. */
  RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
  if (!rule)
    return false;

  /*
   * Set the Allowed-Origin header to a asterisk if this is allowed in the rule
   * and no Authorization was send by the client
   *
   * The origin parameter specifies a URI that may access the resource. The browser must enforce this.
   * For requests without credentials, the server may specify "*" as a wildcard,
   * thereby allowing any origin to access the resource.
   */
  const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
  if (!authorization && rule->has_wildcard_origin())
    origin = "*";

  /* CORS 6.2.3. */
  const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    /* not a preflight: validate against the actual request method */
    req_meth = s->info.method;
  }

  if (req_meth) {
    method = req_meth;
    /* CORS 6.2.5. */
    if (!validate_cors_rule_method(rule, req_meth)) {
     return false;
    }
  }

  /* CORS 6.2.4. */
  const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");

  /* CORS 6.2.6. */
  get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);

  return true;
}
1005
/* Read one part (segment) of a user-manifest (SLO/DLO-style) object and
 * stream bytes [start_ofs, end_ofs] of it through the response filter
 * chain, decompressing transparently when the part is stored compressed.
 *
 * Each part is permission-checked individually against its own ACL, the
 * containing bucket's ACL/policy, and the account ACL; system and admin
 * requests bypass the check.  Uses if-match on the listed etag so a part
 * replaced mid-read fails rather than returning mixed data.
 * Returns 0 on success, negative error otherwise (also left in op_ret). */
int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       const off_t start_ofs,
                                       const off_t end_ofs)
{
  ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl;
  RGWGetObj_CB cb(this);
  RGWGetDataCB* filter = &cb;
  boost::optional<RGWGetObj_Decompress> decompress;

  int64_t cur_ofs = start_ofs;
  int64_t cur_end = end_ofs;

  rgw_obj part(bucket, ent.key);

  map<string, bufferlist> attrs;

  uint64_t obj_size;
  RGWObjectCtx obj_ctx(store);
  RGWAccessControlPolicy obj_policy(s->cct);

  ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl;

  obj_ctx.obj.set_atomic(part);
  store->set_prefetch_data(&obj_ctx, part);

  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
  RGWRados::Object::Read read_op(&op_target);

  /* guard against the part being replaced since the manifest was listed */
  read_op.conds.if_match = ent.meta.etag.c_str();
  read_op.params.attrs = &attrs;
  read_op.params.obj_size = &obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    return op_ret;
  /* clamp the requested range to this part's logical (accounted) size */
  op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
  if (op_ret < 0)
    return op_ret;
  bool need_decompress;
  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    return -EIO;
  }

  if (need_decompress)
  {
    /* sanity: uncompressed size must match what the bucket index recorded.
     * NOTE(review): the log below prints ent.meta.size while the check
     * compares ent.meta.accounted_size — the logged "actual" value can be
     * misleading for compressed parts. */
    if (cs_info.orig_size != ent.meta.accounted_size) {
      // hmm.. something wrong, object not as expected, abort!
      ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size <<
          ", actual read size=" << ent.meta.size << dendl;
      return -EIO;
    }
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }
  else
  {
    /* uncompressed: on-disk size must match the bucket index entry */
    if (obj_size != ent.meta.size) {
      // hmm.. something wrong, object not as expected, abort!
      ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl;
      return -EIO;
    }
  }

  op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
  if (op_ret < 0)
    return op_ret;

  /* We can use global user_acl because LOs cannot have segments
   * stored inside different accounts. */
  if (s->system_request) {
    ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl;
  } else if (s->auth.identity->is_admin_of(s->user->user_id)) {
    ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl;
  } else if (!verify_object_permission(s, part, s->user_acl.get(), bucket_acl,
				       &obj_policy, bucket_policy, action)) {
    return -EPERM;
  }

  /* zero-length part: permission check done, nothing to stream */
  if (ent.meta.size == 0) {
    return 0;
  }

  perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
  filter->fixup_range(cur_ofs, cur_end);
  op_ret = read_op.iterate(cur_ofs, cur_end, filter);
  if (op_ret >= 0)
    op_ret = filter->flush();
  return op_ret;
}
1100
/* Walk the bucket entries sharing obj_prefix (the segments of a Swift DLO
 * manifest) and, for the inclusive byte range [ofs, end], optionally:
 *  - accumulate the number of in-range bytes into *ptotal_len,
 *  - accumulate the full concatenated object size into *pobj_size,
 *  - compute the DLO etag (MD5 over the segments' etags) into *pobj_sum,
 *  - invoke cb once per segment overlapping the range, passing the
 *    segment-local sub-range [start_ofs, end_ofs).
 * Every output pointer and cb may be null. Returns 0 or a negative errno. */
static int iterate_user_manifest_parts(CephContext * const cct,
                                       RGWRados * const store,
                                       const off_t ofs,
                                       const off_t end,
                                       RGWBucketInfo *pbucket_info,
                                       const string& obj_prefix,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       uint64_t * const ptotal_len,
                                       uint64_t * const pobj_size,
                                       string * const pobj_sum,
                                       int (*cb)(rgw_bucket& bucket,
                                                 const rgw_bucket_dir_entry& ent,
                                                 RGWAccessControlPolicy * const bucket_acl,
                                                 const optional<Policy>& bucket_policy,
                                                 off_t start_ofs,
                                                 off_t end_ofs,
                                                 void *param),
                                       void * const cb_param)
{
  rgw_bucket& bucket = pbucket_info->bucket;
  uint64_t obj_ofs = 0, len_count = 0;  // running logical offset / bytes inside the range
  bool found_start = false, found_end = false, handled_end = false;
  string delim;
  bool is_truncated;
  vector<rgw_bucket_dir_entry> objs;

  utime_t start_time = ceph_clock_now();

  RGWRados::Bucket target(store, *pbucket_info);
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = obj_prefix;
  list_op.params.delim = delim;

  MD5 etag_sum;
  do {
#define MAX_LIST_OBJS 100
    int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
    if (r < 0) {
      return r;
    }

    for (rgw_bucket_dir_entry& ent : objs) {
      const uint64_t cur_total_len = obj_ofs;      // logical offset where this segment begins
      const uint64_t obj_size = ent.meta.accounted_size;
      uint64_t start_ofs = 0, end_ofs = obj_size;  // sub-range within this segment

      /* First segment crossing `ofs`: clip its head to the range start. */
      if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
        start_ofs = ofs - obj_ofs;
        found_start = true;
      }

      obj_ofs += obj_size;
      if (pobj_sum) {
        /* The DLO etag is an MD5 over the concatenation of segment etags. */
        etag_sum.Update((const byte *)ent.meta.etag.c_str(),
                        ent.meta.etag.length());
      }

      /* First segment crossing `end` (inclusive): clip its tail. */
      if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
        end_ofs = end - cur_total_len + 1;
        found_end = true;
      }

      perfcounter->tinc(l_rgw_get_lat,
                        (ceph_clock_now() - start_time));

      /* Segments between the clipped head and tail contribute in full. */
      if (found_start && !handled_end) {
        len_count += end_ofs - start_ofs;

        if (cb) {
          r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, cb_param);
          if (r < 0) {
            return r;
          }
        }
      }

      /* Lag found_end by one iteration so the segment containing `end`
       * itself is still processed above before we stop contributing. */
      handled_end = found_end;
      start_time = ceph_clock_now();
    }
  } while (is_truncated);

  if (ptotal_len) {
    *ptotal_len = len_count;
  }
  if (pobj_size) {
    *pobj_size = obj_ofs;  // sum of all segment sizes, range-independent
  }
  if (pobj_sum) {
    complete_etag(etag_sum, pobj_sum);
  }

  return 0;
}
1196
/* One segment of a Swift Static Large Object (SLO). The caller keys these by
 * the segment's starting offset within the concatenated object. The ACL and
 * policy pointers are non-owning and must outlive the part. */
struct rgw_slo_part {
  RGWAccessControlPolicy *bucket_acl = nullptr; // ACL of the bucket holding the segment
  Policy* bucket_policy = nullptr;              // optional bucket IAM policy (may stay null)
  rgw_bucket bucket;                            // bucket holding the segment object
  string obj_name;                              // segment object name
  uint64_t size = 0;                            // segment size in bytes
  string etag;                                  // segment etag (folded into the SLO etag)
};
1205
/* Stream the SLO segments in `slo_parts` (keyed by their starting offset in
 * the concatenated object) that overlap the inclusive byte range [ofs, end],
 * invoking cb for each with the segment-local sub-range. cct and store are
 * not used here; they are kept for symmetry with
 * iterate_user_manifest_parts(). Returns 0 or a negative errno from cb. */
static int iterate_slo_parts(CephContext *cct,
                             RGWRados *store,
                             off_t ofs,
                             off_t end,
                             map<uint64_t, rgw_slo_part>& slo_parts,
                             int (*cb)(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy *bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       off_t start_ofs,
                                       off_t end_ofs,
                                       void *param),
                             void *cb_param)
{
  bool found_start = false, found_end = false;

  if (slo_parts.empty()) {
    return 0;
  }

  utime_t start_time = ceph_clock_now();

  /* Locate the part containing `ofs`: upper_bound yields the first part
   * starting strictly after it, so step back one (unless already at begin). */
  map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
  if (iter != slo_parts.begin()) {
    --iter;
  }

  uint64_t obj_ofs = iter->first;  // logical offset of the current part

  for (; iter != slo_parts.end() && !found_end; ++iter) {
    rgw_slo_part& part = iter->second;
    /* Build a synthetic dir entry so the shared manifest callback can be
     * reused for SLO parts. */
    rgw_bucket_dir_entry ent;

    ent.key.name = part.obj_name;
    ent.meta.accounted_size = ent.meta.size = part.size;
    ent.meta.etag = part.etag;

    uint64_t cur_total_len = obj_ofs;               // where this part begins
    uint64_t start_ofs = 0, end_ofs = ent.meta.size; // sub-range within the part

    /* First part crossing `ofs`: clip its head. */
    if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
      start_ofs = ofs - obj_ofs;
      found_start = true;
    }

    obj_ofs += ent.meta.size;

    /* First part crossing `end` (inclusive): clip its tail and stop after
     * this iteration (loop condition checks !found_end). */
    if (!found_end && obj_ofs > (uint64_t)end) {
      end_ofs = end - cur_total_len + 1;
      found_end = true;
    }

    perfcounter->tinc(l_rgw_get_lat,
                      (ceph_clock_now() - start_time));

    if (found_start) {
      if (cb) {
        // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
        int r = cb(part.bucket, ent, part.bucket_acl,
                   (part.bucket_policy ?
                    optional<Policy>(*part.bucket_policy) : none),
                   start_ofs, end_ofs, cb_param);
        if (r < 0)
          return r;
      }
    }

    start_time = ceph_clock_now();
  }

  return 0;
}
1278
1279 static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
1280 const rgw_bucket_dir_entry& ent,
1281 RGWAccessControlPolicy * const bucket_acl,
1282 const optional<Policy>& bucket_policy,
1283 const off_t start_ofs,
1284 const off_t end_ofs,
1285 void * const param)
1286 {
1287 RGWGetObj *op = static_cast<RGWGetObj *>(param);
1288 return op->read_user_manifest_part(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs);
1289 }
1290
/* Serve a Swift DLO (dynamic large object): `prefix` is
 * "<bucket>/<object-prefix>" and the object's payload is the concatenation
 * of every object matching that prefix. Returns 0 or a negative errno. */
int RGWGetObj::handle_user_manifest(const char *prefix)
{
  const boost::string_view prefix_view(prefix);
  ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix="
                   << prefix_view << dendl;

  const size_t pos = prefix_view.find('/');
  if (pos == string::npos) {
    return -EINVAL;
  }

  const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
  const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));

  rgw_bucket bucket;

  RGWAccessControlPolicy _bucket_acl(s->cct);
  RGWAccessControlPolicy *bucket_acl;
  optional<Policy> _bucket_policy;
  optional<Policy>* bucket_policy;
  RGWBucketInfo bucket_info;
  RGWBucketInfo *pbucket_info;

  /* The segments may live in a different bucket than the manifest object;
   * in that case fetch that bucket's info, ACL and IAM policy. Otherwise
   * reuse what the request already resolved. */
  if (bucket_name.compare(s->bucket.name) != 0) {
    map<string, bufferlist> bucket_attrs;
    RGWObjectCtx obj_ctx(store);
    int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   bucket_name, bucket_info, NULL,
                                   &bucket_attrs);
    if (r < 0) {
      ldout(s->cct, 0) << "could not get bucket info for bucket="
                       << bucket_name << dendl;
      return r;
    }
    bucket = bucket_info.bucket;
    pbucket_info = &bucket_info;
    bucket_acl = &_bucket_acl;
    r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
    if (r < 0) {
      ldout(s->cct, 0) << "failed to read bucket policy" << dendl;
      return r;
    }
    _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
                                              bucket_info.bucket.tenant);
    bucket_policy = &_bucket_policy;
  } else {
    bucket = s->bucket;
    pbucket_info = &s->bucket_info;
    bucket_acl = s->bucket_acl.get();
    bucket_policy = &s->iam_policy;
  }

  /* dry run to find out:
   * - total length (of the parts we are going to send to client),
   * - overall DLO's content size,
   * - md5 sum of overall DLO's content (for etag of Swift API). */
  int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, &s->obj_size, &lo_etag,
        nullptr /* cb */, nullptr /* cb arg */);
  if (r < 0) {
    return r;
  }

  /* Clamp the requested range against the full DLO size. */
  r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
  if (r < 0) {
    return r;
  }

  /* Second pass: compute the byte count of the (now clamped) range. */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        &total_len, nullptr, nullptr,
        nullptr, nullptr);
  if (r < 0) {
    return r;
  }

  /* HEAD request: headers only, no payload pass needed. */
  if (!get_data) {
    bufferlist bl;
    send_response_data(bl, 0, 0);
    return 0;
  }

  /* Third pass: actually stream the overlapping segments to the client. */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, nullptr, nullptr,
        get_obj_user_manifest_iterate_cb, (void *)this);
  if (r < 0) {
    return r;
  }

  /* Empty range/object: the callback never fired, so the response has not
   * been completed yet — do it now. */
  if (!total_len) {
    bufferlist bl;
    send_response_data(bl, 0, 0);
  }

  return 0;
}
1389
1390 int RGWGetObj::handle_slo_manifest(bufferlist& bl)
1391 {
1392 RGWSLOInfo slo_info;
1393 bufferlist::iterator bliter = bl.begin();
1394 try {
1395 ::decode(slo_info, bliter);
1396 } catch (buffer::error& err) {
1397 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
1398 return -EIO;
1399 }
1400 ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
1401
1402 vector<RGWAccessControlPolicy> allocated_acls;
1403 map<string, pair<RGWAccessControlPolicy *, optional<Policy>>> policies;
1404 map<string, rgw_bucket> buckets;
1405
1406 map<uint64_t, rgw_slo_part> slo_parts;
1407
1408 MD5 etag_sum;
1409 total_len = 0;
1410
1411 for (const auto& entry : slo_info.entries) {
1412 const string& path = entry.path;
1413
1414 /* If the path starts with slashes, strip them all. */
1415 const size_t pos_init = path.find_first_not_of('/');
1416 /* According to the documentation of std::string::find following check
1417 * is not necessary as we should get the std::string::npos propagation
1418 * here. This might be true with the accuracy to implementation's bugs.
1419 * See following question on SO:
1420 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
1421 */
1422 if (pos_init == string::npos) {
1423 return -EINVAL;
1424 }
1425
1426 const size_t pos_sep = path.find('/', pos_init);
1427 if (pos_sep == string::npos) {
1428 return -EINVAL;
1429 }
1430
1431 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
1432 string obj_name = path.substr(pos_sep + 1);
1433
1434 rgw_bucket bucket;
1435 RGWAccessControlPolicy *bucket_acl;
1436 Policy* bucket_policy;
1437
1438 if (bucket_name.compare(s->bucket.name) != 0) {
1439 const auto& piter = policies.find(bucket_name);
1440 if (piter != policies.end()) {
1441 bucket_acl = piter->second.first;
1442 bucket_policy = piter->second.second.get_ptr();
1443 bucket = buckets[bucket_name];
1444 } else {
1445 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
1446 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
1447
1448 RGWBucketInfo bucket_info;
1449 map<string, bufferlist> bucket_attrs;
1450 RGWObjectCtx obj_ctx(store);
1451 int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
1452 bucket_name, bucket_info, nullptr,
1453 &bucket_attrs);
1454 if (r < 0) {
1455 ldout(s->cct, 0) << "could not get bucket info for bucket="
1456 << bucket_name << dendl;
1457 return r;
1458 }
1459 bucket = bucket_info.bucket;
1460 bucket_acl = &_bucket_acl;
1461 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
1462 bucket);
1463 if (r < 0) {
1464 ldout(s->cct, 0) << "failed to read bucket ACL for bucket "
1465 << bucket << dendl;
1466 return r;
1467 }
1468 auto _bucket_policy = get_iam_policy_from_attr(
1469 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
1470 bucket_policy = _bucket_policy.get_ptr();
1471 buckets[bucket_name] = bucket;
1472 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
1473 }
1474 } else {
1475 bucket = s->bucket;
1476 bucket_acl = s->bucket_acl.get();
1477 bucket_policy = s->iam_policy.get_ptr();
1478 }
1479
1480 rgw_slo_part part;
1481 part.bucket_acl = bucket_acl;
1482 part.bucket_policy = bucket_policy;
1483 part.bucket = bucket;
1484 part.obj_name = obj_name;
1485 part.size = entry.size_bytes;
1486 part.etag = entry.etag;
1487 ldout(s->cct, 20) << "slo_part: ofs=" << ofs
1488 << " bucket=" << part.bucket
1489 << " obj=" << part.obj_name
1490 << " size=" << part.size
1491 << " etag=" << part.etag
1492 << dendl;
1493
1494 etag_sum.Update((const byte *)entry.etag.c_str(),
1495 entry.etag.length());
1496
1497 slo_parts[total_len] = part;
1498 total_len += part.size;
1499 }
1500
1501 complete_etag(etag_sum, &lo_etag);
1502
1503 s->obj_size = slo_info.total_size;
1504 ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl;
1505
1506 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
1507 if (r < 0) {
1508 return r;
1509 }
1510
1511 total_len = end - ofs + 1;
1512
1513 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
1514 get_obj_user_manifest_iterate_cb, (void *)this);
1515 if (r < 0) {
1516 return r;
1517 }
1518
1519 return 0;
1520 }
1521
1522 int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
1523 {
1524 /* garbage collection related handling */
1525 utime_t start_time = ceph_clock_now();
1526 if (start_time > gc_invalidate_time) {
1527 int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
1528 if (r < 0) {
1529 dout(0) << "WARNING: could not defer gc entry for obj" << dendl;
1530 }
1531 gc_invalidate_time = start_time;
1532 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1533 }
1534 return send_response_data(bl, bl_ofs, bl_len);
1535 }
1536
1537 bool RGWGetObj::prefetch_data()
1538 {
1539 /* HEAD request, stop prefetch*/
1540 if (!get_data) {
1541 return false;
1542 }
1543
1544 bool prefetch_first_chunk = true;
1545 range_str = s->info.env->get("HTTP_RANGE");
1546
1547 if(range_str) {
1548 int r = parse_range(range_str, ofs, end, &partial_content);
1549 /* error on parsing the range, stop prefetch and will fail in execte() */
1550 if (r < 0) {
1551 range_parsed = false;
1552 return false;
1553 } else {
1554 range_parsed = true;
1555 }
1556 /* range get goes to shadown objects, stop prefetch */
1557 if (ofs >= s->cct->_conf->rgw_max_chunk_size) {
1558 prefetch_first_chunk = false;
1559 }
1560 }
1561
1562 return get_data && prefetch_first_chunk;
1563 }
// Shared pre-execution hook for bucket/object operations.
void RGWGetObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
1568
1569 static bool object_is_expired(map<string, bufferlist>& attrs) {
1570 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
1571 if (iter != attrs.end()) {
1572 utime_t delete_at;
1573 try {
1574 ::decode(delete_at, iter->second);
1575 } catch (buffer::error& err) {
1576 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
1577 return false;
1578 }
1579
1580 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
1581 return true;
1582 }
1583 }
1584
1585 return false;
1586 }
1587
1588 void RGWGetObj::execute()
1589 {
1590 utime_t start_time = s->time;
1591 bufferlist bl;
1592 gc_invalidate_time = ceph_clock_now();
1593 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1594
1595 bool need_decompress;
1596 int64_t ofs_x, end_x;
1597
1598 RGWGetObj_CB cb(this);
1599 RGWGetDataCB* filter = (RGWGetDataCB*)&cb;
1600 boost::optional<RGWGetObj_Decompress> decompress;
1601 std::unique_ptr<RGWGetDataCB> decrypt;
1602 map<string, bufferlist>::iterator attr_iter;
1603
1604 perfcounter->inc(l_rgw_get);
1605
1606 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
1607 RGWRados::Object::Read read_op(&op_target);
1608
1609 op_ret = get_params();
1610 if (op_ret < 0)
1611 goto done_err;
1612
1613 op_ret = init_common();
1614 if (op_ret < 0)
1615 goto done_err;
1616
1617 read_op.conds.mod_ptr = mod_ptr;
1618 read_op.conds.unmod_ptr = unmod_ptr;
1619 read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
1620 read_op.conds.mod_zone_id = mod_zone_id;
1621 read_op.conds.mod_pg_ver = mod_pg_ver;
1622 read_op.conds.if_match = if_match;
1623 read_op.conds.if_nomatch = if_nomatch;
1624 read_op.params.attrs = &attrs;
1625 read_op.params.lastmod = &lastmod;
1626 read_op.params.obj_size = &s->obj_size;
1627
1628 op_ret = read_op.prepare();
1629 if (op_ret < 0)
1630 goto done_err;
1631 version_id = read_op.state.obj.key.instance;
1632
1633 /* STAT ops don't need data, and do no i/o */
1634 if (get_type() == RGW_OP_STAT_OBJ) {
1635 return;
1636 }
1637
1638 /* start gettorrent */
1639 if (torrent.get_flag())
1640 {
1641 torrent.init(s, store);
1642 torrent.get_torrent_file(op_ret, read_op, total_len, bl, obj);
1643 if (op_ret < 0)
1644 {
1645 ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
1646 << dendl;
1647 goto done_err;
1648 }
1649 op_ret = send_response_data(bl, 0, total_len);
1650 if (op_ret < 0)
1651 {
1652 ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret
1653 << dendl;
1654 goto done_err;
1655 }
1656 return;
1657 }
1658 /* end gettorrent */
1659
1660 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
1661 if (op_ret < 0) {
1662 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
1663 goto done_err;
1664 }
1665 if (need_decompress) {
1666 s->obj_size = cs_info.orig_size;
1667 decompress.emplace(s->cct, &cs_info, partial_content, filter);
1668 filter = &*decompress;
1669 }
1670
1671 attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
1672 if (attr_iter != attrs.end() && !skip_manifest) {
1673 op_ret = handle_user_manifest(attr_iter->second.c_str());
1674 if (op_ret < 0) {
1675 ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret="
1676 << op_ret << dendl;
1677 goto done_err;
1678 }
1679 return;
1680 }
1681
1682 attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
1683 if (attr_iter != attrs.end() && !skip_manifest) {
1684 is_slo = true;
1685 op_ret = handle_slo_manifest(attr_iter->second);
1686 if (op_ret < 0) {
1687 ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
1688 << dendl;
1689 goto done_err;
1690 }
1691 return;
1692 }
1693
1694 // for range requests with obj size 0
1695 if (range_str && !(s->obj_size)) {
1696 total_len = 0;
1697 op_ret = -ERANGE;
1698 goto done_err;
1699 }
1700
1701 op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
1702 if (op_ret < 0)
1703 goto done_err;
1704 total_len = (ofs <= end ? end + 1 - ofs : 0);
1705
1706 /* Check whether the object has expired. Swift API documentation
1707 * stands that we should return 404 Not Found in such case. */
1708 if (need_object_expiration() && object_is_expired(attrs)) {
1709 op_ret = -ENOENT;
1710 goto done_err;
1711 }
1712
1713 start = ofs;
1714
1715 /* STAT ops don't need data, and do no i/o */
1716 if (get_type() == RGW_OP_STAT_OBJ) {
1717 return;
1718 }
1719
1720 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
1721 op_ret = this->get_decrypt_filter(&decrypt, filter,
1722 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
1723 if (decrypt != nullptr) {
1724 filter = decrypt.get();
1725 }
1726 if (op_ret < 0) {
1727 goto done_err;
1728 }
1729
1730 if (!get_data || ofs > end) {
1731 send_response_data(bl, 0, 0);
1732 return;
1733 }
1734
1735 perfcounter->inc(l_rgw_get_b, end - ofs);
1736
1737 ofs_x = ofs;
1738 end_x = end;
1739 filter->fixup_range(ofs_x, end_x);
1740 op_ret = read_op.iterate(ofs_x, end_x, filter);
1741
1742 if (op_ret >= 0)
1743 op_ret = filter->flush();
1744
1745 perfcounter->tinc(l_rgw_get_lat,
1746 (ceph_clock_now() - start_time));
1747 if (op_ret < 0) {
1748 goto done_err;
1749 }
1750
1751 op_ret = send_response_data(bl, 0, 0);
1752 if (op_ret < 0) {
1753 goto done_err;
1754 }
1755 return;
1756
1757 done_err:
1758 send_response_data_error();
1759 }
1760
1761 int RGWGetObj::init_common()
1762 {
1763 if (range_str) {
1764 /* range parsed error when prefetch*/
1765 if (!range_parsed) {
1766 int r = parse_range(range_str, ofs, end, &partial_content);
1767 if (r < 0)
1768 return r;
1769 }
1770 }
1771 if (if_mod) {
1772 if (parse_time(if_mod, &mod_time) < 0)
1773 return -EINVAL;
1774 mod_ptr = &mod_time;
1775 }
1776
1777 if (if_unmod) {
1778 if (parse_time(if_unmod, &unmod_time) < 0)
1779 return -EINVAL;
1780 unmod_ptr = &unmod_time;
1781 }
1782
1783 return 0;
1784 }
1785
1786 int RGWListBuckets::verify_permission()
1787 {
1788 if (!verify_user_permission(s, RGW_PERM_READ)) {
1789 return -EACCES;
1790 }
1791
1792 return 0;
1793 }
1794
1795 int RGWGetUsage::verify_permission()
1796 {
1797 if (s->auth.identity->is_anonymous()) {
1798 return -EACCES;
1799 }
1800
1801 return 0;
1802 }
1803
/* Stream the account's bucket list to the client in chunks of
 * rgw_list_buckets_max_chunk, aggregating size/object counts as we go.
 * `limit` (when >= 0) caps the total number of buckets returned. */
void RGWListBuckets::execute()
{
  bool done;
  bool started = false;          // whether send_response_begin() ran yet
  uint64_t total_count = 0;      // buckets emitted so far (for `limit`)

  uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  op_ret = get_params();
  if (op_ret < 0) {
    goto send_end;
  }

  if (supports_account_metadata()) {
    op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
    if (op_ret < 0) {
      goto send_end;
    }
  }

  is_truncated = false;
  do {
    RGWUserBuckets buckets;
    uint64_t read_count;
    /* Never read past the caller's limit in a single chunk. */
    if (limit >= 0) {
      read_count = min(limit - total_count, (uint64_t)max_buckets);
    } else {
      read_count = max_buckets;
    }

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, end_marker, read_count,
                                   should_get_stats(), &is_truncated,
                                   get_default_max());
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
                        << s->user->user_id << dendl;
      break;
    }
    map<string, RGWBucketEnt>& m = buckets.get_buckets();
    map<string, RGWBucketEnt>::iterator iter;
    /* Roll this chunk into the account-wide aggregates. */
    for (iter = m.begin(); iter != m.end(); ++iter) {
      RGWBucketEnt& bucket = iter->second;
      buckets_size += bucket.size;
      buckets_size_rounded += bucket.size_rounded;
      buckets_objcount += bucket.count;
    }
    buckets_count += m.size();
    total_count += m.size();

    /* Stop when the backend returned a short chunk or we hit `limit`. */
    done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));

    if (!started) {
      send_response_begin(buckets.count() > 0);
      started = true;
    }

    if (!m.empty()) {
      send_response_data(buckets);

      /* Resume the next chunk after the last bucket emitted. */
      map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
      marker = riter->first;
    }
  } while (is_truncated && !done);

send_end:
  /* Even on early failure the response envelope must be opened/closed. */
  if (!started) {
    send_response_begin(false);
  }
  send_response_end();
}
1877
1878 void RGWGetUsage::execute()
1879 {
1880 uint64_t start_epoch = 0;
1881 uint64_t end_epoch = (uint64_t)-1;
1882 op_ret = get_params();
1883 if (op_ret < 0)
1884 return;
1885
1886 if (!start_date.empty()) {
1887 op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
1888 if (op_ret < 0) {
1889 ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl;
1890 return;
1891 }
1892 }
1893
1894 if (!end_date.empty()) {
1895 op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
1896 if (op_ret < 0) {
1897 ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl;
1898 return;
1899 }
1900 }
1901
1902 uint32_t max_entries = 1000;
1903
1904 bool is_truncated = true;
1905
1906 RGWUsageIter usage_iter;
1907
1908 while (is_truncated) {
1909 op_ret = store->read_usage(s->user->user_id, start_epoch, end_epoch, max_entries,
1910 &is_truncated, usage_iter, usage);
1911
1912 if (op_ret == -ENOENT) {
1913 op_ret = 0;
1914 is_truncated = false;
1915 }
1916
1917 if (op_ret < 0) {
1918 return;
1919 }
1920 }
1921
1922 op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
1923 if (op_ret < 0) {
1924 ldout(store->ctx(), 0) << "ERROR: failed to sync user stats: " << dendl;
1925 return;
1926 }
1927
1928 op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage);
1929 if (op_ret < 0) {
1930 cerr << "ERROR: failed to sync user stats: " << std::endl;
1931 return ;
1932 }
1933
1934 string user_str = s->user->user_id.to_str();
1935 op_ret = store->cls_user_get_header(user_str, &header);
1936 if (op_ret < 0) {
1937 ldout(store->ctx(), 0) << "ERROR: can't read user header: " << dendl;
1938 return;
1939 }
1940
1941 return;
1942 }
1943
1944 int RGWStatAccount::verify_permission()
1945 {
1946 if (!verify_user_permission(s, RGW_PERM_READ)) {
1947 return -EACCES;
1948 }
1949
1950 return 0;
1951 }
1952
1953 void RGWStatAccount::execute()
1954 {
1955 string marker;
1956 bool is_truncated = false;
1957 uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
1958
1959 do {
1960 RGWUserBuckets buckets;
1961
1962 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker,
1963 string(), max_buckets, true, &is_truncated);
1964 if (op_ret < 0) {
1965 /* hmm.. something wrong here.. the user was authenticated, so it
1966 should exist */
1967 ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
1968 << s->user->user_id << dendl;
1969 break;
1970 } else {
1971 map<string, RGWBucketEnt>& m = buckets.get_buckets();
1972 map<string, RGWBucketEnt>::iterator iter;
1973 for (iter = m.begin(); iter != m.end(); ++iter) {
1974 RGWBucketEnt& bucket = iter->second;
1975 buckets_size += bucket.size;
1976 buckets_size_rounded += bucket.size_rounded;
1977 buckets_objcount += bucket.count;
1978
1979 marker = iter->first;
1980 }
1981 buckets_count += m.size();
1982
1983 }
1984 } while (is_truncated);
1985 }
1986
1987 int RGWGetBucketVersioning::verify_permission()
1988 {
1989 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
1990 return -EACCES;
1991 }
1992
1993 return 0;
1994 }
1995
// Shared pre-execution hook for bucket/object operations.
void RGWGetBucketVersioning::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2000
/* No I/O needed: report both versioning flags straight from the cached
 * bucket info resolved earlier in the request. */
void RGWGetBucketVersioning::execute()
{
  versioned = s->bucket_info.versioned();
  versioning_enabled = s->bucket_info.versioning_enabled();
}
2006
2007 int RGWSetBucketVersioning::verify_permission()
2008 {
2009 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
2010 return -EACCES;
2011 }
2012
2013 return 0;
2014 }
2015
// Shared pre-execution hook for bucket/object operations.
void RGWSetBucketVersioning::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2020
2021 void RGWSetBucketVersioning::execute()
2022 {
2023 op_ret = get_params();
2024 if (op_ret < 0)
2025 return;
2026
2027 if (!store->is_meta_master()) {
2028 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2029 if (op_ret < 0) {
2030 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
2031 return;
2032 }
2033 }
2034
2035 if (enable_versioning) {
2036 s->bucket_info.flags |= BUCKET_VERSIONED;
2037 s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
2038 } else {
2039 s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
2040 }
2041
2042 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
2043 &s->bucket_attrs);
2044 if (op_ret < 0) {
2045 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2046 << " returned err=" << op_ret << dendl;
2047 return;
2048 }
2049 }
2050
2051 int RGWGetBucketWebsite::verify_permission()
2052 {
2053 if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
2054 return -EACCES;
2055
2056 return 0;
2057 }
2058
// Shared pre-execution hook for bucket/object operations.
void RGWGetBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2063
/* Nothing to fetch here — the website conf already sits in bucket_info;
 * just signal ENOENT when the bucket has no website configuration. */
void RGWGetBucketWebsite::execute()
{
  if (!s->bucket_info.has_website) {
    op_ret = -ENOENT;
  }
}
2070
2071 int RGWSetBucketWebsite::verify_permission()
2072 {
2073 if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
2074 return -EACCES;
2075
2076 return 0;
2077 }
2078
// Shared pre-execution hook for bucket/object operations.
void RGWSetBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2083
2084 void RGWSetBucketWebsite::execute()
2085 {
2086 op_ret = get_params();
2087
2088 if (op_ret < 0)
2089 return;
2090
2091 if (!store->is_meta_master()) {
2092 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2093 if (op_ret < 0) {
2094 ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
2095 return;
2096 }
2097 }
2098
2099 s->bucket_info.has_website = true;
2100 s->bucket_info.website_conf = website_conf;
2101
2102 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
2103 if (op_ret < 0) {
2104 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
2105 return;
2106 }
2107 }
2108
2109 int RGWDeleteBucketWebsite::verify_permission()
2110 {
2111 if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
2112 return -EACCES;
2113
2114 return 0;
2115 }
2116
// Shared pre-execution hook for bucket/object operations.
void RGWDeleteBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2121
2122 void RGWDeleteBucketWebsite::execute()
2123 {
2124 s->bucket_info.has_website = false;
2125 s->bucket_info.website_conf = RGWBucketWebsiteConf();
2126
2127 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
2128 if (op_ret < 0) {
2129 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
2130 return;
2131 }
2132 }
2133
2134 int RGWStatBucket::verify_permission()
2135 {
2136 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2137 if (!verify_bucket_permission(s, rgw::IAM::s3ListBucket)) {
2138 return -EACCES;
2139 }
2140
2141 return 0;
2142 }
2143
// Shared pre-execution hook for bucket/object operations.
void RGWStatBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2148
/* HEAD on a bucket: refresh this bucket's stats into `bucket`. */
void RGWStatBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  RGWUserBuckets buckets;
  bucket.bucket = s->bucket;
  buckets.add(bucket);
  map<string, RGWBucketEnt>& m = buckets.get_buckets();
  /* NOTE: update_containers_stats() repurposes op_ret as a count on
   * success — a positive value is the number of entries refreshed. Zero
   * means our bucket was not found and is mapped to -EEXIST here
   * (presumably translated to a client-facing error upstream — verify
   * against the REST layer before relying on it). */
  op_ret = store->update_containers_stats(m);
  if (! op_ret)
    op_ret = -EEXIST;
  if (op_ret > 0) {
    op_ret = 0;  // success: normalize the count back to 0
    map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
    if (iter != m.end()) {
      bucket = iter->second;
    } else {
      op_ret = -EINVAL;
    }
  }
}
2173
2174 int RGWListBucket::verify_permission()
2175 {
2176 op_ret = get_params();
2177 if (op_ret < 0) {
2178 return op_ret;
2179 }
2180
2181 if (!verify_bucket_permission(s,
2182 list_versions ?
2183 rgw::IAM::s3ListBucketVersions :
2184 rgw::IAM::s3ListBucket)) {
2185 return -EACCES;
2186 }
2187
2188 return 0;
2189 }
2190
2191 int RGWListBucket::parse_max_keys()
2192 {
2193 if (!max_keys.empty()) {
2194 char *endptr;
2195 max = strtol(max_keys.c_str(), &endptr, 10);
2196 if (endptr) {
2197 while (*endptr && isspace(*endptr)) // ignore white space
2198 endptr++;
2199 if (*endptr) {
2200 return -EINVAL;
2201 }
2202 }
2203 } else {
2204 max = default_max;
2205 }
2206
2207 return 0;
2208 }
2209
// Shared pre-execution hook for bucket/object operations.
void RGWListBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2214
void RGWListBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  // Swift-style listings may need container stats in the reply headers;
  // refresh them first. Failures here are non-fatal: a positive return
  // (entries found) updates `bucket', anything else leaves it as-is.
  if (need_container_stats()) {
    map<string, RGWBucketEnt> m;
    m[s->bucket.name] = RGWBucketEnt();
    m.begin()->second.bucket = s->bucket;
    op_ret = store->update_containers_stats(m);
    if (op_ret > 0) {
      bucket = m.begin()->second;
    }
  }

  // Build the listing operation; shard_id >= 0 restricts the listing to a
  // single bucket-index shard (used by system/admin requests).
  RGWRados::Bucket target(store, s->bucket_info);
  if (shard_id >= 0) {
    target.set_shard_id(shard_id);
  }
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = prefix;
  list_op.params.delim = delimiter;
  list_op.params.marker = marker;
  list_op.params.end_marker = end_marker;
  list_op.params.list_versions = list_versions;

  op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
  if (op_ret >= 0) {
    // Remember where a truncated listing should resume.
    next_marker = list_op.get_next_marker();
  }
}
2249
2250 int RGWGetBucketLogging::verify_permission()
2251 {
2252 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
2253 return -EACCES;
2254 }
2255
2256 return 0;
2257 }
2258
2259 int RGWGetBucketLocation::verify_permission()
2260 {
2261 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
2262 return -EACCES;
2263 }
2264
2265 return 0;
2266 }
2267
int RGWCreateBucket::verify_permission()
{
  /* This check is mostly needed for S3 that doesn't support account ACL.
   * Swift doesn't allow to delegate any permission to an anonymous user,
   * so it will become an early exit in such case. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  // Caller needs account-level WRITE permission to create buckets.
  if (!verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  // Cross-tenant bucket creation is not allowed.
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
                      << " (user_id.tenant=" << s->user->user_id.tenant
                      << " requested=" << s->bucket_tenant << ")"
                      << dendl;
    return -EACCES;
  }
  // A negative max_buckets means bucket creation is disabled for this user.
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  // max_buckets == 0 means unlimited; otherwise enforce the quota by
  // counting the user's existing buckets (reads at most max_buckets
  // entries, which is enough to decide).
  if (s->user->max_buckets) {
    RGWUserBuckets buckets;
    string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, string(), s->user->max_buckets,
                                   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if ((int)buckets.count() >= s->user->max_buckets) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
2310
/* Forward a metadata-changing request to the master zonegroup's endpoint
 * (multisite). On success the master's response body is optionally parsed
 * into @jp. Returns 0 on success or a negative error code. */
static int forward_request_to_master(struct req_state *s, obj_version *objv,
                                     RGWRados *store, bufferlist& in_data,
                                     JSONParser *jp, req_info *forward_info)
{
  if (!store->rest_master_conn) {
    ldout(s->cct, 0) << "rest connection is invalid" << dendl;
    return -EINVAL;
  }
  ldout(s->cct, 0) << "sending request to master zonegroup" << dendl;
  bufferlist response;
  string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
  // Forward either the caller-supplied request info or the current
  // request's own info when none was given.
  int ret = store->rest_master_conn->forward(uid_str, (forward_info ? *forward_info : s->info),
                                             objv, MAX_REST_RESPONSE, &in_data, &response);
  if (ret < 0)
    return ret;

  ldout(s->cct, 20) << "response: " << response.c_str() << dendl;
  // Parsing is optional: callers that don't care about the response body
  // pass jp == nullptr.
  if (jp && !jp->parse(response.c_str(), response.length())) {
    ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl;
    return -EINVAL;
  }

  return 0;
}
2336
void RGWCreateBucket::pre_exec()
{
  // Common pre-execution hook shared by bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2341
2342 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2343 map<string, bufferlist>& out_attrs,
2344 map<string, bufferlist>& out_rmattrs)
2345 {
2346 for (const auto& kv : orig_attrs) {
2347 const string& name = kv.first;
2348
2349 /* Check if the attr is user-defined metadata item. */
2350 if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
2351 RGW_ATTR_META_PREFIX) == 0) {
2352 /* For the objects all existing meta attrs have to be removed. */
2353 out_rmattrs[name] = kv.second;
2354 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2355 out_attrs[name] = kv.second;
2356 }
2357 }
2358 }
2359
/* Fuse resource metadata basing on original attributes in @orig_attrs, set
 * of _custom_ attribute names to remove in @rmattr_names and attributes in
 * @out_attrs. Place results in @out_attrs.
 *
 * Custom (user-defined) attributes are those whose names start with
 * RGW_ATTR_META_PREFIX; they carry X-Account-Meta-*, X-Container-Meta-*,
 * X-Amz-Meta-* and so on. All other (special) attrs already present in
 * @out_attrs are preserved without any change. */
static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
                                  const set<string>& rmattr_names,
                                  map<string, bufferlist>& out_attrs)
{
  for (const auto& kv : orig_attrs) {
    const string& name = kv.first;

    /* Check if the attr is user-defined metadata item. */
    if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
                     RGW_ATTR_META_PREFIX) == 0) {
      /* For the buckets all existing meta attrs are preserved,
         except those that are listed in rmattr_names. */
      if (rmattr_names.find(name) != std::end(rmattr_names)) {
        /* Also drop any new value the caller supplied for a removed attr. */
        const auto aiter = out_attrs.find(name);

        if (aiter != std::end(out_attrs)) {
          out_attrs.erase(aiter);
        }
      } else {
        /* emplace() won't alter the map if the key is already present.
         * This behaviour is fully intentional here. */
        out_attrs.emplace(kv);
      }
    } else if (out_attrs.find(name) == std::end(out_attrs)) {
      /* Non-meta attr: keep the original unless a new value exists. */
      out_attrs[name] = kv.second;
    }
  }
}
2396
2397
2398 static void populate_with_generic_attrs(const req_state * const s,
2399 map<string, bufferlist>& out_attrs)
2400 {
2401 for (const auto& kv : s->generic_attrs) {
2402 bufferlist& attrbl = out_attrs[kv.first];
2403 const string& val = kv.second;
2404 attrbl.clear();
2405 attrbl.append(val.c_str(), val.size() + 1);
2406 }
2407 }
2408
2409
2410 static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
2411 const std::set<std::string>& rmattr_names,
2412 RGWQuotaInfo& quota,
2413 bool * quota_extracted = nullptr)
2414 {
2415 bool extracted = false;
2416
2417 /* Put new limit on max objects. */
2418 auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
2419 std::string err;
2420 if (std::end(add_attrs) != iter) {
2421 quota.max_objects =
2422 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
2423 if (!err.empty()) {
2424 return -EINVAL;
2425 }
2426 add_attrs.erase(iter);
2427 extracted = true;
2428 }
2429
2430 /* Put new limit on bucket (container) size. */
2431 iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
2432 if (iter != add_attrs.end()) {
2433 quota.max_size =
2434 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
2435 if (!err.empty()) {
2436 return -EINVAL;
2437 }
2438 add_attrs.erase(iter);
2439 extracted = true;
2440 }
2441
2442 for (const auto& name : rmattr_names) {
2443 /* Remove limit on max objects. */
2444 if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
2445 quota.max_objects = -1;
2446 extracted = true;
2447 }
2448
2449 /* Remove limit on max bucket size. */
2450 if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
2451 quota.max_size = -1;
2452 extracted = true;
2453 }
2454 }
2455
2456 /* Swift requries checking on raw usage instead of the 4 KiB rounded one. */
2457 quota.check_on_raw = true;
2458 quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
2459
2460 if (quota_extracted) {
2461 *quota_extracted = extracted;
2462 }
2463
2464 return 0;
2465 }
2466
2467
/* Extract Swift static-website settings from the attr maps into @ws_conf.
 * Attrs found in @add_attrs are consumed; names in @rmattr_names clear the
 * corresponding setting. */
static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
                               const std::set<std::string>& rmattr_names,
                               RGWBucketWebsiteConf& ws_conf)
{
  std::string lstval;

  /* Let's define a mapping between each custom attribute and the memory where
   * attribute's value should be stored. The memory location is expressed by
   * a non-const reference. NOTE: make_pair() unwraps std::reference_wrapper,
   * so each pair's second member is a genuine std::string& — assigning to
   * `target' below writes through to ws_conf (or lstval). */
  const auto mapping = {
    std::make_pair(RGW_ATTR_WEB_INDEX,     std::ref(ws_conf.index_doc_suffix)),
    std::make_pair(RGW_ATTR_WEB_ERROR,     std::ref(ws_conf.error_doc)),
    std::make_pair(RGW_ATTR_WEB_LISTINGS,  std::ref(lstval)),
    std::make_pair(RGW_ATTR_WEB_LIST_CSS,  std::ref(ws_conf.listing_css_doc)),
    std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
  };

  for (const auto& kv : mapping) {
    const char * const key = kv.first;
    auto& target = kv.second;

    auto iter = add_attrs.find(key);

    if (std::end(add_attrs) != iter) {
      /* The "target" is a reference to ws_conf. */
      target = iter->second.c_str();
      add_attrs.erase(iter);
    }

    /* Removal takes precedence over a newly supplied value. */
    if (rmattr_names.count(key)) {
      target = std::string();
    }
  }

  /* The listings flag is transported as a string; only a change of the
   * attr updates it. */
  if (! lstval.empty()) {
    ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
  }
}
2506
2507
void RGWCreateBucket::execute()
{
  RGWAccessControlPolicy old_policy(s->cct);
  buffer::list aclbl;
  buffer::list corsbl;
  bool existed;
  string bucket_name;
  rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
  rgw_raw_obj obj(store->get_zone_params().domain_root, bucket_name);
  obj_version objv, *pobjv = NULL;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  /* Validate the requested location constraint against the known
   * zonegroup APIs. */
  if (!location_constraint.empty() &&
      !store->has_zonegroup_api(location_constraint)) {
    ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
                     << " can't be found." << dendl;
    op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
    s->err.message = "The specified location-constraint is not valid";
    return;
  }

  /* On a non-master zonegroup the constraint must match the local
   * zonegroup's API name. */
  if (!store->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
      store->get_zonegroup().api_name != location_constraint) {
    ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
                     << " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")"
                     << dendl;
    op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
    s->err.message = "The specified location-constraint is not valid";
    return;
  }

  /* Validate the requested placement target. */
  const auto& zonegroup = store->get_zonegroup();
  if (!placement_rule.empty() &&
      !zonegroup.placement_targets.count(placement_rule)) {
    ldout(s->cct, 0) << "placement target (" << placement_rule << ")"
                     << " doesn't exist in the placement targets of zonegroup"
                     << " (" << store->get_zonegroup().api_name << ")" << dendl;
    op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
    s->err.message = "The specified placement target does not exist";
    return;
  }

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                  s->bucket_info, NULL, &s->bucket_attrs);
  if (op_ret < 0 && op_ret != -ENOENT)
    return;
  s->bucket_exists = (op_ret != -ENOENT);

  s->bucket_owner.set_id(s->user->user_id);
  s->bucket_owner.set_name(s->user->display_name);
  if (s->bucket_exists) {
    /* Re-creating a bucket is only allowed for its current owner. */
    int r = get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
                                        s->bucket_attrs, &old_policy);
    if (r >= 0)  {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket;
  uint32_t *pmaster_num_shards;
  real_time creation_time;

  /* In multisite, bucket creation is serialized through the metadata
   * master: forward the request and adopt the master's bucket identity,
   * object versions and shard count. */
  if (!store->is_meta_master()) {
    JSONParser jp;
    op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
    if (op_ret < 0) {
      return;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);
    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl;
    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = NULL;
    pmaster_num_shards = NULL;
  }

  string zonegroup_id;

  /* System (inter-zone) requests may pin the zonegroup explicitly. */
  if (s->system_request) {
    zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
    if (zonegroup_id.empty()) {
      zonegroup_id = store->get_zonegroup().get_id();
    }
  } else {
    zonegroup_id = store->get_zonegroup().get_id();
  }

  if (s->bucket_exists) {
    /* Re-creation with a different placement rule is a conflict. */
    string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user), zonegroup_id,
                                            placement_rule,
                                            &selected_placement_rule, nullptr);
    if (selected_placement_rule != s->bucket_info.placement_rule) {
      op_ret = -EEXIST;
      return;
    }
  }

  /* Encode special metadata first as we're using std::map::emplace under
   * the hood. This method will add the new items only if the map doesn't
   * contain such keys yet. */
  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  if (has_cors) {
    cors_config.encode(corsbl);
    emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
  }

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;
  if (need_metadata_upload()) {
    /* It's supposed that following functions WILL NOT change any special
     * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
    rgw_get_request_metadata(s->cct, s->info, attrs, false);
    prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
    populate_with_generic_attrs(s, attrs);

    op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
    if (op_ret < 0) {
      return;
    } else {
      pquota_info = &quota_info;
    }

    /* Web site of Swift API. */
    filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
    s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
  }

  s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  s->bucket.name = s->bucket_name;

  /* Handle updates of the metadata for Swift's object versioning. */
  if (swift_ver_location) {
    s->bucket_info.swift_ver_location = *swift_ver_location;
    s->bucket_info.swift_versioning = (! swift_ver_location->empty());
  }

  op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
                                placement_rule, s->bucket_info.swift_ver_location,
                                pquota_info, attrs,
                                info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;

  if (op_ret && op_ret != -EEXIST)
    return;

  existed = (op_ret == -EEXIST);

  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      return;
    }
    s->bucket = info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket,
                           info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
                               s->bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    op_ret = -ERR_BUCKET_EXISTS;
  }

  if (need_metadata_upload() && existed) {
    /* OK, it looks we lost race with another request. As it's required to
     * handle metadata fusion and upload, the whole operation becomes very
     * similar in nature to PutMetadataBucket. However, as the attrs may
     * changed in the meantime, we have to refresh. */
    short tries = 0;
    do {
      RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
      RGWBucketInfo binfo;
      map<string, bufferlist> battrs;

      op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                      binfo, nullptr, &battrs);
      if (op_ret < 0) {
        return;
      } else if (binfo.owner.compare(s->user->user_id) != 0) {
        /* New bucket doesn't belong to the account we're operating on. */
        op_ret = -EEXIST;
        return;
      } else {
        s->bucket_info = binfo;
        s->bucket_attrs = battrs;
      }

      attrs.clear();

      /* Redo the metadata fusion against the freshly read attrs. */
      rgw_get_request_metadata(s->cct, s->info, attrs, false);
      prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
      populate_with_generic_attrs(s, attrs);
      op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
      if (op_ret < 0) {
        return;
      }

      /* Handle updates of the metadata for Swift's object versioning. */
      if (swift_ver_location) {
        s->bucket_info.swift_ver_location = *swift_ver_location;
        s->bucket_info.swift_versioning = (! swift_ver_location->empty());
      }

      /* Web site of Swift API. */
      filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
      s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

      /* This will also set the quota on the bucket. */
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                    &s->bucket_info.objv_tracker);
      /* Retry on racing writes (ECANCELED) a bounded number of times. */
    } while (op_ret == -ECANCELED && tries++ < 20);

    /* Restore the proper return code. */
    if (op_ret >= 0) {
      op_ret = -ERR_BUCKET_EXISTS;
    }
  }
}
2764
2765 int RGWDeleteBucket::verify_permission()
2766 {
2767 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucket)) {
2768 return -EACCES;
2769 }
2770
2771 return 0;
2772 }
2773
void RGWDeleteBucket::pre_exec()
{
  // Common pre-execution hook shared by bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
2778
void RGWDeleteBucket::execute()
{
  op_ret = -EINVAL;

  if (s->bucket_name.empty())
    return;

  if (!s->bucket_exists) {
    ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }
  RGWObjVersionTracker ot;
  ot.read_version = s->bucket_info.ep_objv;

  /* System requests may pin the exact entry-point version to delete,
   * guarding against racing re-creations. */
  if (s->system_request) {
    string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
    string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
    if (!tag.empty()) {
      ot.read_version.tag = tag;
      uint64_t ver;
      string err;
      ver = strict_strtol(ver_str.c_str(), 10, &err);
      if (!err.empty()) {
        ldout(s->cct, 0) << "failed to parse ver param" << dendl;
        op_ret = -EINVAL;
        return;
      }
      ot.read_version.ver = ver;
    }
  }

  /* Best effort: a stats-sync failure only logs a warning and does not
   * abort the deletion. */
  op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
  if ( op_ret < 0) {
     ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
  }

  op_ret = store->check_bucket_empty(s->bucket_info);
  if (op_ret < 0) {
    return;
  }

  /* Multisite: the metadata master performs the authoritative delete. */
  if (!store->is_meta_master()) {
    bufferlist in_data;
    op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       NULL);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        /* adjust error, we want to return with NoSuchBucket and not
         * NoSuchKey */
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return;
    }
  }

  string prefix, delimiter;

  if (s->prot_flags & RGW_REST_SWIFT) {
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      /* NOTE(review): prefix/delimiter were just default-constructed above,
       * so this non-empty check can never fire — looks like dead code kept
       * for symmetry with similar handling elsewhere; confirm. */
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }

  /* Abort any in-flight multipart uploads so their parts don't leak. */
  op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);

  if (op_ret < 0) {
    return;
  }

  op_ret = store->delete_bucket(s->bucket_info, ot, false);

  if (op_ret == -ECANCELED) {
    // lost a race, either with mdlog sync or another delete bucket operation.
    // in either case, we've already called rgw_unlink_bucket()
    op_ret = 0;
    return;
  }

  if (op_ret == 0) {
    /* Remove the bucket from the owner's bucket list; failure here is
     * only logged. */
    op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
                               s->bucket.name, false);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  }

  if (op_ret < 0) {
    return;
  }


}
2880
int RGWPutObj::verify_permission()
{
  /* For PUT-with-copy-source, the caller must also be allowed to READ the
   * source object. */
  if (copy_source) {

    RGWAccessControlPolicy cs_acl(s->cct);
    optional<Policy> policy;
    map<string, bufferlist> cs_attrs;
    rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
    rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);

    rgw_obj obj(cs_bucket, cs_object);
    store->set_atomic(s->obj_ctx, obj);
    store->set_prefetch_data(s->obj_ctx, obj);

    /* check source object permissions */
    if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, policy,
                        cs_bucket, cs_object) < 0) {
      return -EACCES;
    }

    /* admin request overrides permission checks */
    if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
      if (policy) {
        /* Bucket policy first: an explicit Deny is final; Pass falls
         * through to the source object's ACL. */
        auto e = policy->eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                           RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  /* Destination: bucket policy (if any) is evaluated first; Allow/Deny are
   * final, Pass falls through to the ACL-based check below. */
  if (s->iam_policy) {
    auto e = s->iam_policy->eval(s->env, *s->auth.identity,
                                 rgw::IAM::s3PutObject,
                                 rgw_obj(s->bucket, s->object));
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    }
  }

  if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
2940
// Expose a pointer to the processor's internal multipart object descriptor.
void RGWPutObjProcessor_Multipart::get_mp(RGWMPObj** _mp){
  *_mp = &mp;
}
2944
/* Prepare a multipart part upload: validate uploadId/partNumber from the
 * request, set up the manifest prefix and stripe rule for this part, and
 * initialize the write. Returns 0 on success or a negative error code. */
int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand)
{
  string oid = obj_str;
  upload_id = s->info.args.get("uploadId");
  /* oid_rand, when supplied, replaces the upload id in the shadow-object
   * naming (used when retrying with a fresh random suffix). */
  if (!oid_rand) {
    mp.init(oid, upload_id);
  } else {
    mp.init(oid, upload_id, *oid_rand);
  }

  part_num = s->info.args.get("partNumber");
  if (part_num.empty()) {
    ldout(s->cct, 10) << "part number is empty" << dendl;
    return -EINVAL;
  }

  string err;
  uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);

  if (!err.empty()) {
    ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
    return -EINVAL;
  }

  string upload_prefix = oid + ".";

  if (!oid_rand) {
    upload_prefix.append(upload_id);
  } else {
    upload_prefix.append(*oid_rand);
  }

  rgw_obj target_obj;
  target_obj.init(bucket, oid);

  manifest.set_prefix(upload_prefix);

  /* Each part is striped independently; `num' scopes the rule to this part. */
  manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, num);

  int r = manifest_gen.create_begin(store->ctx(), &manifest, s->bucket_info.placement_rule, bucket, target_obj);
  if (r < 0) {
    return r;
  }

  cur_obj = manifest_gen.get_cur_obj(store);
  rgw_raw_obj_to_obj(bucket, cur_obj, &head_obj);
  /* Keep all parts hashing to the same bucket-index shard as the final
   * object name. */
  head_obj.index_hash_source = obj_str;

  r = prepare_init(store, NULL);
  if (r < 0) {
    return r;
  }

  return 0;
}
3000
/* Finalize one multipart part: write the part's head-object metadata and
 * record the part's info (etag, sizes, manifest, compression) in the
 * multipart meta object's omap under a per-part key. */
int RGWPutObjProcessor_Multipart::do_complete(size_t accounted_size,
                                              const string& etag,
                                              real_time *mtime, real_time set_mtime,
                                              map<string, bufferlist>& attrs,
                                              real_time delete_at,
                                              const char *if_match,
                                              const char *if_nomatch, const string *user_data, rgw_zone_set *zones_trace)
{
  complete_writing_data();

  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, head_obj);
  /* Parts are never versioned themselves; versioning applies to the
   * assembled object on complete-multipart. */
  op_target.set_versioning_disabled(true);
  RGWRados::Object::Write head_obj_op(&op_target);

  head_obj_op.meta.set_mtime = set_mtime;
  head_obj_op.meta.mtime = mtime;
  head_obj_op.meta.owner = s->owner.get_id();
  head_obj_op.meta.delete_at = delete_at;
  head_obj_op.meta.zones_trace = zones_trace;

  int r = head_obj_op.write_meta(obj_len, accounted_size, attrs);
  if (r < 0)
    return r;

  bufferlist bl;
  RGWUploadPartInfo info;
  string p = "part.";
  /* v2 upload ids get zero-padded part keys so omap listing returns parts
   * in numeric order; legacy ids keep the raw (lexicographic) part number. */
  bool sorted_omap = is_v2_upload_id(upload_id);

  if (sorted_omap) {
    string err;
    int part_num_int = strict_strtol(part_num.c_str(), 10, &err);
    if (!err.empty()) {
      dout(10) << "bad part number specified: " << part_num << dendl;
      return -EINVAL;
    }
    char buf[32];
    snprintf(buf, sizeof(buf), "%08d", part_num_int);
    p.append(buf);
  } else {
    p.append(part_num);
  }
  info.num = atoi(part_num.c_str());
  info.etag = etag;
  info.size = obj_len;
  info.accounted_size = accounted_size;
  info.modified = real_clock::now();
  info.manifest = manifest;

  bool compressed;
  r = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
  if (r < 0) {
    dout(1) << "cannot get compression info" << dendl;
    return r;
  }

  ::encode(info, bl);

  string multipart_meta_obj = mp.get_meta();

  rgw_obj meta_obj;
  meta_obj.init_ns(bucket, multipart_meta_obj, mp_ns);
  /* The multipart meta object lives in the extra-data pool. */
  meta_obj.set_in_extra_data(true);

  rgw_raw_obj raw_meta_obj;

  store->obj_to_raw(s->bucket_info.placement_rule, meta_obj, &raw_meta_obj);

  r = store->omap_set(raw_meta_obj, p, bl);

  return r;
}
3073
3074 RGWPutObjProcessor *RGWPutObj::select_processor(RGWObjectCtx& obj_ctx, bool *is_multipart)
3075 {
3076 RGWPutObjProcessor *processor;
3077
3078 bool multipart = s->info.args.exists("uploadId");
3079
3080 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3081
3082 if (!multipart) {
3083 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3084 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_olh_epoch(olh_epoch);
3085 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_version_id(version_id);
3086 } else {
3087 processor = new RGWPutObjProcessor_Multipart(obj_ctx, s->bucket_info, part_size, s);
3088 }
3089
3090 if (is_multipart) {
3091 *is_multipart = multipart;
3092 }
3093
3094 return processor;
3095 }
3096
// Destroy a processor previously returned by select_processor().
void RGWPutObj::dispose_processor(RGWPutObjDataProcessor *processor)
{
  delete processor;
}
3101
void RGWPutObj::pre_exec()
{
  // Common pre-execution hook shared by bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
3106
/* Adapter: feeds data chunks read from the copy source back into the
 * owning RGWPutObj via get_data_cb(). */
class RGWPutObj_CB : public RGWGetDataCB
{
  RGWPutObj *op;  // non-owning back-pointer to the op being serviced
public:
  RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
  ~RGWPutObj_CB() override {}

  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
    return op->get_data_cb(bl, bl_ofs, bl_len);
  }
};
3118
3119 int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3120 {
3121 bufferlist bl_tmp;
3122 bl.copy(bl_ofs, bl_len, bl_tmp);
3123
3124 bl_aux.append(bl_tmp);
3125
3126 return bl_len;
3127 }
3128
3129 int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3130 {
3131 RGWPutObj_CB cb(this);
3132 RGWGetDataCB* filter = &cb;
3133 boost::optional<RGWGetObj_Decompress> decompress;
3134 std::unique_ptr<RGWGetDataCB> decrypt;
3135 RGWCompressionInfo cs_info;
3136 map<string, bufferlist> attrs;
3137 map<string, bufferlist>::iterator attr_iter;
3138 int ret = 0;
3139
3140 uint64_t obj_size;
3141 int64_t new_ofs, new_end;
3142
3143 new_ofs = fst;
3144 new_end = lst;
3145
3146 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3147 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3148
3149 RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3150 RGWRados::Object::Read read_op(&op_target);
3151 read_op.params.obj_size = &obj_size;
3152 read_op.params.attrs = &attrs;
3153
3154 ret = read_op.prepare();
3155 if (ret < 0)
3156 return ret;
3157
3158 bool need_decompress;
3159 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3160 if (op_ret < 0) {
3161 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
3162 return -EIO;
3163 }
3164
3165 bool partial_content = true;
3166 if (need_decompress)
3167 {
3168 obj_size = cs_info.orig_size;
3169 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3170 filter = &*decompress;
3171 }
3172
3173 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3174 op_ret = this->get_decrypt_filter(&decrypt,
3175 filter,
3176 attrs,
3177 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3178 if (decrypt != nullptr) {
3179 filter = decrypt.get();
3180 }
3181 if (op_ret < 0) {
3182 return ret;
3183 }
3184
3185 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3186 if (ret < 0)
3187 return ret;
3188
3189 filter->fixup_range(new_ofs, new_end);
3190 ret = read_op.iterate(new_ofs, new_end, filter);
3191
3192 if (ret >= 0)
3193 ret = filter->flush();
3194
3195 bl.claim_append(bl_aux);
3196
3197 return ret;
3198 }
3199
3200 // special handling for compression type = "random" with multipart uploads
3201 static CompressorRef get_compressor_plugin(const req_state *s,
3202 const std::string& compression_type)
3203 {
3204 if (compression_type != "random") {
3205 return Compressor::create(s->cct, compression_type);
3206 }
3207
3208 bool is_multipart{false};
3209 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3210
3211 if (!is_multipart) {
3212 return Compressor::create(s->cct, compression_type);
3213 }
3214
3215 // use a hash of the multipart upload id so all parts use the same plugin
3216 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3217 if (alg == Compressor::COMP_ALG_NONE) {
3218 return nullptr;
3219 }
3220 return Compressor::create(s->cct, alg);
3221 }
3222
void RGWPutObj::execute()
{
  RGWPutObjProcessor *processor = NULL;
  RGWPutObjDataProcessor *filter = nullptr;
  std::unique_ptr<RGWPutObjDataProcessor> encrypt;
  char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
  char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  MD5 hash;
  bufferlist bl, aclbl, bs;
  int len;
  map<string, string>::iterator iter;
  bool multipart;

  off_t fst;
  off_t lst;
  const auto& compression_type = store->get_zone_params().get_compression_type(
      s->bucket_info.placement_rule);
  CompressorRef plugin;
  boost::optional<RGWPutObj_Compress> compressor;

  /* MD5 over the payload is skipped for DLO/SLO manifests (their etag is
   * derived separately below), unless the client supplied a Content-MD5,
   * which re-enables it (see the supplied_md5_b64 branch). */
  bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
  perfcounter->inc(l_rgw_put);
  op_ret = -EINVAL;
  if (s->object.empty()) {
    goto done;
  }

  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_system_versioning_params() returned ret="
		      << op_ret << dendl;
    goto done;
  }

  if (supplied_md5_b64) {
    need_calc_md5 = true;

    /* decode the base64 Content-MD5 header into its 16 raw bytes; any
     * other length means the header was malformed */
    ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
    op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
                       supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
    ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
    if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
      op_ret = -ERR_INVALID_DIGEST;
      goto done;
    }

    buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
    ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
  }

  if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
                            we also check sizes at the end anyway */
    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                                user_quota, bucket_quota, s->content_length);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl;
      goto done;
    }
    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
      goto done;
    }
  }

  /* a supplied etag (multipart copy-part path) reuses the supplied_md5
   * buffer for the final comparison against the computed etag below */
  if (supplied_etag) {
    strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
    supplied_md5[sizeof(supplied_md5) - 1] = '\0';
  }

  processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);

  // no filters by default
  filter = processor;

  /* Handle object versioning of Swift API. */
  if (! multipart) {
    rgw_obj obj(s->bucket, s->object);
    op_ret = store->swift_versioning_copy(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                                          s->bucket_owner.get_id(),
                                          s->bucket_info,
                                          obj);
    if (op_ret < 0) {
      goto done;
    }
  }

  op_ret = processor->prepare(store, NULL);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret
		      << dendl;
    goto done;
  }

  fst = copy_source_range_fst;
  lst = copy_source_range_lst;

  /* encryption and compression are mutually exclusive; encryption wins
   * because compressing ciphertext is pointless */
  op_ret = get_encrypt_filter(&encrypt, filter);
  if (op_ret < 0) {
    goto done;
  }
  if (encrypt != nullptr) {
    filter = encrypt.get();
  } else {
    //no encryption, we can try compression
    if (compression_type != "none") {
      plugin = get_compressor_plugin(s, compression_type);
      if (!plugin) {
        ldout(s->cct, 1) << "Cannot load plugin for compression type "
            << compression_type << dendl;
      } else {
        compressor.emplace(s->cct, plugin, filter);
        filter = &*compressor;
      }
    }
  }

  /* main read/write loop: either pull data from the client (get_data) or,
   * for server-side copy ranges, from the source object */
  do {
    bufferlist data;
    if (fst > lst)
      break;
    if (!copy_source) {
      len = get_data(data);
    } else {
      uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
      op_ret = get_data(fst, cur_lst, data);
      if (op_ret < 0)
        goto done;
      len = data.length();
      s->content_length += len;
      fst += len;
    }
    if (len < 0) {
      op_ret = len;
      goto done;
    }

    if (need_calc_md5) {
      hash.Update((const byte *)data.c_str(), data.length());
    }

    /* update torrent */
    torrent.update(data);

    /* do we need this operation to be synchronous? if we're dealing with an object with immutable
     * head, e.g., multipart object we need to make sure we're the first one writing to this object
     */
    bool need_to_wait = (ofs == 0) && multipart;

    bufferlist orig_data;

    /* put_data_and_throttle may consume `data`; keep a copy so the write
     * can be replayed if we have to restart with a new oid suffix */
    if (need_to_wait) {
      orig_data = data;
    }

    op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait);
    if (op_ret < 0) {
      if (!need_to_wait || op_ret != -EEXIST) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
			  << op_ret << dendl;
        goto done;
      }
      /* need_to_wait == true and op_ret == -EEXIST */
      ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl;

      /* restore original data */
      data.swap(orig_data);

      /* restart processing with different oid suffix */

      dispose_processor(processor);
      processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
      filter = processor;

      string oid_rand;
      char buf[33];
      gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
      oid_rand.append(buf);

      op_ret = processor->prepare(store, &oid_rand);
      if (op_ret < 0) {
        ldout(s->cct, 0) << "ERROR: processor->prepare() returned "
			 << op_ret << dendl;
        goto done;
      }

      /* rebuild the filter chain on top of the fresh processor */
      op_ret = get_encrypt_filter(&encrypt, filter);
      if (op_ret < 0) {
        goto done;
      }
      if (encrypt != nullptr) {
        filter = encrypt.get();
      } else {
        if (compressor) {
          compressor.emplace(s->cct, plugin, filter);
          filter = &*compressor;
        }
      }
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        goto done;
      }
    }

    ofs += len;
  } while (len > 0);

  /* flush any data buffered inside the filter chain */
  {
    bufferlist flush;
    op_ret = put_data_and_throttle(filter, flush, ofs, false);
    if (op_ret < 0) {
      goto done;
    }
  }

  /* short read from the client: treat as a timed-out/truncated upload */
  if (!chunked_upload && ofs != s->content_length) {
    op_ret = -ERR_REQUEST_TIMEOUT;
    goto done;
  }
  s->obj_size = ofs;

  perfcounter->inc(l_rgw_put_b, s->obj_size);

  op_ret = do_aws4_auth_completion();
  if (op_ret < 0) {
    goto done;
  }

  /* re-check quota with the now-known object size (needed for chunked
   * uploads where content_length was unknown up front) */
  op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                              user_quota, bucket_quota, s->obj_size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
    goto done;
  }

  hash.Final(m);

  /* record compression metadata so reads can decompress transparently */
  if (compressor && compressor->is_compressed()) {
    bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
    ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION
        << " with type=" << cs_info.compression_type
        << ", orig_size=" << cs_info.orig_size
        << ", blocks=" << cs_info.blocks.size() << dendl;
  }

  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  etag = calc_md5;

  if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
    op_ret = -ERR_BAD_DIGEST;
    goto done;
  }

  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  /* DLO: store the manifest pointer and derive the etag from the running
   * hash instead of the payload digest */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      goto done;
    }
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  /* SLO: persist the manifest blob and fold it into the etag */
  if (slo_info) {
    bufferlist manifest_bl;
    ::encode(*slo_info, manifest_bl);
    emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));

    hash.Update((byte *)slo_info->raw_data, slo_info->raw_data_len);
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  if (supplied_etag && etag.compare(supplied_etag) != 0) {
    op_ret = -ERR_UNPROCESSABLE_ENTITY;
    goto done;
  }
  bl.append(etag.c_str(), etag.size() + 1);
  emplace_attr(RGW_ATTR_ETAG, std::move(bl));

  populate_with_generic_attrs(s, attrs);
  rgw_get_request_metadata(s->cct, s->info, attrs);
  encode_delete_at_attr(delete_at, attrs);
  encode_obj_tags_attr(obj_tags.get(), attrs);

  /* Add a custom metadata to expose the information whether an object
   * is an SLO or not. Appending the attribute must be performed AFTER
   * processing any input from user in order to prohibit overwriting. */
  if (slo_info) {
    bufferlist slo_userindicator_bl;
    slo_userindicator_bl.append("True", 4);
    emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
  }

  /* atomically finalize the object (head + attrs + index entry) */
  op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
                               (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
                               (user_data.empty() ? nullptr : &user_data));

  /* produce torrent */
  if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
  {
    torrent.init(s, store);
    torrent.set_create_date(mtime);
    op_ret =  torrent.complete();
    if (0 != op_ret)
    {
      ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
      goto done;
    }
  }

done:
  dispose_processor(processor);
  perfcounter->tinc(l_rgw_put_lat,
                    (ceph_clock_now() - s->time));
}
3568
/* Always passes: POST-object permission checks (IAM policy / bucket ACL)
 * are deferred to RGWPostObj::execute(), which runs them once the form has
 * been parsed and the target object is known. */
int RGWPostObj::verify_permission()
{
  return 0;
}
3573 /*
3574 RGWPutObjProcessor *RGWPostObj::select_processor(RGWObjectCtx& obj_ctx)
3575 {
3576 RGWPutObjProcessor *processor;
3577
3578 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3579
3580 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3581
3582 return processor;
3583 }
3584
3585 void RGWPostObj::dispose_processor(RGWPutObjDataProcessor *processor)
3586 {
3587 delete processor;
3588 }
3589 */
/* Standard bucket/object pre-execution hook (logging/env setup shared by
 * bucket-and-object ops). */
void RGWPostObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3594
3595 void RGWPostObj::execute()
3596 {
3597 RGWPutObjDataProcessor *filter = nullptr;
3598 boost::optional<RGWPutObj_Compress> compressor;
3599 CompressorRef plugin;
3600 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3601
3602 /* Read in the data from the POST form. */
3603 op_ret = get_params();
3604 if (op_ret < 0) {
3605 return;
3606 }
3607
3608 op_ret = verify_params();
3609 if (op_ret < 0) {
3610 return;
3611 }
3612
3613 if (s->iam_policy) {
3614 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
3615 rgw::IAM::s3PutObject,
3616 rgw_obj(s->bucket, s->object));
3617 if (e == Effect::Deny) {
3618 op_ret = -EACCES;
3619 return;
3620 } else if (e == Effect::Pass && !verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3621 op_ret = -EACCES;
3622 return;
3623 }
3624 } else if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3625 op_ret = -EACCES;
3626 return;
3627 }
3628
3629 /* Start iteration over data fields. It's necessary as Swift's FormPost
3630 * is capable to handle multiple files in single form. */
3631 do {
3632 std::unique_ptr<RGWPutObjDataProcessor> encrypt;
3633 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3634 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3635 MD5 hash;
3636 ceph::buffer::list bl, aclbl;
3637 int len = 0;
3638
3639 op_ret = store->check_quota(s->bucket_owner.get_id(),
3640 s->bucket,
3641 user_quota,
3642 bucket_quota,
3643 s->content_length);
3644 if (op_ret < 0) {
3645 return;
3646 }
3647
3648 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3649 if (op_ret < 0) {
3650 return;
3651 }
3652
3653 if (supplied_md5_b64) {
3654 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3655 ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3656 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3657 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3658 ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
3659 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3660 op_ret = -ERR_INVALID_DIGEST;
3661 return;
3662 }
3663
3664 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
3665 ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
3666 }
3667
3668 RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx),
3669 s->bucket_info,
3670 s->bucket,
3671 get_current_filename(),
3672 /* part size */
3673 s->cct->_conf->rgw_obj_stripe_size,
3674 s->req_id,
3675 s->bucket_info.versioning_enabled());
3676 /* No filters by default. */
3677 filter = &processor;
3678
3679 op_ret = processor.prepare(store, nullptr);
3680 if (op_ret < 0) {
3681 return;
3682 }
3683
3684 op_ret = get_encrypt_filter(&encrypt, filter);
3685 if (op_ret < 0) {
3686 return;
3687 }
3688 if (encrypt != nullptr) {
3689 filter = encrypt.get();
3690 } else {
3691 const auto& compression_type = store->get_zone_params().get_compression_type(
3692 s->bucket_info.placement_rule);
3693 if (compression_type != "none") {
3694 plugin = Compressor::create(s->cct, compression_type);
3695 if (!plugin) {
3696 ldout(s->cct, 1) << "Cannot load plugin for compression type "
3697 << compression_type << dendl;
3698 } else {
3699 compressor.emplace(s->cct, plugin, filter);
3700 filter = &*compressor;
3701 }
3702 }
3703 }
3704
3705 bool again;
3706 do {
3707 ceph::bufferlist data;
3708 len = get_data(data, again);
3709
3710 if (len < 0) {
3711 op_ret = len;
3712 return;
3713 }
3714
3715 if (!len) {
3716 break;
3717 }
3718
3719 hash.Update((const byte *)data.c_str(), data.length());
3720 op_ret = put_data_and_throttle(filter, data, ofs, false);
3721
3722 ofs += len;
3723
3724 if (ofs > max_len) {
3725 op_ret = -ERR_TOO_LARGE;
3726 return;
3727 }
3728 } while (again);
3729
3730 {
3731 bufferlist flush;
3732 op_ret = put_data_and_throttle(filter, flush, ofs, false);
3733 }
3734
3735 if (len < min_len) {
3736 op_ret = -ERR_TOO_SMALL;
3737 return;
3738 }
3739
3740 s->obj_size = ofs;
3741
3742 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
3743 op_ret = -ERR_BAD_DIGEST;
3744 return;
3745 }
3746
3747 op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
3748 user_quota, bucket_quota, s->obj_size);
3749 if (op_ret < 0) {
3750 return;
3751 }
3752
3753 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3754 if (op_ret < 0) {
3755 return;
3756 }
3757
3758 hash.Final(m);
3759 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
3760
3761 etag = calc_md5;
3762 bl.append(etag.c_str(), etag.size() + 1);
3763 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
3764
3765 policy.encode(aclbl);
3766 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3767
3768 const std::string content_type = get_current_content_type();
3769 if (! content_type.empty()) {
3770 ceph::bufferlist ct_bl;
3771 ct_bl.append(content_type.c_str(), content_type.size() + 1);
3772 emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
3773 }
3774
3775 if (compressor && compressor->is_compressed()) {
3776 ceph::bufferlist tmp;
3777 RGWCompressionInfo cs_info;
3778 cs_info.compression_type = plugin->get_type_name();
3779 cs_info.orig_size = s->obj_size;
3780 cs_info.blocks = move(compressor->get_compression_blocks());
3781 ::encode(cs_info, tmp);
3782 emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
3783 }
3784
3785 op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(),
3786 attrs, (delete_at ? *delete_at : real_time()));
3787 } while (is_next_file_to_upload());
3788 }
3789
3790
3791 void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
3792 const set<string>& rmattr_names,
3793 map<int, string>& temp_url_keys)
3794 {
3795 map<string, bufferlist>::iterator iter;
3796
3797 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
3798 if (iter != add_attrs.end()) {
3799 temp_url_keys[0] = iter->second.c_str();
3800 add_attrs.erase(iter);
3801 }
3802
3803 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
3804 if (iter != add_attrs.end()) {
3805 temp_url_keys[1] = iter->second.c_str();
3806 add_attrs.erase(iter);
3807 }
3808
3809 for (const string& name : rmattr_names) {
3810 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
3811 temp_url_keys[0] = string();
3812 }
3813 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
3814 temp_url_keys[1] = string();
3815 }
3816 }
3817 }
3818
/* Gather and pre-filter everything verify_permission() will need: request
 * params, the account's current attrs, and the TempURL/quota items that
 * require elevated privileges. Returns 0 or a negative error. */
int RGWPutMetadataAccount::init_processing()
{
  /* First, go to the base class. At the time of writing the method was
   * responsible only for initializing the quota. This isn't necessary
   * here as we are touching metadata only. I'm putting this call only
   * for the future. */
  op_ret = RGWOp::init_processing();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }

  /* load the account's current xattrs; they seed the add/del filtering */
  op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
                                     &acct_op_tracker);
  if (op_ret < 0) {
    return op_ret;
  }

  if (has_policy) {
    bufferlist acl_bl;
    policy.encode(acl_bl);
    attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
  }

  rgw_get_request_metadata(s->cct, s->info, attrs, false);
  prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* Try extract the TempURL-related stuff now to allow verify_permission
   * evaluate whether we need FULL_CONTROL or not. */
  filter_out_temp_url(attrs, rmattr_names, temp_url_keys);

  /* The same with quota except a client needs to be reseller admin. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
                                 &new_quota_extracted);
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
3864
3865 int RGWPutMetadataAccount::verify_permission()
3866 {
3867 if (s->auth.identity->is_anonymous()) {
3868 return -EACCES;
3869 }
3870
3871 if (!verify_user_permission(s, RGW_PERM_WRITE)) {
3872 return -EACCES;
3873 }
3874
3875 /* Altering TempURL keys requires FULL_CONTROL. */
3876 if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
3877 return -EPERM;
3878 }
3879
3880 /* We are failing this intensionally to allow system user/reseller admin
3881 * override in rgw_process.cc. This is the way to specify a given RGWOp
3882 * expect extra privileges. */
3883 if (new_quota_extracted) {
3884 return -EACCES;
3885 }
3886
3887 return 0;
3888 }
3889
/* Apply the account metadata gathered in init_processing(): merge TempURL
 * keys and any extracted quota into a fresh copy of the user info, then
 * persist it together with the filtered xattrs. */
void RGWPutMetadataAccount::execute()
{
  /* Params have been extracted earlier. See init_processing(). */
  RGWUserInfo new_uinfo;
  /* re-read the user record under the op tracker to get a current copy */
  op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
                                    &acct_op_tracker);
  if (op_ret < 0) {
    return;
  }

  /* Handle the TempURL-related stuff. */
  if (!temp_url_keys.empty()) {
    for (auto& pair : temp_url_keys) {
      new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
    }
  }

  /* Handle the quota extracted at the verify_permission step. */
  if (new_quota_extracted) {
    new_uinfo.user_quota = std::move(new_quota);
  }

  /* We are passing here the current (old) user info to allow the function
   * optimize-out some operations. */
  op_ret = rgw_store_user_info(store, new_uinfo, s->user,
                               &acct_op_tracker, real_time(), false, &attrs);
}
3917
3918 int RGWPutMetadataBucket::verify_permission()
3919 {
3920 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3921 return -EACCES;
3922 }
3923
3924 return 0;
3925 }
3926
/* Standard bucket/object pre-execution hook. */
void RGWPutMetadataBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3931
/* Swift container-metadata update: merges ACL/CORS/quota/versioning/website
 * settings plus generic metadata into the bucket's attrs and stores them. */
void RGWPutMetadataBucket::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_get_request_metadata(s->cct, s->info, attrs, false);

  /* a placement rule differing from the bucket's existing one cannot be
   * applied in-place; EEXIST maps to a conflict response — presumably
   * "container already exists with different storage policy"; confirm
   * against the Swift handler */
  if (!placement_rule.empty() &&
      placement_rule != s->bucket_info.placement_rule) {
    op_ret = -EEXIST;
    return;
  }

  /* Encode special metadata first as we're using std::map::emplace under
   * the hood. This method will add the new items only if the map doesn't
   * contain such keys yet. */
  if (has_policy) {
    if (s->dialect.compare("swift") == 0) {
      /* Swift merges the incoming read/write grants into the existing
       * ACL instead of replacing it wholesale */
      auto old_policy = \
        static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
      auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
      new_policy->filter_merge(policy_rw_mask, old_policy);
      policy = *new_policy;
    }
    buffer::list bl;
    policy.encode(bl);
    emplace_attr(RGW_ATTR_ACL, std::move(bl));
  }

  if (has_cors) {
    buffer::list bl;
    cors_config.encode(bl);
    emplace_attr(RGW_ATTR_CORS, std::move(bl));
  }

  /* It's supposed that following functions WILL NOT change any special
   * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
  prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* According to the Swift's behaviour and its container_quota WSGI middleware
   * implementation: anyone with write permissions is able to set the bucket
   * quota. This stays in contrast to account quotas that can be set only by
   * clients holding reseller admin privileges. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
  if (op_ret < 0) {
    return;
  }

  /* an empty X-Versions-Location disables Swift versioning */
  if (swift_ver_location) {
    s->bucket_info.swift_ver_location = *swift_ver_location;
    s->bucket_info.swift_versioning = (! swift_ver_location->empty());
  }

  /* Web site of Swift API. */
  filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
  s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

  /* Setting attributes also stores the provided bucket info. Due to this
   * fact, the new quota settings can be serialized with the same call. */
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                &s->bucket_info.objv_tracker);
}
3997
3998 int RGWPutMetadataObject::verify_permission()
3999 {
4000 // This looks to be something specific to Swift. We could add
4001 // operations like swift:PutMetadataObject to the Policy Engine.
4002 if (!verify_object_permission_no_policy(s, RGW_PERM_WRITE)) {
4003 return -EACCES;
4004 }
4005
4006 return 0;
4007 }
4008
/* Standard bucket/object pre-execution hook. */
void RGWPutMetadataObject::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4013
/* Swift object-metadata update (POST on an object): replaces the object's
 * user metadata with what the request carries, honoring expiration and an
 * optional DLO manifest header. */
void RGWPutMetadataObject::execute()
{
  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs, orig_attrs, rmattrs;

  store->set_atomic(s->obj_ctx, obj);

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_get_request_metadata(s->cct, s->info, attrs);
  /* check if obj exists, read orig attrs */
  op_ret = get_obj_attrs(store, s, obj, orig_attrs);
  if (op_ret < 0) {
    return;
  }

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(orig_attrs)) {
    op_ret = -ENOENT;
    return;
  }

  /* Filter currently existing attributes. */
  prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
  populate_with_generic_attrs(s, attrs);
  encode_delete_at_attr(delete_at, attrs);

  /* X-Object-Manifest header: (re)point this object at a DLO manifest */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  /* write new attrs and drop the ones scheduled for removal in one call */
  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
}
4055
4056 int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
4057 {
4058 RGWSLOInfo slo_info;
4059 bufferlist::iterator bliter = bl.begin();
4060 try {
4061 ::decode(slo_info, bliter);
4062 } catch (buffer::error& err) {
4063 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
4064 return -EIO;
4065 }
4066
4067 try {
4068 deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
4069 new RGWBulkDelete::Deleter(store, s));
4070 } catch (std::bad_alloc) {
4071 return -ENOMEM;
4072 }
4073
4074 list<RGWBulkDelete::acct_path_t> items;
4075 for (const auto& iter : slo_info.entries) {
4076 const string& path_str = iter.path;
4077
4078 const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
4079 if (boost::string_view::npos == sep_pos) {
4080 return -EINVAL;
4081 }
4082
4083 RGWBulkDelete::acct_path_t path;
4084
4085 path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
4086 path.obj_key = url_decode(path_str.substr(sep_pos + 1));
4087
4088 items.push_back(path);
4089 }
4090
4091 /* Request removal of the manifest object itself. */
4092 RGWBulkDelete::acct_path_t path;
4093 path.bucket_name = s->bucket_name;
4094 path.obj_key = s->object;
4095 items.push_back(path);
4096
4097 int ret = deleter->delete_chunk(items);
4098 if (ret < 0) {
4099 return ret;
4100 }
4101
4102 return 0;
4103 }
4104
4105 int RGWDeleteObj::verify_permission()
4106 {
4107 if (s->iam_policy) {
4108 auto r = s->iam_policy->eval(s->env, *s->auth.identity,
4109 s->object.instance.empty() ?
4110 rgw::IAM::s3DeleteObject :
4111 rgw::IAM::s3DeleteObjectVersion,
4112 ARN(s->bucket, s->object.name));
4113 if (r == Effect::Allow)
4114 return true;
4115 else if (r == Effect::Deny)
4116 return false;
4117 }
4118
4119 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
4120 return -EACCES;
4121 }
4122
4123 return 0;
4124 }
4125
/* Standard bucket/object pre-execution hook. */
void RGWDeleteObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4130
/* Delete an object, handling Swift versioning restore, Swift SLO
 * multipart-delete and object expiration along the way. */
void RGWDeleteObj::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs;


  if (!s->object.empty()) {
    if (need_object_expiration() || multipart_delete) {
      /* check if obj exists, read orig attrs */
      op_ret = get_obj_attrs(store, s, obj, attrs);
      if (op_ret < 0) {
        return;
      }
    }

    /* Swift ?multipart-manifest=delete: only valid on an SLO manifest;
     * the SLO handler deletes the segments plus the manifest itself */
    if (multipart_delete) {
      const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);

      if (slo_attr != attrs.end()) {
        op_ret = handle_slo_manifest(slo_attr->second);
        if (op_ret < 0) {
          ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
        }
      } else {
        op_ret = -ERR_NOT_SLO_MANIFEST;
      }

      return;
    }

    RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
    obj_ctx->obj.set_atomic(obj);

    /* Swift X-Versions-Location: deleting the current object may restore
     * the newest archived version instead of removing anything */
    bool ver_restored = false;
    op_ret = store->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
                                             s->bucket_info, obj, ver_restored);
    if (op_ret < 0) {
      return;
    }

    if (!ver_restored) {
      /* Swift's versioning mechanism hasn't found any previous version of
       * the object that could be restored. This means we should proceed
       * with the regular delete path. */
      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
      RGWRados::Object::Delete del_op(&del_target);

      op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
                                            &del_op.params.marker_version_id);
      if (op_ret < 0) {
        return;
      }

      del_op.params.bucket_owner = s->bucket_owner.get_id();
      del_op.params.versioning_status = s->bucket_info.versioning_status();
      del_op.params.obj_owner = s->owner;
      del_op.params.unmod_since = unmod_since;
      del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */

      op_ret = del_op.delete_obj();
      if (op_ret >= 0) {
        /* S3 versioning results, echoed back in the response headers */
        delete_marker = del_op.result.delete_marker;
        version_id = del_op.result.version_id;
      }

      /* Check whether the object has expired. Swift API documentation
       * stands that we should return 404 Not Found in such case. */
      if (need_object_expiration() && object_is_expired(attrs)) {
        op_ret = -ENOENT;
        return;
      }
    }

    if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
      op_ret = 0;
    }
  } else {
    op_ret = -EINVAL;
  }
}
4221
4222
4223 bool RGWCopyObj::parse_copy_location(const string& url_src, string& bucket_name, rgw_obj_key& key)
4224 {
4225 string name_str;
4226 string params_str;
4227
4228 size_t pos = url_src.find('?');
4229 if (pos == string::npos) {
4230 name_str = url_src;
4231 } else {
4232 name_str = url_src.substr(0, pos);
4233 params_str = url_src.substr(pos + 1);
4234 }
4235
4236 std::string dec_src = url_decode(name_str);
4237 const char *src = dec_src.c_str();
4238
4239 if (*src == '/') ++src;
4240
4241 string str(src);
4242
4243 pos = str.find('/');
4244 if (pos ==string::npos)
4245 return false;
4246
4247 bucket_name = str.substr(0, pos);
4248 key.name = str.substr(pos + 1);
4249
4250 if (key.name.empty()) {
4251 return false;
4252 }
4253
4254 if (!params_str.empty()) {
4255 RGWHTTPArgs args;
4256 args.set(params_str);
4257 args.parse();
4258
4259 key.instance = args.get("versionId", NULL);
4260 }
4261
4262 return true;
4263 }
4264
/* Authorize a server-side copy: READ on the source object (policy first,
 * then ACL, with admin override) and WRITE on the destination bucket.
 * Also resolves source/destination bucket info as a side effect. */
int RGWCopyObj::verify_permission()
{
  RGWAccessControlPolicy src_acl(s->cct);
  optional<Policy> src_policy;
  op_ret = get_params();
  if (op_ret < 0)
    return op_ret;

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return op_ret;
  }
  map<string, bufferlist> src_attrs;

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  if (s->bucket_instance_id.empty()) {
    op_ret = store->get_bucket_info(obj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
  } else {
    /* will only happen in intra region sync where the source and dest bucket is the same */
    op_ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
  }
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_BUCKET;
    }
    return op_ret;
  }

  src_bucket = src_bucket_info.bucket;

  /* get buckets info (source and dest) */
  if (s->local_source && source_zone.empty()) {
    rgw_obj src_obj(src_bucket, src_object);
    store->set_atomic(s->obj_ctx, src_obj);
    store->set_prefetch_data(s->obj_ctx, src_obj);

    /* check source object permissions */
    op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl,
			     src_policy, src_bucket, src_object);
    if (op_ret < 0) {
      return op_ret;
    }

    /* admin request overrides permission checks */
    if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
      if (src_policy) {
        /* bucket policy: explicit Deny wins; Pass defers to the ACL */
	auto e = src_policy->eval(s->env, *s->auth.identity,
				  src_object.instance.empty() ?
				  rgw::IAM::s3GetObject :
				  rgw::IAM::s3GetObjectVersion,
				  ARN(src_obj));
	if (e == Effect::Deny) {
	  return -EACCES;
	} else if (e == Effect::Pass &&
		   !src_acl.verify_permission(*s->auth.identity, s->perm_mask,
					      RGW_PERM_READ)) {
	  return -EACCES;
	}
      } else if (!src_acl.verify_permission(*s->auth.identity,
					       s->perm_mask,
					    RGW_PERM_READ)) {
	return -EACCES;
      }
    }
  }

  RGWAccessControlPolicy dest_bucket_policy(s->cct);
  map<string, bufferlist> dest_attrs;

  if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
                                                           or intra region sync */
    dest_bucket_info = src_bucket_info;
    dest_attrs = src_attrs;
  } else {
    op_ret = store->get_bucket_info(obj_ctx, dest_tenant_name, dest_bucket_name,
                                    dest_bucket_info, nullptr, &dest_attrs);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return op_ret;
    }
  }

  dest_bucket = dest_bucket_info.bucket;

  rgw_obj dest_obj(dest_bucket, dest_object);
  store->set_atomic(s->obj_ctx, dest_obj);

  /* check dest bucket permissions */
  op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
                              &dest_bucket_policy, dest_bucket);
  if (op_ret < 0) {
    return op_ret;
  }

  /* admin request overrides permission checks */
  if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id()) &&
      ! dest_bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_WRITE)) {
    return -EACCES;
  }

  op_ret = init_dest_policy();
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4376
4377
4378 int RGWCopyObj::init_common()
4379 {
4380 if (if_mod) {
4381 if (parse_time(if_mod, &mod_time) < 0) {
4382 op_ret = -EINVAL;
4383 return op_ret;
4384 }
4385 mod_ptr = &mod_time;
4386 }
4387
4388 if (if_unmod) {
4389 if (parse_time(if_unmod, &unmod_time) < 0) {
4390 op_ret = -EINVAL;
4391 return op_ret;
4392 }
4393 unmod_ptr = &unmod_time;
4394 }
4395
4396 bufferlist aclbl;
4397 dest_policy.encode(aclbl);
4398 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
4399
4400 rgw_get_request_metadata(s->cct, s->info, attrs);
4401 populate_with_generic_attrs(s, attrs);
4402
4403 return 0;
4404 }
4405
4406 static void copy_obj_progress_cb(off_t ofs, void *param)
4407 {
4408 RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
4409 op->progress_cb(ofs);
4410 }
4411
4412 void RGWCopyObj::progress_cb(off_t ofs)
4413 {
4414 if (!s->cct->_conf->rgw_copy_obj_progress)
4415 return;
4416
4417 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
4418 return;
4419
4420 send_partial_response(ofs);
4421
4422 last_ofs = ofs;
4423 }
4424
void RGWCopyObj::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
4429
void RGWCopyObj::execute()
{
  /* Parse conditional headers and seed dest attrs (ACL, request metadata). */
  if (init_common() < 0)
    return;

  rgw_obj src_obj(src_bucket, src_object);
  rgw_obj dst_obj(dest_bucket, dest_object);

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  /* Mark both ends atomic so concurrent writers can't interleave. */
  obj_ctx.obj.set_atomic(src_obj);
  obj_ctx.obj.set_atomic(dst_obj);

  encode_delete_at_attr(delete_at, attrs);

  /* system (e.g. multisite sync) requests carry high-precision mtimes */
  bool high_precision_time = (s->system_request);

  /* Handle object versioning of Swift API. In case of copying to remote this
   * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
  op_ret = store->swift_versioning_copy(obj_ctx,
                                        dest_bucket_info.owner,
                                        dest_bucket_info,
                                        dst_obj);
  if (op_ret < 0) {
    return;
  }

  /* The copy itself; progress reported via copy_obj_progress_cb(this). */
  op_ret = store->copy_obj(obj_ctx,
                           s->user->user_id,
                           client_id,
                           op_id,
                           &s->info,
                           source_zone,
                           dst_obj,
                           src_obj,
                           dest_bucket_info,
                           src_bucket_info,
                           &src_mtime,
                           &mtime,
                           mod_ptr,
                           unmod_ptr,
                           high_precision_time,
                           if_match,
                           if_nomatch,
                           attrs_mod,
                           copy_if_newer,
                           attrs, RGW_OBJ_CATEGORY_MAIN,
                           olh_epoch,
                           (delete_at ? *delete_at : real_time()),
                           (version_id.empty() ? NULL : &version_id),
                           &s->req_id, /* use req_id as tag */
                           &etag,
                           copy_obj_progress_cb, (void *)this
                           );
}
4484
4485 int RGWGetACLs::verify_permission()
4486 {
4487 bool perm;
4488 if (!s->object.empty()) {
4489 perm = verify_object_permission(s,
4490 s->object.instance.empty() ?
4491 rgw::IAM::s3GetObjectAcl :
4492 rgw::IAM::s3GetObjectVersionAcl);
4493 } else {
4494 perm = verify_bucket_permission(s, rgw::IAM::s3GetBucketAcl);
4495 }
4496 if (!perm)
4497 return -EACCES;
4498
4499 return 0;
4500 }
4501
void RGWGetACLs::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
4506
4507 void RGWGetACLs::execute()
4508 {
4509 stringstream ss;
4510 RGWAccessControlPolicy* const acl = \
4511 (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get());
4512 RGWAccessControlPolicy_S3* const s3policy = \
4513 static_cast<RGWAccessControlPolicy_S3*>(acl);
4514 s3policy->to_xml(ss);
4515 acls = ss.str();
4516 }
4517
4518
4519
4520 int RGWPutACLs::verify_permission()
4521 {
4522 bool perm;
4523 if (!s->object.empty()) {
4524 perm = verify_object_permission(s,
4525 s->object.instance.empty() ?
4526 rgw::IAM::s3PutObjectAcl :
4527 rgw::IAM::s3PutObjectVersionAcl);
4528 } else {
4529 perm = verify_bucket_permission(s, rgw::IAM::s3PutBucketAcl);
4530 }
4531 if (!perm)
4532 return -EACCES;
4533
4534 return 0;
4535 }
4536
4537 int RGWGetLC::verify_permission()
4538 {
4539 bool perm;
4540 perm = verify_bucket_permission(s, rgw::IAM::s3GetLifecycleConfiguration);
4541 if (!perm)
4542 return -EACCES;
4543
4544 return 0;
4545 }
4546
4547 int RGWPutLC::verify_permission()
4548 {
4549 bool perm;
4550 perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
4551 if (!perm)
4552 return -EACCES;
4553
4554 return 0;
4555 }
4556
int RGWDeleteLC::verify_permission()
{
  bool perm;
  /* NOTE(review): delete is gated on the *Put* action; this mirrors AWS,
   * where DeleteBucketLifecycle requires s3:PutLifecycleConfiguration —
   * confirm this is intentional rather than a copy-paste. */
  perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
  if (!perm)
    return -EACCES;

  return 0;
}
4566
void RGWPutACLs::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
4571
void RGWGetLC::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
4576
void RGWPutLC::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
4581
void RGWDeleteLC::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
4586
void RGWPutACLs::execute()
{
  bufferlist bl;

  RGWAccessControlPolicy_S3 *policy = NULL;
  RGWACLXMLParser_S3 parser(s->cct);
  RGWAccessControlPolicy_S3 new_policy(s->cct);
  stringstream ss;
  char *new_data = NULL;
  rgw_obj obj;

  op_ret = 0; /* XXX redundant? */

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }


  /* The new grants are rebuilt against the *current* owner of the target
   * (bucket or object) — ownership cannot be changed via PutAcl. */
  RGWAccessControlPolicy* const existing_policy = \
    (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());

  owner = existing_policy->get_owner();

  op_ret = get_params();
  if (op_ret < 0) {
    /* -ERANGE: request body exceeded rgw_max_put_param_size; report it as
     * malformed XML with an explanatory message. */
    if (op_ret == -ERANGE) {
      ldout(s->cct, 4) << "The size of request xml data is larger than the max limitation, data size = "
                       << s->length << dendl;
      op_ret = -ERR_MALFORMED_XML;
      s->err.message = "The XML you provided was larger than the maximum " +
                       std::to_string(s->cct->_conf->rgw_max_put_param_size) +
                       " bytes allowed.";
    }
    return;
  }

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  /* A canned ACL and an XML body are mutually exclusive. */
  if (!s->canned_acl.empty() && len) {
    op_ret = -EINVAL;
    return;
  }

  /* Canned ACL or ACL-bearing headers: synthesize the XML policy and make
   * it the request body (replacing any previous buffer). */
  if (!s->canned_acl.empty() || s->has_acl_header) {
    op_ret = get_policy_from_state(store, s, ss);
    if (op_ret < 0)
      return;

    new_data = strdup(ss.str().c_str());
    free(data);
    data = new_data;
    len = ss.str().size();
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    return;
  }
  policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
  if (!policy) {
    op_ret = -EINVAL;
    return;
  }

  /* Cap the number of grants (rgw_acl_grants_max_num; <0 means use the
   * built-in default of 100). */
  const RGWAccessControlList& req_acl = policy->get_acl();
  const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
#define ACL_GRANTS_MAX_NUM 100
  int max_num = s->cct->_conf->rgw_acl_grants_max_num;
  if (max_num < 0) {
    max_num = ACL_GRANTS_MAX_NUM;
  }

  int grants_num = req_grant_map.size();
  if (grants_num > max_num) {
    ldout(s->cct, 4) << "An acl can have up to "
                     << max_num
                     << " grants, request acl grants num: "
                     << grants_num << dendl;
    op_ret = -ERR_MALFORMED_ACL_ERROR;
    s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
                     + std::to_string(max_num)
                     + " grants allowed in an acl.";
    return;
  }

  // forward bucket acl requests to meta master zone
  if (s->object.empty() && !store->is_meta_master()) {
    bufferlist in_data;
    // include acl data unless it was generated from a canned_acl
    if (s->canned_acl.empty()) {
      in_data.append(data, len);
    }
    op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old AccessControlPolicy";
    policy->to_xml(*_dout);
    *_dout << dendl;
  }

  /* Validate grantees and re-anchor the policy on the existing owner. */
  op_ret = policy->rebuild(store, &owner, new_policy);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New AccessControlPolicy:";
    new_policy.to_xml(*_dout);
    *_dout << dendl;
  }

  /* Persist: object ACLs go on the object xattr, bucket ACLs on the
   * bucket-instance attrs. */
  new_policy.encode(bl);
  map<string, bufferlist> attrs;

  if (!s->object.empty()) {
    obj = rgw_obj(s->bucket, s->object);
    store->set_atomic(s->obj_ctx, obj);
    //if instance is empty, we should modify the latest object
    op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
  } else {
    attrs = s->bucket_attrs;
    attrs[RGW_ATTR_ACL] = bl;
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  }
  if (op_ret == -ECANCELED) {
    op_ret = 0; /* lost a race, but it's ok because acls are immutable */
  }
}
4720
4721 static void get_lc_oid(struct req_state *s, string& oid)
4722 {
4723 string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
4724 int max_objs = (s->cct->_conf->rgw_lc_max_objs > HASH_PRIME)?HASH_PRIME:s->cct->_conf->rgw_lc_max_objs;
4725 int index = ceph_str_hash_linux(shard_id.c_str(), shard_id.size()) % HASH_PRIME % max_objs;
4726 oid = lc_oid_prefix;
4727 char buf[32];
4728 snprintf(buf, 32, ".%d", index);
4729 oid.append(buf);
4730 return;
4731 }
4732
4733 void RGWPutLC::execute()
4734 {
4735 bufferlist bl;
4736
4737 RGWLifecycleConfiguration_S3 *config = NULL;
4738 RGWLCXMLParser_S3 parser(s->cct);
4739 RGWLifecycleConfiguration_S3 new_config(s->cct);
4740
4741 if (!parser.init()) {
4742 op_ret = -EINVAL;
4743 return;
4744 }
4745
4746 op_ret = get_params();
4747 if (op_ret < 0)
4748 return;
4749
4750 ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;
4751
4752 if (!parser.parse(data, len, 1)) {
4753 op_ret = -ERR_MALFORMED_XML;
4754 return;
4755 }
4756 config = static_cast<RGWLifecycleConfiguration_S3 *>(parser.find_first("LifecycleConfiguration"));
4757 if (!config) {
4758 op_ret = -ERR_MALFORMED_XML;
4759 return;
4760 }
4761
4762 if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
4763 ldout(s->cct, 15) << "Old LifecycleConfiguration:";
4764 config->to_xml(*_dout);
4765 *_dout << dendl;
4766 }
4767
4768 op_ret = config->rebuild(store, new_config);
4769 if (op_ret < 0)
4770 return;
4771
4772 if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
4773 ldout(s->cct, 15) << "New LifecycleConfiguration:";
4774 new_config.to_xml(*_dout);
4775 *_dout << dendl;
4776 }
4777
4778 new_config.encode(bl);
4779 map<string, bufferlist> attrs;
4780 attrs = s->bucket_attrs;
4781 attrs[RGW_ATTR_LC] = bl;
4782 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4783 if (op_ret < 0)
4784 return;
4785 string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
4786 string oid;
4787 get_lc_oid(s, oid);
4788 pair<string, int> entry(shard_id, lc_uninitial);
4789 int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
4790 rados::cls::lock::Lock l(lc_index_lock_name);
4791 utime_t time(max_lock_secs, 0);
4792 l.set_duration(time);
4793 l.set_cookie(cookie);
4794 librados::IoCtx *ctx = store->get_lc_pool_ctx();
4795 do {
4796 op_ret = l.lock_exclusive(ctx, oid);
4797 if (op_ret == -EBUSY) {
4798 dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
4799 sleep(5);
4800 continue;
4801 }
4802 if (op_ret < 0) {
4803 dout(0) << "RGWLC::RGWPutLC() failed to acquire lock " << oid << op_ret << dendl;
4804 break;
4805 }
4806 op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry);
4807 if (op_ret < 0) {
4808 dout(0) << "RGWLC::RGWPutLC() failed to set entry " << oid << op_ret << dendl;
4809 }
4810 break;
4811 }while(1);
4812 l.unlock(ctx, oid);
4813 return;
4814 }
4815
4816 void RGWDeleteLC::execute()
4817 {
4818 bufferlist bl;
4819 map<string, bufferlist> orig_attrs, attrs;
4820 map<string, bufferlist>::iterator iter;
4821 rgw_raw_obj obj;
4822 store->get_bucket_instance_obj(s->bucket, obj);
4823 store->set_prefetch_data(s->obj_ctx, obj);
4824 op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
4825 if (op_ret < 0)
4826 return;
4827
4828 for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
4829 const string& name = iter->first;
4830 dout(10) << "DeleteLC : attr: " << name << dendl;
4831 if (name.compare(0, (sizeof(RGW_ATTR_LC) - 1), RGW_ATTR_LC) != 0) {
4832 if (attrs.find(name) == attrs.end()) {
4833 attrs[name] = iter->second;
4834 }
4835 }
4836 }
4837 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4838 string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
4839 pair<string, int> entry(shard_id, lc_uninitial);
4840 string oid;
4841 get_lc_oid(s, oid);
4842 int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
4843 librados::IoCtx *ctx = store->get_lc_pool_ctx();
4844 rados::cls::lock::Lock l(lc_index_lock_name);
4845 utime_t time(max_lock_secs, 0);
4846 l.set_duration(time);
4847 do {
4848 op_ret = l.lock_exclusive(ctx, oid);
4849 if (op_ret == -EBUSY) {
4850 dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
4851 sleep(5);
4852 continue;
4853 }
4854 if (op_ret < 0) {
4855 dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock " << oid << op_ret << dendl;
4856 break;
4857 }
4858 op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry);
4859 if (op_ret < 0) {
4860 dout(0) << "RGWLC::RGWDeleteLC() failed to set entry " << oid << op_ret << dendl;
4861 }
4862 break;
4863 }while(1);
4864 l.unlock(ctx, oid);
4865 return;
4866 }
4867
4868 int RGWGetCORS::verify_permission()
4869 {
4870 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4871 return -EACCES;
4872 }
4873
4874 return 0;
4875 }
4876
4877 void RGWGetCORS::execute()
4878 {
4879 op_ret = read_bucket_cors();
4880 if (op_ret < 0)
4881 return ;
4882
4883 if (!cors_exist) {
4884 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
4885 op_ret = -ENOENT;
4886 return;
4887 }
4888 }
4889
4890 int RGWPutCORS::verify_permission()
4891 {
4892 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4893 return -EACCES;
4894 }
4895
4896 return 0;
4897 }
4898
4899 void RGWPutCORS::execute()
4900 {
4901 rgw_raw_obj obj;
4902
4903 op_ret = get_params();
4904 if (op_ret < 0)
4905 return;
4906
4907 if (!store->is_meta_master()) {
4908 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
4909 if (op_ret < 0) {
4910 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
4911 return;
4912 }
4913 }
4914
4915 map<string, bufferlist> attrs = s->bucket_attrs;
4916 attrs[RGW_ATTR_CORS] = cors_bl;
4917 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4918 }
4919
4920 int RGWDeleteCORS::verify_permission()
4921 {
4922 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4923 return -EACCES;
4924 }
4925
4926 return 0;
4927 }
4928
4929 void RGWDeleteCORS::execute()
4930 {
4931 op_ret = read_bucket_cors();
4932 if (op_ret < 0)
4933 return;
4934
4935 bufferlist bl;
4936 rgw_raw_obj obj;
4937 if (!cors_exist) {
4938 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
4939 op_ret = -ENOENT;
4940 return;
4941 }
4942 store->get_bucket_instance_obj(s->bucket, obj);
4943 store->set_prefetch_data(s->obj_ctx, obj);
4944 map<string, bufferlist> orig_attrs, attrs, rmattrs;
4945 map<string, bufferlist>::iterator iter;
4946
4947 op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
4948 if (op_ret < 0)
4949 return;
4950
4951 /* only remove meta attrs */
4952 for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
4953 const string& name = iter->first;
4954 dout(10) << "DeleteCORS : attr: " << name << dendl;
4955 if (name.compare(0, (sizeof(RGW_ATTR_CORS) - 1), RGW_ATTR_CORS) == 0) {
4956 rmattrs[name] = iter->second;
4957 } else if (attrs.find(name) == attrs.end()) {
4958 attrs[name] = iter->second;
4959 }
4960 }
4961 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4962 }
4963
/* Derive the Allow-Headers / Expose-Headers / Max-Age response values from
 * the rule matched earlier by validate_cors_request(). */
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
  get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
4967
4968 int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
4969 rule = cc->host_name_rule(origin);
4970 if (!rule) {
4971 dout(10) << "There is no cors rule present for " << origin << dendl;
4972 return -ENOENT;
4973 }
4974
4975 if (!validate_cors_rule_method(rule, req_meth)) {
4976 return -ENOENT;
4977 }
4978 return 0;
4979 }
4980
void RGWOptionsCORS::execute()
{
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  /* Origin and Access-Control-Request-Method are both mandatory on a
   * CORS preflight request. */
  origin = s->info.env->get("HTTP_ORIGIN");
  if (!origin) {
    dout(0) <<
    "Preflight request without mandatory Origin header"
    << dendl;
    op_ret = -EINVAL;
    return;
  }
  req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    dout(0) <<
    "Preflight request without mandatory Access-control-request-method header"
    << dendl;
    op_ret = -EINVAL;
    return;
  }
  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
  op_ret = validate_cors_request(&bucket_cors);
  /* No matching rule: clear the request markers so the response carries no
   * CORS headers (op_ret keeps validate_cors_request's result). */
  if (!rule) {
    origin = req_meth = NULL;
    return;
  }
  return;
}
5016
int RGWGetRequestPayment::verify_permission()
{
  /* NOTE(review): no permission check — any requester may read the
   * bucket's requestPayment flag; confirm this is intentional. */
  return 0;
}
5021
void RGWGetRequestPayment::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
5026
void RGWGetRequestPayment::execute()
{
  /* Report the flag from the already-loaded bucket instance info. */
  requester_pays = s->bucket_info.requester_pays;
}
5031
5032 int RGWSetRequestPayment::verify_permission()
5033 {
5034 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
5035 return -EACCES;
5036 }
5037
5038 return 0;
5039 }
5040
void RGWSetRequestPayment::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
5045
5046 void RGWSetRequestPayment::execute()
5047 {
5048 op_ret = get_params();
5049
5050 if (op_ret < 0)
5051 return;
5052
5053 s->bucket_info.requester_pays = requester_pays;
5054 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
5055 &s->bucket_attrs);
5056 if (op_ret < 0) {
5057 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
5058 << " returned err=" << op_ret << dendl;
5059 return;
5060 }
5061 }
5062
5063 int RGWInitMultipart::verify_permission()
5064 {
5065 if (s->iam_policy) {
5066 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5067 rgw::IAM::s3PutObject,
5068 rgw_obj(s->bucket, s->object));
5069 if (e == Effect::Allow) {
5070 return 0;
5071 } else if (e == Effect::Deny) {
5072 return -EACCES;
5073 }
5074 }
5075
5076 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5077 return -EACCES;
5078 }
5079
5080 return 0;
5081 }
5082
void RGWInitMultipart::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
5087
void RGWInitMultipart::execute()
{
  bufferlist aclbl;
  map<string, bufferlist> attrs;
  rgw_obj obj;

  if (get_params() < 0)
    return;

  /* NOTE(review): an empty object name returns without touching op_ret —
   * confirm that leaving the prior value is intended. */
  if (s->object.empty())
    return;

  /* Stash the requested ACL on the upload's meta object so it can be
   * applied at completion time. */
  policy.encode(aclbl);
  attrs[RGW_ATTR_ACL] = aclbl;

  populate_with_generic_attrs(s, attrs);

  /* select encryption mode */
  op_ret = prepare_encryption(attrs);
  if (op_ret != 0)
    return;

  rgw_get_request_metadata(s->cct, s->info, attrs);

  /* Generate random upload ids until the exclusive create of the meta
   * object succeeds (retrying on an -EEXIST collision). */
  do {
    char buf[33];
    gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
    upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
    upload_id.append(buf);

    string tmp_obj_name;
    RGWMPObj mp(s->object.name, upload_id);
    tmp_obj_name = mp.get_meta();

    obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
    /* the meta object is indexed with 0 size and kept in the extra-data
     * pool (the original comment here was truncated: "we c") */
    obj.set_in_extra_data(true);
    obj.index_hash_source = s->object.name;

    RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
    op_target.set_versioning_disabled(true); /* no versioning for multipart meta */

    RGWRados::Object::Write obj_op(&op_target);

    obj_op.meta.owner = s->owner.get_id();
    obj_op.meta.category = RGW_OBJ_CATEGORY_MULTIMETA;
    obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;

    op_ret = obj_op.write_meta(0, 0, attrs);
  } while (op_ret == -EEXIST);
}
5139
5140 static int get_multipart_info(RGWRados *store, struct req_state *s,
5141 string& meta_oid,
5142 RGWAccessControlPolicy *policy,
5143 map<string, bufferlist>& attrs)
5144 {
5145 map<string, bufferlist>::iterator iter;
5146 bufferlist header;
5147
5148 rgw_obj obj;
5149 obj.init_ns(s->bucket, meta_oid, mp_ns);
5150 obj.set_in_extra_data(true);
5151
5152 int op_ret = get_obj_attrs(store, s, obj, attrs);
5153 if (op_ret < 0) {
5154 if (op_ret == -ENOENT) {
5155 return -ERR_NO_SUCH_UPLOAD;
5156 }
5157 return op_ret;
5158 }
5159
5160 if (policy) {
5161 for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
5162 string name = iter->first;
5163 if (name.compare(RGW_ATTR_ACL) == 0) {
5164 bufferlist& bl = iter->second;
5165 bufferlist::iterator bli = bl.begin();
5166 try {
5167 ::decode(*policy, bli);
5168 } catch (buffer::error& err) {
5169 ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
5170 return -EIO;
5171 }
5172 break;
5173 }
5174 }
5175 }
5176
5177 return 0;
5178 }
5179
5180 int RGWCompleteMultipart::verify_permission()
5181 {
5182 if (s->iam_policy) {
5183 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5184 rgw::IAM::s3PutObject,
5185 rgw_obj(s->bucket, s->object));
5186 if (e == Effect::Allow) {
5187 return 0;
5188 } else if (e == Effect::Deny) {
5189 return -EACCES;
5190 }
5191 }
5192
5193 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5194 return -EACCES;
5195 }
5196
5197 return 0;
5198 }
5199
void RGWCompleteMultipart::pre_exec()
{
  /* Common per-request setup shared by bucket/object ops. */
  rgw_bucket_object_pre_exec(s);
}
5204
void RGWCompleteMultipart::execute()
{
  RGWMultiCompleteUpload *parts;
  map<int, string>::iterator iter;
  RGWMultiXMLParser parser;
  string meta_oid;
  map<uint32_t, RGWUploadPartInfo> obj_parts;
  map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
  map<string, bufferlist> attrs;
  off_t ofs = 0;
  MD5 hash;
  char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
  char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
  bufferlist etag_bl;
  rgw_obj meta_obj;
  rgw_obj target_obj;
  RGWMPObj mp;
  RGWObjManifest manifest;
  uint64_t olh_epoch = 0;
  string version_id;

  op_ret = get_params();
  if (op_ret < 0)
    return;
  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return;
  }

  /* the CompleteMultipartUpload XML body is mandatory */
  if (!data || !len) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (!parser.init()) {
    op_ret = -EIO;
    return;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
  if (!parts || parts->parts.empty()) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if ((int)parts->parts.size() >
      s->cct->_conf->rgw_multipart_part_upload_limit) {
    op_ret = -ERANGE;
    return;
  }

  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  int total_parts = 0;
  int handled_parts = 0;
  int max_parts = 1000;
  int marker = 0;
  bool truncated;
  RGWCompressionInfo cs_info;
  bool compressed = false;
  uint64_t accounted_size = 0;

  uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;

  list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */

  bool versioned_object = s->bucket_info.versioning_enabled();

  iter = parts->parts.begin();

  meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
  meta_obj.set_in_extra_data(true);
  meta_obj.index_hash_source = s->object.name;

  /*take a cls lock on meta_obj to prevent racing completions (or retries)
    from deleting the parts*/
  rgw_pool meta_pool;
  rgw_raw_obj raw_obj;
  librados::ObjectWriteOperation op;
  librados::IoCtx ioctx;
  rados::cls::lock::Lock l("RGWCompleteMultipart");
  int max_lock_secs_mp = s->cct->_conf->get_val<int64_t>("rgw_mp_lock_max_time");

  /* assert_exists makes lock acquisition fail if the meta object is gone
   * (i.e. a concurrent completion already finished). */
  op.assert_exists();
  store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
  store->get_obj_data_pool((s->bucket_info).placement_rule,meta_obj,&meta_pool);
  store->open_pool_ctx(meta_pool, ioctx);

  const string raw_meta_oid = raw_obj.oid;
  utime_t time(max_lock_secs_mp, 0);
  l.set_duration(time);
  l.lock_exclusive(&op);
  op_ret = ioctx.operate(raw_meta_oid, &op);

  if (op_ret < 0) {
    dout(0) << "RGWCompleteMultipart::execute() failed to acquire lock " << dendl;
    op_ret = -ERR_INTERNAL_ERROR;
    s->err.message = "This multipart completion is already in progress";
    return;
  }

  op_ret = get_obj_attrs(store, s, meta_obj, attrs);

  /* NOTE(review): from here on, error returns leave the cls lock held
   * until its duration expires — confirm this is acceptable, or release
   * it explicitly on the failure paths. */
  if (op_ret < 0) {
    ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
                     << " ret=" << op_ret << dendl;
    return;
  }

  /* Walk the uploaded parts (paged, max_parts at a time) in lock-step with
   * the parts listed in the request, validating number/etag/size, feeding
   * each part's etag into the final "-N" multipart etag, stitching the
   * part manifests together, and accumulating compression blocks. */
  do {
    op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
                                  marker, obj_parts, &marker, &truncated);
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_UPLOAD;
    }
    if (op_ret < 0)
      return;

    total_parts += obj_parts.size();
    if (!truncated && total_parts != (int)parts->parts.size()) {
      ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts
                       << " expected: " << parts->parts.size() << dendl;
      op_ret = -ERR_INVALID_PART;
      return;
    }

    for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
      uint64_t part_size = obj_iter->second.accounted_size;
      /* every part except the last must meet the minimum size */
      if (handled_parts < (int)parts->parts.size() - 1 &&
          part_size < min_part_size) {
        op_ret = -ERR_TOO_SMALL;
        return;
      }

      char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
      if (iter->first != (int)obj_iter->first) {
        ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: "
                         << iter->first << " next uploaded: "
                         << obj_iter->first << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }
      string part_etag = rgw_string_unquote(iter->second);
      if (part_etag.compare(obj_iter->second.etag) != 0) {
        ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first
                         << " etag: " << iter->second << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }

      /* final etag = MD5 over the concatenated binary part etags */
      hex_to_buf(obj_iter->second.etag.c_str(), petag,
                 CEPH_CRYPTO_MD5_DIGESTSIZE);
      hash.Update((const byte *)petag, sizeof(petag));

      RGWUploadPartInfo& obj_part = obj_iter->second;

      /* update manifest for part */
      string oid = mp.get_part(obj_iter->second.num);
      rgw_obj src_obj;
      src_obj.init_ns(s->bucket, oid, mp_ns);

      if (obj_part.manifest.empty()) {
        ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj="
                         << src_obj << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      } else {
        manifest.append(obj_part.manifest, store);
      }

      /* merge per-part compression block maps, rebasing offsets onto the
       * assembled object; the compression type must not change mid-upload */
      if (obj_part.cs_info.compression_type != "none") {
        if (compressed && cs_info.compression_type != obj_part.cs_info.compression_type) {
          ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload ("
                           << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
          op_ret = -ERR_INVALID_PART;
          return;
        }
        int64_t new_ofs; // offset in compression data for new part
        if (cs_info.blocks.size() > 0)
          new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
        else
          new_ofs = 0;
        for (const auto& block : obj_part.cs_info.blocks) {
          compression_block cb;
          cb.old_ofs = block.old_ofs + cs_info.orig_size;
          cb.new_ofs = new_ofs;
          cb.len = block.len;
          cs_info.blocks.push_back(cb);
          new_ofs = cb.new_ofs + cb.len;
        }
        if (!compressed)
          cs_info.compression_type = obj_part.cs_info.compression_type;
        cs_info.orig_size += obj_part.cs_info.orig_size;
        compressed = true;
      }

      /* the part's own index entry is removed once the assembled object
       * takes over */
      rgw_obj_index_key remove_key;
      src_obj.key.get_index_key(&remove_key);

      remove_objs.push_back(remove_key);

      ofs += obj_part.size;
      accounted_size += obj_part.accounted_size;
    }
  } while (truncated);
  hash.Final((byte *)final_etag);

  /* S3 multipart etag format: "<md5hex>-<part count>" */
  buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
  snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
           "-%lld", (long long)parts->parts.size());
  etag = final_etag_str;
  ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl;

  etag_bl.append(final_etag_str, strlen(final_etag_str) + 1);

  attrs[RGW_ATTR_ETAG] = etag_bl;

  if (compressed) {
    // write compression attribute to full object
    bufferlist tmp;
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
  }

  target_obj.init(s->bucket, s->object.name);
  if (versioned_object) {
    store->gen_rand_obj_instance_name(&target_obj);
  }

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  obj_ctx.obj.set_atomic(target_obj);

  /* Write the assembled object's head: stitched manifest, final etag, and
   * the list of part index entries to drop. */
  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
  RGWRados::Object::Write obj_op(&op_target);

  obj_op.meta.manifest = &manifest;
  obj_op.meta.remove_objs = &remove_objs;

  obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
  obj_op.meta.owner = s->owner.get_id();
  obj_op.meta.flags = PUT_OBJ_CREATE;
  op_ret = obj_op.write_meta(ofs, accounted_size, attrs);
  if (op_ret < 0)
    return;

  // remove the upload obj
  /* on success the lock object itself is deleted; only an explicit unlock
   * is needed when the delete fails */
  int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                            s->bucket_info, meta_obj, 0);
  if (r < 0) {
    ldout(store->ctx(), 0) << "WARNING: failed to remove object " << meta_obj << dendl;
    r = l.unlock(&ioctx, raw_meta_oid);
    if (r < 0) {
      ldout(store->ctx(), 0) << "WARNING: failed to unlock " << raw_meta_oid << dendl;
    }
  }
}
5468
5469 int RGWAbortMultipart::verify_permission()
5470 {
5471 if (s->iam_policy) {
5472 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5473 rgw::IAM::s3AbortMultipartUpload,
5474 rgw_obj(s->bucket, s->object));
5475 if (e == Effect::Allow) {
5476 return 0;
5477 } else if (e == Effect::Deny) {
5478 return -EACCES;
5479 }
5480 }
5481
5482 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5483 return -EACCES;
5484 }
5485
5486 return 0;
5487 }
5488
/* Standard per-request preparation shared by bucket/object ops. */
void RGWAbortMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5493
5494 void RGWAbortMultipart::execute()
5495 {
5496 op_ret = -EINVAL;
5497 string upload_id;
5498 string meta_oid;
5499 upload_id = s->info.args.get("uploadId");
5500 map<string, bufferlist> attrs;
5501 rgw_obj meta_obj;
5502 RGWMPObj mp;
5503
5504 if (upload_id.empty() || s->object.empty())
5505 return;
5506
5507 mp.init(s->object.name, upload_id);
5508 meta_oid = mp.get_meta();
5509
5510 op_ret = get_multipart_info(store, s, meta_oid, NULL, attrs);
5511 if (op_ret < 0)
5512 return;
5513
5514 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
5515 op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
5516 }
5517
5518 int RGWListMultipart::verify_permission()
5519 {
5520 if (!verify_object_permission(s, rgw::IAM::s3ListMultipartUploadParts))
5521 return -EACCES;
5522
5523 return 0;
5524 }
5525
/* Standard per-request preparation shared by bucket/object ops. */
void RGWListMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5530
5531 void RGWListMultipart::execute()
5532 {
5533 map<string, bufferlist> xattrs;
5534 string meta_oid;
5535 RGWMPObj mp;
5536
5537 op_ret = get_params();
5538 if (op_ret < 0)
5539 return;
5540
5541 mp.init(s->object.name, upload_id);
5542 meta_oid = mp.get_meta();
5543
5544 op_ret = get_multipart_info(store, s, meta_oid, &policy, xattrs);
5545 if (op_ret < 0)
5546 return;
5547
5548 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
5549 marker, parts, NULL, &truncated);
5550 }
5551
5552 int RGWListBucketMultiparts::verify_permission()
5553 {
5554 if (!verify_bucket_permission(s,
5555 rgw::IAM::s3ListBucketMultiPartUploads))
5556 return -EACCES;
5557
5558 return 0;
5559 }
5560
/* Standard per-request preparation shared by bucket/object ops. */
void RGWListBucketMultiparts::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5565
5566 void RGWListBucketMultiparts::execute()
5567 {
5568 vector<rgw_bucket_dir_entry> objs;
5569 string marker_meta;
5570
5571 op_ret = get_params();
5572 if (op_ret < 0)
5573 return;
5574
5575 if (s->prot_flags & RGW_REST_SWIFT) {
5576 string path_args;
5577 path_args = s->info.args.get("path");
5578 if (!path_args.empty()) {
5579 if (!delimiter.empty() || !prefix.empty()) {
5580 op_ret = -EINVAL;
5581 return;
5582 }
5583 prefix = path_args;
5584 delimiter="/";
5585 }
5586 }
5587 marker_meta = marker.get_meta();
5588
5589 op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
5590 max_uploads, &objs, &common_prefixes, &is_truncated);
5591 if (op_ret < 0) {
5592 return;
5593 }
5594
5595 if (!objs.empty()) {
5596 vector<rgw_bucket_dir_entry>::iterator iter;
5597 RGWMultipartUploadEntry entry;
5598 for (iter = objs.begin(); iter != objs.end(); ++iter) {
5599 rgw_obj_key key(iter->key);
5600 if (!entry.mp.from_meta(key.name))
5601 continue;
5602 entry.obj = *iter;
5603 uploads.push_back(entry);
5604 }
5605 next_marker = entry;
5606 }
5607 }
5608
5609 void RGWGetHealthCheck::execute()
5610 {
5611 if (!g_conf->rgw_healthcheck_disabling_path.empty() &&
5612 (::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
5613 /* Disabling path specified & existent in the filesystem. */
5614 op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
5615 } else {
5616 op_ret = 0; /* 200 OK */
5617 }
5618 }
5619
5620 int RGWDeleteMultiObj::verify_permission()
5621 {
5622 acl_allowed = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
5623 if (!acl_allowed && !s->iam_policy)
5624 return -EACCES;
5625
5626 return 0;
5627 }
5628
/* Standard per-request preparation shared by bucket/object ops. */
void RGWDeleteMultiObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5633
/* Handle an S3 multi-object delete: parse the XML body listing the keys,
 * evaluate the bucket policy per object, delete each object, and stream
 * a per-key result back to the client. Error flow uses gotos because the
 * response must be finished differently depending on whether
 * begin_response() has already been called. */
void RGWDeleteMultiObj::execute()
{
  RGWMultiDelDelete *multi_delete;
  vector<rgw_obj_key>::iterator iter;
  RGWMultiDelXMLParser parser;
  int num_processed = 0;
  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);

  op_ret = get_params();
  if (op_ret < 0) {
    goto error;
  }

  /* A request body is mandatory for this operation. */
  if (!data) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    goto error;
  }

  /* The body must contain a top-level <Delete> element. */
  multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
  if (!multi_delete) {
    op_ret = -EINVAL;
    goto error;
  }

  /* Quiet mode suppresses per-key success entries in the response. */
  if (multi_delete->is_quiet())
    quiet = true;

  begin_response();
  if (multi_delete->objects.empty()) {
    goto done;
  }

  /* Process at most max_to_delete keys; the rest are silently ignored. */
  for (iter = multi_delete->objects.begin();
       iter != multi_delete->objects.end() && num_processed < max_to_delete;
       ++iter, num_processed++) {
    rgw_obj obj(bucket, *iter);
    if (s->iam_policy) {
      /* Versioned deletes are a distinct IAM action. A policy Deny, or a
       * Pass with no ACL permission, fails only this key, not the batch. */
      auto e = s->iam_policy->eval(s->env,
                                   *s->auth.identity,
                                   iter->instance.empty() ?
                                   rgw::IAM::s3DeleteObject :
                                   rgw::IAM::s3DeleteObjectVersion,
                                   obj);
      if ((e == Effect::Deny) ||
          (e == Effect::Pass && !acl_allowed)) {
        send_partial_response(*iter, false, "", -EACCES);
        continue;
      }
    }

    obj_ctx->obj.set_atomic(obj);

    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = s->bucket_owner.get_id();
    del_op.params.versioning_status = s->bucket_info.versioning_status();
    del_op.params.obj_owner = s->owner;

    op_ret = del_op.delete_obj();
    /* Deleting a non-existent key is treated as success, matching S3. */
    if (op_ret == -ENOENT) {
      op_ret = 0;
    }

    send_partial_response(*iter, del_op.result.delete_marker,
                          del_op.result.version_id, op_ret);
  }

  /* set the return code to zero, errors at this point will be
     dumped to the response */
  op_ret = 0;

done:
  // will likely segfault if begin_response() has not been called
  end_response();
  free(data);
  return;

error:
  send_status();
  free(data);
  return;

}
5728
/* Authorize one bulk-delete path against its bucket's ACL and (if any)
 * bucket policy. Also reports the bucket owner through `bucket_owner`.
 * Returns true when the operation is permitted.
 *
 * NOTE(review): the IAM action checked is always s3DeleteBucket, even
 * when the path names an object inside the bucket — confirm this is the
 * intended action for object-level entries. */
bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
                                               map<string, bufferlist>& battrs,
                                               ACLOwner& bucket_owner /* out */)
{
  RGWAccessControlPolicy bacl(store->ctx());
  int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
  if (ret < 0) {
    return false;
  }

  auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);

  bucket_owner = bacl.get_owner();

  /* We can use global user_acl because each BulkDelete request is allowed
   * to work on entities from a single account only. */
  return verify_bucket_permission(s, binfo.bucket, s->user_acl.get(),
                                  &bacl, policy, rgw::IAM::s3DeleteBucket);
}
5748
/* Delete one account-scoped path: "bucket/object" deletes the object,
 * a bare "bucket" deletes the bucket itself. On success increments
 * num_deleted and returns true; on failure records the problem either
 * as an "unfound" count (ENOENT) or as an entry in `failures`, and
 * returns false. */
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
{
  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  RGWBucketInfo binfo;
  map<string, bufferlist> battrs;
  ACLOwner bowner;

  int ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   path.bucket_name, binfo, nullptr,
                                   &battrs);
  if (ret < 0) {
    goto binfo_fail;
  }

  if (!verify_permission(binfo, battrs, bowner)) {
    ret = -EACCES;
    goto auth_fail;
  }

  if (!path.obj_key.empty()) {
    /* Object-level path: delete that object from the bucket. */
    rgw_obj obj(binfo.bucket, path.obj_key);
    obj_ctx.obj.set_atomic(obj);

    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = binfo.owner;
    del_op.params.versioning_status = binfo.versioning_status();
    del_op.params.obj_owner = bowner;

    ret = del_op.delete_obj();
    if (ret < 0) {
      goto delop_fail;
    }
  } else {
    /* Bucket-level path: delete the bucket, then unlink it from the
     * owner's bucket list. */
    RGWObjVersionTracker ot;
    ot.read_version = binfo.ep_objv;

    ret = store->delete_bucket(binfo, ot);
    if (0 == ret) {
      ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
                              binfo.bucket.name, false);
      /* An unlink failure is logged but does not fail the delete. */
      if (ret < 0) {
        ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret
                         << dendl;
      }
    }
    if (ret < 0) {
      goto delop_fail;
    }

    /* Propagate the bucket removal to the metadata master zone. */
    if (!store->is_meta_master()) {
      bufferlist in_data;
      ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                      nullptr);
      if (ret < 0) {
        if (ret == -ENOENT) {
          /* adjust error, we want to return with NoSuchBucket and not
           * NoSuchKey */
          ret = -ERR_NO_SUCH_BUCKET;
        }
        goto delop_fail;
      }
    }
  }

  num_deleted++;
  return true;


binfo_fail:
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl;
    num_unfound++;
  } else {
    ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret
                            << dendl;

    fail_desc_t failed_item = {
      .err = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

auth_fail:
  ldout(store->ctx(), 20) << "wrong auth for " << path << dendl;
  {
    fail_desc_t failed_item = {
      .err = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

delop_fail:
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find entry " << path << dendl;
    num_unfound++;
  } else {
    fail_desc_t failed_item = {
      .err = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;
}
5860
5861 bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
5862 {
5863 ldout(store->ctx(), 20) << "in delete_chunk" << dendl;
5864 for (auto path : paths) {
5865 ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl;
5866 delete_single(path);
5867 }
5868
5869 return true;
5870 }
5871
/* No up-front check: authorization happens per path inside execute(),
 * via Deleter::verify_permission(). */
int RGWBulkDelete::verify_permission()
{
  return 0;
}
5876
/* Standard per-request preparation shared by bucket/object ops. */
void RGWBulkDelete::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5881
/* Drive a Swift bulk delete: read path chunks from the request body and
 * hand each chunk to the Deleter until the body is exhausted. */
void RGWBulkDelete::execute()
{
  deleter = std::unique_ptr<Deleter>(new Deleter(store, s));

  bool is_truncated = false;
  do {
    list<RGWBulkDelete::acct_path_t> items;

    int ret = get_data(items, &is_truncated);
    if (ret < 0) {
      return;
    }

    /* NOTE(review): delete_chunk()'s return value is stored here but
     * never examined, and op_ret is not updated in this loop — the
     * `!op_ret` gate below relies on op_ret being set elsewhere.
     * Confirm this is intentional. */
    ret = deleter->delete_chunk(items);
  } while (!op_ret && is_truncated);

  return;
}
5900
5901
/* Out-of-line definition of the class-scope list of errors that abort a
 * whole bulk upload (required for odr-use before C++17 inline variables). */
constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
5903
5904 int RGWBulkUploadOp::verify_permission()
5905 {
5906 if (s->auth.identity->is_anonymous()) {
5907 return -EACCES;
5908 }
5909
5910 if (! verify_user_permission(s, RGW_PERM_WRITE)) {
5911 return -EACCES;
5912 }
5913
5914 if (s->user->user_id.tenant != s->bucket_tenant) {
5915 ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
5916 << " (user_id.tenant=" << s->user->user_id.tenant
5917 << " requested=" << s->bucket_tenant << ")"
5918 << dendl;
5919 return -EACCES;
5920 }
5921
5922 if (s->user->max_buckets < 0) {
5923 return -EPERM;
5924 }
5925
5926 return 0;
5927 }
5928
/* Standard per-request preparation shared by bucket/object ops. */
void RGWBulkUploadOp::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5933
5934 boost::optional<std::pair<std::string, rgw_obj_key>>
5935 RGWBulkUploadOp::parse_path(const boost::string_ref& path)
5936 {
5937 /* We need to skip all slashes at the beginning in order to preserve
5938 * compliance with Swift. */
5939 const size_t start_pos = path.find_first_not_of('/');
5940
5941 if (boost::string_ref::npos != start_pos) {
5942 /* Seperator is the first slash after the leading ones. */
5943 const size_t sep_pos = path.substr(start_pos).find('/');
5944
5945 if (boost::string_ref::npos != sep_pos) {
5946 const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
5947 const auto obj_name = path.substr(sep_pos + 1);
5948
5949 return std::make_pair(bucket_name.to_string(),
5950 rgw_obj_key(obj_name.to_string()));
5951 } else {
5952 /* It's guaranteed here that bucket name is at least one character
5953 * long and is different than slash. */
5954 return std::make_pair(path.substr(start_pos).to_string(),
5955 rgw_obj_key());
5956 }
5957 }
5958
5959 return none;
5960 }
5961
5962 std::pair<std::string, std::string>
5963 RGWBulkUploadOp::handle_upload_path(struct req_state *s)
5964 {
5965 std::string bucket_path, file_prefix;
5966 if (! s->init_state.url_bucket.empty()) {
5967 file_prefix = bucket_path = s->init_state.url_bucket + "/";
5968 if (! s->object.empty()) {
5969 std::string& object_name = s->object.name;
5970
5971 /* As rgw_obj_key::empty() already verified emptiness of s->object.name,
5972 * we can safely examine its last element. */
5973 if (object_name.back() == '/') {
5974 file_prefix.append(object_name);
5975 } else {
5976 file_prefix.append(object_name).append("/");
5977 }
5978 }
5979 }
5980 return std::make_pair(bucket_path, file_prefix);
5981 }
5982
5983 int RGWBulkUploadOp::handle_dir_verify_permission()
5984 {
5985 if (s->user->max_buckets > 0) {
5986 RGWUserBuckets buckets;
5987 std::string marker;
5988 bool is_truncated = false;
5989 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
5990 marker, std::string(), s->user->max_buckets,
5991 false, &is_truncated);
5992 if (op_ret < 0) {
5993 return op_ret;
5994 }
5995
5996 if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) {
5997 return -ERR_TOO_MANY_BUCKETS;
5998 }
5999 }
6000
6001 return 0;
6002 }
6003
/* Prepare a req_info copy for forwarding to the metadata master:
 * ensure the request URIs carry the bucket name. */
static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
{
  /* the request of container or object level will contain bucket name.
   * only at account level need to append the bucket name */
  if (info.script_uri.find(bucket_name) != std::string::npos) {
    return;
  }

  ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
  info.script_uri.append("/").append(bucket_name);
  info.request_uri_aws4 = info.request_uri = info.script_uri;
  info.effective_uri = "/" + bucket_name;
}
6017
/* Handle a directory entry of the uploaded TAR archive by creating the
 * corresponding bucket (container). Mirrors regular bucket creation:
 * quota check, ownership check on an existing bucket, forwarding to the
 * metadata master in multisite setups, placement-rule coherence, and
 * finally create + link. Returns op_ret. */
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
{
  ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl;

  op_ret = handle_dir_verify_permission();
  if (op_ret < 0) {
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object_junk;
  std::tie(bucket_name, object_junk) = *parse_path(path);

  /* NOTE(review): `obj` is built here but not used below in this
   * function — presumably kept for symmetry with other creation paths;
   * confirm it can be dropped. */
  rgw_raw_obj obj(store->get_zone_params().domain_root,
                  rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
                                  binfo, NULL, &battrs);
  if (op_ret < 0 && op_ret != -ENOENT) {
    return op_ret;
  }
  const bool bucket_exists = (op_ret != -ENOENT);

  if (bucket_exists) {
    /* An existing bucket owned by someone else is a name conflict. */
    RGWAccessControlPolicy old_policy(s->cct);
    int r = get_bucket_policy_from_attr(s->cct, store, binfo,
                                        battrs, &old_policy);
    if (r >= 0) {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return op_ret;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket = nullptr;
  uint32_t *pmaster_num_shards = nullptr;
  real_time creation_time;
  obj_version objv, ep_objv, *pobjv = nullptr;

  if (! store->is_meta_master()) {
    /* Multisite: the master zone creates the bucket; adopt the versions,
     * shard count and creation time it reports. */
    JSONParser jp;
    ceph::bufferlist in_data;
    req_info info = s->info;
    forward_req_info(s->cct, info, bucket_name);
    op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
    if (op_ret < 0) {
      return op_ret;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);

    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver="
                      << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time
                      << dendl;

    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = nullptr;
    pmaster_num_shards = nullptr;
  }


  std::string placement_rule;
  if (bucket_exists) {
    /* Re-creation must not silently change the bucket's placement. */
    std::string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user),
                                            store->get_zonegroup().get_id(),
                                            placement_rule,
                                            &selected_placement_rule,
                                            nullptr);
    if (selected_placement_rule != binfo.placement_rule) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl;
      return op_ret;
    }
  }

  /* Create metadata: ACLs. */
  std::map<std::string, ceph::bufferlist> attrs;
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  /* No quota override is passed down here. */
  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;

  rgw_bucket bucket;
  bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  bucket.name = bucket_name;


  RGWBucketInfo out_info;
  op_ret = store->create_bucket(*(s->user),
                                bucket,
                                store->get_zonegroup().get_id(),
                                placement_rule, binfo.swift_ver_location,
                                pquota_info, attrs,
                                out_info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret
                    << ", bucket=" << bucket << dendl;

  if (op_ret && op_ret != -EEXIST) {
    return op_ret;
  }

  const bool existed = (op_ret == -EEXIST);
  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (out_info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl;
      return op_ret;
    }
    bucket = out_info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
                           out_info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id,
                               bucket.tenant, bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret="
                       << op_ret << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    ldout(s->cct, 20) << "bulk upload: containers already exists"
                      << dendl;
    op_ret = -ERR_BUCKET_EXISTS;
  }

  return op_ret;
}
6177
6178
6179 bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
6180 const rgw_obj& obj,
6181 std::map<std::string, ceph::bufferlist>& battrs,
6182 ACLOwner& bucket_owner /* out */)
6183 {
6184 RGWAccessControlPolicy bacl(store->ctx());
6185 op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
6186 if (op_ret < 0) {
6187 ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket"
6188 << dendl;
6189 return false;
6190 }
6191
6192 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
6193
6194 bucket_owner = bacl.get_owner();
6195 if (policy) {
6196 auto e = policy->eval(s->env, *s->auth.identity,
6197 rgw::IAM::s3PutObject, obj);
6198 if (e == Effect::Allow) {
6199 return true;
6200 } else if (e == Effect::Deny) {
6201 return false;
6202 }
6203 }
6204
6205 return verify_bucket_permission_no_policy(s, s->user_acl.get(),
6206 &bacl, RGW_PERM_WRITE);
6207 }
6208
/* Handle one regular-file entry of the uploaded TAR archive: authorize,
 * check quotas, stream the declared number of bytes through an optional
 * compression filter into an atomic put processor, then attach ETag/ACL/
 * compression metadata and complete the write. Returns op_ret. */
int RGWBulkUploadOp::handle_file(const boost::string_ref path,
                                 const size_t size,
                                 AlignedStreamGetter& body)
{

  ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size
                    << dendl;

  RGWPutObjDataProcessor *filter = nullptr;
  boost::optional<RGWPutObj_Compress> compressor;

  /* Respect the global object size cap. */
  if (size > static_cast<const size_t>(s->cct->_conf->rgw_max_put_size)) {
    op_ret = -ERR_TOO_LARGE;
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object;
  std::tie(bucket_name, object) = *parse_path(path);

  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  ACLOwner bowner;
  op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                  bucket_name, binfo, nullptr, &battrs);
  if (op_ret == -ENOENT) {
    /* Missing bucket is only logged here; the permission check below
     * will fail the request. */
    ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name
                      << dendl;
  } else if (op_ret < 0) {
    return op_ret;
  }

  if (! handle_file_verify_permission(binfo,
                                      rgw_obj(binfo.bucket, object),
                                      battrs, bowner)) {
    ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl;
    op_ret = -EACCES;
    return op_ret;
  }

  /* Pre-flight quota check against the declared size. */
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
                              user_quota, bucket_quota, size);
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  RGWPutObjProcessor_Atomic processor(obj_ctx,
                                      binfo,
                                      binfo.bucket,
                                      object.name,
                                      /* part size */
                                      s->cct->_conf->rgw_obj_stripe_size,
                                      s->req_id,
                                      binfo.versioning_enabled());

  /* No filters by default. */
  filter = &processor;

  op_ret = processor.prepare(store, nullptr);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret="
                      << op_ret << dendl;
    return op_ret;
  }

  /* Insert the compression filter when the placement's zone config
   * requests a compression plugin and it can be loaded. */
  const auto& compression_type = store->get_zone_params().get_compression_type(
      binfo.placement_rule);
  CompressorRef plugin;
  if (compression_type != "none") {
    plugin = Compressor::create(s->cct, compression_type);
    if (! plugin) {
      ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type "
                       << compression_type << dendl;
    } else {
      compressor.emplace(s->cct, plugin, filter);
      filter = &*compressor;
    }
  }

  /* Upload file content. */
  ssize_t len = 0;
  size_t ofs = 0;
  MD5 hash;
  do {
    ceph::bufferlist data;
    len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);

    /* NOTE(review): this logs the raw body content at debug level 20 —
     * potentially large/binary output; confirm that is acceptable. */
    ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl;
    if (len < 0) {
      op_ret = len;
      return op_ret;
    } else if (len > 0) {
      hash.Update((const byte *)data.c_str(), data.length());
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
                          << op_ret << dendl;
        return op_ret;
      }

      ofs += len;
    }

  } while (len > 0);

  /* The stream ended before the size declared in the TAR header. */
  if (ofs != size) {
    ldout(s->cct, 10) << "bulk upload: real file size different from declared"
                      << dendl;
    op_ret = -EINVAL;
  }

  /* Re-check quota after the data actually landed. */
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
                              user_quota, bucket_quota, size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path
                      << dendl;
    return op_ret;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  hash.Final(m);
  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  /* Create metadata: ETAG. */
  std::map<std::string, ceph::bufferlist> attrs;
  std::string etag = calc_md5;
  ceph::bufferlist etag_bl;
  etag_bl.append(etag.c_str(), etag.size() + 1);
  attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));

  /* Create metadata: ACLs. */
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  /* Create metadata: compression info. */
  if (compressor && compressor->is_compressed()) {
    ceph::bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    /* NOTE(review): orig_size is taken from s->obj_size rather than the
     * per-entry `size`/`ofs` of this archive member — confirm that
     * s->obj_size is maintained for bulk-upload sub-requests. */
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = std::move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
  }

  /* Complete the transaction. */
  op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs,
                              ceph::real_time() /* delete_at */);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret="
                      << op_ret << dendl;
  }

  return op_ret;
}
6379
/* Drive a Swift bulk upload: read the request body as a TAR archive,
 * block by block, creating a bucket for each directory entry and an
 * object for each regular file. Unsupported entry types are skipped.
 * Certain errors (terminal_errors) abort the whole archive. */
void RGWBulkUploadOp::execute()
{
  ceph::bufferlist buffer(64 * 1024);

  ldout(s->cct, 20) << "bulk upload: start" << dendl;

  /* Create an instance of stream-abstracting class. Having this indirection
   * allows for easy introduction of decompressors like gzip and bzip2. */
  auto stream = create_stream();
  if (! stream) {
    return;
  }

  /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See:
   * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
  std::string bucket_path, file_prefix;
  std::tie(bucket_path, file_prefix) = handle_upload_path(s);

  auto status = rgw::tar::StatusIndicator::create();
  do {
    op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
    if (op_ret < 0) {
      ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl;
      return;
    }

    /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
     * must be tracked to detect out end-of-archive. It occurs when both of
     * them are empty (zeroed). Tracing this particular inter-block dependency
     * is responsibility of the rgw::tar::StatusIndicator class. */
    boost::optional<rgw::tar::HeaderView> header;
    std::tie(status, header) = rgw::tar::interpret_block(status, buffer);

    if (! status.empty() && header) {
      /* This specific block isn't empty (entirely zeroed), so we can parse
       * it as a TAR header and dispatch. At the moment we do support only
       * regular files and directories. Everything else (symlinks, devices)
       * will be ignored but won't cease the whole upload. */
      switch (header->get_filetype()) {
        case rgw::tar::FileType::NORMAL_FILE: {
          ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl;

          /* When the URL already names a bucket, prepend the derived
           * prefix so the member lands inside that bucket/directory. */
          boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
                                       file_prefix + header->get_filename().to_string();
          auto body = AlignedStreamGetter(0, header->get_filesize(),
                                          rgw::tar::BLOCK_SIZE, *stream);
          op_ret = handle_file(filename,
                               header->get_filesize(),
                               body);
          if (! op_ret) {
            /* Only regular files counts. */
            num_created++;
          } else {
            failures.emplace_back(op_ret, filename.to_string());
          }
          break;
        }
        case rgw::tar::FileType::DIRECTORY: {
          ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl;

          boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
          op_ret = handle_dir(dirname);
          /* An already-existing bucket is not reported as a failure. */
          if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
            failures.emplace_back(op_ret, dirname.to_string());
          }
          break;
        }
        default: {
          /* Not recognized. Skip. */
          op_ret = 0;
          break;
        }
      }

      /* In case of any problems with sub-request authorization Swift simply
       * terminates whole upload immediately. */
      if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
                                     terminal_errors)) {
        ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret
                         << dendl;
        break;
      }
    } else {
      ldout(s->cct, 2) << "bulk upload: an empty block" << dendl;
      op_ret = 0;
    }

    buffer.clear();
  } while (! status.eof());

  return;
}
6472
6473 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
6474 {
6475 const size_t aligned_legnth = length + (-length % alignment);
6476 ceph::bufferlist junk;
6477
6478 DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
6479 }
6480
6481 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
6482 ceph::bufferlist& dst)
6483 {
6484 const size_t max_to_read = std::min(want, length - position);
6485 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
6486 if (len > 0) {
6487 position += len;
6488 }
6489 return len;
6490 }
6491
/* Read exactly `want` bytes from the wrapped stream, advancing the
 * window position by the amount actually read. */
ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
                                                          ceph::bufferlist& dst)
{
  const auto len = DecoratedStreamGetter::get_exactly(want, dst);
  if (len > 0) {
    position += len;
  }
  return len;
}
6501
6502 int RGWSetAttrs::verify_permission()
6503 {
6504 // This looks to be part of the RGW-NFS machinery and has no S3 or
6505 // Swift equivalent.
6506 bool perm;
6507 if (!s->object.empty()) {
6508 perm = verify_object_permission_no_policy(s, RGW_PERM_WRITE);
6509 } else {
6510 perm = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
6511 }
6512 if (!perm)
6513 return -EACCES;
6514
6515 return 0;
6516 }
6517
/* Standard per-request preparation shared by bucket/object ops. */
void RGWSetAttrs::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6522
6523 void RGWSetAttrs::execute()
6524 {
6525 op_ret = get_params();
6526 if (op_ret < 0)
6527 return;
6528
6529 rgw_obj obj(s->bucket, s->object);
6530
6531 if (!s->object.empty()) {
6532 store->set_atomic(s->obj_ctx, obj);
6533 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr);
6534 } else {
6535 for (auto& iter : attrs) {
6536 s->bucket_attrs[iter.first] = std::move(iter.second);
6537 }
6538 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs,
6539 &s->bucket_info.objv_tracker);
6540 }
6541 }
6542
/* Standard per-request preparation shared by bucket/object ops. */
void RGWGetObjLayout::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6547
6548 void RGWGetObjLayout::execute()
6549 {
6550 rgw_obj obj(s->bucket, s->object);
6551 RGWRados::Object target(store,
6552 s->bucket_info,
6553 *static_cast<RGWObjectCtx *>(s->obj_ctx),
6554 rgw_obj(s->bucket, s->object));
6555 RGWRados::Object::Read stat_op(&target);
6556
6557 op_ret = stat_op.prepare();
6558 if (op_ret < 0) {
6559 return;
6560 }
6561
6562 head_obj = stat_op.state.head_obj;
6563
6564 op_ret = target.get_manifest(&manifest);
6565 }
6566
6567
6568 int RGWConfigBucketMetaSearch::verify_permission()
6569 {
6570 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6571 return -EACCES;
6572 }
6573
6574 return 0;
6575 }
6576
/* Standard per-request preparation shared by bucket/object ops. */
void RGWConfigBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6581
/* Install a new metadata-search configuration on the bucket instance
 * and persist it. */
6582 void RGWConfigBucketMetaSearch::execute()
6583 {
6584 op_ret = get_params();  /* subclass hook: parses 'mdsearch_config' */
6585 if (op_ret < 0) {
6586 ldout(s->cct, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
6587 return;
6588 }
6589
6590 s->bucket_info.mdsearch_config = mdsearch_config;
6591
  /* Write back the bucket instance info (the 'false' flag presumably
   * means non-exclusive/update — confirm against
   * put_bucket_instance_info's signature). */
6592 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6593 if (op_ret < 0) {
6594 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6595 return;
6596 }
6597 }
6598
6599 int RGWGetBucketMetaSearch::verify_permission()
6600 {
6601 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6602 return -EACCES;
6603 }
6604
6605 return 0;
6606 }
6607
/* Standard pre-execution hook shared by bucket/object ops. */
6608 void RGWGetBucketMetaSearch::pre_exec()
6609 {
6610 rgw_bucket_object_pre_exec(s);
6611 }
6612
6613 int RGWDelBucketMetaSearch::verify_permission()
6614 {
6615 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6616 return -EACCES;
6617 }
6618
6619 return 0;
6620 }
6621
/* Standard pre-execution hook shared by bucket/object ops. */
6622 void RGWDelBucketMetaSearch::pre_exec()
6623 {
6624 rgw_bucket_object_pre_exec(s);
6625 }
6626
/* Clear the bucket's metadata-search configuration and persist the
 * updated bucket instance info. */
6627 void RGWDelBucketMetaSearch::execute()
6628 {
6629 s->bucket_info.mdsearch_config.clear();
6630
6631 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6632 if (op_ret < 0) {
6633 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6634 return;
6635 }
6636 }
6637
6638
6639 RGWHandler::~RGWHandler()
6640 {
6641 }
6642
/* Attach the store and request state to this handler.  The client-IO
 * object is accepted for interface compatibility but is unused in the
 * base implementation (presumably subclasses' init() overrides use
 * it — confirm in the derived handlers). */
6643 int RGWHandler::init(RGWRados *_store,
6644 struct req_state *_s,
6645 rgw::io::BasicClient *cio)
6646 {
6647 store = _store;
6648 s = _s;
6649
6650 return 0;
6651 }
6652
/* Build the bucket-level policies and the IAM request environment.
 * NOTE(review): s->env is populated even when policy building failed —
 * presumably intentional so later stages always see an environment;
 * confirm before reordering. */
6653 int RGWHandler::do_init_permissions()
6654 {
6655 int ret = rgw_build_bucket_policies(store, s);
6656 s->env = rgw_build_iam_environment(store, s);
6657
6658 if (ret < 0) {
6659 ldout(s->cct, 10) << "read_permissions on " << s->bucket << " ret=" << ret << dendl;
  /* Missing ACL data is surfaced to the client as plain access denial. */
6660 if (ret == -ENODATA)
6661 ret = -EACCES;
6662 }
6663
6664 return ret;
6665 }
6666
6667 int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
6668 {
6669 if (only_bucket) {
6670 /* already read bucket info */
6671 return 0;
6672 }
6673 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
6674
6675 if (ret < 0) {
6676 ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":"
6677 << s->object << " only_bucket=" << only_bucket
6678 << " ret=" << ret << dendl;
6679 if (ret == -ENODATA)
6680 ret = -EACCES;
6681 }
6682
6683 return ret;
6684 }
6685
/* Delegate error translation to the dialect (protocol) handler so each
 * frontend can map error codes/content its own way. */
6686 int RGWOp::error_handler(int err_no, string *error_content) {
6687 return dialect_handler->error_handler(err_no, error_content);
6688 }
6689
/* Base-class fallback: pass the error through untranslated. */
6690 int RGWHandler::error_handler(int err_no, string *error_content) {
6691 // This is the do-nothing error handler
6692 return err_no;
6693 }
6694
6695
/* PutBucketPolicy response: status and headers only, no body. */
6696 void RGWPutBucketPolicy::send_response()
6697 {
6698 if (op_ret) {
6699 set_req_state_err(s, op_ret);
6700 }
6701 dump_errno(s);
6702 end_header(s);
6703 }
6704
6705 int RGWPutBucketPolicy::verify_permission()
6706 {
6707 if (!verify_bucket_permission(s, rgw::IAM::s3PutBucketPolicy)) {
6708 return -EACCES;
6709 }
6710
6711 return 0;
6712 }
6713
/* Read the policy document (the raw request body) into data/len,
 * capped at rgw_max_put_param_size. */
6714 int RGWPutBucketPolicy::get_params()
6715 {
6716 const auto max_size = s->cct->_conf->rgw_max_put_param_size;
6717 // At some point when I have more time I want to make a version of
6718 // rgw_rest_read_all_input that doesn't use malloc.
6719 op_ret = rgw_rest_read_all_input(s, &data, &len, max_size, false);
6720 // And throws exceptions.
6721 return op_ret;
6722 }
6723
/* Parse, validate and store the bucket policy.  On non-master zones the
 * raw request is first forwarded to the metadata master. */
6724 void RGWPutBucketPolicy::execute()
6725 {
6726 op_ret = get_params();
6727 if (op_ret < 0) {
6728 return;
6729 }
6730
  /* Wrap the malloc'd body without copying; 'data' must outlive
   * in_data (it does — both are scoped to this request). */
6731 bufferlist in_data = bufferlist::static_from_mem(data, len);
6732
6733 if (!store->is_meta_master()) {
6734 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
6735 if (op_ret < 0) {
6736 ldout(s->cct, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
6737 return;
6738 }
6739 }
6740
6741 try {
  /* Policy's constructor parses/validates the JSON document and
   * throws PolicyParseException on malformed input. */
6742 Policy p(s->cct, s->bucket_tenant, in_data);
6743 auto attrs = s->bucket_attrs;
6744 attrs[RGW_ATTR_IAM_POLICY].clear();
6745 attrs[RGW_ATTR_IAM_POLICY].append(p.text);
6746 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
6747 &s->bucket_info.objv_tracker);
6748 if (op_ret == -ECANCELED) {
6749 op_ret = 0; /* lost a race, but it's ok because policies are immutable */
6750 }
6751 } catch (rgw::IAM::PolicyParseException& e) {
6752 ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
6753 op_ret = -EINVAL;
6754 }
6755 }
6756
/* GetBucketPolicy response: the stored policy document is returned as
 * a JSON body. */
6757 void RGWGetBucketPolicy::send_response()
6758 {
6759 if (op_ret) {
6760 set_req_state_err(s, op_ret);
6761 }
6762 dump_errno(s);
6763 end_header(s, this, "application/json");
6764 dump_body(s, policy);
6765 }
6766
6767 int RGWGetBucketPolicy::verify_permission()
6768 {
6769 if (!verify_bucket_permission(s, rgw::IAM::s3GetBucketPolicy)) {
6770 return -EACCES;
6771 }
6772
6773 return 0;
6774 }
6775
6776 void RGWGetBucketPolicy::execute()
6777 {
6778 auto attrs = s->bucket_attrs;
6779 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
6780 if (aiter == attrs.end()) {
6781 ldout(s->cct, 0) << __func__ << " can't find bucket IAM POLICY attr"
6782 << " bucket_name = " << s->bucket_name << dendl;
6783 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6784 s->err.message = "The bucket policy does not exist";
6785 return;
6786 } else {
6787 policy = attrs[RGW_ATTR_IAM_POLICY];
6788
6789 if (policy.length() == 0) {
6790 ldout(s->cct, 10) << "The bucket policy does not exist, bucket: " << s->bucket_name << dendl;
6791 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6792 s->err.message = "The bucket policy does not exist";
6793 return;
6794 }
6795 }
6796 }
6797
/* DeleteBucketPolicy response: status and headers only, no body. */
6798 void RGWDeleteBucketPolicy::send_response()
6799 {
6800 if (op_ret) {
6801 set_req_state_err(s, op_ret);
6802 }
6803 dump_errno(s);
6804 end_header(s);
6805 }
6806
6807 int RGWDeleteBucketPolicy::verify_permission()
6808 {
6809 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucketPolicy)) {
6810 return -EACCES;
6811 }
6812
6813 return 0;
6814 }
6815
/* Drop the IAM policy attribute from a copy of the bucket attrs and
 * write the reduced set back (the copy is required — we must not
 * mutate s->bucket_attrs unless the store write succeeds). */
6816 void RGWDeleteBucketPolicy::execute()
6817 {
6818 auto attrs = s->bucket_attrs;
6819 attrs.erase(RGW_ATTR_IAM_POLICY);
6820 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
6821 &s->bucket_info.objv_tracker);
6822 if (op_ret == -ECANCELED) {
6823 op_ret = 0; /* lost a race, but it's ok because policies are immutable */
6824 }
6825 }