]> git.proxmox.com Git - ceph.git/blame - ceph/src/rgw/rgw_op.cc
update sources to v12.1.1
[ceph.git] / ceph / src / rgw / rgw_op.cc
CommitLineData
7c673cae
FG
1// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2// vim: ts=8 sw=2 smarttab
3
4#include <errno.h>
5#include <stdlib.h>
31f18b77 6#include <system_error>
7c673cae
FG
7#include <unistd.h>
8
9#include <sstream>
10
11#include <boost/algorithm/string/predicate.hpp>
31f18b77 12#include <boost/bind.hpp>
7c673cae 13#include <boost/optional.hpp>
31f18b77
FG
14#include <boost/utility/in_place_factory.hpp>
15#include <boost/utility/string_view.hpp>
7c673cae
FG
16
17#include "common/Clock.h"
18#include "common/armor.h"
31f18b77 19#include "common/errno.h"
7c673cae
FG
20#include "common/mime.h"
21#include "common/utf8.h"
22#include "common/ceph_json.h"
23
24#include "rgw_rados.h"
25#include "rgw_op.h"
26#include "rgw_rest.h"
27#include "rgw_acl.h"
28#include "rgw_acl_s3.h"
29#include "rgw_acl_swift.h"
30#include "rgw_user.h"
31#include "rgw_bucket.h"
32#include "rgw_log.h"
33#include "rgw_multi.h"
34#include "rgw_multi_del.h"
35#include "rgw_cors.h"
36#include "rgw_cors_s3.h"
37#include "rgw_rest_conn.h"
38#include "rgw_rest_s3.h"
39#include "rgw_tar.h"
40#include "rgw_client_io.h"
41#include "rgw_compression.h"
42#include "rgw_role.h"
224ce89b 43#include "rgw_tag_s3.h"
7c673cae
FG
44#include "cls/lock/cls_lock_client.h"
45#include "cls/rgw/cls_rgw_client.h"
46
47
48#include "include/assert.h"
49
50#include "compressor/Compressor.h"
51
52#include "rgw_acl_swift.h"
53
54#define dout_context g_ceph_context
55#define dout_subsys ceph_subsys_rgw
56
57using namespace std;
58using namespace librados;
59using ceph::crypto::MD5;
31f18b77
FG
60using boost::optional;
61using boost::none;
7c673cae 62
31f18b77
FG
63using rgw::IAM::ARN;
64using rgw::IAM::Effect;
65using rgw::IAM::Policy;
66
67using rgw::IAM::Policy;
7c673cae
FG
68
69static string mp_ns = RGW_OBJ_NS_MULTIPART;
70static string shadow_ns = RGW_OBJ_NS_SHADOW;
71
72static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
73static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
74 bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
75
76static MultipartMetaFilter mp_filter;
77
/* Parse an HTTP Range header value of the form "bytes=first-last".
 * On success returns 0 and sets ofs/end; *partial_content is true when
 * a well-formed range spec was seen. Returns -ERANGE for malformed or
 * inverted ranges. A suffix spec "-N" yields ofs = -N, end = -1.
 * NOTE: when the last-byte position is omitted ("N-"), `end` is left
 * untouched; callers are expected to pre-initialize it. */
static int parse_range(const char *range, off_t& ofs, off_t& end, bool *partial_content)
{
  std::string spec(range);
  *partial_content = false;

  /* Locate the "bytes=" unit prefix anywhere in the value. */
  size_t unit = spec.find("bytes=");
  if (unit != std::string::npos) {
    spec = spec.substr(unit + 6); /* size of("bytes=") */
  } else {
    /* Tolerate whitespace around '=' (e.g. "bytes = 0-4"). */
    size_t start = 0;
    while (isspace(spec[start]))
      start++;
    int scan = start;
    while (isalpha(spec[scan]))
      scan++;
    /* NB: the comparison deliberately starts at the beginning of the
     * string (not at `start`) — historical behaviour, kept as-is. */
    if (strncasecmp(spec.c_str(), "bytes", scan - start) != 0)
      return 0;
    while (isspace(spec[scan]))
      scan++;
    if (spec[scan] != '=')
      return 0;
    spec = spec.substr(scan + 1);
  }

  size_t dash = spec.find('-');
  if (dash == std::string::npos)
    return -ERANGE;

  *partial_content = true;

  const std::string first = spec.substr(0, dash);
  const std::string last = spec.substr(dash + 1);

  if (!last.empty()) {
    end = atoll(last.c_str());
    if (end < 0)
      return -ERANGE;
  }

  if (!first.empty()) {
    ofs = atoll(first.c_str());
  } else {
    /* RFC 2616 suffix-byte-range-spec: "-N" means the final N bytes. */
    ofs = -end;
    end = -1;
  }

  if (end >= 0 && end < ofs)
    return -ERANGE;

  return 0;
}
133
/* Decode a serialized RGWAccessControlPolicy out of @bl into @policy.
 * Returns 0 on success, -EIO when the buffer cannot be decoded. */
static int decode_policy(CephContext *cct,
                         bufferlist& bl,
                         RGWAccessControlPolicy *policy)
{
  bufferlist::iterator iter = bl.begin();
  try {
    policy->decode(iter);
  } catch (buffer::error& err) {
    ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
    return -EIO;
  }
  if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    /* Render the decoded policy as S3 XML, but only when the debug
     * level warrants it (avoids the XML formatting cost otherwise). */
    RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
    ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
    s3policy->to_xml(*_dout);
    *_dout << dendl;
  }
  return 0;
}
153
154
155static int get_user_policy_from_attr(CephContext * const cct,
31f18b77
FG
156 RGWRados * const store,
157 map<string, bufferlist>& attrs,
158 RGWAccessControlPolicy& policy /* out */)
7c673cae
FG
159{
160 auto aiter = attrs.find(RGW_ATTR_ACL);
161 if (aiter != attrs.end()) {
162 int ret = decode_policy(cct, aiter->second, &policy);
163 if (ret < 0) {
164 return ret;
165 }
166 } else {
167 return -ENOENT;
168 }
169
170 return 0;
171}
172
/* Load the bucket ACL from the bucket-instance attribute map.
 * If no ACL attribute exists (legacy/broken bucket), synthesize a
 * default ACL owned by the bucket owner instead of failing.
 * Returns 0 on success or a negative error code. */
static int get_bucket_instance_policy_from_attr(CephContext *cct,
                                                RGWRados *store,
                                                RGWBucketInfo& bucket_info,
                                                map<string, bufferlist>& bucket_attrs,
                                                RGWAccessControlPolicy *policy,
                                                rgw_raw_obj& obj)
{
  map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);

  if (aiter != bucket_attrs.end()) {
    int ret = decode_policy(cct, aiter->second, policy);
    if (ret < 0)
      return ret;
  } else {
    ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
    RGWUserInfo uinfo;
    /* object exists, but policy is broken */
    /* need the owner's display name to build the default ACL */
    int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
    if (r < 0)
      return r;

    policy->create_default(bucket_info.owner, uinfo.display_name);
  }
  return 0;
}
198
/* Read the ACL attribute directly off an object and decode it into
 * @policy. When the object exists but carries no ACL (-ENODATA), fall
 * back to a default ACL owned by the bucket owner.
 * Returns 0 on success, a negative error (e.g. -ENOENT if the object
 * does not exist) otherwise. */
static int get_obj_policy_from_attr(CephContext *cct,
                                    RGWRados *store,
                                    RGWObjectCtx& obj_ctx,
                                    RGWBucketInfo& bucket_info,
                                    map<string, bufferlist>& bucket_attrs,
                                    RGWAccessControlPolicy *policy,
                                    rgw_obj& obj)
{
  bufferlist bl;
  int ret = 0;

  RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
  RGWRados::Object::Read rop(&op_target);

  ret = rop.get_attr(RGW_ATTR_ACL, bl);
  if (ret >= 0) {
    ret = decode_policy(cct, bl, policy);
    if (ret < 0)
      return ret;
  } else if (ret == -ENODATA) {
    /* object exists, but policy is broken */
    ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
    RGWUserInfo uinfo;
    /* owner's display name is needed to build the default ACL */
    ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
    if (ret < 0)
      return ret;

    policy->create_default(bucket_info.owner, uinfo.display_name);
  }
  /* note: other negative get_attr() results are returned unchanged */
  return ret;
}
230
231
/**
 * Get the AccessControlPolicy for a bucket off of disk.
 * policy: must point to a valid RGWACL, and will be filled upon return.
 * bucket_info: info for the bucket whose instance object stores the ACL.
 * Returns: 0 on success, -ERR# otherwise.
 */
static int get_bucket_policy_from_attr(CephContext *cct,
                                       RGWRados *store,
                                       RGWBucketInfo& bucket_info,
                                       map<string, bufferlist>& bucket_attrs,
                                       RGWAccessControlPolicy *policy)
{
  /* The ACL lives on the bucket instance object; resolve it first,
   * then delegate to the instance-level reader. */
  rgw_raw_obj instance_obj;
  store->get_bucket_instance_obj(bucket_info.bucket, instance_obj);
  return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs,
                                              policy, instance_obj);
}
250
251static optional<Policy> get_iam_policy_from_attr(CephContext* cct,
252 RGWRados* store,
253 map<string, bufferlist>& attrs,
254 const string& tenant) {
255 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
256 if (i != attrs.end()) {
257 return Policy(cct, tenant, i->second);
258 } else {
259 return none;
260 }
7c673cae
FG
261}
262
263static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
264{
265 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
266 RGWRados::Object::Read read_op(&op_target);
267
268 read_op.params.attrs = &attrs;
7c673cae
FG
269
270 return read_op.prepare();
271}
272
/* Set a single xattr on @obj while preserving all of its other
 * attributes: read the current attr map, overwrite one entry, and
 * write the whole map back. set_atomic() is applied before the write;
 * NOTE(review): callers map a -ECANCELED result onto a conflict error,
 * presumably a racing update of the object — confirm against
 * RGWRados::set_attrs semantics. */
static int modify_obj_attr(RGWRados *store, struct req_state *s, rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
{
  map<string, bufferlist> attrs;
  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
  RGWRados::Object::Read read_op(&op_target);

  read_op.params.attrs = &attrs;

  int r = read_op.prepare();
  if (r < 0) {
    return r;
  }
  store->set_atomic(s->obj_ctx, read_op.state.obj);
  attrs[attr_name] = attr_val;
  return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL);
}
289
290static int get_system_obj_attrs(RGWRados *store, struct req_state *s, rgw_raw_obj& obj, map<string, bufferlist>& attrs,
291 uint64_t *obj_size, RGWObjVersionTracker *objv_tracker)
292{
293 RGWRados::SystemObject src(store, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
294 RGWRados::SystemObject::Read rop(&src);
295
296 rop.stat_params.attrs = &attrs;
297 rop.stat_params.obj_size = obj_size;
298
299 int ret = rop.stat(objv_tracker);
300 return ret;
301}
302
303static int read_bucket_policy(RGWRados *store,
304 struct req_state *s,
305 RGWBucketInfo& bucket_info,
306 map<string, bufferlist>& bucket_attrs,
307 RGWAccessControlPolicy *policy,
308 rgw_bucket& bucket)
309{
310 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
311 ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
312 return -ERR_USER_SUSPENDED;
313 }
314
315 if (bucket.name.empty()) {
316 return 0;
317 }
318
319 int ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
320 if (ret == -ENOENT) {
321 ret = -ERR_NO_SUCH_BUCKET;
322 }
323
324 return ret;
325}
326
/* Load the object ACL (into @acl) and the bucket's IAM policy (into
 * @policy) for the addressed object. For multipart uploads
 * ("uploadId" present) the ACL is read off the upload's meta object.
 * When the object does not exist, the bucket ACL decides whether the
 * caller sees -ENOENT (would be allowed to read) or -EACCES. */
static int read_obj_policy(RGWRados *store,
                           struct req_state *s,
                           RGWBucketInfo& bucket_info,
                           map<string, bufferlist>& bucket_attrs,
                           RGWAccessControlPolicy* acl,
                           optional<Policy>& policy,
                           rgw_bucket& bucket,
                           rgw_obj_key& object)
{
  string upload_id;
  upload_id = s->info.args.get("uploadId");
  rgw_obj obj;

  /* suspended buckets are only accessible to system requests */
  if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
    ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
    return -ERR_USER_SUSPENDED;
  }

  if (!upload_id.empty()) {
    /* multipart upload */
    RGWMPObj mp(object.name, upload_id);
    string oid = mp.get_meta();
    obj.init_ns(bucket, oid, mp_ns);
    obj.set_in_extra_data(true);
  } else {
    obj = rgw_obj(bucket, object);
  }
  policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
                                     bucket_info, bucket_attrs, acl, obj);
  if (ret == -ENOENT) {
    /* object does not exist checking the bucket's ACL to make sure
       that we send a proper error code */
    RGWAccessControlPolicy bucket_policy(s->cct);
    ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
    if (ret < 0) {
      return ret;
    }

    /* only reveal -ENOENT to identities that could read the bucket:
     * owner, admin, or anyone granted READ by the bucket ACL */
    const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
    if (bucket_owner.compare(s->user->user_id) != 0 &&
        ! s->auth.identity->is_admin_of(bucket_owner) &&
        ! bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                          RGW_PERM_READ)) {
      ret = -EACCES;
    } else {
      ret = -ENOENT;
    }
  }

  return ret;
}
381
/**
 * Build the user and bucket ACLs (and the bucket IAM policy) for the
 * current request, filling the relevant req_state fields.
 * s: The req_state to draw information from.
 * Returns: 0 on success, -ERR# otherwise.
 */
388int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
389{
390 int ret = 0;
391 rgw_obj_key obj;
392 RGWUserInfo bucket_owner_info;
393 RGWObjectCtx obj_ctx(store);
394
395 string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
396 if (!bi.empty()) {
397 ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id);
398 if (ret < 0) {
399 return ret;
400 }
401 }
402
403 if(s->dialect.compare("s3") == 0) {
404 s->bucket_acl = new RGWAccessControlPolicy_S3(s->cct);
405 } else if(s->dialect.compare("swift") == 0) {
406 /* We aren't allocating the account policy for those operations using
407 * the Swift's infrastructure that don't really need req_state::user.
408 * Typical example here is the implementation of /info. */
409 if (!s->user->user_id.empty()) {
410 s->user_acl = std::unique_ptr<RGWAccessControlPolicy>(
411 new RGWAccessControlPolicy_SWIFTAcct(s->cct));
412 }
413 s->bucket_acl = new RGWAccessControlPolicy_SWIFT(s->cct);
414 } else {
415 s->bucket_acl = new RGWAccessControlPolicy(s->cct);
416 }
417
418 /* check if copy source is within the current domain */
419 if (!s->src_bucket_name.empty()) {
420 RGWBucketInfo source_info;
421
422 if (s->bucket_instance_id.empty()) {
423 ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL);
424 } else {
425 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL);
426 }
427 if (ret == 0) {
428 string& zonegroup = source_info.zonegroup;
429 s->local_source = store->get_zonegroup().equals(zonegroup);
430 }
431 }
432
433 struct {
434 rgw_user uid;
435 std::string display_name;
436 } acct_acl_user = {
437 s->user->user_id,
438 s->user->display_name,
439 };
440
441 if (!s->bucket_name.empty()) {
442 s->bucket_exists = true;
443 if (s->bucket_instance_id.empty()) {
444 ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, s->bucket_info, NULL, &s->bucket_attrs);
445 } else {
446 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, s->bucket_info, NULL, &s->bucket_attrs);
447 }
448 if (ret < 0) {
449 if (ret != -ENOENT) {
450 string bucket_log;
451 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
452 ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl;
453 return ret;
454 }
455 s->bucket_exists = false;
456 }
457 s->bucket = s->bucket_info.bucket;
458
459 if (s->bucket_exists) {
460 ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs, s->bucket_acl, s->bucket);
461 acct_acl_user = {
462 s->bucket_info.owner,
463 s->bucket_acl->get_owner().get_display_name(),
464 };
465 } else {
466 s->bucket_acl->create_default(s->user->user_id, s->user->display_name);
467 ret = -ERR_NO_SUCH_BUCKET;
468 }
469
470 s->bucket_owner = s->bucket_acl->get_owner();
471
472 RGWZoneGroup zonegroup;
473 int r = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
474 if (!r) {
475 if (!zonegroup.endpoints.empty()) {
476 s->zonegroup_endpoint = zonegroup.endpoints.front();
477 } else {
478 // use zonegroup's master zone endpoints
479 auto z = zonegroup.zones.find(zonegroup.master_zone);
480 if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
481 s->zonegroup_endpoint = z->second.endpoints.front();
482 }
483 }
484 s->zonegroup_name = zonegroup.get_name();
485 }
486 if (r < 0 && ret == 0) {
487 ret = r;
488 }
489
490 if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) {
491 ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl;
492 /* we now need to make sure that the operation actually requires copy source, that is
493 * it's a copy operation
494 */
31f18b77 495 if (store->get_zonegroup().is_master_zonegroup() && s->system_request) {
7c673cae
FG
496 /*If this is the master, don't redirect*/
497 } else if (!s->local_source ||
498 (s->op != OP_PUT && s->op != OP_COPY) ||
499 s->object.empty()) {
500 return -ERR_PERMANENT_REDIRECT;
501 }
502 }
503 }
504
505 /* handle user ACL only for those APIs which support it */
506 if (s->user_acl) {
507 map<string, bufferlist> uattrs;
508
509 ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs);
510 if (!ret) {
511 ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
512 }
513 if (-ENOENT == ret) {
514 /* In already existing clusters users won't have ACL. In such case
515 * assuming that only account owner has the rights seems to be
516 * reasonable. That allows to have only one verification logic.
517 * NOTE: there is small compatibility kludge for global, empty tenant:
518 * 1. if we try to reach an existing bucket, its owner is considered
519 * as account owner.
520 * 2. otherwise account owner is identity stored in s->user->user_id. */
521 s->user_acl->create_default(acct_acl_user.uid,
522 acct_acl_user.display_name);
523 ret = 0;
524 } else {
525 ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id="
526 << s->user->user_id
527 << ", ret="
528 << ret
529 << ")" << dendl;
530 return ret;
531 }
532 }
533
31f18b77
FG
534 try {
535 s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
536 s->bucket_tenant);
537 } catch (const std::exception& e) {
538 // Really this is a can't happen condition. We parse the policy
539 // when it's given to us, so perhaps we should abort or otherwise
540 // raise bloody murder.
541 lderr(s->cct) << "Error reading IAM Policy: " << e.what() << dendl;
542 ret = -EACCES;
543 }
7c673cae
FG
544
545 return ret;
546}
547
/**
 * Get the AccessControlPolicy for an object off of disk.
 * s: The req_state to draw information from.
 * prefetch_data: hint the backend to start prefetching the object data.
 * Returns: 0 on success, -ERR# otherwise.
 */
554int rgw_build_object_policies(RGWRados *store, struct req_state *s,
555 bool prefetch_data)
556{
557 int ret = 0;
558
559 if (!s->object.empty()) {
560 if (!s->bucket_exists) {
561 return -ERR_NO_SUCH_BUCKET;
562 }
563 s->object_acl = new RGWAccessControlPolicy(s->cct);
564
565 rgw_obj obj(s->bucket, s->object);
566
567 store->set_atomic(s->obj_ctx, obj);
568 if (prefetch_data) {
569 store->set_prefetch_data(s->obj_ctx, obj);
570 }
31f18b77 571 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs, s->object_acl, s->iam_policy, s->bucket, s->object);
7c673cae
FG
572 }
573
574 return ret;
575}
576
31f18b77
FG
577rgw::IAM::Environment rgw_build_iam_environment(RGWRados* store,
578 struct req_state* s)
579{
580 rgw::IAM::Environment e;
581 const auto& m = s->info.env->get_map();
582 auto t = ceph::real_clock::now();
583 e.emplace(std::piecewise_construct,
584 std::forward_as_tuple("aws:CurrentTime"),
585 std::forward_as_tuple(std::to_string(
586 ceph::real_clock::to_time_t(t))));
587 e.emplace(std::piecewise_construct,
588 std::forward_as_tuple("aws:EpochTime"),
589 std::forward_as_tuple(ceph::to_iso_8601(t)));
590 // TODO: This is fine for now, but once we have STS we'll need to
591 // look and see. Also this won't work with the IdentityApplier
592 // model, since we need to know the actual credential.
593 e.emplace(std::piecewise_construct,
594 std::forward_as_tuple("aws:PrincipalType"),
595 std::forward_as_tuple("User"));
596
597 auto i = m.find("HTTP_REFERER");
598 if (i != m.end()) {
599 e.emplace(std::piecewise_construct,
600 std::forward_as_tuple("aws:Referer"),
601 std::forward_as_tuple(i->second));
602 }
603
604 // These seem to be the semantics, judging from rest_rgw_s3.cc
605 i = m.find("SERVER_PORT_SECURE");
606 if (i != m.end()) {
607 e.emplace(std::piecewise_construct,
608 std::forward_as_tuple("aws:SecureTransport"),
609 std::forward_as_tuple("true"));
610 }
611
612 i = m.find("HTTP_HOST");
613 if (i != m.end()) {
614 e.emplace(std::piecewise_construct,
615 std::forward_as_tuple("aws:SourceIp"),
616 std::forward_as_tuple(i->second));
617 }
618
619 i = m.find("HTTP_USER_AGENT"); {
620 if (i != m.end())
621 e.emplace(std::piecewise_construct,
622 std::forward_as_tuple("aws:UserAgent"),
623 std::forward_as_tuple(i->second));
624 }
625
626 if (s->user) {
627 // What to do about aws::userid? One can have multiple access
628 // keys so that isn't really suitable. Do we have a durable
629 // identifier that can persist through name changes?
630 e.emplace(std::piecewise_construct,
631 std::forward_as_tuple("aws:username"),
632 std::forward_as_tuple(s->user->user_id.id));
633 }
634 return e;
635}
636
/* Common pre-exec hook for bucket/object ops: send "100 Continue"
 * when the client asked for it, then dump the bucket header. */
void rgw_bucket_object_pre_exec(struct req_state *s)
{
  if (s->expect_cont)
    dump_continue(s);

  dump_bucket_from_state(s);
}
644
645int RGWGetObj::verify_permission()
646{
647 obj = rgw_obj(s->bucket, s->object);
648 store->set_atomic(s->obj_ctx, obj);
649 if (get_data) {
650 store->set_prefetch_data(s->obj_ctx, obj);
651 }
652
31f18b77
FG
653 if (torrent.get_flag()) {
654 if (obj.key.instance.empty()) {
655 action = rgw::IAM::s3GetObjectTorrent;
656 } else {
657 action = rgw::IAM::s3GetObjectVersionTorrent;
658 }
659 } else {
660 if (obj.key.instance.empty()) {
661 action = rgw::IAM::s3GetObject;
662 } else {
663 action = rgw::IAM::s3GetObjectVersion;
664 }
665 }
666
667 if (!verify_object_permission(s, action)) {
7c673cae
FG
668 return -EACCES;
669 }
670
671 return 0;
672}
673
674
/* Check that the user's op mask permits this operation's type, and
 * reject modifying ops against a read-only zone for non-system users. */
int RGWOp::verify_op_mask()
{
  uint32_t required_mask = op_mask();

  ldout(s->cct, 20) << "required_mask= " << required_mask
                    << " user.op_mask=" << s->user->op_mask << dendl;

  if ((s->user->op_mask & required_mask) != required_mask) {
    return -EPERM;
  }

  if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) {
    ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl;
    return -EPERM;
  }

  return 0;
}
693
224ce89b
WB
/* Require s3GetObjectTagging (or the versioned variant when a
 * specific object instance is addressed). */
int RGWGetObjTags::verify_permission()
{
  if (!verify_object_permission(s,
                                s->object.instance.empty() ?
                                rgw::IAM::s3GetObjectTagging:
                                rgw::IAM::s3GetObjectVersionTagging))
    return -EACCES;

  return 0;
}
704
/* Delegate to the shared bucket/object pre-exec hook. */
void RGWGetObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
709
/* Fetch the object's tag set (RGW_ATTR_TAGS xattr) and stream it
 * back to the client. */
void RGWGetObjTags::execute()
{
  rgw_obj obj;
  map<string,bufferlist> attrs;

  obj = rgw_obj(s->bucket, s->object);

  store->set_atomic(s->obj_ctx, obj);

  /* NOTE(review): op_ret from get_obj_attrs is not checked here; on
   * failure attrs stays empty and an empty tag set is sent, with
   * op_ret carrying the error into the response path — confirm this
   * is intended. */
  op_ret = get_obj_attrs(store, s, obj, attrs);
  auto tags = attrs.find(RGW_ATTR_TAGS);
  if(tags != attrs.end()){
    has_tags = true;
    tags_bl.append(tags->second);
  }
  send_response_data(tags_bl);
}
727
/* Require s3PutObjectTagging (or the versioned variant when a
 * specific object instance is addressed). */
int RGWPutObjTags::verify_permission()
{
  if (!verify_object_permission(s,
                                s->object.instance.empty() ?
                                rgw::IAM::s3PutObjectTagging:
                                rgw::IAM::s3PutObjectVersionTagging))
    return -EACCES;
  return 0;
}
737
/* Store the request's tag set on the object as the RGW_ATTR_TAGS
 * xattr; tags_bl is filled by get_params(). */
void RGWPutObjTags::execute()
{
  op_ret = get_params();
  if (op_ret < 0)
    return;

  if (s->object.empty()){
    op_ret= -EINVAL; // we only support tagging on existing objects
    return;
  }

  rgw_obj obj;
  obj = rgw_obj(s->bucket, s->object);
  store->set_atomic(s->obj_ctx, obj);
  op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
  /* -ECANCELED: the attr write raced another update of the object;
   * surface it as a tagging conflict */
  if (op_ret == -ECANCELED){
    op_ret = -ERR_TAG_CONFLICT;
  }
}
757
/* Delegate to the shared bucket/object pre-exec hook. */
void RGWDeleteObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
762
763
/* Require s3DeleteObjectTagging (or the versioned variant); bucket
 * requests without an object pass through and become a no-op in
 * execute(). */
int RGWDeleteObjTags::verify_permission()
{
  if (!s->object.empty()) {
    if (!verify_object_permission(s,
                                  s->object.instance.empty() ?
                                  rgw::IAM::s3DeleteObjectTagging:
                                  rgw::IAM::s3DeleteObjectVersionTagging))
      return -EACCES;
  }
  return 0;
}
775
776void RGWDeleteObjTags::execute()
777{
778 if (s->object.empty())
779 return;
780
781 rgw_obj obj;
782 obj = rgw_obj(s->bucket, s->object);
783 store->set_atomic(s->obj_ctx, obj);
784 map <string, bufferlist> attrs;
785 map <string, bufferlist> rmattr;
786 bufferlist bl;
787 rmattr[RGW_ATTR_TAGS] = bl;
788 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr);
789}
790
7c673cae
FG
791int RGWOp::do_aws4_auth_completion()
792{
31f18b77
FG
793 ldout(s->cct, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
794 if (s->auth.completer) {
795 if (!s->auth.completer->complete()) {
796 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
797 } else {
798 dout(10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
7c673cae 799 }
31f18b77
FG
800
801 /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
802 * call passes, so we disable second one. This is old behaviour, sorry!
803 * Plan for tomorrow: seek and destroy. */
804 s->auth.completer = nullptr;
7c673cae
FG
805 }
806
807 return 0;
808}
809
/* Initialize bucket_quota and user_quota for this request.
 * Precedence for the bucket quota: per-bucket setting, then the
 * bucket owner's default, then the zone-wide default. */
int RGWOp::init_quota()
{
  /* no quota enforcement for system requests */
  if (s->system_request)
    return 0;

  /* init quota related stuff */
  if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
    return 0;
  }

  /* only interested in object related ops */
  if (s->object.empty()) {
    return 0;
  }

  RGWUserInfo owner_info;
  RGWUserInfo *uinfo;

  /* quota settings come from the bucket owner, which may differ from
   * the requesting user; skip the lookup when they are the same */
  if (s->user->user_id == s->bucket_owner.get_id()) {
    uinfo = s->user;
  } else {
    int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
    if (r < 0)
      return r;
    uinfo = &owner_info;
  }

  if (s->bucket_info.quota.enabled) {
    bucket_quota = s->bucket_info.quota;
  } else if (uinfo->bucket_quota.enabled) {
    bucket_quota = uinfo->bucket_quota;
  } else {
    bucket_quota = store->get_bucket_quota();
  }

  if (uinfo->user_quota.enabled) {
    user_quota = uinfo->user_quota;
  } else {
    user_quota = store->get_user_quota();
  }

  return 0;
}
854
855static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
856 uint8_t flags = 0;
857
858 if (!req_meth) {
859 dout(5) << "req_meth is null" << dendl;
860 return false;
861 }
862
863 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
864 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
865 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
866 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
867 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
868
869 if ((rule->get_allowed_methods() & flags) == flags) {
870 dout(10) << "Method " << req_meth << " is supported" << dendl;
871 } else {
872 dout(5) << "Method " << req_meth << " is not supported" << dendl;
873 return false;
874 }
875
876 return true;
877}
878
/* Load and decode the bucket's CORS configuration from the
 * RGW_ATTR_CORS attribute. Sets cors_exist accordingly.
 * Returns 0 (also when no configuration is present) or -EIO when the
 * stored attribute cannot be decoded. */
int RGWOp::read_bucket_cors()
{
  bufferlist bl;

  map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
  if (aiter == s->bucket_attrs.end()) {
    ldout(s->cct, 20) << "no CORS configuration attr found" << dendl;
    cors_exist = false;
    return 0; /* no CORS configuration found */
  }

  cors_exist = true;

  bl = aiter->second;

  bufferlist::iterator iter = bl.begin();
  try {
    bucket_cors.decode(iter);
  } catch (buffer::error& err) {
    ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
    return -EIO;
  }
  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    /* render the parsed configuration only at high debug levels */
    RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
    ldout(s->cct, 15) << "Read RGWCORSConfiguration";
    s3cors->to_xml(*_dout);
    *_dout << dendl;
  }
  return 0;
}
909
910/** CORS 6.2.6.
911 * If any of the header field-names is not a ASCII case-insensitive match for
912 * any of the values in list of headers do not set any additional headers and
913 * terminate this set of steps.
914 * */
915static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
916 if (req_hdrs) {
917 list<string> hl;
918 get_str_list(req_hdrs, hl);
919 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
920 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
921 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
922 } else {
923 if (hdrs.length() > 0) hdrs.append(",");
924 hdrs.append((*it));
925 }
926 }
927 }
928 rule->format_exp_headers(exp_hdrs);
929 *max_age = rule->get_max_age();
930}
931
932/**
933 * Generate the CORS header response
934 *
935 * This is described in the CORS standard, section 6.2.
936 */
/**
 * Generate the CORS header response
 *
 * This is described in the CORS standard, section 6.2.
 *
 * Returns true and fills origin/method/headers/exp_headers/max_age
 * when the request matches a CORS rule on this bucket; false (with
 * op_ret possibly set) otherwise.
 */
bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
{
  /* CORS 6.2.1. */
  const char *orig = s->info.env->get("HTTP_ORIGIN");
  if (!orig) {
    return false;
  }

  /* Custom: */
  origin = orig;
  op_ret = read_bucket_cors();
  if (op_ret < 0) {
    return false;
  }

  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    return false;
  }

  /* CORS 6.2.2. */
  RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
  if (!rule)
    return false;

  /*
   * Set the Allowed-Origin header to a asterisk if this is allowed in the rule
   * and no Authorization was send by the client
   *
   * The origin parameter specifies a URI that may access the resource. The browser must enforce this.
   * For requests without credentials, the server may specify "*" as a wildcard,
   * thereby allowing any origin to access the resource.
   */
  const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
  if (!authorization && rule->has_wildcard_origin())
    origin = "*";

  /* CORS 6.2.3. */
  const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    /* non-preflight requests carry no request-method header; validate
     * against the actual HTTP method instead */
    req_meth = s->info.method;
  }

  if (req_meth) {
    method = req_meth;
    /* CORS 6.2.5. */
    if (!validate_cors_rule_method(rule, req_meth)) {
      return false;
    }
  }

  /* CORS 6.2.4. */
  const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");

  /* CORS 6.2.6. */
  get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);

  return true;
}
996
/* Read one segment of a user-manifest (SLO/DLO-style) object and feed
 * the byte range [start_ofs, end_ofs] through the response filter
 * chain, decompressing when the segment was stored compressed. The
 * segment's own ACL is enforced unless this is a system/admin request.
 * Returns 0 on success or a negative error code (also left in op_ret). */
int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       const off_t start_ofs,
                                       const off_t end_ofs)
{
  ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl;
  RGWGetObj_CB cb(this);
  RGWGetDataCB* filter = &cb;
  boost::optional<RGWGetObj_Decompress> decompress;

  int64_t cur_ofs = start_ofs;
  int64_t cur_end = end_ofs;

  rgw_obj part(bucket, ent.key);

  map<string, bufferlist> attrs;

  uint64_t obj_size;
  RGWObjectCtx obj_ctx(store);
  RGWAccessControlPolicy obj_policy(s->cct);

  ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl;

  obj_ctx.obj.set_atomic(part);
  store->set_prefetch_data(&obj_ctx, part);

  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
  RGWRados::Object::Read read_op(&op_target);

  /* guard against the segment being replaced since the manifest was
   * listed: require the etag recorded in the dir entry */
  read_op.conds.if_match = ent.meta.etag.c_str();
  read_op.params.attrs = &attrs;
  read_op.params.obj_size = &obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    return op_ret;
  op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
  if (op_ret < 0)
    return op_ret;
  bool need_decompress;
  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    return -EIO;
  }

  if (need_decompress)
  {
    if (cs_info.orig_size != ent.meta.accounted_size) {
      // hmm.. something wrong, object not as expected, abort!
      /* NOTE(review): the message prints ent.meta.size although the
       * comparison uses ent.meta.accounted_size — log text may be
       * misleading. */
      ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size <<
        ", actual read size=" << ent.meta.size << dendl;
      return -EIO;
    }
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }
  else
  {
    if (obj_size != ent.meta.size) {
      // hmm.. something wrong, object not as expected, abort!
      ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl;
      return -EIO;
    }
  }

  op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
  if (op_ret < 0)
    return op_ret;

  /* We can use global user_acl because LOs cannot have segments
   * stored inside different accounts. */
  if (s->system_request) {
    ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl;
  } else if (s->auth.identity->is_admin_of(s->user->user_id)) {
    ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl;
  } else if (!verify_object_permission(s, part, s->user_acl.get(), bucket_acl,
                                       &obj_policy, bucket_policy, action)) {
    return -EPERM;
  }

  /* empty segment: nothing to stream */
  if (ent.meta.size == 0) {
    return 0;
  }

  perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
  filter->fixup_range(cur_ofs, cur_end);
  op_ret = read_op.iterate(cur_ofs, cur_end, filter);
  if (op_ret >= 0)
    op_ret = filter->flush();
  return op_ret;
}
1091
1092static int iterate_user_manifest_parts(CephContext * const cct,
1093 RGWRados * const store,
1094 const off_t ofs,
1095 const off_t end,
1096 RGWBucketInfo *pbucket_info,
1097 const string& obj_prefix,
31f18b77
FG
1098 RGWAccessControlPolicy * const bucket_acl,
1099 const optional<Policy>& bucket_policy,
7c673cae
FG
1100 uint64_t * const ptotal_len,
1101 uint64_t * const pobj_size,
1102 string * const pobj_sum,
1103 int (*cb)(rgw_bucket& bucket,
1104 const rgw_bucket_dir_entry& ent,
31f18b77
FG
1105 RGWAccessControlPolicy * const bucket_acl,
1106 const optional<Policy>& bucket_policy,
7c673cae
FG
1107 off_t start_ofs,
1108 off_t end_ofs,
1109 void *param),
1110 void * const cb_param)
1111{
1112 rgw_bucket& bucket = pbucket_info->bucket;
1113 uint64_t obj_ofs = 0, len_count = 0;
1114 bool found_start = false, found_end = false, handled_end = false;
1115 string delim;
1116 bool is_truncated;
1117 vector<rgw_bucket_dir_entry> objs;
1118
1119 utime_t start_time = ceph_clock_now();
1120
1121 RGWRados::Bucket target(store, *pbucket_info);
1122 RGWRados::Bucket::List list_op(&target);
1123
1124 list_op.params.prefix = obj_prefix;
1125 list_op.params.delim = delim;
1126
1127 MD5 etag_sum;
1128 do {
1129#define MAX_LIST_OBJS 100
1130 int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
1131 if (r < 0) {
1132 return r;
1133 }
1134
1135 for (rgw_bucket_dir_entry& ent : objs) {
224ce89b
WB
1136 const uint64_t cur_total_len = obj_ofs;
1137 const uint64_t obj_size = ent.meta.accounted_size;
1138 uint64_t start_ofs = 0, end_ofs = obj_size;
7c673cae 1139
224ce89b 1140 if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
7c673cae
FG
1141 start_ofs = ofs - obj_ofs;
1142 found_start = true;
1143 }
1144
224ce89b 1145 obj_ofs += obj_size;
7c673cae
FG
1146 if (pobj_sum) {
1147 etag_sum.Update((const byte *)ent.meta.etag.c_str(),
1148 ent.meta.etag.length());
1149 }
1150
31f18b77 1151 if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
7c673cae
FG
1152 end_ofs = end - cur_total_len + 1;
1153 found_end = true;
1154 }
1155
1156 perfcounter->tinc(l_rgw_get_lat,
1157 (ceph_clock_now() - start_time));
1158
1159 if (found_start && !handled_end) {
1160 len_count += end_ofs - start_ofs;
1161
1162 if (cb) {
31f18b77 1163 r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, cb_param);
7c673cae
FG
1164 if (r < 0) {
1165 return r;
1166 }
1167 }
1168 }
1169
1170 handled_end = found_end;
1171 start_time = ceph_clock_now();
1172 }
1173 } while (is_truncated);
1174
1175 if (ptotal_len) {
1176 *ptotal_len = len_count;
1177 }
1178 if (pobj_size) {
1179 *pobj_size = obj_ofs;
1180 }
1181 if (pobj_sum) {
1182 complete_etag(etag_sum, pobj_sum);
1183 }
1184
1185 return 0;
1186}
1187
1188struct rgw_slo_part {
31f18b77
FG
1189 RGWAccessControlPolicy *bucket_acl = nullptr;
1190 Policy* bucket_policy = nullptr;
7c673cae
FG
1191 rgw_bucket bucket;
1192 string obj_name;
31f18b77 1193 uint64_t size = 0;
7c673cae 1194 string etag;
7c673cae
FG
1195};
1196
1197static int iterate_slo_parts(CephContext *cct,
1198 RGWRados *store,
1199 off_t ofs,
1200 off_t end,
1201 map<uint64_t, rgw_slo_part>& slo_parts,
1202 int (*cb)(rgw_bucket& bucket,
1203 const rgw_bucket_dir_entry& ent,
31f18b77
FG
1204 RGWAccessControlPolicy *bucket_acl,
1205 const optional<Policy>& bucket_policy,
7c673cae
FG
1206 off_t start_ofs,
1207 off_t end_ofs,
1208 void *param),
1209 void *cb_param)
1210{
1211 bool found_start = false, found_end = false;
1212
1213 if (slo_parts.empty()) {
1214 return 0;
1215 }
1216
1217 utime_t start_time = ceph_clock_now();
1218
1219 map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
1220 if (iter != slo_parts.begin()) {
1221 --iter;
1222 }
1223
1224 uint64_t obj_ofs = iter->first;
1225
1226 for (; iter != slo_parts.end() && !found_end; ++iter) {
1227 rgw_slo_part& part = iter->second;
1228 rgw_bucket_dir_entry ent;
1229
1230 ent.key.name = part.obj_name;
224ce89b 1231 ent.meta.accounted_size = ent.meta.size = part.size;
7c673cae
FG
1232 ent.meta.etag = part.etag;
1233
1234 uint64_t cur_total_len = obj_ofs;
1235 uint64_t start_ofs = 0, end_ofs = ent.meta.size;
1236
1237 if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
1238 start_ofs = ofs - obj_ofs;
1239 found_start = true;
1240 }
1241
1242 obj_ofs += ent.meta.size;
1243
1244 if (!found_end && obj_ofs > (uint64_t)end) {
1245 end_ofs = end - cur_total_len + 1;
1246 found_end = true;
1247 }
1248
1249 perfcounter->tinc(l_rgw_get_lat,
1250 (ceph_clock_now() - start_time));
1251
1252 if (found_start) {
1253 if (cb) {
31f18b77
FG
1254 // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
1255 int r = cb(part.bucket, ent, part.bucket_acl,
1256 (part.bucket_policy ?
1257 optional<Policy>(*part.bucket_policy) : none),
1258 start_ofs, end_ofs, cb_param);
1259 if (r < 0)
7c673cae
FG
1260 return r;
1261 }
1262 }
1263
1264 start_time = ceph_clock_now();
1265 }
1266
1267 return 0;
1268}
1269
1270static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
1271 const rgw_bucket_dir_entry& ent,
31f18b77
FG
1272 RGWAccessControlPolicy * const bucket_acl,
1273 const optional<Policy>& bucket_policy,
7c673cae
FG
1274 const off_t start_ofs,
1275 const off_t end_ofs,
1276 void * const param)
1277{
1278 RGWGetObj *op = static_cast<RGWGetObj *>(param);
31f18b77 1279 return op->read_user_manifest_part(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs);
7c673cae
FG
1280}
1281
1282int RGWGetObj::handle_user_manifest(const char *prefix)
1283{
31f18b77
FG
1284 const boost::string_view prefix_view(prefix);
1285 ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix="
1286 << prefix_view << dendl;
7c673cae 1287
31f18b77
FG
1288 const size_t pos = prefix_view.find('/');
1289 if (pos == string::npos) {
7c673cae 1290 return -EINVAL;
31f18b77 1291 }
7c673cae 1292
31f18b77
FG
1293 const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
1294 const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));
7c673cae
FG
1295
1296 rgw_bucket bucket;
1297
31f18b77
FG
1298 RGWAccessControlPolicy _bucket_acl(s->cct);
1299 RGWAccessControlPolicy *bucket_acl;
1300 optional<Policy> _bucket_policy;
1301 optional<Policy>* bucket_policy;
7c673cae
FG
1302 RGWBucketInfo bucket_info;
1303 RGWBucketInfo *pbucket_info;
1304
1305 if (bucket_name.compare(s->bucket.name) != 0) {
1306 map<string, bufferlist> bucket_attrs;
1307 RGWObjectCtx obj_ctx(store);
1308 int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
1309 bucket_name, bucket_info, NULL,
1310 &bucket_attrs);
1311 if (r < 0) {
1312 ldout(s->cct, 0) << "could not get bucket info for bucket="
1313 << bucket_name << dendl;
1314 return r;
1315 }
1316 bucket = bucket_info.bucket;
1317 pbucket_info = &bucket_info;
31f18b77
FG
1318 bucket_acl = &_bucket_acl;
1319 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
7c673cae
FG
1320 if (r < 0) {
1321 ldout(s->cct, 0) << "failed to read bucket policy" << dendl;
1322 return r;
1323 }
31f18b77
FG
1324 _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
1325 bucket_info.bucket.tenant);
1326 bucket_policy = &_bucket_policy;
7c673cae
FG
1327 } else {
1328 bucket = s->bucket;
1329 pbucket_info = &s->bucket_info;
31f18b77
FG
1330 bucket_acl = s->bucket_acl;
1331 bucket_policy = &s->iam_policy;
7c673cae
FG
1332 }
1333
1334 /* dry run to find out:
1335 * - total length (of the parts we are going to send to client),
1336 * - overall DLO's content size,
1337 * - md5 sum of overall DLO's content (for etag of Swift API). */
1338 int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
31f18b77
FG
1339 pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
1340 nullptr, &s->obj_size, &lo_etag,
7c673cae
FG
1341 nullptr /* cb */, nullptr /* cb arg */);
1342 if (r < 0) {
1343 return r;
1344 }
1345
31f18b77
FG
1346 r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
1347 if (r < 0) {
1348 return r;
1349 }
1350
1351 r = iterate_user_manifest_parts(s->cct, store, ofs, end,
1352 pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
1353 &total_len, nullptr, nullptr,
1354 nullptr, nullptr);
1355 if (r < 0) {
1356 return r;
1357 }
1358
7c673cae
FG
1359 if (!get_data) {
1360 bufferlist bl;
1361 send_response_data(bl, 0, 0);
1362 return 0;
1363 }
1364
1365 r = iterate_user_manifest_parts(s->cct, store, ofs, end,
31f18b77 1366 pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
7c673cae
FG
1367 nullptr, nullptr, nullptr,
1368 get_obj_user_manifest_iterate_cb, (void *)this);
1369 if (r < 0) {
1370 return r;
1371 }
1372
1373 if (!total_len) {
1374 bufferlist bl;
1375 send_response_data(bl, 0, 0);
1376 }
1377
1378 return 0;
1379}
1380
1381int RGWGetObj::handle_slo_manifest(bufferlist& bl)
1382{
1383 RGWSLOInfo slo_info;
1384 bufferlist::iterator bliter = bl.begin();
1385 try {
1386 ::decode(slo_info, bliter);
1387 } catch (buffer::error& err) {
1388 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
1389 return -EIO;
1390 }
1391 ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
1392
31f18b77
FG
1393 vector<RGWAccessControlPolicy> allocated_acls;
1394 map<string, pair<RGWAccessControlPolicy *, optional<Policy>>> policies;
7c673cae
FG
1395 map<string, rgw_bucket> buckets;
1396
1397 map<uint64_t, rgw_slo_part> slo_parts;
1398
1399 MD5 etag_sum;
1400 total_len = 0;
1401
1402 for (const auto& entry : slo_info.entries) {
1403 const string& path = entry.path;
1404
1405 /* If the path starts with slashes, strip them all. */
1406 const size_t pos_init = path.find_first_not_of('/');
1407 /* According to the documentation of std::string::find following check
1408 * is not necessary as we should get the std::string::npos propagation
1409 * here. This might be true with the accuracy to implementation's bugs.
1410 * See following question on SO:
1411 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
1412 */
1413 if (pos_init == string::npos) {
1414 return -EINVAL;
1415 }
1416
1417 const size_t pos_sep = path.find('/', pos_init);
1418 if (pos_sep == string::npos) {
1419 return -EINVAL;
1420 }
1421
1422 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
1423 string obj_name = path.substr(pos_sep + 1);
1424
1425 rgw_bucket bucket;
31f18b77
FG
1426 RGWAccessControlPolicy *bucket_acl;
1427 Policy* bucket_policy;
7c673cae
FG
1428
1429 if (bucket_name.compare(s->bucket.name) != 0) {
1430 const auto& piter = policies.find(bucket_name);
1431 if (piter != policies.end()) {
31f18b77
FG
1432 bucket_acl = piter->second.first;
1433 bucket_policy = piter->second.second.get_ptr();
1434 bucket = buckets[bucket_name];
7c673cae 1435 } else {
31f18b77
FG
1436 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
1437 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
7c673cae
FG
1438
1439 RGWBucketInfo bucket_info;
1440 map<string, bufferlist> bucket_attrs;
1441 RGWObjectCtx obj_ctx(store);
1442 int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
1443 bucket_name, bucket_info, nullptr,
1444 &bucket_attrs);
1445 if (r < 0) {
1446 ldout(s->cct, 0) << "could not get bucket info for bucket="
1447 << bucket_name << dendl;
1448 return r;
1449 }
1450 bucket = bucket_info.bucket;
31f18b77
FG
1451 bucket_acl = &_bucket_acl;
1452 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
7c673cae
FG
1453 bucket);
1454 if (r < 0) {
31f18b77 1455 ldout(s->cct, 0) << "failed to read bucket ACL for bucket "
7c673cae
FG
1456 << bucket << dendl;
1457 return r;
31f18b77
FG
1458 }
1459 auto _bucket_policy = get_iam_policy_from_attr(
1460 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
1461 bucket_policy = _bucket_policy.get_ptr();
1462 buckets[bucket_name] = bucket;
1463 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
7c673cae
FG
1464 }
1465 } else {
1466 bucket = s->bucket;
31f18b77
FG
1467 bucket_acl = s->bucket_acl;
1468 bucket_policy = s->iam_policy.get_ptr();
7c673cae
FG
1469 }
1470
1471 rgw_slo_part part;
31f18b77 1472 part.bucket_acl = bucket_acl;
7c673cae
FG
1473 part.bucket_policy = bucket_policy;
1474 part.bucket = bucket;
1475 part.obj_name = obj_name;
1476 part.size = entry.size_bytes;
1477 part.etag = entry.etag;
1478 ldout(s->cct, 20) << "slo_part: ofs=" << ofs
1479 << " bucket=" << part.bucket
1480 << " obj=" << part.obj_name
1481 << " size=" << part.size
1482 << " etag=" << part.etag
1483 << dendl;
1484
1485 etag_sum.Update((const byte *)entry.etag.c_str(),
1486 entry.etag.length());
1487
1488 slo_parts[total_len] = part;
1489 total_len += part.size;
1490 }
1491
1492 complete_etag(etag_sum, &lo_etag);
1493
1494 s->obj_size = slo_info.total_size;
1495 ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl;
1496
31f18b77
FG
1497 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
1498 if (r < 0) {
1499 return r;
7c673cae
FG
1500 }
1501
1502 total_len = end - ofs + 1;
1503
31f18b77 1504 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
7c673cae
FG
1505 get_obj_user_manifest_iterate_cb, (void *)this);
1506 if (r < 0) {
1507 return r;
1508 }
1509
1510 return 0;
1511}
1512
1513int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
1514{
1515 /* garbage collection related handling */
1516 utime_t start_time = ceph_clock_now();
1517 if (start_time > gc_invalidate_time) {
1518 int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
1519 if (r < 0) {
1520 dout(0) << "WARNING: could not defer gc entry for obj" << dendl;
1521 }
1522 gc_invalidate_time = start_time;
1523 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1524 }
1525 return send_response_data(bl, bl_ofs, bl_len);
1526}
1527
1528bool RGWGetObj::prefetch_data()
1529{
1530 /* HEAD request, stop prefetch*/
1531 if (!get_data) {
1532 return false;
1533 }
1534
1535 bool prefetch_first_chunk = true;
1536 range_str = s->info.env->get("HTTP_RANGE");
1537
1538 if(range_str) {
1539 int r = parse_range(range_str, ofs, end, &partial_content);
1540 /* error on parsing the range, stop prefetch and will fail in execte() */
1541 if (r < 0) {
1542 range_parsed = false;
1543 return false;
1544 } else {
1545 range_parsed = true;
1546 }
1547 /* range get goes to shadown objects, stop prefetch */
1548 if (ofs >= s->cct->_conf->rgw_max_chunk_size) {
1549 prefetch_first_chunk = false;
1550 }
1551 }
1552
1553 return get_data && prefetch_first_chunk;
1554}
1555void RGWGetObj::pre_exec()
1556{
1557 rgw_bucket_object_pre_exec(s);
1558}
1559
1560static bool object_is_expired(map<string, bufferlist>& attrs) {
1561 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
1562 if (iter != attrs.end()) {
1563 utime_t delete_at;
1564 try {
1565 ::decode(delete_at, iter->second);
1566 } catch (buffer::error& err) {
1567 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
1568 return false;
1569 }
1570
1571 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
1572 return true;
1573 }
1574 }
1575
1576 return false;
1577}
1578
1579void RGWGetObj::execute()
1580{
1581 utime_t start_time = s->time;
1582 bufferlist bl;
1583 gc_invalidate_time = ceph_clock_now();
1584 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1585
1586 bool need_decompress;
1587 int64_t ofs_x, end_x;
1588
1589 RGWGetObj_CB cb(this);
1590 RGWGetDataCB* filter = (RGWGetDataCB*)&cb;
1591 boost::optional<RGWGetObj_Decompress> decompress;
1592 std::unique_ptr<RGWGetDataCB> decrypt;
1593 map<string, bufferlist>::iterator attr_iter;
1594
1595 perfcounter->inc(l_rgw_get);
1596
1597 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
1598 RGWRados::Object::Read read_op(&op_target);
1599
1600 op_ret = get_params();
1601 if (op_ret < 0)
1602 goto done_err;
1603
1604 op_ret = init_common();
1605 if (op_ret < 0)
1606 goto done_err;
1607
1608 read_op.conds.mod_ptr = mod_ptr;
1609 read_op.conds.unmod_ptr = unmod_ptr;
1610 read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
1611 read_op.conds.mod_zone_id = mod_zone_id;
1612 read_op.conds.mod_pg_ver = mod_pg_ver;
1613 read_op.conds.if_match = if_match;
1614 read_op.conds.if_nomatch = if_nomatch;
1615 read_op.params.attrs = &attrs;
1616 read_op.params.lastmod = &lastmod;
1617 read_op.params.obj_size = &s->obj_size;
7c673cae
FG
1618
1619 op_ret = read_op.prepare();
1620 if (op_ret < 0)
1621 goto done_err;
31f18b77 1622 version_id = read_op.state.obj.key.instance;
7c673cae
FG
1623
1624 /* STAT ops don't need data, and do no i/o */
1625 if (get_type() == RGW_OP_STAT_OBJ) {
1626 return;
1627 }
1628
1629 /* start gettorrent */
1630 if (torrent.get_flag())
1631 {
1632 torrent.init(s, store);
1633 torrent.get_torrent_file(op_ret, read_op, total_len, bl, obj);
1634 if (op_ret < 0)
1635 {
1636 ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
1637 << dendl;
1638 goto done_err;
1639 }
1640 op_ret = send_response_data(bl, 0, total_len);
1641 if (op_ret < 0)
1642 {
1643 ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret
1644 << dendl;
1645 goto done_err;
1646 }
1647 return;
1648 }
1649 /* end gettorrent */
1650
1651 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
1652 if (op_ret < 0) {
1653 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
1654 goto done_err;
1655 }
1656 if (need_decompress) {
1657 s->obj_size = cs_info.orig_size;
1658 decompress.emplace(s->cct, &cs_info, partial_content, filter);
1659 filter = &*decompress;
1660 }
7c673cae
FG
1661
1662 attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
1663 if (attr_iter != attrs.end() && !skip_manifest) {
1664 op_ret = handle_user_manifest(attr_iter->second.c_str());
1665 if (op_ret < 0) {
1666 ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret="
1667 << op_ret << dendl;
1668 goto done_err;
1669 }
1670 return;
1671 }
1672
1673 attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
1674 if (attr_iter != attrs.end() && !skip_manifest) {
1675 is_slo = true;
1676 op_ret = handle_slo_manifest(attr_iter->second);
1677 if (op_ret < 0) {
1678 ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
1679 << dendl;
1680 goto done_err;
1681 }
1682 return;
1683 }
1684
31f18b77
FG
1685 // for range requests with obj size 0
1686 if (range_str && !(s->obj_size)) {
1687 total_len = 0;
1688 op_ret = -ERANGE;
1689 goto done_err;
1690 }
1691
1692 op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
1693 if (op_ret < 0)
1694 goto done_err;
1695 total_len = (ofs <= end ? end + 1 - ofs : 0);
1696
7c673cae
FG
1697 /* Check whether the object has expired. Swift API documentation
1698 * stands that we should return 404 Not Found in such case. */
1699 if (need_object_expiration() && object_is_expired(attrs)) {
1700 op_ret = -ENOENT;
1701 goto done_err;
1702 }
1703
1704 start = ofs;
1705
1706 /* STAT ops don't need data, and do no i/o */
1707 if (get_type() == RGW_OP_STAT_OBJ) {
1708 return;
1709 }
1710
1711 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
1712 op_ret = this->get_decrypt_filter(&decrypt, filter,
1713 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
1714 if (decrypt != nullptr) {
1715 filter = decrypt.get();
1716 }
1717 if (op_ret < 0) {
1718 goto done_err;
1719 }
1720
1721 if (!get_data || ofs > end) {
1722 send_response_data(bl, 0, 0);
1723 return;
1724 }
1725
1726 perfcounter->inc(l_rgw_get_b, end - ofs);
1727
1728 ofs_x = ofs;
1729 end_x = end;
1730 filter->fixup_range(ofs_x, end_x);
1731 op_ret = read_op.iterate(ofs_x, end_x, filter);
1732
1733 if (op_ret >= 0)
1734 op_ret = filter->flush();
1735
1736 perfcounter->tinc(l_rgw_get_lat,
1737 (ceph_clock_now() - start_time));
1738 if (op_ret < 0) {
1739 goto done_err;
1740 }
1741
1742 op_ret = send_response_data(bl, 0, 0);
1743 if (op_ret < 0) {
1744 goto done_err;
1745 }
1746 return;
1747
1748done_err:
1749 send_response_data_error();
1750}
1751
1752int RGWGetObj::init_common()
1753{
1754 if (range_str) {
1755 /* range parsed error when prefetch*/
1756 if (!range_parsed) {
1757 int r = parse_range(range_str, ofs, end, &partial_content);
1758 if (r < 0)
1759 return r;
1760 }
1761 }
1762 if (if_mod) {
1763 if (parse_time(if_mod, &mod_time) < 0)
1764 return -EINVAL;
1765 mod_ptr = &mod_time;
1766 }
1767
1768 if (if_unmod) {
1769 if (parse_time(if_unmod, &unmod_time) < 0)
1770 return -EINVAL;
1771 unmod_ptr = &unmod_time;
1772 }
1773
1774 return 0;
1775}
1776
1777int RGWListBuckets::verify_permission()
1778{
1779 if (!verify_user_permission(s, RGW_PERM_READ)) {
1780 return -EACCES;
1781 }
1782
1783 return 0;
1784}
1785
1786int RGWGetUsage::verify_permission()
1787{
1788 if (s->auth.identity->is_anonymous()) {
1789 return -EACCES;
1790 }
1791
1792 return 0;
1793}
1794
1795void RGWListBuckets::execute()
1796{
1797 bool done;
1798 bool started = false;
1799 uint64_t total_count = 0;
1800
1801 uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
1802
1803 op_ret = get_params();
1804 if (op_ret < 0) {
1805 goto send_end;
1806 }
1807
1808 if (supports_account_metadata()) {
1809 op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
1810 if (op_ret < 0) {
1811 goto send_end;
1812 }
1813 }
1814
1815 is_truncated = false;
1816 do {
1817 RGWUserBuckets buckets;
1818 uint64_t read_count;
1819 if (limit >= 0) {
1820 read_count = min(limit - total_count, (uint64_t)max_buckets);
1821 } else {
1822 read_count = max_buckets;
1823 }
1824
1825 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
1826 marker, end_marker, read_count,
1827 should_get_stats(), &is_truncated,
1828 get_default_max());
1829 if (op_ret < 0) {
1830 /* hmm.. something wrong here.. the user was authenticated, so it
1831 should exist */
1832 ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
1833 << s->user->user_id << dendl;
1834 break;
1835 }
1836 map<string, RGWBucketEnt>& m = buckets.get_buckets();
1837 map<string, RGWBucketEnt>::iterator iter;
1838 for (iter = m.begin(); iter != m.end(); ++iter) {
1839 RGWBucketEnt& bucket = iter->second;
1840 buckets_size += bucket.size;
1841 buckets_size_rounded += bucket.size_rounded;
1842 buckets_objcount += bucket.count;
1843 }
1844 buckets_count += m.size();
1845 total_count += m.size();
1846
1847 done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));
1848
1849 if (!started) {
1850 send_response_begin(buckets.count() > 0);
1851 started = true;
1852 }
1853
1854 if (!m.empty()) {
1855 send_response_data(buckets);
1856
1857 map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
1858 marker = riter->first;
1859 }
1860 } while (is_truncated && !done);
1861
1862send_end:
1863 if (!started) {
1864 send_response_begin(false);
1865 }
1866 send_response_end();
1867}
1868
1869void RGWGetUsage::execute()
1870{
1871 uint64_t start_epoch = 0;
1872 uint64_t end_epoch = (uint64_t)-1;
1873 op_ret = get_params();
1874 if (op_ret < 0)
1875 return;
1876
1877 if (!start_date.empty()) {
1878 op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
1879 if (op_ret < 0) {
1880 ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl;
1881 return;
1882 }
1883 }
1884
1885 if (!end_date.empty()) {
1886 op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
1887 if (op_ret < 0) {
1888 ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl;
1889 return;
1890 }
1891 }
1892
1893 uint32_t max_entries = 1000;
1894
1895 bool is_truncated = true;
1896
1897 RGWUsageIter usage_iter;
1898
1899 while (is_truncated) {
1900 op_ret = store->read_usage(s->user->user_id, start_epoch, end_epoch, max_entries,
1901 &is_truncated, usage_iter, usage);
1902
1903 if (op_ret == -ENOENT) {
1904 op_ret = 0;
1905 is_truncated = false;
1906 }
1907
1908 if (op_ret < 0) {
1909 return;
1910 }
1911 }
1912
1913 op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
1914 if (op_ret < 0) {
1915 ldout(store->ctx(), 0) << "ERROR: failed to sync user stats: " << dendl;
1916 return ;
1917 }
1918
1919 string user_str = s->user->user_id.to_str();
1920 op_ret = store->cls_user_get_header(user_str, &header);
1921 if (op_ret < 0) {
1922 ldout(store->ctx(), 0) << "ERROR: can't read user header: " << dendl;
1923 return ;
1924 }
1925
1926 return;
1927}
1928
1929int RGWStatAccount::verify_permission()
1930{
1931 if (!verify_user_permission(s, RGW_PERM_READ)) {
1932 return -EACCES;
1933 }
1934
1935 return 0;
1936}
1937
1938void RGWStatAccount::execute()
1939{
1940 string marker;
1941 bool is_truncated = false;
1942 uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
1943
1944 do {
1945 RGWUserBuckets buckets;
1946
1947 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker,
1948 string(), max_buckets, true, &is_truncated);
1949 if (op_ret < 0) {
1950 /* hmm.. something wrong here.. the user was authenticated, so it
1951 should exist */
1952 ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
1953 << s->user->user_id << dendl;
1954 break;
1955 } else {
1956 map<string, RGWBucketEnt>& m = buckets.get_buckets();
1957 map<string, RGWBucketEnt>::iterator iter;
1958 for (iter = m.begin(); iter != m.end(); ++iter) {
1959 RGWBucketEnt& bucket = iter->second;
1960 buckets_size += bucket.size;
1961 buckets_size_rounded += bucket.size_rounded;
1962 buckets_objcount += bucket.count;
1963
1964 marker = iter->first;
1965 }
1966 buckets_count += m.size();
1967
1968 }
1969 } while (is_truncated);
1970}
1971
1972int RGWGetBucketVersioning::verify_permission()
1973{
1974 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
1975 return -EACCES;
1976 }
1977
1978 return 0;
1979}
1980
1981void RGWGetBucketVersioning::pre_exec()
1982{
1983 rgw_bucket_object_pre_exec(s);
1984}
1985
1986void RGWGetBucketVersioning::execute()
1987{
1988 versioned = s->bucket_info.versioned();
1989 versioning_enabled = s->bucket_info.versioning_enabled();
1990}
1991
1992int RGWSetBucketVersioning::verify_permission()
1993{
1994 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
1995 return -EACCES;
1996 }
1997
1998 return 0;
1999}
2000
2001void RGWSetBucketVersioning::pre_exec()
2002{
2003 rgw_bucket_object_pre_exec(s);
2004}
2005
2006void RGWSetBucketVersioning::execute()
2007{
2008 op_ret = get_params();
2009 if (op_ret < 0)
2010 return;
2011
2012 if (!store->is_meta_master()) {
2013 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2014 if (op_ret < 0) {
2015 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
2016 return;
2017 }
2018 }
2019
2020 if (enable_versioning) {
2021 s->bucket_info.flags |= BUCKET_VERSIONED;
2022 s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
2023 } else {
2024 s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
2025 }
2026
2027 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
2028 &s->bucket_attrs);
2029 if (op_ret < 0) {
2030 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2031 << " returned err=" << op_ret << dendl;
2032 return;
2033 }
2034}
2035
2036int RGWGetBucketWebsite::verify_permission()
2037{
2038 if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
2039 return -EACCES;
2040
2041 return 0;
2042}
2043
2044void RGWGetBucketWebsite::pre_exec()
2045{
2046 rgw_bucket_object_pre_exec(s);
2047}
2048
2049void RGWGetBucketWebsite::execute()
2050{
2051 if (!s->bucket_info.has_website) {
2052 op_ret = -ENOENT;
2053 }
2054}
2055
2056int RGWSetBucketWebsite::verify_permission()
2057{
2058 if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
2059 return -EACCES;
2060
2061 return 0;
2062}
2063
2064void RGWSetBucketWebsite::pre_exec()
2065{
2066 rgw_bucket_object_pre_exec(s);
2067}
2068
2069void RGWSetBucketWebsite::execute()
2070{
2071 op_ret = get_params();
2072
2073 if (op_ret < 0)
2074 return;
2075
31f18b77
FG
2076 if (!store->is_meta_master()) {
2077 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2078 if (op_ret < 0) {
2079 ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
2080 return;
2081 }
2082 }
2083
7c673cae
FG
2084 s->bucket_info.has_website = true;
2085 s->bucket_info.website_conf = website_conf;
2086
2087 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
2088 if (op_ret < 0) {
2089 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
2090 return;
2091 }
2092}
2093
2094int RGWDeleteBucketWebsite::verify_permission()
2095{
2096 if (s->user->user_id.compare(s->bucket_owner.get_id()) != 0)
2097 return -EACCES;
2098
2099 return 0;
2100}
2101
2102void RGWDeleteBucketWebsite::pre_exec()
2103{
2104 rgw_bucket_object_pre_exec(s);
2105}
2106
2107void RGWDeleteBucketWebsite::execute()
2108{
2109 s->bucket_info.has_website = false;
2110 s->bucket_info.website_conf = RGWBucketWebsiteConf();
2111
2112 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
2113 if (op_ret < 0) {
2114 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
2115 return;
2116 }
2117}
2118
2119int RGWStatBucket::verify_permission()
2120{
31f18b77
FG
2121 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2122 if (!verify_bucket_permission(s, rgw::IAM::s3ListBucket)) {
7c673cae
FG
2123 return -EACCES;
2124 }
2125
2126 return 0;
2127}
2128
2129void RGWStatBucket::pre_exec()
2130{
2131 rgw_bucket_object_pre_exec(s);
2132}
2133
2134void RGWStatBucket::execute()
2135{
2136 if (!s->bucket_exists) {
2137 op_ret = -ERR_NO_SUCH_BUCKET;
2138 return;
2139 }
2140
2141 RGWUserBuckets buckets;
2142 bucket.bucket = s->bucket;
2143 buckets.add(bucket);
2144 map<string, RGWBucketEnt>& m = buckets.get_buckets();
2145 op_ret = store->update_containers_stats(m);
2146 if (! op_ret)
2147 op_ret = -EEXIST;
2148 if (op_ret > 0) {
2149 op_ret = 0;
2150 map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
2151 if (iter != m.end()) {
2152 bucket = iter->second;
2153 } else {
2154 op_ret = -EINVAL;
2155 }
2156 }
2157}
2158
2159int RGWListBucket::verify_permission()
2160{
31f18b77
FG
2161 op_ret = get_params();
2162 if (op_ret < 0) {
2163 return op_ret;
2164 }
2165
2166 if (!verify_bucket_permission(s,
2167 list_versions ?
2168 rgw::IAM::s3ListBucketVersions :
2169 rgw::IAM::s3ListBucket)) {
7c673cae
FG
2170 return -EACCES;
2171 }
2172
2173 return 0;
2174}
2175
2176int RGWListBucket::parse_max_keys()
2177{
2178 if (!max_keys.empty()) {
2179 char *endptr;
2180 max = strtol(max_keys.c_str(), &endptr, 10);
2181 if (endptr) {
2182 while (*endptr && isspace(*endptr)) // ignore white space
2183 endptr++;
2184 if (*endptr) {
2185 return -EINVAL;
2186 }
2187 }
2188 } else {
2189 max = default_max;
2190 }
2191
2192 return 0;
2193}
2194
void RGWListBucket::pre_exec()
{
  /* Common bucket/object pre-execution hook (shared by all bucket ops). */
  rgw_bucket_object_pre_exec(s);
}
2199
2200void RGWListBucket::execute()
2201{
2202 if (!s->bucket_exists) {
2203 op_ret = -ERR_NO_SUCH_BUCKET;
2204 return;
2205 }
2206
7c673cae
FG
2207 if (need_container_stats()) {
2208 map<string, RGWBucketEnt> m;
2209 m[s->bucket.name] = RGWBucketEnt();
2210 m.begin()->second.bucket = s->bucket;
2211 op_ret = store->update_containers_stats(m);
2212 if (op_ret > 0) {
2213 bucket = m.begin()->second;
2214 }
2215 }
2216
2217 RGWRados::Bucket target(store, s->bucket_info);
2218 if (shard_id >= 0) {
2219 target.set_shard_id(shard_id);
2220 }
2221 RGWRados::Bucket::List list_op(&target);
2222
2223 list_op.params.prefix = prefix;
2224 list_op.params.delim = delimiter;
2225 list_op.params.marker = marker;
2226 list_op.params.end_marker = end_marker;
2227 list_op.params.list_versions = list_versions;
2228
2229 op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
31f18b77 2230 if (op_ret >= 0) {
7c673cae
FG
2231 next_marker = list_op.get_next_marker();
2232 }
2233}
2234
2235int RGWGetBucketLogging::verify_permission()
2236{
2237 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
2238 return -EACCES;
2239 }
2240
2241 return 0;
2242}
2243
2244int RGWGetBucketLocation::verify_permission()
2245{
2246 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
2247 return -EACCES;
2248 }
2249
2250 return 0;
2251}
2252
2253int RGWCreateBucket::verify_permission()
2254{
2255 /* This check is mostly needed for S3 that doesn't support account ACL.
2256 * Swift doesn't allow to delegate any permission to an anonymous user,
2257 * so it will become an early exit in such case. */
2258 if (s->auth.identity->is_anonymous()) {
2259 return -EACCES;
2260 }
2261
2262 if (!verify_user_permission(s, RGW_PERM_WRITE)) {
2263 return -EACCES;
2264 }
2265
2266 if (s->user->user_id.tenant != s->bucket_tenant) {
2267 ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
2268 << " (user_id.tenant=" << s->user->user_id.tenant
2269 << " requested=" << s->bucket_tenant << ")"
2270 << dendl;
2271 return -EACCES;
2272 }
2273 if (s->user->max_buckets < 0) {
2274 return -EPERM;
2275 }
2276
2277 if (s->user->max_buckets) {
2278 RGWUserBuckets buckets;
2279 string marker;
2280 bool is_truncated = false;
2281 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
2282 marker, string(), s->user->max_buckets,
2283 false, &is_truncated);
2284 if (op_ret < 0) {
2285 return op_ret;
2286 }
2287
2288 if ((int)buckets.count() >= s->user->max_buckets) {
2289 return -ERR_TOO_MANY_BUCKETS;
2290 }
2291 }
2292
2293 return 0;
2294}
2295
2296static int forward_request_to_master(struct req_state *s, obj_version *objv,
2297 RGWRados *store, bufferlist& in_data,
2298 JSONParser *jp, req_info *forward_info)
2299{
2300 if (!store->rest_master_conn) {
2301 ldout(s->cct, 0) << "rest connection is invalid" << dendl;
2302 return -EINVAL;
2303 }
2304 ldout(s->cct, 0) << "sending request to master zonegroup" << dendl;
2305 bufferlist response;
2306 string uid_str = s->user->user_id.to_str();
2307#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
2308 int ret = store->rest_master_conn->forward(uid_str, (forward_info ? *forward_info : s->info),
2309 objv, MAX_REST_RESPONSE, &in_data, &response);
2310 if (ret < 0)
2311 return ret;
2312
2313 ldout(s->cct, 20) << "response: " << response.c_str() << dendl;
2314 if (jp && !jp->parse(response.c_str(), response.length())) {
2315 ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl;
2316 return -EINVAL;
2317 }
2318
2319 return 0;
2320}
2321
void RGWCreateBucket::pre_exec()
{
  /* Common bucket/object pre-execution hook (shared by all bucket ops). */
  rgw_bucket_object_pre_exec(s);
}
2326
2327static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2328 map<string, bufferlist>& out_attrs,
2329 map<string, bufferlist>& out_rmattrs)
2330{
2331 for (const auto& kv : orig_attrs) {
2332 const string& name = kv.first;
2333
2334 /* Check if the attr is user-defined metadata item. */
2335 if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
2336 RGW_ATTR_META_PREFIX) == 0) {
2337 /* For the objects all existing meta attrs have to be removed. */
2338 out_rmattrs[name] = kv.second;
2339 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2340 out_attrs[name] = kv.second;
2341 }
2342 }
2343}
2344
2345/* Fuse resource metadata basing on original attributes in @orig_attrs, set
2346 * of _custom_ attribute names to remove in @rmattr_names and attributes in
2347 * @out_attrs. Place results in @out_attrs.
2348 *
2349 * NOTE: it's supposed that all special attrs already present in @out_attrs
2350 * will be preserved without any change. Special attributes are those which
2351 * names start with RGW_ATTR_META_PREFIX. They're complement to custom ones
2352 * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta and so on. */
2353static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2354 const set<string>& rmattr_names,
2355 map<string, bufferlist>& out_attrs)
2356{
2357 for (const auto& kv : orig_attrs) {
2358 const string& name = kv.first;
2359
2360 /* Check if the attr is user-defined metadata item. */
2361 if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
2362 RGW_ATTR_META_PREFIX) == 0) {
2363 /* For the buckets all existing meta attrs are preserved,
2364 except those that are listed in rmattr_names. */
2365 if (rmattr_names.find(name) != std::end(rmattr_names)) {
2366 const auto aiter = out_attrs.find(name);
2367
2368 if (aiter != std::end(out_attrs)) {
2369 out_attrs.erase(aiter);
2370 }
2371 } else {
2372 /* emplace() won't alter the map if the key is already present.
2373 * This behaviour is fully intensional here. */
2374 out_attrs.emplace(kv);
2375 }
2376 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2377 out_attrs[name] = kv.second;
2378 }
2379 }
2380}
2381
2382
2383static void populate_with_generic_attrs(const req_state * const s,
2384 map<string, bufferlist>& out_attrs)
2385{
2386 for (const auto& kv : s->generic_attrs) {
2387 bufferlist& attrbl = out_attrs[kv.first];
2388 const string& val = kv.second;
2389 attrbl.clear();
2390 attrbl.append(val.c_str(), val.size() + 1);
2391 }
2392}
2393
2394
2395static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
2396 const std::set<std::string>& rmattr_names,
2397 RGWQuotaInfo& quota,
2398 bool * quota_extracted = nullptr)
2399{
2400 bool extracted = false;
2401
2402 /* Put new limit on max objects. */
2403 auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
2404 std::string err;
2405 if (std::end(add_attrs) != iter) {
2406 quota.max_objects =
2407 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
2408 if (!err.empty()) {
2409 return -EINVAL;
2410 }
2411 add_attrs.erase(iter);
2412 extracted = true;
2413 }
2414
2415 /* Put new limit on bucket (container) size. */
2416 iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
2417 if (iter != add_attrs.end()) {
2418 quota.max_size =
2419 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
2420 if (!err.empty()) {
2421 return -EINVAL;
2422 }
2423 add_attrs.erase(iter);
2424 extracted = true;
2425 }
2426
2427 for (const auto& name : rmattr_names) {
2428 /* Remove limit on max objects. */
2429 if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
2430 quota.max_objects = -1;
2431 extracted = true;
2432 }
2433
2434 /* Remove limit on max bucket size. */
2435 if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
2436 quota.max_size = -1;
2437 extracted = true;
2438 }
2439 }
2440
2441 /* Swift requries checking on raw usage instead of the 4 KiB rounded one. */
2442 quota.check_on_raw = true;
2443 quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
2444
2445 if (quota_extracted) {
2446 *quota_extracted = extracted;
2447 }
2448
2449 return 0;
2450}
2451
2452
2453static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
2454 const std::set<std::string>& rmattr_names,
2455 RGWBucketWebsiteConf& ws_conf)
2456{
2457 std::string lstval;
2458
2459 /* Let's define a mapping between each custom attribute and the memory where
2460 * attribute's value should be stored. The memory location is expressed by
2461 * a non-const reference. */
2462 const auto mapping = {
2463 std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
2464 std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
2465 std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
2466 std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
2467 std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
2468 };
2469
2470 for (const auto& kv : mapping) {
2471 const char * const key = kv.first;
2472 auto& target = kv.second;
2473
2474 auto iter = add_attrs.find(key);
2475
2476 if (std::end(add_attrs) != iter) {
2477 /* The "target" is a reference to ws_conf. */
2478 target = iter->second.c_str();
2479 add_attrs.erase(iter);
2480 }
2481
2482 if (rmattr_names.count(key)) {
2483 target = std::string();
2484 }
2485 }
2486
2487 if (! lstval.empty()) {
2488 ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
2489 }
2490}
2491
2492
void RGWCreateBucket::execute()
{
  RGWAccessControlPolicy old_policy(s->cct);
  buffer::list aclbl;
  buffer::list corsbl;
  bool existed;
  string bucket_name;
  rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
  rgw_raw_obj obj(store->get_zone_params().domain_root, bucket_name);
  obj_version objv, *pobjv = NULL;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  /* Reject a location-constraint that no zonegroup advertises. */
  if (!location_constraint.empty() &&
      !store->has_zonegroup_api(location_constraint)) {
      ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
                       << " can't be found." << dendl;
      op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
      s->err.message = "The specified location-constraint is not valid";
      return;
  }

  /* On a non-master zonegroup the constraint must match this zonegroup's
   * api_name. */
  if (!store->get_zonegroup().is_master_zonegroup() &&
      store->get_zonegroup().api_name != location_constraint) {
    ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
                     << " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")"
                     << dendl;
    op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
    s->err.message = "The specified location-constraint is not valid";
    return;
  }

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                  s->bucket_info, NULL, &s->bucket_attrs);
  if (op_ret < 0 && op_ret != -ENOENT)
    return;
  s->bucket_exists = (op_ret != -ENOENT);

  s->bucket_owner.set_id(s->user->user_id);
  s->bucket_owner.set_name(s->user->display_name);
  if (s->bucket_exists) {
    /* The bucket already exists: only proceed if it is owned by the
     * requesting user; otherwise report a name conflict. */
    int r = get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
                                        s->bucket_attrs, &old_policy);
    if (r >= 0)  {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket;
  uint32_t *pmaster_num_shards;
  real_time creation_time;

  /* Non-master zones create the bucket via the metadata master and adopt
   * the master's bucket id, version, creation time and shard count. */
  if (!store->is_meta_master()) {
    JSONParser jp;
    op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
    if (op_ret < 0) {
      return;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);
    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl;
    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = NULL;
    pmaster_num_shards = NULL;
  }

  string zonegroup_id;

  /* System requests may specify the target zonegroup explicitly. */
  if (s->system_request) {
    zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
    if (zonegroup_id.empty()) {
      zonegroup_id = store->get_zonegroup().get_id();
    }
  } else {
    zonegroup_id = store->get_zonegroup().get_id();
  }

  if (s->bucket_exists) {
    /* Re-creating an existing bucket with a different placement rule is a
     * conflict. */
    string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user), zonegroup_id,
                                            placement_rule,
                                            &selected_placement_rule, nullptr);
    if (selected_placement_rule != s->bucket_info.placement_rule) {
      op_ret = -EEXIST;
      return;
    }
  }

  /* Encode special metadata first as we're using std::map::emplace under
   * the hood. This method will add the new items only if the map doesn't
   * contain such keys yet. */
  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  if (has_cors) {
    cors_config.encode(corsbl);
    emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
  }

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;
  if (need_metadata_upload()) {
    /* It's supposed that following functions WILL NOT change any special
     * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
    rgw_get_request_metadata(s->cct, s->info, attrs, false);
    prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
    populate_with_generic_attrs(s, attrs);

    op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
    if (op_ret < 0) {
      return;
    } else {
      pquota_info = &quota_info;
    }

    /* Web site of Swift API. */
    filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
    s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
  }

  s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  s->bucket.name = s->bucket_name;

  /* Handle updates of the metadata for Swift's object versioning. */
  if (swift_ver_location) {
    s->bucket_info.swift_ver_location = *swift_ver_location;
    s->bucket_info.swift_versioning = (! swift_ver_location->empty());
  }

  op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
                                placement_rule, s->bucket_info.swift_ver_location,
                                pquota_info, attrs,
                                info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;

  if (op_ret && op_ret != -EEXIST)
    return;

  existed = (op_ret == -EEXIST);

  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      return;
    }
    s->bucket = info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket,
                           info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
                               s->bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    op_ret = -ERR_BUCKET_EXISTS;
  }

  if (need_metadata_upload() && existed) {
    /* OK, it looks we lost race with another request. As it's required to
     * handle metadata fusion and upload, the whole operation becomes very
     * similar in nature to PutMetadataBucket. However, as the attrs may
     * changed in the meantime, we have to refresh. */
    short tries = 0;
    do {
      RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
      RGWBucketInfo binfo;
      map<string, bufferlist> battrs;

      op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                      binfo, nullptr, &battrs);
      if (op_ret < 0) {
        return;
      } else if (binfo.owner.compare(s->user->user_id) != 0) {
        /* New bucket doesn't belong to the account we're operating on. */
        op_ret = -EEXIST;
        return;
      } else {
        s->bucket_info = binfo;
        s->bucket_attrs = battrs;
      }

      attrs.clear();

      rgw_get_request_metadata(s->cct, s->info, attrs, false);
      prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
      populate_with_generic_attrs(s, attrs);
      op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
      if (op_ret < 0) {
        return;
      }

      /* Handle updates of the metadata for Swift's object versioning. */
      if (swift_ver_location) {
        s->bucket_info.swift_ver_location = *swift_ver_location;
        s->bucket_info.swift_versioning = (! swift_ver_location->empty());
      }

      /* Web site of Swift API. */
      filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
      s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

      /* This will also set the quota on the bucket. */
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                    &s->bucket_info.objv_tracker);
    } while (op_ret == -ECANCELED && tries++ < 20);

    /* Restore the proper return code. */
    if (op_ret >= 0) {
      op_ret = -ERR_BUCKET_EXISTS;
    }
  }
}
2738
2739int RGWDeleteBucket::verify_permission()
2740{
31f18b77 2741 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucket)) {
7c673cae
FG
2742 return -EACCES;
2743 }
2744
2745 return 0;
2746}
2747
void RGWDeleteBucket::pre_exec()
{
  /* Common bucket/object pre-execution hook (shared by all bucket ops). */
  rgw_bucket_object_pre_exec(s);
}
2752
void RGWDeleteBucket::execute()
{
  op_ret = -EINVAL;

  if (s->bucket_name.empty())
    return;

  if (!s->bucket_exists) {
    ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }
  RGWObjVersionTracker ot;
  ot.read_version = s->bucket_info.ep_objv;

  /* System requests may pin the exact entry-point version (tag + ver) via
   * request args — presumably to guard against racing metadata updates
   * (NOTE(review): confirm against the system caller). */
  if (s->system_request) {
    string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
    string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
    if (!tag.empty()) {
      ot.read_version.tag = tag;
      uint64_t ver;
      string err;
      ver = strict_strtol(ver_str.c_str(), 10, &err);
      if (!err.empty()) {
        ldout(s->cct, 0) << "failed to parse ver param" << dendl;
        op_ret = -EINVAL;
        return;
      }
      ot.read_version.ver = ver;
    }
  }

  /* Best-effort stats flush before deletion; a failure is only logged. */
  op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
  if ( op_ret < 0) {
     ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
  }

  /* Refuse to delete a non-empty bucket. */
  op_ret = store->check_bucket_empty(s->bucket_info);
  if (op_ret < 0) {
    return;
  }

  /* Non-master zones forward the delete to the metadata master first. */
  if (!store->is_meta_master()) {
    bufferlist in_data;
    op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       NULL);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        /* adjust error, we want to return with NoSuchBucket and not
         * NoSuchKey */
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return;
    }
  }

  string prefix, delimiter;

  /* Swift: a "path" arg restricts the multipart abort below to that
   * pseudo-directory. */
  if (s->prot_flags & RGW_REST_SWIFT) {
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }

  /* Abort any in-flight multipart uploads before removing the bucket. */
  op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);

  if (op_ret < 0) {
    return;
  }

  op_ret = store->delete_bucket(s->bucket_info, ot, false);

  if (op_ret == -ECANCELED) {
    // lost a race, either with mdlog sync or another delete bucket operation.
    // in either case, we've already called rgw_unlink_bucket()
    op_ret = 0;
    return;
  }

  /* Remove the bucket from the owning user's bucket list; failure here is
   * logged but does not fail the request. */
  if (op_ret == 0) {
    op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
                               s->bucket.name, false);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  }

  if (op_ret < 0) {
    return;
  }


}
2854
int RGWPutObj::verify_permission()
{
  /* For server-side copy (x-amz-copy-source) the caller must first be able
   * to READ the source object; only then is write permission on the target
   * bucket checked. */
  if (copy_source) {

    RGWAccessControlPolicy cs_acl(s->cct);
    optional<Policy> policy;
    map<string, bufferlist> cs_attrs;
    rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
    rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);

    rgw_obj obj(cs_bucket, cs_object);
    store->set_atomic(s->obj_ctx, obj);
    store->set_prefetch_data(s->obj_ctx, obj);

    /* check source object permissions */
    if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, policy,
                        cs_bucket, cs_object) < 0) {
      return -EACCES;
    }

    /* admin request overrides permission checks */
    if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
      if (policy) {
        /* Bucket policy on the source: an explicit Deny always wins; on
         * Pass (no matching statement) fall back to the source ACL. The
         * IAM action differs for versioned reads. */
        auto e = policy->eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                           RGW_PERM_READ)) {
        /* No bucket policy on the source: ACL alone decides. */
        return -EACCES;
      }
    }
  }

  /* Target bucket: an IAM policy Allow short-circuits the ACL check; a
   * Deny rejects; Pass falls through to the ACL-only check below. */
  if (s->iam_policy) {
    auto e = s->iam_policy->eval(s->env, *s->auth.identity,
                                 rgw::IAM::s3PutObject,
                                 rgw_obj(s->bucket, s->object));
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    }
  }

  if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
2914
/* Expose the internal multipart-object descriptor to the caller. */
void RGWPutObjProcessor_Multipart::get_mp(RGWMPObj** _mp){
  *_mp = &mp;
}
2918
/* Prepare writing of a single multipart part: validate uploadId/partNumber
 * request args, set up the manifest with the part's stripe rule and prefix,
 * and derive the head object. @oid_rand, when set, replaces the upload id
 * in the object-name prefix. Returns 0 or a negative error code. */
int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand)
{
  string oid = obj_str;
  upload_id = s->info.args.get("uploadId");
  if (!oid_rand) {
    mp.init(oid, upload_id);
  } else {
    mp.init(oid, upload_id, *oid_rand);
  }

  /* partNumber is mandatory and must parse as a decimal integer. */
  part_num = s->info.args.get("partNumber");
  if (part_num.empty()) {
    ldout(s->cct, 10) << "part number is empty" << dendl;
    return -EINVAL;
  }

  string err;
  uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);

  if (!err.empty()) {
    ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
    return -EINVAL;
  }

  /* Part data objects are named "<oid>.<upload_id-or-rand>..." */
  string upload_prefix = oid + ".";

  if (!oid_rand) {
    upload_prefix.append(upload_id);
  } else {
    upload_prefix.append(*oid_rand);
  }

  rgw_obj target_obj;
  target_obj.init(bucket, oid);

  manifest.set_prefix(upload_prefix);

  /* Stripe parts at the configured object stripe size. */
  manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, num);

  int r = manifest_gen.create_begin(store->ctx(), &manifest, s->bucket_info.placement_rule, bucket, target_obj);
  if (r < 0) {
    return r;
  }

  /* The first manifest object becomes the part's head object; index
   * placement is keyed off the original object name. */
  cur_obj = manifest_gen.get_cur_obj(store);
  rgw_raw_obj_to_obj(bucket, cur_obj, &head_obj);
  head_obj.index_hash_source = obj_str;

  r = prepare_init(store, NULL);
  if (r < 0) {
    return r;
  }

  return 0;
}
2974
/* Finish writing one multipart part: flush pending data, write the part's
 * head-object metadata, then record the part (RGWUploadPartInfo) in the
 * multipart meta object's omap under a "part.<num>" key.
 * Returns 0 or a negative error code. */
int RGWPutObjProcessor_Multipart::do_complete(size_t accounted_size,
                                              const string& etag,
                                              real_time *mtime, real_time set_mtime,
                                              map<string, bufferlist>& attrs,
                                              real_time delete_at,
                                              const char *if_match,
                                              const char *if_nomatch, const string *user_data, rgw_zone_set *zones_trace)
{
  complete_writing_data();

  /* Parts are plain objects: versioning must not apply to them. */
  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, head_obj);
  op_target.set_versioning_disabled(true);
  RGWRados::Object::Write head_obj_op(&op_target);

  head_obj_op.meta.set_mtime = set_mtime;
  head_obj_op.meta.mtime = mtime;
  head_obj_op.meta.owner = s->owner.get_id();
  head_obj_op.meta.delete_at = delete_at;
  head_obj_op.meta.zones_trace = zones_trace;

  int r = head_obj_op.write_meta(obj_len, accounted_size, attrs);
  if (r < 0)
    return r;

  bufferlist bl;
  RGWUploadPartInfo info;
  string p = "part.";
  bool sorted_omap = is_v2_upload_id(upload_id);

  /* v2 upload ids get zero-padded part keys so omap iteration is sorted
   * numerically; older uploads keep the raw string. */
  if (sorted_omap) {
    string err;
    int part_num_int = strict_strtol(part_num.c_str(), 10, &err);
    if (!err.empty()) {
      dout(10) << "bad part number specified: " << part_num << dendl;
      return -EINVAL;
    }
    char buf[32];
    snprintf(buf, sizeof(buf), "%08d", part_num_int);
    p.append(buf);
  } else {
    p.append(part_num);
  }
  info.num = atoi(part_num.c_str());
  info.etag = etag;
  info.size = obj_len;
  info.accounted_size = accounted_size;
  info.modified = real_clock::now();
  info.manifest = manifest;

  /* Carry the part's compression info so complete-multipart can stitch it. */
  bool compressed;
  r = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
  if (r < 0) {
    dout(1) << "cannot get compression info" << dendl;
    return r;
  }

  ::encode(info, bl);

  string multipart_meta_obj = mp.get_meta();

  /* The upload's meta object lives in the multipart namespace and in the
   * extra-data pool. */
  rgw_obj meta_obj;
  meta_obj.init_ns(bucket, multipart_meta_obj, mp_ns);
  meta_obj.set_in_extra_data(true);

  rgw_raw_obj raw_meta_obj;

  store->obj_to_raw(s->bucket_info.placement_rule, meta_obj, &raw_meta_obj);

  r = store->omap_set(raw_meta_obj, p, bl);

  return r;
}
3047
3048RGWPutObjProcessor *RGWPutObj::select_processor(RGWObjectCtx& obj_ctx, bool *is_multipart)
3049{
3050 RGWPutObjProcessor *processor;
3051
3052 bool multipart = s->info.args.exists("uploadId");
3053
3054 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3055
3056 if (!multipart) {
3057 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3058 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_olh_epoch(olh_epoch);
3059 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_version_id(version_id);
3060 } else {
3061 processor = new RGWPutObjProcessor_Multipart(obj_ctx, s->bucket_info, part_size, s);
3062 }
3063
3064 if (is_multipart) {
3065 *is_multipart = multipart;
3066 }
3067
3068 return processor;
3069}
3070
/* Destroy a processor obtained from select_processor(). */
void RGWPutObj::dispose_processor(RGWPutObjDataProcessor *processor)
{
  delete processor;
}
3075
void RGWPutObj::pre_exec()
{
  /* Common bucket/object pre-execution hook (shared by all bucket ops). */
  rgw_bucket_object_pre_exec(s);
}
3080
/* Adapter that forwards read-callback data chunks into the owning
 * RGWPutObj (used by RGWPutObj::get_data() when reading a copy source). */
class RGWPutObj_CB : public RGWGetDataCB
{
  RGWPutObj *op;  // non-owning back-pointer to the PUT operation
public:
  RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
  ~RGWPutObj_CB() override {}

  /* Delegate each chunk to RGWPutObj::get_data_cb(). */
  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
    return op->get_data_cb(bl, bl_ofs, bl_len);
  }
};
3092
3093int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3094{
3095 bufferlist bl_tmp;
3096 bl.copy(bl_ofs, bl_len, bl_tmp);
3097
3098 bl_aux.append(bl_tmp);
3099
3100 return bl_len;
3101}
3102
3103int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3104{
3105 RGWPutObj_CB cb(this);
3106 RGWGetDataCB* filter = &cb;
3107 boost::optional<RGWGetObj_Decompress> decompress;
3108 std::unique_ptr<RGWGetDataCB> decrypt;
3109 RGWCompressionInfo cs_info;
3110 map<string, bufferlist> attrs;
3111 map<string, bufferlist>::iterator attr_iter;
3112 int ret = 0;
3113
3114 uint64_t obj_size;
3115 int64_t new_ofs, new_end;
3116
3117 new_ofs = fst;
3118 new_end = lst;
3119
3120 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3121 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3122
3123 RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3124 RGWRados::Object::Read read_op(&op_target);
3125 read_op.params.obj_size = &obj_size;
3126 read_op.params.attrs = &attrs;
3127
3128 ret = read_op.prepare();
3129 if (ret < 0)
3130 return ret;
3131
3132 bool need_decompress;
3133 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3134 if (op_ret < 0) {
3135 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
3136 return -EIO;
3137 }
3138
3139 bool partial_content = true;
3140 if (need_decompress)
3141 {
3142 obj_size = cs_info.orig_size;
3143 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3144 filter = &*decompress;
3145 }
3146
3147 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3148 op_ret = this->get_decrypt_filter(&decrypt,
3149 filter,
3150 attrs,
3151 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3152 if (decrypt != nullptr) {
3153 filter = decrypt.get();
3154 }
3155 if (op_ret < 0) {
3156 return ret;
3157 }
3158
3159 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3160 if (ret < 0)
3161 return ret;
3162
3163 filter->fixup_range(new_ofs, new_end);
3164 ret = read_op.iterate(new_ofs, new_end, filter);
3165
3166 if (ret >= 0)
3167 ret = filter->flush();
3168
3169 bl.claim_append(bl_aux);
3170
3171 return ret;
3172}
3173
3174// special handling for compression type = "random" with multipart uploads
3175static CompressorRef get_compressor_plugin(const req_state *s,
3176 const std::string& compression_type)
3177{
3178 if (compression_type != "random") {
3179 return Compressor::create(s->cct, compression_type);
3180 }
3181
3182 bool is_multipart{false};
3183 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3184
3185 if (!is_multipart) {
3186 return Compressor::create(s->cct, compression_type);
3187 }
3188
3189 // use a hash of the multipart upload id so all parts use the same plugin
3190 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3191 if (alg == Compressor::COMP_ALG_NONE) {
3192 return nullptr;
3193 }
3194 return Compressor::create(s->cct, alg);
3195}
3196
/* Main body of an S3/Swift object PUT.
 *
 * Pipeline: request data -> (optional encrypt filter | compress filter)
 * -> put-obj processor. Uses `goto done` throughout so the processor is
 * always disposed and the latency perfcounter always updated. Results are
 * reported via op_ret; statement order here is significant (quota checks,
 * md5/etag computation and the EEXIST restart path all depend on it). */
void RGWPutObj::execute()
{
  RGWPutObjProcessor *processor = NULL;
  RGWPutObjDataProcessor *filter = nullptr;
  std::unique_ptr<RGWPutObjDataProcessor> encrypt;
  char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
  char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  MD5 hash;
  bufferlist bl, aclbl, bs;
  int len;
  map<string, string>::iterator iter;
  bool multipart;

  off_t fst;
  off_t lst;
  const auto& compression_type = store->get_zone_params().get_compression_type(
      s->bucket_info.placement_rule);
  CompressorRef plugin;
  boost::optional<RGWPutObj_Compress> compressor;

  /* DLO/SLO manifests get their etag computed differently (see below),
   * so the plain data md5 is skipped unless the client supplied one. */
  bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
  perfcounter->inc(l_rgw_put);
  op_ret = -EINVAL;
  if (s->object.empty()) {
    goto done;
  }

  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_system_versioning_params() returned ret="
                      << op_ret << dendl;
    goto done;
  }

  /* Decode the client-supplied Content-MD5 (base64) into hex for the
   * comparison after upload. */
  if (supplied_md5_b64) {
    need_calc_md5 = true;

    ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
    op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
                          supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
    ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
    if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
      op_ret = -ERR_INVALID_DIGEST;
      goto done;
    }

    buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
    ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
  }

  if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
                            we also check sizes at the end anyway */
    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                                user_quota, bucket_quota, s->content_length);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl;
      goto done;
    }

    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
      goto done;
    }
  }

  /* A client-supplied etag (used for multipart part verification) overrides
   * the md5 reference value. */
  if (supplied_etag) {
    strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
    supplied_md5[sizeof(supplied_md5) - 1] = '\0';
  }

  processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);

  // no filters by default
  filter = processor;

  /* Handle object versioning of Swift API. */
  if (! multipart) {
    rgw_obj obj(s->bucket, s->object);
    op_ret = store->swift_versioning_copy(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                                          s->bucket_owner.get_id(),
                                          s->bucket_info,
                                          obj);
    if (op_ret < 0) {
      goto done;
    }
  }

  op_ret = processor->prepare(store, NULL);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret
                      << dendl;
    goto done;
  }

  /* Byte range when this PUT copies from another object (copy_source). */
  fst = copy_source_range_fst;
  lst = copy_source_range_lst;

  /* Encryption and compression are mutually exclusive: only attempt
   * compression when no encryption filter was installed. */
  op_ret = get_encrypt_filter(&encrypt, filter);
  if (op_ret < 0) {
    goto done;
  }
  if (encrypt != nullptr) {
    filter = encrypt.get();
  } else {
    //no encryption, we can try compression
    if (compression_type != "none") {
      plugin = get_compressor_plugin(s, compression_type);
      if (!plugin) {
        ldout(s->cct, 1) << "Cannot load plugin for compression type "
                         << compression_type << dendl;
      } else {
        compressor.emplace(s->cct, plugin, filter);
        filter = &*compressor;
      }
    }
  }

  /* Main data loop: read a chunk (from the client or the copy source),
   * hash it, and push it through the filter chain. */
  do {
    bufferlist data;
    if (fst > lst)
      break;
    if (!copy_source) {
      len = get_data(data);
    } else {
      uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
      op_ret = get_data(fst, cur_lst, data);
      if (op_ret < 0)
        goto done;
      len = data.length();
      s->content_length += len;
      fst += len;
    }
    if (len < 0) {
      op_ret = len;
      goto done;
    }

    if (need_calc_md5) {
      hash.Update((const byte *)data.c_str(), data.length());
    }

    /* update torrent */
    torrent.update(data);

    /* do we need this operation to be synchronous? if we're dealing with an object with immutable
     * head, e.g., multipart object we need to make sure we're the first one writing to this object
     */
    bool need_to_wait = (ofs == 0) && multipart;

    bufferlist orig_data;

    /* Keep a copy: put_data_and_throttle() consumes `data`, and we may
     * need to replay it below on the -EEXIST restart path. */
    if (need_to_wait) {
      orig_data = data;
    }

    op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait);
    if (op_ret < 0) {
      if (!need_to_wait || op_ret != -EEXIST) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
                          << op_ret << dendl;
        goto done;
      }
      /* need_to_wait == true and op_ret == -EEXIST */
      ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl;

      /* restore original data */
      data.swap(orig_data);

      /* restart processing with different oid suffix */

      dispose_processor(processor);
      processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
      filter = processor;

      string oid_rand;
      char buf[33];
      gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
      oid_rand.append(buf);

      op_ret = processor->prepare(store, &oid_rand);
      if (op_ret < 0) {
        ldout(s->cct, 0) << "ERROR: processor->prepare() returned "
                         << op_ret << dendl;
        goto done;
      }

      /* Rebuild the filter chain on top of the new processor. */
      op_ret = get_encrypt_filter(&encrypt, filter);
      if (op_ret < 0) {
        goto done;
      }
      if (encrypt != nullptr) {
        filter = encrypt.get();
      } else {
        if (compressor) {
          compressor.emplace(s->cct, plugin, filter);
          filter = &*compressor;
        }
      }
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        goto done;
      }
    }

    ofs += len;
  } while (len > 0);

  /* Flush whatever the filters still buffer. */
  {
    bufferlist flush;
    op_ret = put_data_and_throttle(filter, flush, ofs, false);
    if (op_ret < 0) {
      goto done;
    }
  }

  /* Short read relative to the declared Content-Length: client went away. */
  if (!chunked_upload && ofs != s->content_length) {
    op_ret = -ERR_REQUEST_TIMEOUT;
    goto done;
  }
  s->obj_size = ofs;

  perfcounter->inc(l_rgw_put_b, s->obj_size);

  op_ret = do_aws4_auth_completion();
  if (op_ret < 0) {
    goto done;
  }

  /* Re-check quota with the actual (possibly chunked) size. */
  op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                              user_quota, bucket_quota, s->obj_size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
    goto done;
  }

  hash.Final(m);

  /* Record compression metadata so GET can decompress and report the
   * original size. */
  if (compressor && compressor->is_compressed()) {
    bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
    ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION
                      << " with type=" << cs_info.compression_type
                      << ", orig_size=" << cs_info.orig_size
                      << ", blocks=" << cs_info.blocks.size() << dendl;
  }

  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  etag = calc_md5;

  if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
    op_ret = -ERR_BAD_DIGEST;
    goto done;
  }

  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  /* DLO: the etag is derived from the manifest, not the (empty) body. */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      goto done;
    }
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  /* SLO: store the manifest and fold its raw bytes into the etag. */
  if (slo_info) {
    bufferlist manifest_bl;
    ::encode(*slo_info, manifest_bl);
    emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));

    hash.Update((byte *)slo_info->raw_data, slo_info->raw_data_len);
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  if (supplied_etag && etag.compare(supplied_etag) != 0) {
    op_ret = -ERR_UNPROCESSABLE_ENTITY;
    goto done;
  }
  bl.append(etag.c_str(), etag.size() + 1);
  emplace_attr(RGW_ATTR_ETAG, std::move(bl));

  populate_with_generic_attrs(s, attrs);
  rgw_get_request_metadata(s->cct, s->info, attrs);
  encode_delete_at_attr(delete_at, attrs);
  encode_obj_tags_attr(obj_tags.get(), attrs);

  /* Add a custom metadata to expose the information whether an object
   * is an SLO or not. Appending the attribute must be performed AFTER
   * processing any input from user in order to prohibit overwriting. */
  if (slo_info) {
    bufferlist slo_userindicator_bl;
    slo_userindicator_bl.append("True", 4);
    emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
  }

  /* Atomically finish the write: link head, set attrs, apply conditions. */
  op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
                               (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
                               (user_data.empty() ? nullptr : &user_data));

  /* produce torrent */
  if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
  {
    torrent.init(s, store);
    torrent.set_create_date(mtime);
    op_ret = torrent.complete();
    if (0 != op_ret)
    {
      ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
      goto done;
    }
  }

done:
  dispose_processor(processor);
  perfcounter->tinc(l_rgw_put_lat,
                    (ceph_clock_now() - s->time));
}
3542
int RGWPostObj::verify_permission()
{
  /* Always succeeds here: the real ACL/bucket-policy check for POST uploads
   * is deferred to RGWPostObj::execute(), which evaluates s3:PutObject
   * before any form data is processed. */
  return 0;
}
3547/*
3548RGWPutObjProcessor *RGWPostObj::select_processor(RGWObjectCtx& obj_ctx)
3549{
3550 RGWPutObjProcessor *processor;
3551
3552 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3553
3554 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3555
3556 return processor;
3557}
3558
3559void RGWPostObj::dispose_processor(RGWPutObjDataProcessor *processor)
3560{
3561 delete processor;
3562}
3563*/
void RGWPostObj::pre_exec()
{
  /* Shared pre-execution hook for all bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
3568
3569void RGWPostObj::execute()
3570{
3571 RGWPutObjDataProcessor *filter = nullptr;
3572 boost::optional<RGWPutObj_Compress> compressor;
3573 CompressorRef plugin;
224ce89b 3574 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
7c673cae
FG
3575
3576 /* Read in the data from the POST form. */
3577 op_ret = get_params();
3578 if (op_ret < 0) {
3579 return;
3580 }
3581
3582 op_ret = verify_params();
3583 if (op_ret < 0) {
3584 return;
3585 }
3586
31f18b77
FG
3587 if (s->iam_policy) {
3588 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
3589 rgw::IAM::s3PutObject,
3590 rgw_obj(s->bucket, s->object));
3591 if (e == Effect::Deny) {
3592 op_ret = -EACCES;
3593 return;
3594 } else if (e == Effect::Pass && !verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3595 op_ret = -EACCES;
3596 return;
3597 }
3598 } else if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
7c673cae
FG
3599 op_ret = -EACCES;
3600 return;
3601 }
3602
3603 /* Start iteration over data fields. It's necessary as Swift's FormPost
3604 * is capable to handle multiple files in single form. */
3605 do {
3606 std::unique_ptr<RGWPutObjDataProcessor> encrypt;
3607 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3608 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3609 MD5 hash;
3610 ceph::buffer::list bl, aclbl;
3611 int len = 0;
3612
3613 op_ret = store->check_quota(s->bucket_owner.get_id(),
3614 s->bucket,
3615 user_quota,
3616 bucket_quota,
3617 s->content_length);
3618 if (op_ret < 0) {
3619 return;
3620 }
3621
31f18b77
FG
3622 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3623 if (op_ret < 0) {
3624 return;
3625 }
3626
224ce89b
WB
3627 if (supplied_md5_b64) {
3628 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3629 ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3630 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3631 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3632 ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
3633 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3634 op_ret = -ERR_INVALID_DIGEST;
3635 return;
3636 }
3637
3638 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
3639 ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
3640 }
3641
7c673cae
FG
3642 RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx),
3643 s->bucket_info,
3644 s->bucket,
3645 get_current_filename(),
3646 /* part size */
3647 s->cct->_conf->rgw_obj_stripe_size,
3648 s->req_id,
3649 s->bucket_info.versioning_enabled());
3650 /* No filters by default. */
3651 filter = &processor;
3652
3653 op_ret = processor.prepare(store, nullptr);
3654 if (op_ret < 0) {
3655 return;
3656 }
3657
3658 op_ret = get_encrypt_filter(&encrypt, filter);
3659 if (op_ret < 0) {
3660 return;
3661 }
3662 if (encrypt != nullptr) {
3663 filter = encrypt.get();
3664 } else {
3665 const auto& compression_type = store->get_zone_params().get_compression_type(
3666 s->bucket_info.placement_rule);
3667 if (compression_type != "none") {
3668 plugin = Compressor::create(s->cct, compression_type);
3669 if (!plugin) {
3670 ldout(s->cct, 1) << "Cannot load plugin for compression type "
3671 << compression_type << dendl;
3672 } else {
3673 compressor.emplace(s->cct, plugin, filter);
3674 filter = &*compressor;
3675 }
3676 }
3677 }
3678
3679 bool again;
3680 do {
3681 ceph::bufferlist data;
3682 len = get_data(data, again);
3683
3684 if (len < 0) {
3685 op_ret = len;
3686 return;
3687 }
3688
3689 if (!len) {
3690 break;
3691 }
3692
3693 hash.Update((const byte *)data.c_str(), data.length());
3694 op_ret = put_data_and_throttle(filter, data, ofs, false);
3695
3696 ofs += len;
3697
3698 if (ofs > max_len) {
3699 op_ret = -ERR_TOO_LARGE;
3700 return;
3701 }
3702 } while (again);
3703
3704 {
3705 bufferlist flush;
3706 op_ret = put_data_and_throttle(filter, flush, ofs, false);
3707 }
3708
3709 if (len < min_len) {
3710 op_ret = -ERR_TOO_SMALL;
3711 return;
3712 }
3713
3714 s->obj_size = ofs;
3715
224ce89b
WB
3716 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
3717 op_ret = -ERR_BAD_DIGEST;
3718 return;
3719 }
3720
7c673cae
FG
3721 op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
3722 user_quota, bucket_quota, s->obj_size);
3723 if (op_ret < 0) {
3724 return;
3725 }
3726
31f18b77
FG
3727 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3728 if (op_ret < 0) {
3729 return;
3730 }
3731
7c673cae
FG
3732 hash.Final(m);
3733 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
3734
3735 etag = calc_md5;
3736 bl.append(etag.c_str(), etag.size() + 1);
3737 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
3738
3739 policy.encode(aclbl);
3740 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3741
3742 const std::string content_type = get_current_content_type();
3743 if (! content_type.empty()) {
3744 ceph::bufferlist ct_bl;
3745 ct_bl.append(content_type.c_str(), content_type.size() + 1);
3746 emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
3747 }
3748
3749 if (compressor && compressor->is_compressed()) {
3750 ceph::bufferlist tmp;
3751 RGWCompressionInfo cs_info;
3752 cs_info.compression_type = plugin->get_type_name();
3753 cs_info.orig_size = s->obj_size;
3754 cs_info.blocks = move(compressor->get_compression_blocks());
3755 ::encode(cs_info, tmp);
3756 emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
3757 }
3758
3759 op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(),
3760 attrs, (delete_at ? *delete_at : real_time()));
3761 } while (is_next_file_to_upload());
3762}
3763
3764
3765void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
3766 const set<string>& rmattr_names,
3767 map<int, string>& temp_url_keys)
3768{
3769 map<string, bufferlist>::iterator iter;
3770
3771 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
3772 if (iter != add_attrs.end()) {
3773 temp_url_keys[0] = iter->second.c_str();
3774 add_attrs.erase(iter);
3775 }
3776
3777 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
3778 if (iter != add_attrs.end()) {
3779 temp_url_keys[1] = iter->second.c_str();
3780 add_attrs.erase(iter);
3781 }
3782
3783 for (const string& name : rmattr_names) {
3784 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
3785 temp_url_keys[0] = string();
3786 }
3787 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
3788 temp_url_keys[1] = string();
3789 }
3790 }
3791}
3792
/* Gather and pre-process everything needed for an account-metadata update
 * before permissions are verified: current attrs, request metadata, TempURL
 * keys and quota settings. Returns 0 on success or a negative error code. */
int RGWPutMetadataAccount::init_processing()
{
  /* First, go to the base class. At the time of writing the method was
   * responsible only for initializing the quota. This isn't necessary
   * here as we are touching metadata only. I'm putting this call only
   * for the future. */
  op_ret = RGWOp::init_processing();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }

  /* Load the account's current attributes so the add/remove sets below
   * can be reconciled against them. */
  op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
                                     &acct_op_tracker);
  if (op_ret < 0) {
    return op_ret;
  }

  if (has_policy) {
    bufferlist acl_bl;
    policy.encode(acl_bl);
    attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
  }

  rgw_get_request_metadata(s->cct, s->info, attrs, false);
  prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* Try extract the TempURL-related stuff now to allow verify_permission
   * evaluate whether we need FULL_CONTROL or not. */
  filter_out_temp_url(attrs, rmattr_names, temp_url_keys);

  /* The same with quota except a client needs to be reseller admin. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
                                 &new_quota_extracted);
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
3838
/* Permission check for account-metadata updates. Relies on the TempURL
 * keys and quota already extracted by init_processing(). */
int RGWPutMetadataAccount::verify_permission()
{
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (!verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Altering TempURL keys requires FULL_CONTROL. */
  if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
    return -EPERM;
  }

  /* We are failing this intentionally to allow system user/reseller admin
   * override in rgw_process.cc. This is the way to specify a given RGWOp
   * expect extra privileges. */
  if (new_quota_extracted) {
    return -EACCES;
  }

  return 0;
}
3863
/* Apply the account-metadata update: merge TempURL keys and quota into a
 * freshly loaded copy of the user info and persist it together with the
 * attribute map prepared in init_processing(). */
void RGWPutMetadataAccount::execute()
{
  /* Params have been extracted earlier. See init_processing(). */
  RGWUserInfo new_uinfo;
  op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
                                    &acct_op_tracker);
  if (op_ret < 0) {
    return;
  }

  /* Handle the TempURL-related stuff. */
  if (!temp_url_keys.empty()) {
    for (auto& pair : temp_url_keys) {
      new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
    }
  }

  /* Handle the quota extracted at the verify_permission step. */
  if (new_quota_extracted) {
    new_uinfo.user_quota = std::move(new_quota);
  }

  /* We are passing here the current (old) user info to allow the function
   * optimize-out some operations. */
  op_ret = rgw_store_user_info(store, new_uinfo, s->user,
                               &acct_op_tracker, real_time(), false, &attrs);
}
3891
3892int RGWPutMetadataBucket::verify_permission()
3893{
31f18b77 3894 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
7c673cae
FG
3895 return -EACCES;
3896 }
3897
3898 return 0;
3899}
3900
void RGWPutMetadataBucket::pre_exec()
{
  /* Shared pre-execution hook for all bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
3905
/* Apply a bucket-metadata update (Swift POST container): ACL, CORS,
 * generic/request attrs, container quota, versioning location and static
 * website config. Ordering matters: special attrs (ACL, CORS) must be
 * emplaced before the generic ones so they are not overwritten. */
void RGWPutMetadataBucket::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_get_request_metadata(s->cct, s->info, attrs, false);

  /* Changing a bucket's placement rule after creation is not supported. */
  if (!placement_rule.empty() &&
      placement_rule != s->bucket_info.placement_rule) {
    op_ret = -EEXIST;
    return;
  }

  /* Encode special metadata first as we're using std::map::emplace under
   * the hood. This method will add the new items only if the map doesn't
   * contain such keys yet. */
  if (has_policy) {
    if (s->dialect.compare("swift") == 0) {
      /* Swift semantics: merge the incoming read/write grants into the
       * existing policy instead of replacing it wholesale. */
      auto old_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl);
      auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
      new_policy->filter_merge(policy_rw_mask, old_policy);
      policy = *new_policy;
    }
    buffer::list bl;
    policy.encode(bl);
    emplace_attr(RGW_ATTR_ACL, std::move(bl));
  }

  if (has_cors) {
    buffer::list bl;
    cors_config.encode(bl);
    emplace_attr(RGW_ATTR_CORS, std::move(bl));
  }

  /* It's supposed that following functions WILL NOT change any special
   * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
  prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* According to the Swift's behaviour and its container_quota WSGI middleware
   * implementation: anyone with write permissions is able to set the bucket
   * quota. This stays in contrast to account quotas that can be set only by
   * clients holding reseller admin privileges. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
  if (op_ret < 0) {
    return;
  }

  if (swift_ver_location) {
    s->bucket_info.swift_ver_location = *swift_ver_location;
    s->bucket_info.swift_versioning = (! swift_ver_location->empty());
  }

  /* Web site of Swift API. */
  filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
  s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

  /* Setting attributes also stores the provided bucket info. Due to this
   * fact, the new quota settings can be serialized with the same call. */
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                &s->bucket_info.objv_tracker);
}
3970
3971int RGWPutMetadataObject::verify_permission()
3972{
31f18b77
FG
3973 // This looks to be something specific to Swift. We could add
3974 // operations like swift:PutMetadataObject to the Policy Engine.
3975 if (!verify_object_permission_no_policy(s, RGW_PERM_WRITE)) {
7c673cae
FG
3976 return -EACCES;
3977 }
3978
3979 return 0;
3980}
3981
void RGWPutMetadataObject::pre_exec()
{
  /* Shared pre-execution hook for all bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
3986
/* Replace an object's user metadata (Swift POST object): read the current
 * attrs, compute the add/remove delta from the request, and store it. */
void RGWPutMetadataObject::execute()
{
  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs, orig_attrs, rmattrs;

  store->set_atomic(s->obj_ctx, obj);

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_get_request_metadata(s->cct, s->info, attrs);
  /* check if obj exists, read orig attrs */
  op_ret = get_obj_attrs(store, s, obj, orig_attrs);
  if (op_ret < 0) {
    return;
  }

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(orig_attrs)) {
    op_ret = -ENOENT;
    return;
  }

  /* Filter currently existing attributes. */
  prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
  populate_with_generic_attrs(s, attrs);
  encode_delete_at_attr(delete_at, attrs);

  /* A DLO manifest header, if present, becomes an attribute as well. */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
}
4028
4029int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
4030{
4031 RGWSLOInfo slo_info;
4032 bufferlist::iterator bliter = bl.begin();
4033 try {
4034 ::decode(slo_info, bliter);
4035 } catch (buffer::error& err) {
4036 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
4037 return -EIO;
4038 }
4039
4040 try {
4041 deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
4042 new RGWBulkDelete::Deleter(store, s));
4043 } catch (std::bad_alloc) {
4044 return -ENOMEM;
4045 }
4046
4047 list<RGWBulkDelete::acct_path_t> items;
4048 for (const auto& iter : slo_info.entries) {
4049 const string& path_str = iter.path;
4050
4051 const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
31f18b77 4052 if (boost::string_view::npos == sep_pos) {
7c673cae
FG
4053 return -EINVAL;
4054 }
4055
4056 RGWBulkDelete::acct_path_t path;
4057
31f18b77
FG
4058 path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
4059 path.obj_key = url_decode(path_str.substr(sep_pos + 1));
7c673cae
FG
4060
4061 items.push_back(path);
4062 }
4063
4064 /* Request removal of the manifest object itself. */
4065 RGWBulkDelete::acct_path_t path;
4066 path.bucket_name = s->bucket_name;
4067 path.obj_key = s->object;
4068 items.push_back(path);
4069
4070 int ret = deleter->delete_chunk(items);
4071 if (ret < 0) {
4072 return ret;
4073 }
4074
4075 return 0;
4076}
4077
4078int RGWDeleteObj::verify_permission()
4079{
31f18b77
FG
4080 if (s->iam_policy) {
4081 auto r = s->iam_policy->eval(s->env, *s->auth.identity,
4082 s->object.instance.empty() ?
4083 rgw::IAM::s3DeleteObject :
4084 rgw::IAM::s3DeleteObjectVersion,
4085 ARN(s->bucket, s->object.name));
4086 if (r == Effect::Allow)
4087 return true;
4088 else if (r == Effect::Deny)
4089 return false;
4090 }
4091
4092 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
7c673cae
FG
4093 return -EACCES;
4094 }
4095
4096 return 0;
4097}
4098
void RGWDeleteObj::pre_exec()
{
  /* Shared pre-execution hook for all bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
4103
/* Delete an object (or an SLO manifest with all of its segments).
 *
 * Path: optional attr read (expiration / SLO detection) -> SLO bulk delete
 * short-circuit -> Swift version restore attempt -> regular versioned
 * delete. Results are reported via op_ret. */
void RGWDeleteObj::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs;


  if (!s->object.empty()) {
    if (need_object_expiration() || multipart_delete) {
      /* check if obj exists, read orig attrs */
      op_ret = get_obj_attrs(store, s, obj, attrs);
      if (op_ret < 0) {
        return;
      }
    }

    /* Swift "multipart-manifest=delete": only valid on an SLO manifest;
     * delegates to handle_slo_manifest() and returns immediately. */
    if (multipart_delete) {
      const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);

      if (slo_attr != attrs.end()) {
        op_ret = handle_slo_manifest(slo_attr->second);
        if (op_ret < 0) {
          ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
        }
      } else {
        op_ret = -ERR_NOT_SLO_MANIFEST;
      }

      return;
    }

    RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
    obj_ctx->obj.set_atomic(obj);

    /* Swift object versioning may replace the delete with a restore of the
     * most recent archived version. */
    bool ver_restored = false;
    op_ret = store->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
                                             s->bucket_info, obj, ver_restored);
    if (op_ret < 0) {
      return;
    }

    if (!ver_restored) {
      /* Swift's versioning mechanism hasn't found any previous version of
       * the object that could be restored. This means we should proceed
       * with the regular delete path. */
      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
      RGWRados::Object::Delete del_op(&del_target);

      op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
                                            &del_op.params.marker_version_id);
      if (op_ret < 0) {
        return;
      }

      del_op.params.bucket_owner = s->bucket_owner.get_id();
      del_op.params.versioning_status = s->bucket_info.versioning_status();
      del_op.params.obj_owner = s->owner;
      del_op.params.unmod_since = unmod_since;
      del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */

      op_ret = del_op.delete_obj();
      if (op_ret >= 0) {
        delete_marker = del_op.result.delete_marker;
        version_id = del_op.result.version_id;
      }

      /* Check whether the object has expired. Swift API documentation
       * stands that we should return 404 Not Found in such case. */
      if (need_object_expiration() && object_is_expired(attrs)) {
        op_ret = -ENOENT;
        return;
      }
    }

    if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
      op_ret = 0;
    }
  } else {
    op_ret = -EINVAL;
  }
}
4194
4195
4196bool RGWCopyObj::parse_copy_location(const string& url_src, string& bucket_name, rgw_obj_key& key)
4197{
4198 string name_str;
4199 string params_str;
4200
4201 size_t pos = url_src.find('?');
4202 if (pos == string::npos) {
4203 name_str = url_src;
4204 } else {
4205 name_str = url_src.substr(0, pos);
4206 params_str = url_src.substr(pos + 1);
4207 }
4208
31f18b77 4209 std::string dec_src = url_decode(name_str);
7c673cae
FG
4210 const char *src = dec_src.c_str();
4211
4212 if (*src == '/') ++src;
4213
4214 string str(src);
4215
4216 pos = str.find('/');
4217 if (pos ==string::npos)
4218 return false;
4219
4220 bucket_name = str.substr(0, pos);
4221 key.name = str.substr(pos + 1);
4222
4223 if (key.name.empty()) {
4224 return false;
4225 }
4226
4227 if (!params_str.empty()) {
4228 RGWHTTPArgs args;
4229 args.set(params_str);
4230 args.parse();
4231
4232 key.instance = args.get("versionId", NULL);
4233 }
4234
4235 return true;
4236}
4237
int RGWCopyObj::verify_permission()
{
  // Copy is authorized in two halves: READ on the source object (ACL and,
  // when present, bucket policy) and WRITE on the destination bucket.
  RGWAccessControlPolicy src_acl(s->cct);
  optional<Policy> src_policy;
  op_ret = get_params();
  if (op_ret < 0)
    return op_ret;

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return op_ret;
  }
  map<string, bufferlist> src_attrs;

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  if (s->bucket_instance_id.empty()) {
    op_ret = store->get_bucket_info(obj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
  } else {
    /* will only happen in intra region sync where the source and dest bucket is the same */
    op_ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
  }
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_BUCKET;
    }
    return op_ret;
  }

  src_bucket = src_bucket_info.bucket;

  /* get buckets info (source and dest) */
  // Source-side checks only apply to a local source; for cross-zone sync the
  // remote zone has already authorized the read.
  if (s->local_source && source_zone.empty()) {
    rgw_obj src_obj(src_bucket, src_object);
    store->set_atomic(s->obj_ctx, src_obj);
    store->set_prefetch_data(s->obj_ctx, src_obj);

    /* check source object permissions */
    op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl,
			     src_policy, src_bucket, src_object);
    if (op_ret < 0) {
      return op_ret;
    }

    /* admin request overrides permission checks */
    if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
      if (src_policy) {
	// Policy verdict: explicit Deny wins; Pass falls through to the ACL.
	auto e = src_policy->eval(s->env, *s->auth.identity,
				  src_object.instance.empty() ?
				  rgw::IAM::s3GetObject :
				  rgw::IAM::s3GetObjectVersion,
				  ARN(src_obj));
	if (e == Effect::Deny) {
	  return -EACCES;
	} else if (e == Effect::Pass &&
		   !src_acl.verify_permission(*s->auth.identity, s->perm_mask,
					      RGW_PERM_READ)) {
	  return -EACCES;
	}
      } else if (!src_acl.verify_permission(*s->auth.identity,
					       s->perm_mask,
					    RGW_PERM_READ)) {
	return -EACCES;
      }
    }
  }

  RGWAccessControlPolicy dest_bucket_policy(s->cct);
  map<string, bufferlist> dest_attrs;

  if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
                                                           or intra region sync */
    dest_bucket_info = src_bucket_info;
    dest_attrs = src_attrs;
  } else {
    op_ret = store->get_bucket_info(obj_ctx, dest_tenant_name, dest_bucket_name,
                                    dest_bucket_info, nullptr, &dest_attrs);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return op_ret;
    }
  }

  dest_bucket = dest_bucket_info.bucket;

  rgw_obj dest_obj(dest_bucket, dest_object);
  store->set_atomic(s->obj_ctx, dest_obj);

  /* check dest bucket permissions */
  op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
                              &dest_bucket_policy, dest_bucket);
  if (op_ret < 0) {
    return op_ret;
  }

  /* admin request overrides permission checks */
  // NOTE(review): the admin check is done against dest_policy (the destination
  // object ACL member) while the WRITE check uses the bucket ACL — presumably
  // intentional, but worth confirming against read_bucket_policy's semantics.
  if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id()) &&
      ! dest_bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_WRITE)) {
    return -EACCES;
  }

  op_ret = init_dest_policy();
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4349
4350
4351int RGWCopyObj::init_common()
4352{
4353 if (if_mod) {
4354 if (parse_time(if_mod, &mod_time) < 0) {
4355 op_ret = -EINVAL;
4356 return op_ret;
4357 }
4358 mod_ptr = &mod_time;
4359 }
4360
4361 if (if_unmod) {
4362 if (parse_time(if_unmod, &unmod_time) < 0) {
4363 op_ret = -EINVAL;
4364 return op_ret;
4365 }
4366 unmod_ptr = &unmod_time;
4367 }
4368
4369 bufferlist aclbl;
4370 dest_policy.encode(aclbl);
4371 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
4372
4373 rgw_get_request_metadata(s->cct, s->info, attrs);
4374 populate_with_generic_attrs(s, attrs);
4375
4376 return 0;
4377}
4378
4379static void copy_obj_progress_cb(off_t ofs, void *param)
4380{
4381 RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
4382 op->progress_cb(ofs);
4383}
4384
4385void RGWCopyObj::progress_cb(off_t ofs)
4386{
4387 if (!s->cct->_conf->rgw_copy_obj_progress)
4388 return;
4389
4390 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
4391 return;
4392
4393 send_partial_response(ofs);
4394
4395 last_ofs = ofs;
4396}
4397
void RGWCopyObj::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4402
void RGWCopyObj::execute()
{
  if (init_common() < 0)
    return;

  rgw_obj src_obj(src_bucket, src_object);
  rgw_obj dst_obj(dest_bucket, dest_object);

  // Mark both sides atomic so concurrent writers can't race the copy.
  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  obj_ctx.obj.set_atomic(src_obj);
  obj_ctx.obj.set_atomic(dst_obj);

  encode_delete_at_attr(delete_at, attrs);

  bool high_precision_time = (s->system_request);

  /* Handle object versioning of Swift API. In case of copying to remote this
   * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
  op_ret = store->swift_versioning_copy(obj_ctx,
                                        dest_bucket_info.owner,
                                        dest_bucket_info,
                                        dst_obj);
  if (op_ret < 0) {
    return;
  }

  // Delegate the actual data movement (and the conditional/attr handling
  // set up above) to RGWRados::copy_obj.
  op_ret = store->copy_obj(obj_ctx,
                           s->user->user_id,
                           client_id,
                           op_id,
                           &s->info,
                           source_zone,
                           dst_obj,
                           src_obj,
                           dest_bucket_info,
                           src_bucket_info,
                           &src_mtime,
                           &mtime,
                           mod_ptr,
                           unmod_ptr,
                           high_precision_time,
                           if_match,
                           if_nomatch,
                           attrs_mod,
                           copy_if_newer,
                           attrs, RGW_OBJ_CATEGORY_MAIN,
                           olh_epoch,
                           (delete_at ? *delete_at : real_time()),
                           (version_id.empty() ? NULL : &version_id),
                           &s->req_id, /* use req_id as tag */
                           &etag,
                           copy_obj_progress_cb, (void *)this
  );
}
4457
4458int RGWGetACLs::verify_permission()
4459{
4460 bool perm;
4461 if (!s->object.empty()) {
31f18b77
FG
4462 perm = verify_object_permission(s,
4463 s->object.instance.empty() ?
4464 rgw::IAM::s3GetObjectAcl :
4465 rgw::IAM::s3GetObjectVersionAcl);
7c673cae 4466 } else {
31f18b77 4467 perm = verify_bucket_permission(s, rgw::IAM::s3GetObjectAcl);
7c673cae
FG
4468 }
4469 if (!perm)
4470 return -EACCES;
4471
4472 return 0;
4473}
4474
void RGWGetACLs::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4479
4480void RGWGetACLs::execute()
4481{
4482 stringstream ss;
4483 RGWAccessControlPolicy *acl = (!s->object.empty() ? s->object_acl : s->bucket_acl);
4484 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(acl);
4485 s3policy->to_xml(ss);
4486 acls = ss.str();
4487}
4488
4489
4490
4491int RGWPutACLs::verify_permission()
4492{
4493 bool perm;
4494 if (!s->object.empty()) {
31f18b77
FG
4495 perm = verify_object_permission(s,
4496 s->object.instance.empty() ?
4497 rgw::IAM::s3PutObjectAcl :
4498 rgw::IAM::s3PutObjectVersionAcl);
7c673cae 4499 } else {
31f18b77 4500 perm = verify_bucket_permission(s, rgw::IAM::s3PutBucketAcl);
7c673cae
FG
4501 }
4502 if (!perm)
4503 return -EACCES;
4504
4505 return 0;
4506}
4507
4508int RGWGetLC::verify_permission()
4509{
4510 bool perm;
31f18b77 4511 perm = verify_bucket_permission(s, rgw::IAM::s3GetLifecycleConfiguration);
7c673cae
FG
4512 if (!perm)
4513 return -EACCES;
4514
4515 return 0;
4516}
4517
4518int RGWPutLC::verify_permission()
4519{
4520 bool perm;
31f18b77 4521 perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
7c673cae
FG
4522 if (!perm)
4523 return -EACCES;
4524
4525 return 0;
4526}
4527
int RGWDeleteLC::verify_permission()
{
  bool perm;
  // Deletion is gated on the *Put* lifecycle action — there is no separate
  // delete action defined here, so write permission also covers removal.
  perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
  if (!perm)
    return -EACCES;

  return 0;
}
4537
void RGWPutACLs::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4542
void RGWGetLC::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4547
void RGWPutLC::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4552
void RGWDeleteLC::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4557
void RGWPutACLs::execute()
{
  // Parse the incoming ACL (either the request body XML, or one synthesized
  // from a canned-ACL/ACL headers), rebuild it against known users, and store
  // it on the object (xattr) or the bucket (instance attrs).
  bufferlist bl;

  RGWAccessControlPolicy_S3 *policy = NULL;
  RGWACLXMLParser_S3 parser(s->cct);
  RGWAccessControlPolicy_S3 new_policy(s->cct);
  stringstream ss;
  char *new_data = NULL;
  rgw_obj obj;

  op_ret = 0; /* XXX redundant? */

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }


  // The owner of the resulting policy is carried over from the existing one.
  RGWAccessControlPolicy *existing_policy = (s->object.empty() ? s->bucket_acl : s->object_acl);

  owner = existing_policy->get_owner();

  op_ret = get_params();
  if (op_ret < 0)
    return;

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  // A canned ACL and a request body are mutually exclusive.
  if (!s->canned_acl.empty() && len) {
    op_ret = -EINVAL;
    return;
  }

  if (!s->canned_acl.empty() || s->has_acl_header) {
    // Replace the (empty) body with XML generated from the canned ACL /
    // ACL headers; `data` is owned by this op, so free the old buffer.
    op_ret = get_policy_from_state(store, s, ss);
    if (op_ret < 0)
      return;

    new_data = strdup(ss.str().c_str());
    free(data);
    data = new_data;
    len = ss.str().size();
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    return;
  }
  policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
  if (!policy) {
    op_ret = -EINVAL;
    return;
  }

  // forward bucket acl requests to meta master zone
  if (s->object.empty() && !store->is_meta_master()) {
    bufferlist in_data;
    // include acl data unless it was generated from a canned_acl
    if (s->canned_acl.empty()) {
      in_data.append(data, len);
    }
    op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old AccessControlPolicy";
    policy->to_xml(*_dout);
    *_dout << dendl;
  }

  // Validate grantees and normalize the parsed policy into new_policy.
  op_ret = policy->rebuild(store, &owner, new_policy);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New AccessControlPolicy:";
    new_policy.to_xml(*_dout);
    *_dout << dendl;
  }

  new_policy.encode(bl);
  map<string, bufferlist> attrs;

  if (!s->object.empty()) {
    obj = rgw_obj(s->bucket, s->object);
    store->set_atomic(s->obj_ctx, obj);
    //if instance is empty, we should modify the latest object
    op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
  } else {
    attrs = s->bucket_attrs;
    attrs[RGW_ATTR_ACL] = bl;
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  }
  if (op_ret == -ECANCELED) {
    op_ret = 0; /* lost a race, but it's ok because acls are immutable */
  }
}
4660
4661static void get_lc_oid(struct req_state *s, string& oid)
4662{
4663 string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
4664 int max_objs = (s->cct->_conf->rgw_lc_max_objs > HASH_PRIME)?HASH_PRIME:s->cct->_conf->rgw_lc_max_objs;
4665 int index = ceph_str_hash_linux(shard_id.c_str(), shard_id.size()) % HASH_PRIME % max_objs;
4666 oid = lc_oid_prefix;
4667 char buf[32];
4668 snprintf(buf, 32, ".%d", index);
4669 oid.append(buf);
4670 return;
4671}
4672
void RGWPutLC::execute()
{
  // Parse and validate the lifecycle XML, persist it as a bucket attr, then
  // register the bucket in the shared LC shard object under an exclusive lock.
  bufferlist bl;

  RGWLifecycleConfiguration_S3 *config = NULL;
  RGWLCXMLParser_S3 parser(s->cct);
  RGWLifecycleConfiguration_S3 new_config(s->cct);

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0)
    return;

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }
  config = static_cast<RGWLifecycleConfiguration_S3 *>(parser.find_first("LifecycleConfiguration"));
  if (!config) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old LifecycleConfiguration:";
    config->to_xml(*_dout);
    *_dout << dendl;
  }

  // Normalize/validate the parsed rules into new_config.
  op_ret = config->rebuild(store, new_config);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New LifecycleConfiguration:";
    new_config.to_xml(*_dout);
    *_dout << dendl;
  }

  new_config.encode(bl);
  map<string, bufferlist> attrs;
  attrs = s->bucket_attrs;
  attrs[RGW_ATTR_LC] = bl;
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  if (op_ret < 0)
    return;
  // Entry key is tenant-qualified; NOTE(review): get_lc_oid() hashes only
  // "<bucket>:<instance>" — confirm removal paths use this same key format.
  string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
  string oid;
  get_lc_oid(s, oid);
  pair<string, int> entry(shard_id, lc_uninitial);
  int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
  rados::cls::lock::Lock l(lc_index_lock_name);
  utime_t time(max_lock_secs, 0);
  l.set_duration(time);
  l.set_cookie(cookie);
  librados::IoCtx *ctx = store->get_lc_pool_ctx();
  // Retry forever on lock contention (EBUSY); any other lock failure aborts.
  do {
    op_ret = l.lock_exclusive(ctx, oid);
    if (op_ret == -EBUSY) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
      sleep(5);
      continue;
    }
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock " << oid << op_ret << dendl;
      break;
    }
    op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry);
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to set entry " << oid << op_ret << dendl;
    }
    break;
  }while(1);
  l.unlock(ctx, oid);
  return;
}
4755
4756void RGWDeleteLC::execute()
4757{
4758 bufferlist bl;
4759 map<string, bufferlist> orig_attrs, attrs;
4760 map<string, bufferlist>::iterator iter;
4761 rgw_raw_obj obj;
4762 store->get_bucket_instance_obj(s->bucket, obj);
4763 store->set_prefetch_data(s->obj_ctx, obj);
4764 op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
4765 if (op_ret < 0)
4766 return;
4767
4768 for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
4769 const string& name = iter->first;
4770 dout(10) << "DeleteLC : attr: " << name << dendl;
4771 if (name.compare(0, (sizeof(RGW_ATTR_LC) - 1), RGW_ATTR_LC) != 0) {
4772 if (attrs.find(name) == attrs.end()) {
4773 attrs[name] = iter->second;
4774 }
4775 }
4776 }
4777 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4778 string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
4779 pair<string, int> entry(shard_id, lc_uninitial);
4780 string oid;
4781 get_lc_oid(s, oid);
4782 int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
4783 librados::IoCtx *ctx = store->get_lc_pool_ctx();
4784 rados::cls::lock::Lock l(lc_index_lock_name);
4785 utime_t time(max_lock_secs, 0);
4786 l.set_duration(time);
4787 do {
4788 op_ret = l.lock_exclusive(ctx, oid);
4789 if (op_ret == -EBUSY) {
4790 dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
4791 sleep(5);
4792 continue;
4793 }
4794 if (op_ret < 0) {
4795 dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock " << oid << op_ret << dendl;
4796 break;
4797 }
4798 op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry);
4799 if (op_ret < 0) {
4800 dout(0) << "RGWLC::RGWDeleteLC() failed to set entry " << oid << op_ret << dendl;
4801 }
4802 break;
4803 }while(1);
4804 l.unlock(ctx, oid);
4805 return;
4806}
4807
4808int RGWGetCORS::verify_permission()
4809{
4810 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4811 return -EACCES;
4812 }
4813
4814 return 0;
4815}
4816
4817void RGWGetCORS::execute()
4818{
4819 op_ret = read_bucket_cors();
4820 if (op_ret < 0)
4821 return ;
4822
4823 if (!cors_exist) {
4824 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
4825 op_ret = -ENOENT;
4826 return;
4827 }
4828}
4829
4830int RGWPutCORS::verify_permission()
4831{
4832 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4833 return -EACCES;
4834 }
4835
4836 return 0;
4837}
4838
4839void RGWPutCORS::execute()
4840{
4841 rgw_raw_obj obj;
4842
4843 op_ret = get_params();
4844 if (op_ret < 0)
4845 return;
4846
31f18b77
FG
4847 if (!store->is_meta_master()) {
4848 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
4849 if (op_ret < 0) {
4850 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
4851 return;
4852 }
4853 }
4854
7c673cae
FG
4855 map<string, bufferlist> attrs = s->bucket_attrs;
4856 attrs[RGW_ATTR_CORS] = cors_bl;
4857 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4858}
4859
4860int RGWDeleteCORS::verify_permission()
4861{
4862 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4863 return -EACCES;
4864 }
4865
4866 return 0;
4867}
4868
4869void RGWDeleteCORS::execute()
4870{
4871 op_ret = read_bucket_cors();
4872 if (op_ret < 0)
4873 return;
4874
4875 bufferlist bl;
4876 rgw_raw_obj obj;
4877 if (!cors_exist) {
4878 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
4879 op_ret = -ENOENT;
4880 return;
4881 }
4882 store->get_bucket_instance_obj(s->bucket, obj);
4883 store->set_prefetch_data(s->obj_ctx, obj);
4884 map<string, bufferlist> orig_attrs, attrs, rmattrs;
4885 map<string, bufferlist>::iterator iter;
4886
4887 op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
4888 if (op_ret < 0)
4889 return;
4890
4891 /* only remove meta attrs */
4892 for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
4893 const string& name = iter->first;
4894 dout(10) << "DeleteCORS : attr: " << name << dendl;
4895 if (name.compare(0, (sizeof(RGW_ATTR_CORS) - 1), RGW_ATTR_CORS) == 0) {
4896 rmattrs[name] = iter->second;
4897 } else if (attrs.find(name) == attrs.end()) {
4898 attrs[name] = iter->second;
4899 }
4900 }
4901 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
4902}
4903
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
  // Fill the CORS response headers from the rule matched in validate_cors_request.
  get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
4907
4908int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
4909 rule = cc->host_name_rule(origin);
4910 if (!rule) {
4911 dout(10) << "There is no cors rule present for " << origin << dendl;
4912 return -ENOENT;
4913 }
4914
4915 if (!validate_cors_rule_method(rule, req_meth)) {
4916 return -ENOENT;
4917 }
4918 return 0;
4919}
4920
void RGWOptionsCORS::execute()
{
  // CORS preflight: requires an Origin and an Access-Control-Request-Method
  // header, then validates them against the bucket's stored configuration.
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  origin = s->info.env->get("HTTP_ORIGIN");
  if (!origin) {
    dout(0) <<
      "Preflight request without mandatory Origin header"
      << dendl;
    op_ret = -EINVAL;
    return;
  }
  req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    dout(0) <<
      "Preflight request without mandatory Access-control-request-method header"
      << dendl;
    op_ret = -EINVAL;
    return;
  }
  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
  op_ret = validate_cors_request(&bucket_cors);
  if (!rule) {
    // No matching rule: clear the echo fields; op_ret keeps the validation
    // result (-ENOENT) so the response layer can report the failure.
    origin = req_meth = NULL;
    return;
  }
  return;
}
4956
int RGWGetRequestPayment::verify_permission()
{
  // No extra check: any caller that reached this op may read the flag.
  return 0;
}
4961
void RGWGetRequestPayment::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4966
void RGWGetRequestPayment::execute()
{
  // Report the flag straight from the cached bucket info.
  requester_pays = s->bucket_info.requester_pays;
}
4971
4972int RGWSetRequestPayment::verify_permission()
4973{
4974 if (false == s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
4975 return -EACCES;
4976 }
4977
4978 return 0;
4979}
4980
void RGWSetRequestPayment::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
4985
void RGWSetRequestPayment::execute()
{
  op_ret = get_params();

  if (op_ret < 0)
    return;

  // Persist the new flag into the bucket instance metadata.
  s->bucket_info.requester_pays = requester_pays;
  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
					   &s->bucket_attrs);
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
		     << " returned err=" << op_ret << dendl;
    return;
  }
}
5002
5003int RGWInitMultipart::verify_permission()
5004{
31f18b77
FG
5005 if (s->iam_policy) {
5006 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5007 rgw::IAM::s3PutObject,
5008 rgw_obj(s->bucket, s->object));
5009 if (e == Effect::Allow) {
5010 return 0;
5011 } else if (e == Effect::Deny) {
5012 return -EACCES;
5013 }
5014 }
5015
5016 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
7c673cae 5017 return -EACCES;
31f18b77 5018 }
7c673cae
FG
5019
5020 return 0;
5021}
5022
void RGWInitMultipart::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5027
void RGWInitMultipart::execute()
{
  // Create the multipart-meta object that anchors the upload; retries with a
  // freshly generated upload id until the exclusive create succeeds.
  bufferlist aclbl;
  map<string, bufferlist> attrs;
  rgw_obj obj;

  if (get_params() < 0)
    return;

  if (s->object.empty())
    return;

  policy.encode(aclbl);
  attrs[RGW_ATTR_ACL] = aclbl;

  populate_with_generic_attrs(s, attrs);

  /* select encryption mode */
  op_ret = prepare_encryption(attrs);
  if (op_ret != 0)
    return;

  rgw_get_request_metadata(s->cct, s->info, attrs);

  do {
    char buf[33];
    gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
    upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
    upload_id.append(buf);

    string tmp_obj_name;
    RGWMPObj mp(s->object.name, upload_id);
    tmp_obj_name = mp.get_meta();

    obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
    // the meta object will be indexed with 0 size, keep it in extra data
    obj.set_in_extra_data(true);
    obj.index_hash_source = s->object.name;

    RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
    op_target.set_versioning_disabled(true); /* no versioning for multipart meta */

    RGWRados::Object::Write obj_op(&op_target);

    obj_op.meta.owner = s->owner.get_id();
    obj_op.meta.category = RGW_OBJ_CATEGORY_MULTIMETA;
    // CREATE_EXCL: an EEXIST means the random upload id collided — regenerate.
    obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;

    op_ret = obj_op.write_meta(0, 0, attrs);
  } while (op_ret == -EEXIST);
}
5079
/* Load the attrs of an upload's multipart-meta object; when `policy` is
 * non-null, also decode the stored ACL into it.  Returns ERR_NO_SUCH_UPLOAD
 * when the meta object is missing. */
static int get_multipart_info(RGWRados *store, struct req_state *s,
			      string& meta_oid,
			      RGWAccessControlPolicy *policy,
			      map<string, bufferlist>& attrs)
{
  map<string, bufferlist>::iterator iter;
  bufferlist header;

  rgw_obj obj;
  obj.init_ns(s->bucket, meta_oid, mp_ns);
  obj.set_in_extra_data(true);

  int op_ret = get_obj_attrs(store, s, obj, attrs);
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      return -ERR_NO_SUCH_UPLOAD;
    }
    return op_ret;
  }

  if (policy) {
    for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
      string name = iter->first;
      if (name.compare(RGW_ATTR_ACL) == 0) {
	bufferlist& bl = iter->second;
	bufferlist::iterator bli = bl.begin();
	try {
	  ::decode(*policy, bli);
	} catch (buffer::error& err) {
	  ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
	  return -EIO;
	}
	break;
      }
    }
  }

  return 0;
}
5119
5120int RGWCompleteMultipart::verify_permission()
5121{
31f18b77
FG
5122 if (s->iam_policy) {
5123 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5124 rgw::IAM::s3PutObject,
5125 rgw_obj(s->bucket, s->object));
5126 if (e == Effect::Allow) {
5127 return 0;
5128 } else if (e == Effect::Deny) {
5129 return -EACCES;
5130 }
5131 }
5132
5133 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
7c673cae 5134 return -EACCES;
31f18b77 5135 }
7c673cae
FG
5136
5137 return 0;
5138}
5139
void RGWCompleteMultipart::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5144
void RGWCompleteMultipart::execute()
{
  // Assemble the final object from the uploaded parts: validate the client's
  // part list against what was stored, stitch the per-part manifests (and
  // compression maps) together, compute the multipart etag ("<md5-of-md5s>-N"),
  // write the head object, then delete the multipart-meta object.
  RGWMultiCompleteUpload *parts;
  map<int, string>::iterator iter;
  RGWMultiXMLParser parser;
  string meta_oid;
  map<uint32_t, RGWUploadPartInfo> obj_parts;
  map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
  map<string, bufferlist> attrs;
  off_t ofs = 0;
  MD5 hash;
  char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
  char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
  bufferlist etag_bl;
  rgw_obj meta_obj;
  rgw_obj target_obj;
  RGWMPObj mp;
  RGWObjManifest manifest;
  uint64_t olh_epoch = 0;
  string version_id;

  op_ret = get_params();
  if (op_ret < 0)
    return;
  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return;
  }

  if (!data || !len) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (!parser.init()) {
    op_ret = -EIO;
    return;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
  if (!parts || parts->parts.empty()) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if ((int)parts->parts.size() >
      s->cct->_conf->rgw_multipart_part_upload_limit) {
    op_ret = -ERANGE;
    return;
  }

  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  int total_parts = 0;
  int handled_parts = 0;
  int max_parts = 1000;
  int marker = 0;
  bool truncated;
  RGWCompressionInfo cs_info;
  bool compressed = false;
  uint64_t accounted_size = 0;

  uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;

  list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */

  bool versioned_object = s->bucket_info.versioning_enabled();

  iter = parts->parts.begin();

  meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
  meta_obj.set_in_extra_data(true);
  meta_obj.index_hash_source = s->object.name;

  // The meta object's attrs (ACL, request metadata) become the head attrs.
  op_ret = get_obj_attrs(store, s, meta_obj, attrs);

  if (op_ret < 0) {
    ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
		     << " ret=" << op_ret << dendl;
    return;
  }

  // Walk the stored parts in pages of max_parts, pairing them with the
  // client-supplied list in order.
  do {
    op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
				  marker, obj_parts, &marker, &truncated);
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_UPLOAD;
    }
    if (op_ret < 0)
      return;

    total_parts += obj_parts.size();
    if (!truncated && total_parts != (int)parts->parts.size()) {
      ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts
		       << " expected: " << parts->parts.size() << dendl;
      op_ret = -ERR_INVALID_PART;
      return;
    }

    for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
      // Every part except the last must satisfy the minimum part size.
      uint64_t part_size = obj_iter->second.accounted_size;
      if (handled_parts < (int)parts->parts.size() - 1 &&
          part_size < min_part_size) {
        op_ret = -ERR_TOO_SMALL;
        return;
      }

      // Part numbers and etags must match pairwise between client and store.
      char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
      if (iter->first != (int)obj_iter->first) {
        ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: "
			 << iter->first << " next uploaded: "
			 << obj_iter->first << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }
      string part_etag = rgw_string_unquote(iter->second);
      if (part_etag.compare(obj_iter->second.etag) != 0) {
        ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first
			 << " etag: " << iter->second << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }

      // The final etag hashes the binary md5 of each part, in order.
      hex_to_buf(obj_iter->second.etag.c_str(), petag,
		 CEPH_CRYPTO_MD5_DIGESTSIZE);
      hash.Update((const byte *)petag, sizeof(petag));

      RGWUploadPartInfo& obj_part = obj_iter->second;

      /* update manifest for part */
      string oid = mp.get_part(obj_iter->second.num);
      rgw_obj src_obj;
      src_obj.init_ns(s->bucket, oid, mp_ns);

      if (obj_part.manifest.empty()) {
        ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj="
			 << src_obj << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      } else {
        manifest.append(obj_part.manifest, store);
      }

      // Merge per-part compression block maps, shifting each part's offsets
      // to follow the data accumulated so far; the type must not change
      // mid-upload.
      if (obj_part.cs_info.compression_type != "none") {
        if (compressed && cs_info.compression_type != obj_part.cs_info.compression_type) {
          ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload ("
			   << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
          op_ret = -ERR_INVALID_PART;
          return;
        }
        int64_t new_ofs; // offset in compression data for new part
        if (cs_info.blocks.size() > 0)
          new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
        else
          new_ofs = 0;
        for (const auto& block : obj_part.cs_info.blocks) {
          compression_block cb;
          cb.old_ofs = block.old_ofs + cs_info.orig_size;
          cb.new_ofs = new_ofs;
          cb.len = block.len;
          cs_info.blocks.push_back(cb);
          new_ofs = cb.new_ofs + cb.len;
        }
        if (!compressed)
          cs_info.compression_type = obj_part.cs_info.compression_type;
        cs_info.orig_size += obj_part.cs_info.orig_size;
        compressed = true;
      }

      rgw_obj_index_key remove_key;
      src_obj.key.get_index_key(&remove_key);

      remove_objs.push_back(remove_key);

      ofs += obj_part.size;
      accounted_size += obj_part.accounted_size;
    }
  } while (truncated);
  hash.Final((byte *)final_etag);

  // Multipart etag format: "<hex md5 of concatenated part md5s>-<num parts>".
  buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
  snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
           "-%lld", (long long)parts->parts.size());
  etag = final_etag_str;
  ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl;

  etag_bl.append(final_etag_str, strlen(final_etag_str) + 1);

  attrs[RGW_ATTR_ETAG] = etag_bl;

  if (compressed) {
    // write compression attribute to full object
    bufferlist tmp;
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
  }

  target_obj.init(s->bucket, s->object.name);
  if (versioned_object) {
    store->gen_rand_obj_instance_name(&target_obj);
  }

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  obj_ctx.obj.set_atomic(target_obj);

  // Write the head object: the stitched manifest plus the part entries to
  // drop from the bucket index listing.
  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
  RGWRados::Object::Write obj_op(&op_target);

  obj_op.meta.manifest = &manifest;
  obj_op.meta.remove_objs = &remove_objs;

  obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
  obj_op.meta.owner = s->owner.get_id();
  obj_op.meta.flags = PUT_OBJ_CREATE;
  op_ret = obj_op.write_meta(ofs, accounted_size, attrs);
  if (op_ret < 0)
    return;

  // remove the upload obj; best-effort, failure is only logged
  int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
			    s->bucket_info, meta_obj, 0);
  if (r < 0) {
    ldout(store->ctx(), 0) << "WARNING: failed to remove object " << meta_obj << dendl;
  }
}
5377
5378int RGWAbortMultipart::verify_permission()
5379{
31f18b77
FG
5380 if (s->iam_policy) {
5381 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5382 rgw::IAM::s3AbortMultipartUpload,
5383 rgw_obj(s->bucket, s->object));
5384 if (e == Effect::Allow) {
5385 return 0;
5386 } else if (e == Effect::Deny) {
5387 return -EACCES;
5388 }
5389 }
5390
5391 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
7c673cae 5392 return -EACCES;
31f18b77 5393 }
7c673cae
FG
5394
5395 return 0;
5396}
5397
void RGWAbortMultipart::pre_exec()
{
  // Shared pre-execution hook for bucket/object operations.
  rgw_bucket_object_pre_exec(s);
}
5402
void RGWAbortMultipart::execute()
{
  // Abort an in-progress upload: confirm the meta object exists, then delete
  // the upload's parts and meta via abort_multipart_upload.
  op_ret = -EINVAL;
  string upload_id;
  string meta_oid;
  upload_id = s->info.args.get("uploadId");
  map<string, bufferlist> attrs;
  rgw_obj meta_obj;
  RGWMPObj mp;

  if (upload_id.empty() || s->object.empty())
    return;

  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  // Fails with ERR_NO_SUCH_UPLOAD when the upload does not exist.
  op_ret = get_multipart_info(store, s, meta_oid, NULL, attrs);
  if (op_ret < 0)
    return;

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
}
5426
// Authorize listing the parts of a multipart upload via the object-level
// permission check (IAM policy and ACLs).
int RGWListMultipart::verify_permission()
{
  if (!verify_object_permission(s, rgw::IAM::s3ListMultipartUploadParts))
    return -EACCES;

  return 0;
}
5434
// Standard pre-execution hook: emit the bucket/object pair to the debug log.
void RGWListMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5439
// List the uploaded parts of an in-progress multipart upload.  upload_id,
// max_parts and marker are populated by get_params(); results land in the
// member fields (parts, truncated) consumed by the response formatter.
void RGWListMultipart::execute()
{
  map<string, bufferlist> xattrs;
  string meta_oid;
  RGWMPObj mp;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  // Also loads the upload's ACL policy for the response.
  op_ret = get_multipart_info(store, s, meta_oid, &policy, xattrs);
  if (op_ret < 0)
    return;

  op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
				marker, parts, NULL, &truncated);
}
5460
// Authorize listing the bucket's in-progress multipart uploads.
int RGWListBucketMultiparts::verify_permission()
{
  if (!verify_bucket_permission(s,
				rgw::IAM::s3ListBucketMultiPartUploads))
    return -EACCES;

  return 0;
}
5469
// Standard pre-execution hook: emit the bucket/object pair to the debug log.
void RGWListBucketMultiparts::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5474
// Enumerate in-progress multipart uploads in the bucket.  Listing operates
// on the multipart meta namespace; raw directory entries are converted back
// into upload descriptors via RGWMPObj::from_meta().
void RGWListBucketMultiparts::execute()
{
  vector<rgw_bucket_dir_entry> objs;
  string marker_meta;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  if (s->prot_flags & RGW_REST_SWIFT) {
    // Swift's "path" query argument is a shorthand for prefix=<path>,
    // delimiter=/ and is mutually exclusive with explicit prefix/delimiter.
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
	op_ret = -EINVAL;
	return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }
  // Translate the client-visible marker into its meta-object form.
  marker_meta = marker.get_meta();

  op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
				  max_uploads, &objs, &common_prefixes, &is_truncated);
  if (op_ret < 0) {
    return;
  }

  if (!objs.empty()) {
    vector<rgw_bucket_dir_entry>::iterator iter;
    RGWMultipartUploadEntry entry;
    for (iter = objs.begin(); iter != objs.end(); ++iter) {
      rgw_obj_key key(iter->key);
      // Skip entries that do not parse as multipart meta objects.
      if (!entry.mp.from_meta(key.name))
	continue;
      entry.obj = *iter;
      uploads.push_back(entry);
    }
    // NOTE(review): next_marker is the last successfully parsed entry; if no
    // entry parsed, this is a default-constructed marker — confirm intended.
    next_marker = entry;
  }
}
5517
// Health-check endpoint: report 503 when the configured "disabling path"
// exists on the local filesystem (an operator's kill switch), 200 otherwise.
void RGWGetHealthCheck::execute()
{
  if (!g_conf->rgw_healthcheck_disabling_path.empty() &&
      (::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
    /* Disabling path specified & existent in the filesystem. */
    op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
  } else {
    op_ret = 0; /* 200 OK */
  }
}
5528
// Authorize a multi-object delete.  Only a coarse check happens here: the
// ACL verdict is cached in acl_allowed, and when an IAM policy exists the
// final per-key decision is deferred to execute().
int RGWDeleteMultiObj::verify_permission()
{
  acl_allowed = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
  if (!acl_allowed && !s->iam_policy)
    return -EACCES;

  return 0;
}
5537
// Standard pre-execution hook: emit the bucket/object pair to the debug log.
void RGWDeleteMultiObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5542
// Execute an S3 multi-object delete: parse the XML body, then delete each
// listed key, emitting a partial response per key.  Per-key errors are
// reported inline; op_ret is forced to 0 once the response has begun.
void RGWDeleteMultiObj::execute()
{
  RGWMultiDelDelete *multi_delete;
  vector<rgw_obj_key>::iterator iter;
  RGWMultiDelXMLParser parser;
  int num_processed = 0;
  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);

  op_ret = get_params();
  if (op_ret < 0) {
    goto error;
  }

  if (!data) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    goto error;
  }

  multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
  if (!multi_delete) {
    op_ret = -EINVAL;
    goto error;
  }

  // "Quiet" mode suppresses per-key success entries in the response.
  if (multi_delete->is_quiet())
    quiet = true;

  begin_response();
  if (multi_delete->objects.empty()) {
    goto done;
  }

  // max_to_delete caps how many keys a single request may remove.
  for (iter = multi_delete->objects.begin();
        iter != multi_delete->objects.end() && num_processed < max_to_delete;
        ++iter, num_processed++) {
    rgw_obj obj(bucket, *iter);
    if (s->iam_policy) {
      // Per-key IAM evaluation; a Pass verdict defers to the ACL result
      // cached by verify_permission().
      auto e = s->iam_policy->eval(s->env,
				   *s->auth.identity,
				   iter->instance.empty() ?
				   rgw::IAM::s3DeleteObject :
				   rgw::IAM::s3DeleteObjectVersion,
				   obj);
      if ((e == Effect::Deny) ||
	  (e == Effect::Pass && !acl_allowed)) {
	send_partial_response(*iter, false, "", -EACCES);
	continue;
      }
    }

    obj_ctx->obj.set_atomic(obj);

    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = s->bucket_owner.get_id();
    del_op.params.versioning_status = s->bucket_info.versioning_status();
    del_op.params.obj_owner = s->owner;

    op_ret = del_op.delete_obj();
    // A missing key is not an error for multi-delete (idempotent delete).
    if (op_ret == -ENOENT) {
      op_ret = 0;
    }

    send_partial_response(*iter, del_op.result.delete_marker,
			  del_op.result.version_id, op_ret);
  }

  /*  set the return code to zero, errors at this point will be
  dumped to the response */
  op_ret = 0;

done:
  // will likely segfault if begin_response() has not been called
  end_response();
  free(data);
  return;

error:
  send_status();
  free(data);
  return;

}
5637
// Check whether the requester may delete the given bucket (or an entity
// within it) during a Swift bulk delete.  Reads the bucket ACL and IAM
// policy and reports the bucket owner through the out parameter.
bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
					       map<string, bufferlist>& battrs,
					       ACLOwner& bucket_owner /* out */)
{
  RGWAccessControlPolicy bacl(store->ctx());
  int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
  if (ret < 0) {
    return false;
  }

  auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);

  bucket_owner = bacl.get_owner();

  /* We can use global user_acl because each BulkDelete request is allowed
   * to work on entities from a single account only. */
  // NOTE(review): s3DeleteBucket is the action checked here even though
  // delete_single() also uses this for object deletion — confirm intended.
  return verify_bucket_permission(s, binfo.bucket, s->user_acl.get(),
				  &bacl, policy, rgw::IAM::s3DeleteBucket);
}
5657
// Delete one entry of a bulk-delete request.  A path with an object key
// deletes that object; a bare bucket path deletes (and unlinks) the bucket,
// forwarding the operation to the metadata master when needed.  Failures
// are tallied in num_unfound/failures and reported via the return value.
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
{
  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  RGWBucketInfo binfo;
  map<string, bufferlist> battrs;
  ACLOwner bowner;

  int ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
				   path.bucket_name, binfo, nullptr,
				   &battrs);
  if (ret < 0) {
    goto binfo_fail;
  }

  if (!verify_permission(binfo, battrs, bowner)) {
    ret = -EACCES;
    goto auth_fail;
  }

  if (!path.obj_key.empty()) {
    // Object deletion path.
    rgw_obj obj(binfo.bucket, path.obj_key);
    obj_ctx.obj.set_atomic(obj);

    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = binfo.owner;
    del_op.params.versioning_status = binfo.versioning_status();
    del_op.params.obj_owner = bowner;

    ret = del_op.delete_obj();
    if (ret < 0) {
      goto delop_fail;
    }
  } else {
    // Bucket deletion path: remove the bucket, then unlink it from the
    // owning user; unlink failure is logged but not fatal.
    RGWObjVersionTracker ot;
    ot.read_version = binfo.ep_objv;

    ret = store->delete_bucket(binfo, ot);
    if (0 == ret) {
      ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
			      binfo.bucket.name, false);
      if (ret < 0) {
	ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret
			 << dendl;
      }
    }
    if (ret < 0) {
      goto delop_fail;
    }

    // In multisite deployments the bucket removal must also be applied on
    // the metadata master zone.
    if (!store->is_meta_master()) {
      bufferlist in_data;
      ret = forward_request_to_master(s, &ot.read_version, store, in_data,
				      nullptr);
      if (ret < 0) {
	if (ret == -ENOENT) {
	  /* adjust error, we want to return with NoSuchBucket and not
	   * NoSuchKey */
	  ret = -ERR_NO_SUCH_BUCKET;
	}
	goto delop_fail;
      }
    }
  }

  num_deleted++;
  return true;


binfo_fail:
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl;
    num_unfound++;
  } else {
    ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret
			    << dendl;

    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

auth_fail:
  ldout(store->ctx(), 20) << "wrong auth for " << path << dendl;
  {
    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

delop_fail:
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find entry " << path << dendl;
    num_unfound++;
  } else {
    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;
}
5769
// Delete every path in the chunk.  Per-path failures are recorded inside
// delete_single(); the chunk itself always reports success.
bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
{
  ldout(store->ctx(), 20) << "in delete_chunk" << dendl;
  for (auto path : paths) {
    ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl;
    delete_single(path);
  }

  return true;
}
5780
// No up-front authorization: permissions are verified per entry inside
// Deleter::delete_single().
int RGWBulkDelete::verify_permission()
{
  return 0;
}
5785
// Standard pre-execution hook: emit the bucket/object pair to the debug log.
void RGWBulkDelete::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5790
// Drive a Swift bulk delete: repeatedly pull chunks of paths from the
// request body and hand them to the Deleter until the input is exhausted.
void RGWBulkDelete::execute()
{
  deleter = std::unique_ptr<Deleter>(new Deleter(store, s));

  bool is_truncated = false;
  do {
    list<RGWBulkDelete::acct_path_t> items;

    int ret = get_data(items, &is_truncated);
    if (ret < 0) {
      return;
    }

    // NOTE(review): delete_chunk()'s result is stored in 'ret' while the
    // loop condition tests 'op_ret'; delete_chunk currently always returns
    // true and records failures internally, so this appears harmless —
    // confirm the intent.
    ret = deleter->delete_chunk(items);
  } while (!op_ret && is_truncated);

  return;
}
5809
5810
/* Out-of-line definition of the class-scope constexpr member holding the
 * error codes that terminate a whole bulk upload (needed pre-C++17). */
constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
5812
// Authorize a bulk (TAR) upload: requester must be authenticated, have
// account-level write permission, target its own tenant, and be allowed
// to create buckets at all.  The per-bucket quota is enforced later in
// handle_dir_verify_permission().
int RGWBulkUploadOp::verify_permission()
{
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (! verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
                      << " (user_id.tenant=" << s->user->user_id.tenant
                      << " requested=" << s->bucket_tenant << ")"
                      << dendl;
    return -EACCES;
  }

  // A negative max_buckets means bucket creation is disabled for the user.
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  return 0;
}
5837
// Standard pre-execution hook: emit the bucket/object pair to the debug log.
void RGWBulkUploadOp::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5842
5843boost::optional<std::pair<std::string, rgw_obj_key>>
5844RGWBulkUploadOp::parse_path(const boost::string_ref& path)
5845{
5846 /* We need to skip all slashes at the beginning in order to preserve
5847 * compliance with Swift. */
5848 const size_t start_pos = path.find_first_not_of('/');
5849
5850 if (boost::string_ref::npos != start_pos) {
5851 /* Seperator is the first slash after the leading ones. */
5852 const size_t sep_pos = path.substr(start_pos).find('/');
5853
5854 if (boost::string_ref::npos != sep_pos) {
5855 const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
5856 const auto obj_name = path.substr(sep_pos + 1);
5857
5858 return std::make_pair(bucket_name.to_string(),
5859 rgw_obj_key(obj_name.to_string()));
5860 } else {
5861 /* It's guaranteed here that bucket name is at least one character
5862 * long and is different than slash. */
5863 return std::make_pair(path.substr(start_pos).to_string(),
5864 rgw_obj_key());
5865 }
5866 }
5867
31f18b77 5868 return none;
7c673cae
FG
5869}
5870
5871std::pair<std::string, std::string>
5872RGWBulkUploadOp::handle_upload_path(struct req_state *s)
5873{
5874 std::string bucket_path, file_prefix;
5875 if (! s->init_state.url_bucket.empty()) {
5876 file_prefix = bucket_path = s->init_state.url_bucket + "/";
5877 if (! s->object.empty()) {
5878 std::string& object_name = s->object.name;
5879
5880 /* As rgw_obj_key::empty() already verified emptiness of s->object.name,
5881 * we can safely examine its last element. */
5882 if (object_name.back() == '/') {
5883 file_prefix.append(object_name);
5884 } else {
5885 file_prefix.append(object_name).append("/");
5886 }
5887 }
5888 }
5889 return std::make_pair(bucket_path, file_prefix);
5890}
5891
// Enforce the user's bucket-count quota before creating a bucket for a
// directory entry of the archive.  A max_buckets of 0 means "unlimited"
// here and skips the check entirely.
int RGWBulkUploadOp::handle_dir_verify_permission()
{
  if (s->user->max_buckets > 0) {
    RGWUserBuckets buckets;
    std::string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, std::string(), s->user->max_buckets,
                                   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
5912
// Rewrite the request URIs so a bucket-creation sub-request forwarded to
// the metadata master targets the given bucket.
static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
{
  /* the request of container or object level will contain bucket name.
   * only at account level need to append the bucket name */
  if (info.script_uri.find(bucket_name) != std::string::npos) {
    return;
  }

  ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
  info.script_uri.append("/").append(bucket_name);
  info.request_uri_aws4 = info.request_uri = info.script_uri;
  info.effective_uri = "/" + bucket_name;
}
5926
// Handle a directory entry of the archive: create the corresponding bucket
// (forwarding to the metadata master in multisite setups), tolerating the
// case where the bucket already exists and is owned by the requester.
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
{
  ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl;

  op_ret = handle_dir_verify_permission();
  if (op_ret < 0) {
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object_junk;
  std::tie(bucket_name, object_junk) =  *parse_path(path);

  rgw_raw_obj obj(store->get_zone_params().domain_root,
                  rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
                                  binfo, NULL, &battrs);
  if (op_ret < 0 && op_ret != -ENOENT) {
    return op_ret;
  }
  const bool bucket_exists = (op_ret != -ENOENT);

  if (bucket_exists) {
    // An existing bucket owned by someone else is a name conflict.
    RGWAccessControlPolicy old_policy(s->cct);
    int r = get_bucket_policy_from_attr(s->cct, store, binfo,
                                        battrs, &old_policy);
    if (r >= 0)  {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return op_ret;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket = nullptr;
  uint32_t *pmaster_num_shards = nullptr;
  real_time creation_time;
  obj_version objv, ep_objv, *pobjv = nullptr;

  if (! store->is_meta_master()) {
    // Non-master zone: create the bucket on the metadata master first and
    // adopt the versions/shard count it reports.
    JSONParser jp;
    ceph::bufferlist in_data;
    req_info info = s->info;
    forward_req_info(s->cct, info, bucket_name);
    op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
    if (op_ret < 0) {
      return op_ret;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);

    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver="
                      << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time
                      << dendl;

    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = nullptr;
    pmaster_num_shards = nullptr;
  }


  std::string placement_rule;
  if (bucket_exists) {
    // Re-creating an existing bucket is only tolerated when the placement
    // rule it would get matches the one it already has.
    // NOTE(review): select_bucket_placement()'s return value is not checked
    // before the comparison — confirm that is acceptable.
    std::string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user),
                                            store->get_zonegroup().get_id(),
                                            placement_rule,
                                            &selected_placement_rule,
                                            nullptr);
    if (selected_placement_rule != binfo.placement_rule) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl;
      return op_ret;
    }
  }

  /* Create metadata: ACLs. */
  std::map<std::string, ceph::bufferlist> attrs;
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;

  rgw_bucket bucket;
  bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  bucket.name = bucket_name;


  RGWBucketInfo out_info;
  op_ret = store->create_bucket(*(s->user),
                                bucket,
                                store->get_zonegroup().get_id(),
                                placement_rule, binfo.swift_ver_location,
                                pquota_info, attrs,
                                out_info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below.  this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret
                    << ", bucket=" << bucket << dendl;

  if (op_ret && op_ret != -EEXIST) {
    return op_ret;
  }

  const bool existed = (op_ret == -EEXIST);
  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (out_info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl;
      return op_ret;
    }
    bucket = out_info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
                           out_info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id,
                               bucket.tenant, bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret="
                       << op_ret << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    ldout(s->cct, 20) << "bulk upload: containers already exists"
                      << dendl;
    op_ret = -ERR_BUCKET_EXISTS;
  }

  return op_ret;
}
6086
6087
// Check whether the requester may create the given object during a bulk
// upload.  A bucket IAM policy can allow or deny s3:PutObject outright;
// otherwise the bucket ACL's WRITE permission decides.
bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
						    const rgw_obj& obj,
                                                    std::map<std::string, ceph::bufferlist>& battrs,
                                                    ACLOwner& bucket_owner /* out */)
{
  RGWAccessControlPolicy bacl(store->ctx());
  op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket"
                      << dendl;
    return false;
  }

  auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);

  bucket_owner = bacl.get_owner();
  if (policy) {
    auto e = policy->eval(s->env, *s->auth.identity,
			  rgw::IAM::s3PutObject, obj);
    if (e == Effect::Allow) {
      return true;
    } else if (e == Effect::Deny) {
      return false;
    }
  }

  // Policy passed (or absent): fall back to the bucket ACL.
  return verify_bucket_permission_no_policy(s, s->user_acl.get(),
					    &bacl, RGW_PERM_WRITE);
}
6117
// Handle a regular-file entry of the archive: authorize, check quotas,
// stream the declared number of bytes into the target object (optionally
// compressing), then commit it with ETag/ACL/compression metadata.
int RGWBulkUploadOp::handle_file(const boost::string_ref path,
                                 const size_t size,
                                 AlignedStreamGetter& body)
{

  ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size
                    << dendl;

  RGWPutObjDataProcessor *filter = nullptr;
  boost::optional<RGWPutObj_Compress> compressor;

  if (size > static_cast<const size_t>(s->cct->_conf->rgw_max_put_size)) {
    op_ret = -ERR_TOO_LARGE;
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object;
  std::tie(bucket_name, object) = *parse_path(path);

  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  ACLOwner bowner;
  op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                  bucket_name, binfo, nullptr, &battrs);
  if (op_ret == -ENOENT) {
    // The archive may name a bucket that was never created by a preceding
    // directory entry; log and continue into the permission check.
    ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name
                      << dendl;
  } else if (op_ret < 0) {
    return op_ret;
  }

  if (! handle_file_verify_permission(binfo,
				      rgw_obj(binfo.bucket, object),
				      battrs, bowner)) {
    ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl;
    op_ret = -EACCES;
    return op_ret;
  }

  // Pre-flight quota check against the declared size.
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
                              user_quota, bucket_quota, size);
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  RGWPutObjProcessor_Atomic processor(obj_ctx,
                                      binfo,
                                      binfo.bucket,
                                      object.name,
                                      /* part size */
                                      s->cct->_conf->rgw_obj_stripe_size,
                                      s->req_id,
                                      binfo.versioning_enabled());

  /* No filters by default. */
  filter = &processor;

  op_ret = processor.prepare(store, nullptr);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret="
                      << op_ret << dendl;
    return op_ret;
  }

  // Insert a compression filter when the placement's zone config asks
  // for one; a missing plugin downgrades to plain storage with a warning.
  const auto& compression_type = store->get_zone_params().get_compression_type(
      binfo.placement_rule);
  CompressorRef plugin;
  if (compression_type != "none") {
    plugin = Compressor::create(s->cct, compression_type);
    if (! plugin) {
      ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type "
                       << compression_type << dendl;
    } else {
      compressor.emplace(s->cct, plugin, filter);
      filter = &*compressor;
    }
  }

  /* Upload file content. */
  ssize_t len = 0;
  size_t ofs = 0;
  MD5 hash;
  do {
    ceph::bufferlist data;
    len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);

    ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl;
    if (len < 0) {
      op_ret = len;
      return op_ret;
    } else if (len > 0) {
      hash.Update((const byte *)data.c_str(), data.length());
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
			  << op_ret << dendl;
        return op_ret;
      }

      ofs += len;
    }

  } while (len > 0);

  // The TAR header declared 'size' bytes; anything else is a malformed
  // archive member.
  if (ofs != size) {
    ldout(s->cct, 10) << "bulk upload: real file size different from declared"
                      << dendl;
    op_ret = -EINVAL;
  }

  // Re-check quota after the data has actually been written.
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
			      user_quota, bucket_quota, size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path
                      << dendl;
    return op_ret;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  hash.Final(m);
  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  /* Create metadata: ETAG. */
  std::map<std::string, ceph::bufferlist> attrs;
  std::string etag = calc_md5;
  ceph::bufferlist etag_bl;
  etag_bl.append(etag.c_str(), etag.size() + 1);
  attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));

  /* Create metadata: ACLs. */
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  /* Create metadata: compression info. */
  if (compressor && compressor->is_compressed()) {
    ceph::bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    // NOTE(review): orig_size is taken from s->obj_size rather than this
    // member's 'size'/'ofs' — confirm s->obj_size is maintained for bulk
    // upload sub-requests.
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = std::move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
  }

  /* Complete the transaction. */
  op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs,
                              ceph::real_time() /* delete_at */);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret="
                      << op_ret << dendl;
  }

  return op_ret;
}
6288
// Drive the bulk upload: read the body as a TAR stream, dispatching each
// member to handle_file()/handle_dir().  Unsupported member types are
// skipped; errors in terminal_errors abort the whole upload, other
// per-member failures are collected and reported at the end.
void RGWBulkUploadOp::execute()
{
  ceph::bufferlist buffer(64 * 1024);

  ldout(s->cct, 20) << "bulk upload: start" << dendl;

  /* Create an instance of stream-abstracting class. Having this indirection
   * allows for easy introduction of decompressors like gzip and bzip2. */
  auto stream = create_stream();
  if (! stream) {
    return;
  }

  /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See: 
   * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
  std::string bucket_path, file_prefix;
  std::tie(bucket_path, file_prefix) = handle_upload_path(s);

  auto status = rgw::tar::StatusIndicator::create();
  do {
    // TAR archives are a sequence of fixed-size blocks.
    op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
    if (op_ret < 0) {
      ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl;
      return;
    }

    /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
     * must be tracked to detect out end-of-archive. It occurs when both of
     * them are empty (zeroed). Tracing this particular inter-block dependency
     * is responsibility of the rgw::tar::StatusIndicator class. */
    boost::optional<rgw::tar::HeaderView> header;
    std::tie(status, header) = rgw::tar::interpret_block(status, buffer);

    if (! status.empty() && header) {
      /* This specific block isn't empty (entirely zeroed), so we can parse
       * it as a TAR header and dispatch. At the moment we do support only
       * regular files and directories. Everything else (symlinks, devices)
       * will be ignored but won't cease the whole upload. */
      switch (header->get_filetype()) {
        case rgw::tar::FileType::NORMAL_FILE: {
          ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl;

          boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
                            file_prefix + header->get_filename().to_string();
          auto body = AlignedStreamGetter(0, header->get_filesize(),
                                          rgw::tar::BLOCK_SIZE, *stream);
          op_ret = handle_file(filename,
                               header->get_filesize(),
                               body);
          if (! op_ret) {
            /* Only regular files counts. */
            num_created++;
          } else {
            failures.emplace_back(op_ret, filename.to_string());
          }
          break;
        }
        case rgw::tar::FileType::DIRECTORY: {
          ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl;

          boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
          op_ret = handle_dir(dirname);
          if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
            failures.emplace_back(op_ret, dirname.to_string());
          }
          break;
        }
        default: {
          /* Not recognized. Skip. */
          op_ret = 0;
          break;
        }
      }

      /* In case of any problems with sub-request authorization Swift simply
       * terminates whole upload immediately. */
      if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
                                     terminal_errors)) {
        ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret
                         << dendl;
        break;
      }
    } else {
      ldout(s->cct, 2) << "bulk upload: an empty block" << dendl;
      op_ret = 0;
    }

    buffer.clear();
  } while (! status.eof());

  return;
}
6381
6382RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
6383{
6384 const size_t aligned_legnth = length + (-length % alignment);
6385 ceph::bufferlist junk;
6386
6387 DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
6388}
6389
6390ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
6391 ceph::bufferlist& dst)
6392{
6393 const size_t max_to_read = std::min(want, length - position);
6394 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
6395 if (len > 0) {
6396 position += len;
6397 }
6398 return len;
6399}
6400
6401ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
6402 ceph::bufferlist& dst)
6403{
6404 const auto len = DecoratedStreamGetter::get_exactly(want, dst);
6405 if (len > 0) {
6406 position += len;
6407 }
6408 return len;
6409}
6410
// Authorize a set-attrs request with the plain ACL checks.
int RGWSetAttrs::verify_permission()
{
  // This looks to be part of the RGW-NFS machinery and has no S3 or
  // Swift equivalent.
  bool perm;
  if (!s->object.empty()) {
    perm = verify_object_permission_no_policy(s, RGW_PERM_WRITE);
  } else {
    perm = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
  }
  if (!perm)
    return -EACCES;

  return 0;
}
6426
// Standard pre-execution hook: emit the bucket/object pair to the debug log.
void RGWSetAttrs::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6431
// Apply the requested attributes either to the object (when one is named)
// or to the bucket's attribute set.
void RGWSetAttrs::execute()
{
  op_ret = get_params();
  if (op_ret < 0)
    return;

  rgw_obj obj(s->bucket, s->object);

  if (!s->object.empty()) {
    store->set_atomic(s->obj_ctx, obj);
    op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr);
  } else {
    // Merge the new attrs into the cached bucket attrs before persisting.
    for (auto& iter : attrs) {
      s->bucket_attrs[iter.first] = std::move(iter.second);
    }
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs,
				  &s->bucket_info.objv_tracker);
  }
}
6451
// Standard pre-execution hook: emit the bucket/object pair to the debug log.
void RGWGetObjLayout::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6456
6457void RGWGetObjLayout::execute()
6458{
6459 rgw_obj obj(s->bucket, s->object);
6460 RGWRados::Object target(store,
6461 s->bucket_info,
6462 *static_cast<RGWObjectCtx *>(s->obj_ctx),
6463 rgw_obj(s->bucket, s->object));
6464 RGWRados::Object::Read stat_op(&target);
6465
6466 op_ret = stat_op.prepare();
6467 if (op_ret < 0) {
6468 return;
6469 }
6470
6471 head_obj = stat_op.state.head_obj;
6472
6473 op_ret = target.get_manifest(&manifest);
6474}
6475
6476
31f18b77
FG
// Only the bucket owner may configure metadata search on the bucket.
int RGWConfigBucketMetaSearch::verify_permission()
{
  if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
    return -EACCES;
  }

  return 0;
}
6485
// Standard pre-execution hook: emit the bucket/object pair to the debug log.
void RGWConfigBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6490
// Store the requested metadata-search configuration in the bucket's
// instance info.
void RGWConfigBucketMetaSearch::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    ldout(s->cct, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
    return;
  }

  s->bucket_info.mdsearch_config = mdsearch_config;

  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }
}
6507
6508int RGWGetBucketMetaSearch::verify_permission()
6509{
6510 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6511 return -EACCES;
6512 }
6513
6514 return 0;
6515}
6516
6517void RGWGetBucketMetaSearch::pre_exec()
6518{
6519 rgw_bucket_object_pre_exec(s);
6520}
6521
6522int RGWDelBucketMetaSearch::verify_permission()
6523{
6524 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6525 return -EACCES;
6526 }
6527
6528 return 0;
6529}
6530
6531void RGWDelBucketMetaSearch::pre_exec()
6532{
6533 rgw_bucket_object_pre_exec(s);
6534}
6535
6536void RGWDelBucketMetaSearch::execute()
6537{
6538 s->bucket_info.mdsearch_config.clear();
6539
6540 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6541 if (op_ret < 0) {
6542 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6543 return;
6544 }
6545}
6546
6547
7c673cae
FG
6548RGWHandler::~RGWHandler()
6549{
6550}
6551
6552int RGWHandler::init(RGWRados *_store,
6553 struct req_state *_s,
6554 rgw::io::BasicClient *cio)
6555{
6556 store = _store;
6557 s = _s;
6558
6559 return 0;
6560}
6561
6562int RGWHandler::do_init_permissions()
6563{
6564 int ret = rgw_build_bucket_policies(store, s);
31f18b77 6565 s->env = rgw_build_iam_environment(store, s);
7c673cae
FG
6566
6567 if (ret < 0) {
6568 ldout(s->cct, 10) << "read_permissions on " << s->bucket << " ret=" << ret << dendl;
6569 if (ret == -ENODATA)
6570 ret = -EACCES;
6571 }
6572
6573 return ret;
6574}
6575
6576int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
6577{
6578 if (only_bucket) {
6579 /* already read bucket info */
6580 return 0;
6581 }
6582 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
6583
6584 if (ret < 0) {
6585 ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":"
6586 << s->object << " only_bucket=" << only_bucket
6587 << " ret=" << ret << dendl;
6588 if (ret == -ENODATA)
6589 ret = -EACCES;
6590 }
6591
6592 return ret;
6593}
6594
6595int RGWOp::error_handler(int err_no, string *error_content) {
6596 return dialect_handler->error_handler(err_no, error_content);
6597}
6598
6599int RGWHandler::error_handler(int err_no, string *error_content) {
6600 // This is the do-nothing error handler
6601 return err_no;
6602}
31f18b77
FG
6603
6604
6605void RGWPutBucketPolicy::send_response()
6606{
6607 if (op_ret) {
6608 set_req_state_err(s, op_ret);
6609 }
6610 dump_errno(s);
6611 end_header(s);
6612}
6613
6614int RGWPutBucketPolicy::verify_permission()
6615{
6616 if (!verify_bucket_permission(s, rgw::IAM::s3PutBucketPolicy)) {
6617 return -EACCES;
6618 }
6619
6620 return 0;
6621}
6622
6623int RGWPutBucketPolicy::get_params()
6624{
6625 const auto max_size = s->cct->_conf->rgw_max_put_param_size;
6626 // At some point when I have more time I want to make a version of
6627 // rgw_rest_read_all_input that doesn't use malloc.
6628 op_ret = rgw_rest_read_all_input(s, &data, &len, max_size, false);
6629 // And throws exceptions.
6630 return op_ret;
6631}
6632
6633void RGWPutBucketPolicy::execute()
6634{
6635 op_ret = get_params();
6636 if (op_ret < 0) {
6637 return;
6638 }
6639
6640 bufferlist in_data = bufferlist::static_from_mem(data, len);
6641
6642 if (!store->is_meta_master()) {
6643 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
6644 if (op_ret < 0) {
6645 ldout(s->cct, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
6646 return;
6647 }
6648 }
6649
6650 try {
6651 Policy p(s->cct, s->bucket_tenant, in_data);
6652 auto attrs = s->bucket_attrs;
6653 attrs[RGW_ATTR_IAM_POLICY].clear();
6654 attrs[RGW_ATTR_IAM_POLICY].append(p.text);
6655 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
6656 &s->bucket_info.objv_tracker);
6657 if (op_ret == -ECANCELED) {
6658 op_ret = 0; /* lost a race, but it's ok because policies are immutable */
6659 }
6660 } catch (rgw::IAM::PolicyParseException& e) {
6661 ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
6662 op_ret = -EINVAL;
6663 }
6664}
6665
6666void RGWGetBucketPolicy::send_response()
6667{
6668 if (op_ret) {
6669 set_req_state_err(s, op_ret);
6670 }
6671 dump_errno(s);
6672 end_header(s, this, "application/json");
6673 dump_body(s, policy);
6674}
6675
6676int RGWGetBucketPolicy::verify_permission()
6677{
6678 if (!verify_bucket_permission(s, rgw::IAM::s3GetBucketPolicy)) {
6679 return -EACCES;
6680 }
6681
6682 return 0;
6683}
6684
6685void RGWGetBucketPolicy::execute()
6686{
6687 auto attrs = s->bucket_attrs;
6688 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
6689 if (aiter == attrs.end()) {
6690 ldout(s->cct, 0) << __func__ << " can't find bucket IAM POLICY attr"
6691 << " bucket_name = " << s->bucket_name << dendl;
6692 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6693 s->err.message = "The bucket policy does not exist";
6694 return;
6695 } else {
6696 policy = attrs[RGW_ATTR_IAM_POLICY];
6697
6698 if (policy.length() == 0) {
6699 ldout(s->cct, 10) << "The bucket policy does not exist, bucket: " << s->bucket_name << dendl;
6700 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6701 s->err.message = "The bucket policy does not exist";
6702 return;
6703 }
6704 }
6705}
6706
6707void RGWDeleteBucketPolicy::send_response()
6708{
6709 if (op_ret) {
6710 set_req_state_err(s, op_ret);
6711 }
6712 dump_errno(s);
6713 end_header(s);
6714}
6715
6716int RGWDeleteBucketPolicy::verify_permission()
6717{
6718 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucketPolicy)) {
6719 return -EACCES;
6720 }
6721
6722 return 0;
6723}
6724
6725void RGWDeleteBucketPolicy::execute()
6726{
6727 auto attrs = s->bucket_attrs;
6728 attrs.erase(RGW_ATTR_IAM_POLICY);
6729 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
6730 &s->bucket_info.objv_tracker);
6731 if (op_ret == -ECANCELED) {
6732 op_ret = 0; /* lost a race, but it's ok because policies are immutable */
6733 }
6734}