// Source provenance: ceph 12.2.12, ceph/src/rgw/rgw_op.cc
// (imported via git.proxmox.com mirror of ceph.git)
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <system_error>
7 #include <unistd.h>
8
9 #include <sstream>
10
11 #include <boost/algorithm/string/predicate.hpp>
12 #include <boost/bind.hpp>
13 #include <boost/optional.hpp>
14 #include <boost/utility/in_place_factory.hpp>
15 #include <boost/utility/string_view.hpp>
16
17 #include "common/Clock.h"
18 #include "common/armor.h"
19 #include "common/backport14.h"
20 #include "common/errno.h"
21 #include "common/mime.h"
22 #include "common/utf8.h"
23 #include "common/ceph_json.h"
24
25 #include "rgw_rados.h"
26 #include "rgw_op.h"
27 #include "rgw_rest.h"
28 #include "rgw_acl.h"
29 #include "rgw_acl_s3.h"
30 #include "rgw_acl_swift.h"
31 #include "rgw_user.h"
32 #include "rgw_bucket.h"
33 #include "rgw_log.h"
34 #include "rgw_multi.h"
35 #include "rgw_multi_del.h"
36 #include "rgw_cors.h"
37 #include "rgw_cors_s3.h"
38 #include "rgw_rest_conn.h"
39 #include "rgw_rest_s3.h"
40 #include "rgw_tar.h"
41 #include "rgw_client_io.h"
42 #include "rgw_compression.h"
43 #include "rgw_role.h"
44 #include "rgw_tag_s3.h"
45 #include "cls/lock/cls_lock_client.h"
46 #include "cls/rgw/cls_rgw_client.h"
47
48
49 #include "include/assert.h"
50
51 #include "compressor/Compressor.h"
52
53 #include "rgw_acl_swift.h"
54
55 #define dout_context g_ceph_context
56 #define dout_subsys ceph_subsys_rgw
57
58 using namespace std;
59 using namespace librados;
60 using ceph::crypto::MD5;
61 using boost::optional;
62 using boost::none;
63
64 using rgw::IAM::ARN;
65 using rgw::IAM::Effect;
66 using rgw::IAM::Policy;
67
68 using rgw::IAM::Policy;
69
70 static string mp_ns = RGW_OBJ_NS_MULTIPART;
71 static string shadow_ns = RGW_OBJ_NS_SHADOW;
72
73 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
74 static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
75 bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
76
77 static MultipartMetaFilter mp_filter;
78
/*
 * Parse the HTTP Range header (member range_str) into the member offsets
 * `ofs`/`end` and the flags `partial_content`/`range_parsed`.
 * Returns 0 when the header was consumed (including the case where it is
 * silently treated as absent), and -ERANGE for a malformed range — unless
 * rgw_ignore_get_invalid_range is configured, in which case the bad range
 * is discarded and 0 is returned.
 */
int RGWGetObj::parse_range(void)
{
  int r = -ERANGE;
  string rs(range_str);
  string ofs_str;
  string end_str;

  ignore_invalid_range = s->cct->_conf->rgw_ignore_get_invalid_range;
  partial_content = false;

  size_t pos = rs.find("bytes=");
  if (pos == string::npos) {
    /* No literal "bytes=" found: tolerate variants with whitespace or
     * different case, e.g. "Bytes = 0-99". */
    pos = 0;
    while (isspace(rs[pos]))
      pos++;
    int end = pos;
    while (isalpha(rs[end]))
      end++;
    /* NOTE(review): comparison starts at rs.c_str(), not at `pos`, so a
     * unit preceded by whitespace fails the match and the header is
     * treated as absent (return 0) — confirm this is intended. */
    if (strncasecmp(rs.c_str(), "bytes", end - pos) != 0)
      return 0;
    while (isspace(rs[end]))
      end++;
    if (rs[end] != '=')
      return 0;
    rs = rs.substr(end + 1);
  } else {
    rs = rs.substr(pos + 6); /* size of("bytes=") */
  }
  /* A byte-range-spec must contain '-'; otherwise it is malformed. */
  pos = rs.find('-');
  if (pos == string::npos)
    goto done;

  partial_content = true;

  ofs_str = rs.substr(0, pos);
  end_str = rs.substr(pos + 1);
  if (end_str.length()) {
    end = atoll(end_str.c_str());
    if (end < 0)
      goto done;
  }

  if (ofs_str.length()) {
    ofs = atoll(ofs_str.c_str());
  } else { // RFC2616 suffix-byte-range-spec
    /* "bytes=-N": negative ofs encodes "last N bytes"; resolved later
     * against the actual object size. */
    ofs = -end;
    end = -1;
  }

  /* first-byte-pos after last-byte-pos is unsatisfiable */
  if (end >= 0 && end < ofs)
    goto done;

  range_parsed = true;
  return 0;

done:
  if (ignore_invalid_range) {
    /* Configured to serve the whole object instead of failing. */
    partial_content = false;
    ofs = 0;
    end = -1;
    range_parsed = false; // allow retry
    r = 0;
  }

  return r;
}
145
146 static int decode_policy(CephContext *cct,
147 bufferlist& bl,
148 RGWAccessControlPolicy *policy)
149 {
150 bufferlist::iterator iter = bl.begin();
151 try {
152 policy->decode(iter);
153 } catch (buffer::error& err) {
154 ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
155 return -EIO;
156 }
157 if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
158 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
159 ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
160 s3policy->to_xml(*_dout);
161 *_dout << dendl;
162 }
163 return 0;
164 }
165
166
167 static int get_user_policy_from_attr(CephContext * const cct,
168 RGWRados * const store,
169 map<string, bufferlist>& attrs,
170 RGWAccessControlPolicy& policy /* out */)
171 {
172 auto aiter = attrs.find(RGW_ATTR_ACL);
173 if (aiter != attrs.end()) {
174 int ret = decode_policy(cct, aiter->second, &policy);
175 if (ret < 0) {
176 return ret;
177 }
178 } else {
179 return -ENOENT;
180 }
181
182 return 0;
183 }
184
185 static int get_bucket_instance_policy_from_attr(CephContext *cct,
186 RGWRados *store,
187 RGWBucketInfo& bucket_info,
188 map<string, bufferlist>& bucket_attrs,
189 RGWAccessControlPolicy *policy,
190 rgw_raw_obj& obj)
191 {
192 map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
193
194 if (aiter != bucket_attrs.end()) {
195 int ret = decode_policy(cct, aiter->second, policy);
196 if (ret < 0)
197 return ret;
198 } else {
199 ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
200 RGWUserInfo uinfo;
201 /* object exists, but policy is broken */
202 int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
203 if (r < 0)
204 return r;
205
206 policy->create_default(bucket_info.owner, uinfo.display_name);
207 }
208 return 0;
209 }
210
/*
 * Read the ACL xattr of `obj` into `policy`.
 * - On a readable ACL attr: decode it (0 or -EIO).
 * - On -ENODATA (object exists but the ACL attr is missing/broken):
 *   synthesize a default policy for bucket_info.owner; the returned value
 *   is then the (non-negative) result of the owner lookup.
 * - Any other error from the attr read (e.g. -ENOENT for a missing
 *   object) is passed through unchanged so callers can distinguish it.
 */
static int get_obj_policy_from_attr(CephContext *cct,
                                    RGWRados *store,
                                    RGWObjectCtx& obj_ctx,
                                    RGWBucketInfo& bucket_info,
                                    map<string, bufferlist>& bucket_attrs,
                                    RGWAccessControlPolicy *policy,
                                    rgw_obj& obj)
{
  bufferlist bl;
  int ret = 0;

  RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
  RGWRados::Object::Read rop(&op_target);

  ret = rop.get_attr(RGW_ATTR_ACL, bl);
  if (ret >= 0) {
    ret = decode_policy(cct, bl, policy);
    if (ret < 0)
      return ret;
  } else if (ret == -ENODATA) {
    /* object exists, but policy is broken */
    ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
    RGWUserInfo uinfo;
    ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
    if (ret < 0)
      return ret;

    policy->create_default(bucket_info.owner, uinfo.display_name);
  }
  /* note: other negative get_attr results fall through here unchanged */
  return ret;
}
242
243
244 /**
245 * Get the AccessControlPolicy for an object off of disk.
246 * policy: must point to a valid RGWACL, and will be filled upon return.
247 * bucket: name of the bucket containing the object.
248 * object: name of the object to get the ACL for.
249 * Returns: 0 on success, -ERR# otherwise.
250 */
251 static int get_bucket_policy_from_attr(CephContext *cct,
252 RGWRados *store,
253 RGWBucketInfo& bucket_info,
254 map<string, bufferlist>& bucket_attrs,
255 RGWAccessControlPolicy *policy)
256 {
257 rgw_raw_obj instance_obj;
258 store->get_bucket_instance_obj(bucket_info.bucket, instance_obj);
259 return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs,
260 policy, instance_obj);
261 }
262
263 static optional<Policy> get_iam_policy_from_attr(CephContext* cct,
264 RGWRados* store,
265 map<string, bufferlist>& attrs,
266 const string& tenant) {
267 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
268 if (i != attrs.end()) {
269 return Policy(cct, tenant, i->second);
270 } else {
271 return none;
272 }
273 }
274
275 static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
276 {
277 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
278 RGWRados::Object::Read read_op(&op_target);
279
280 read_op.params.attrs = &attrs;
281
282 return read_op.prepare();
283 }
284
285 static int modify_obj_attr(RGWRados *store, struct req_state *s, rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
286 {
287 map<string, bufferlist> attrs;
288 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
289 RGWRados::Object::Read read_op(&op_target);
290
291 read_op.params.attrs = &attrs;
292
293 int r = read_op.prepare();
294 if (r < 0) {
295 return r;
296 }
297 store->set_atomic(s->obj_ctx, read_op.state.obj);
298 attrs[attr_name] = attr_val;
299 return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL);
300 }
301
302 static int get_system_obj_attrs(RGWRados *store, struct req_state *s, rgw_raw_obj& obj, map<string, bufferlist>& attrs,
303 uint64_t *obj_size, RGWObjVersionTracker *objv_tracker)
304 {
305 RGWRados::SystemObject src(store, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
306 RGWRados::SystemObject::Read rop(&src);
307
308 rop.stat_params.attrs = &attrs;
309 rop.stat_params.obj_size = obj_size;
310
311 int ret = rop.stat(objv_tracker);
312 return ret;
313 }
314
315 static int read_bucket_policy(RGWRados *store,
316 struct req_state *s,
317 RGWBucketInfo& bucket_info,
318 map<string, bufferlist>& bucket_attrs,
319 RGWAccessControlPolicy *policy,
320 rgw_bucket& bucket)
321 {
322 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
323 ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
324 return -ERR_USER_SUSPENDED;
325 }
326
327 if (bucket.name.empty()) {
328 return 0;
329 }
330
331 int ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
332 if (ret == -ENOENT) {
333 ret = -ERR_NO_SUCH_BUCKET;
334 }
335
336 return ret;
337 }
338
/*
 * Read the object ACL into `acl` and the bucket's IAM policy into `policy`
 * for `object` in `bucket`.  For multipart uploads (uploadId present) the
 * ACL is read from the upload's meta object instead of the target object.
 * When the object does not exist, the bucket ACL decides whether the caller
 * gets -ENOENT (may list) or -EACCES (may not), so we don't leak existence.
 */
static int read_obj_policy(RGWRados *store,
                           struct req_state *s,
                           RGWBucketInfo& bucket_info,
                           map<string, bufferlist>& bucket_attrs,
                           RGWAccessControlPolicy* acl,
                           optional<Policy>& policy,
                           rgw_bucket& bucket,
                           rgw_obj_key& object)
{
  string upload_id;
  upload_id = s->info.args.get("uploadId");
  rgw_obj obj;

  if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
    ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
    return -ERR_USER_SUSPENDED;
  }

  if (!upload_id.empty()) {
    /* multipart upload: ACL lives on the upload meta object */
    RGWMPObj mp(object.name, upload_id);
    string oid = mp.get_meta();
    obj.init_ns(bucket, oid, mp_ns);
    obj.set_in_extra_data(true);
  } else {
    obj = rgw_obj(bucket, object);
  }
  /* bucket-level IAM policy applies regardless of which object we read */
  policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
                                     bucket_info, bucket_attrs, acl, obj);
  if (ret == -ENOENT) {
    /* object does not exist checking the bucket's ACL to make sure
       that we send a proper error code */
    RGWAccessControlPolicy bucket_policy(s->cct);
    ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
    if (ret < 0) {
      return ret;
    }

    /* owner, admins, or anyone with bucket READ may learn the object is
     * absent; everyone else gets a permission error instead */
    const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
    if (bucket_owner.compare(s->user->user_id) != 0 &&
        ! s->auth.identity->is_admin_of(bucket_owner) &&
        ! bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                          RGW_PERM_READ)) {
      ret = -EACCES;
    } else {
      ret = -ENOENT;
    }
  }

  return ret;
}
393
394 /**
395 * Get the AccessControlPolicy for an user, bucket or object off of disk.
396 * s: The req_state to draw information from.
397 * only_bucket: If true, reads the user and bucket ACLs rather than the object ACL.
398 * Returns: 0 on success, -ERR# otherwise.
399 */
400 int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
401 {
402 int ret = 0;
403 rgw_obj_key obj;
404 RGWUserInfo bucket_owner_info;
405 RGWObjectCtx obj_ctx(store);
406
407 string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
408 if (!bi.empty()) {
409 ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id);
410 if (ret < 0) {
411 return ret;
412 }
413 }
414
415 if(s->dialect.compare("s3") == 0) {
416 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_S3>(s->cct);
417 } else if(s->dialect.compare("swift") == 0) {
418 /* We aren't allocating the account policy for those operations using
419 * the Swift's infrastructure that don't really need req_state::user.
420 * Typical example here is the implementation of /info. */
421 if (!s->user->user_id.empty()) {
422 s->user_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
423 }
424 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
425 } else {
426 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
427 }
428
429 /* check if copy source is within the current domain */
430 if (!s->src_bucket_name.empty()) {
431 RGWBucketInfo source_info;
432
433 if (s->bucket_instance_id.empty()) {
434 ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL);
435 } else {
436 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL);
437 }
438 if (ret == 0) {
439 string& zonegroup = source_info.zonegroup;
440 s->local_source = store->get_zonegroup().equals(zonegroup);
441 }
442 }
443
444 struct {
445 rgw_user uid;
446 std::string display_name;
447 } acct_acl_user = {
448 s->user->user_id,
449 s->user->display_name,
450 };
451
452 if (!s->bucket_name.empty()) {
453 s->bucket_exists = true;
454 if (s->bucket_instance_id.empty()) {
455 ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, s->bucket_info, NULL, &s->bucket_attrs);
456 } else {
457 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, s->bucket_info, NULL, &s->bucket_attrs);
458 }
459 if (ret < 0) {
460 if (ret != -ENOENT) {
461 string bucket_log;
462 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
463 ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl;
464 return ret;
465 }
466 s->bucket_exists = false;
467 }
468 s->bucket = s->bucket_info.bucket;
469
470 if (s->bucket_exists) {
471 ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs,
472 s->bucket_acl.get(), s->bucket);
473 acct_acl_user = {
474 s->bucket_info.owner,
475 s->bucket_acl->get_owner().get_display_name(),
476 };
477 } else {
478 s->bucket_acl->create_default(s->user->user_id, s->user->display_name);
479 ret = -ERR_NO_SUCH_BUCKET;
480 }
481
482 s->bucket_owner = s->bucket_acl->get_owner();
483
484 RGWZoneGroup zonegroup;
485 int r = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
486 if (!r) {
487 if (!zonegroup.endpoints.empty()) {
488 s->zonegroup_endpoint = zonegroup.endpoints.front();
489 } else {
490 // use zonegroup's master zone endpoints
491 auto z = zonegroup.zones.find(zonegroup.master_zone);
492 if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
493 s->zonegroup_endpoint = z->second.endpoints.front();
494 }
495 }
496 s->zonegroup_name = zonegroup.get_name();
497 }
498 if (r < 0 && ret == 0) {
499 ret = r;
500 }
501
502 if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) {
503 ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl;
504 /* we now need to make sure that the operation actually requires copy source, that is
505 * it's a copy operation
506 */
507 if (store->get_zonegroup().is_master_zonegroup() && s->system_request) {
508 /*If this is the master, don't redirect*/
509 } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
510 /* If op is get bucket location, don't redirect */
511 } else if (!s->local_source ||
512 (s->op != OP_PUT && s->op != OP_COPY) ||
513 s->object.empty()) {
514 return -ERR_PERMANENT_REDIRECT;
515 }
516 }
517 }
518
519 /* handle user ACL only for those APIs which support it */
520 if (s->user_acl) {
521 map<string, bufferlist> uattrs;
522
523 ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs);
524 if (!ret) {
525 ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
526 }
527 if (-ENOENT == ret) {
528 /* In already existing clusters users won't have ACL. In such case
529 * assuming that only account owner has the rights seems to be
530 * reasonable. That allows to have only one verification logic.
531 * NOTE: there is small compatibility kludge for global, empty tenant:
532 * 1. if we try to reach an existing bucket, its owner is considered
533 * as account owner.
534 * 2. otherwise account owner is identity stored in s->user->user_id. */
535 s->user_acl->create_default(acct_acl_user.uid,
536 acct_acl_user.display_name);
537 ret = 0;
538 } else {
539 ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id="
540 << s->user->user_id
541 << ", ret="
542 << ret
543 << ")" << dendl;
544 return ret;
545 }
546 }
547
548 try {
549 s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
550 s->bucket_tenant);
551 } catch (const std::exception& e) {
552 // Really this is a can't happen condition. We parse the policy
553 // when it's given to us, so perhaps we should abort or otherwise
554 // raise bloody murder.
555 lderr(s->cct) << "Error reading IAM Policy: " << e.what() << dendl;
556 ret = -EACCES;
557 }
558
559 return ret;
560 }
561
562 /**
563 * Get the AccessControlPolicy for a bucket or object off of disk.
564 * s: The req_state to draw information from.
565 * only_bucket: If true, reads the bucket ACL rather than the object ACL.
566 * Returns: 0 on success, -ERR# otherwise.
567 */
568 int rgw_build_object_policies(RGWRados *store, struct req_state *s,
569 bool prefetch_data)
570 {
571 int ret = 0;
572
573 if (!s->object.empty()) {
574 if (!s->bucket_exists) {
575 return -ERR_NO_SUCH_BUCKET;
576 }
577 s->object_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
578
579 rgw_obj obj(s->bucket, s->object);
580
581 store->set_atomic(s->obj_ctx, obj);
582 if (prefetch_data) {
583 store->set_prefetch_data(s->obj_ctx, obj);
584 }
585 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
586 s->object_acl.get(), s->iam_policy, s->bucket,
587 s->object);
588 }
589
590 return ret;
591 }
592
593 rgw::IAM::Environment rgw_build_iam_environment(RGWRados* store,
594 struct req_state* s)
595 {
596 rgw::IAM::Environment e;
597 const auto& m = s->info.env->get_map();
598 auto t = ceph::real_clock::now();
599 e.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t)));
600 e.emplace("aws:EpochTime", ceph::to_iso_8601(t));
601 // TODO: This is fine for now, but once we have STS we'll need to
602 // look and see. Also this won't work with the IdentityApplier
603 // model, since we need to know the actual credential.
604 e.emplace("aws:PrincipalType", "User");
605
606 auto i = m.find("HTTP_REFERER");
607 if (i != m.end()) {
608 e.emplace("aws:Referer", i->second);
609 }
610
611 if (rgw_transport_is_secure(s->cct, *s->info.env)) {
612 e.emplace("aws:SecureTransport", "true");
613 }
614
615 const auto remote_addr_param = s->cct->_conf->rgw_remote_addr_param;
616 if (remote_addr_param.length()) {
617 i = m.find(remote_addr_param);
618 } else {
619 i = m.find("REMOTE_ADDR");
620 }
621 if (i != m.end()) {
622 const string* ip = &(i->second);
623 string temp;
624 if (remote_addr_param == "HTTP_X_FORWARDED_FOR") {
625 const auto comma = ip->find(',');
626 if (comma != string::npos) {
627 temp.assign(*ip, 0, comma);
628 ip = &temp;
629 }
630 }
631 e.emplace("aws:SourceIp", *ip);
632 }
633
634 i = m.find("HTTP_USER_AGENT"); {
635 if (i != m.end())
636 e.emplace("aws:UserAgent", i->second);
637 }
638
639 if (s->user) {
640 // What to do about aws::userid? One can have multiple access
641 // keys so that isn't really suitable. Do we have a durable
642 // identifier that can persist through name changes?
643 e.emplace("aws:username", s->user->user_id.id);
644 }
645 return e;
646 }
647
648 void rgw_bucket_object_pre_exec(struct req_state *s)
649 {
650 if (s->expect_cont)
651 dump_continue(s);
652
653 dump_bucket_from_state(s);
654 }
655
656 // So! Now and then when we try to update bucket information, the
657 // bucket has changed during the course of the operation. (Or we have
658 // a cache consistency problem that Watch/Notify isn't ruling out
659 // completely.)
660 //
661 // When this happens, we need to update the bucket info and try
662 // again. We have, however, to try the right *part* again. We can't
663 // simply re-send, since that will obliterate the previous update.
664 //
665 // Thus, callers of this function should include everything that
666 // merges information to be changed into the bucket information as
667 // well as the call to set it.
668 //
669 // The called function must return an integer, negative on error. In
670 // general, they should just return op_ret.
671 namespace {
672 template<typename F>
673 int retry_raced_bucket_write(RGWRados* g, req_state* s, const F& f) {
674 auto r = f();
675 for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) {
676 r = g->try_refresh_bucket_info(s->bucket_info, nullptr,
677 &s->bucket_attrs);
678 if (r >= 0) {
679 r = f();
680 }
681 }
682 return r;
683 }
684 }
685
686
687 int RGWGetObj::verify_permission()
688 {
689 obj = rgw_obj(s->bucket, s->object);
690 store->set_atomic(s->obj_ctx, obj);
691 if (get_data) {
692 store->set_prefetch_data(s->obj_ctx, obj);
693 }
694
695 if (torrent.get_flag()) {
696 if (obj.key.instance.empty()) {
697 action = rgw::IAM::s3GetObjectTorrent;
698 } else {
699 action = rgw::IAM::s3GetObjectVersionTorrent;
700 }
701 } else {
702 if (obj.key.instance.empty()) {
703 action = rgw::IAM::s3GetObject;
704 } else {
705 action = rgw::IAM::s3GetObjectVersion;
706 }
707 }
708
709 if (!verify_object_permission(s, action)) {
710 return -EACCES;
711 }
712
713 return 0;
714 }
715
716
717 int RGWOp::verify_op_mask()
718 {
719 uint32_t required_mask = op_mask();
720
721 ldout(s->cct, 20) << "required_mask= " << required_mask
722 << " user.op_mask=" << s->user->op_mask << dendl;
723
724 if ((s->user->op_mask & required_mask) != required_mask) {
725 return -EPERM;
726 }
727
728 if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) {
729 ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl;
730 return -EPERM;
731 }
732
733 return 0;
734 }
735
736 int RGWGetObjTags::verify_permission()
737 {
738 if (!verify_object_permission(s,
739 s->object.instance.empty() ?
740 rgw::IAM::s3GetObjectTagging:
741 rgw::IAM::s3GetObjectVersionTagging))
742 return -EACCES;
743
744 return 0;
745 }
746
// Standard pre-exec: 100-continue handling plus bucket response headers.
void RGWGetObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
751
752 void RGWGetObjTags::execute()
753 {
754 rgw_obj obj;
755 map<string,bufferlist> attrs;
756
757 obj = rgw_obj(s->bucket, s->object);
758
759 store->set_atomic(s->obj_ctx, obj);
760
761 op_ret = get_obj_attrs(store, s, obj, attrs);
762 if (op_ret < 0) {
763 ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << obj
764 << " ret=" << op_ret << dendl;
765 return;
766 }
767
768 auto tags = attrs.find(RGW_ATTR_TAGS);
769 if(tags != attrs.end()){
770 has_tags = true;
771 tags_bl.append(tags->second);
772 }
773 send_response_data(tags_bl);
774 }
775
776 int RGWPutObjTags::verify_permission()
777 {
778 if (!verify_object_permission(s,
779 s->object.instance.empty() ?
780 rgw::IAM::s3PutObjectTagging:
781 rgw::IAM::s3PutObjectVersionTagging))
782 return -EACCES;
783 return 0;
784 }
785
786 void RGWPutObjTags::execute()
787 {
788 op_ret = get_params();
789 if (op_ret < 0)
790 return;
791
792 if (s->object.empty()){
793 op_ret= -EINVAL; // we only support tagging on existing objects
794 return;
795 }
796
797 rgw_obj obj;
798 obj = rgw_obj(s->bucket, s->object);
799 store->set_atomic(s->obj_ctx, obj);
800 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
801 if (op_ret == -ECANCELED){
802 op_ret = -ERR_TAG_CONFLICT;
803 }
804 }
805
// Standard pre-exec: 100-continue handling plus bucket response headers.
void RGWDeleteObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
810
811
812 int RGWDeleteObjTags::verify_permission()
813 {
814 if (!s->object.empty()) {
815 if (!verify_object_permission(s,
816 s->object.instance.empty() ?
817 rgw::IAM::s3DeleteObjectTagging:
818 rgw::IAM::s3DeleteObjectVersionTagging))
819 return -EACCES;
820 }
821 return 0;
822 }
823
824 void RGWDeleteObjTags::execute()
825 {
826 if (s->object.empty())
827 return;
828
829 rgw_obj obj;
830 obj = rgw_obj(s->bucket, s->object);
831 store->set_atomic(s->obj_ctx, obj);
832 map <string, bufferlist> attrs;
833 map <string, bufferlist> rmattr;
834 bufferlist bl;
835 rmattr[RGW_ATTR_TAGS] = bl;
836 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr);
837 }
838
839 int RGWOp::do_aws4_auth_completion()
840 {
841 ldout(s->cct, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
842 if (s->auth.completer) {
843 if (!s->auth.completer->complete()) {
844 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
845 } else {
846 dout(10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
847 }
848
849 /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
850 * call passes, so we disable second one. This is old behaviour, sorry!
851 * Plan for tomorrow: seek and destroy. */
852 s->auth.completer = nullptr;
853 }
854
855 return 0;
856 }
857
858 int RGWOp::init_quota()
859 {
860 /* no quota enforcement for system requests */
861 if (s->system_request)
862 return 0;
863
864 /* init quota related stuff */
865 if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
866 return 0;
867 }
868
869 /* only interested in object related ops */
870 if (s->object.empty()) {
871 return 0;
872 }
873
874 RGWUserInfo owner_info;
875 RGWUserInfo *uinfo;
876
877 if (s->user->user_id == s->bucket_owner.get_id()) {
878 uinfo = s->user;
879 } else {
880 int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
881 if (r < 0)
882 return r;
883 uinfo = &owner_info;
884 }
885
886 if (s->bucket_info.quota.enabled) {
887 bucket_quota = s->bucket_info.quota;
888 } else if (uinfo->bucket_quota.enabled) {
889 bucket_quota = uinfo->bucket_quota;
890 } else {
891 bucket_quota = store->get_bucket_quota();
892 }
893
894 if (uinfo->user_quota.enabled) {
895 user_quota = uinfo->user_quota;
896 } else {
897 user_quota = store->get_user_quota();
898 }
899
900 return 0;
901 }
902
903 static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
904 uint8_t flags = 0;
905
906 if (!req_meth) {
907 dout(5) << "req_meth is null" << dendl;
908 return false;
909 }
910
911 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
912 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
913 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
914 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
915 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
916
917 if (rule->get_allowed_methods() & flags) {
918 dout(10) << "Method " << req_meth << " is supported" << dendl;
919 } else {
920 dout(5) << "Method " << req_meth << " is not supported" << dendl;
921 return false;
922 }
923
924 return true;
925 }
926
927 static bool validate_cors_rule_header(RGWCORSRule *rule, const char *req_hdrs) {
928 if (req_hdrs) {
929 vector<string> hdrs;
930 get_str_vec(req_hdrs, hdrs);
931 for (const auto& hdr : hdrs) {
932 if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) {
933 dout(5) << "Header " << hdr << " is not registered in this rule" << dendl;
934 return false;
935 }
936 }
937 }
938 return true;
939 }
940
941 int RGWOp::read_bucket_cors()
942 {
943 bufferlist bl;
944
945 map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
946 if (aiter == s->bucket_attrs.end()) {
947 ldout(s->cct, 20) << "no CORS configuration attr found" << dendl;
948 cors_exist = false;
949 return 0; /* no CORS configuration found */
950 }
951
952 cors_exist = true;
953
954 bl = aiter->second;
955
956 bufferlist::iterator iter = bl.begin();
957 try {
958 bucket_cors.decode(iter);
959 } catch (buffer::error& err) {
960 ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
961 return -EIO;
962 }
963 if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
964 RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
965 ldout(s->cct, 15) << "Read RGWCORSConfiguration";
966 s3cors->to_xml(*_dout);
967 *_dout << dendl;
968 }
969 return 0;
970 }
971
972 /** CORS 6.2.6.
973 * If any of the header field-names is not a ASCII case-insensitive match for
974 * any of the values in list of headers do not set any additional headers and
975 * terminate this set of steps.
976 * */
977 static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
978 if (req_hdrs) {
979 list<string> hl;
980 get_str_list(req_hdrs, hl);
981 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
982 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
983 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
984 } else {
985 if (hdrs.length() > 0) hdrs.append(",");
986 hdrs.append((*it));
987 }
988 }
989 }
990 rule->format_exp_headers(exp_hdrs);
991 *max_age = rule->get_max_age();
992 }
993
994 /**
995 * Generate the CORS header response
996 *
997 * This is described in the CORS standard, section 6.2.
998 */
/*
 * Produce the CORS response values (allowed origin/method/headers, exposed
 * headers, max-age) for this request, per CORS spec section 6.2.
 * Returns true when CORS headers should be emitted; false means no CORS
 * response applies.  Side effect: sets op_ret if reading the bucket's
 * CORS configuration fails.
 */
bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
{
  /* CORS 6.2.1. */
  const char *orig = s->info.env->get("HTTP_ORIGIN");
  if (!orig) {
    return false;
  }

  /* Custom: */
  origin = orig;
  op_ret = read_bucket_cors();
  if (op_ret < 0) {
    return false;
  }

  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    return false;
  }

  /* CORS 6.2.2. */
  RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
  if (!rule)
    return false;

  /*
   * Set the Allowed-Origin header to a asterisk if this is allowed in the rule
   * and no Authorization was send by the client
   *
   * The origin parameter specifies a URI that may access the resource.  The browser must enforce this.
   * For requests without credentials, the server may specify "*" as a wildcard,
   * thereby allowing any origin to access the resource.
   */
  const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
  if (!authorization && rule->has_wildcard_origin())
    origin = "*";

  /* CORS 6.2.3. */
  const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    /* fall back to the actual request method for non-preflight requests */
    req_meth = s->info.method;
  }

  if (req_meth) {
    method = req_meth;
    /* CORS 6.2.5. */
    if (!validate_cors_rule_method(rule, req_meth)) {
     return false;
    }
  }

  /* CORS 6.2.4. */
  const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");

  /* CORS 6.2.6. */
  get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);

  return true;
}
1058
1059 int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
1060 const rgw_bucket_dir_entry& ent,
1061 RGWAccessControlPolicy * const bucket_acl,
1062 const optional<Policy>& bucket_policy,
1063 const off_t start_ofs,
1064 const off_t end_ofs)
1065 {
1066 ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl;
1067 RGWGetObj_CB cb(this);
1068 RGWGetDataCB* filter = &cb;
1069 boost::optional<RGWGetObj_Decompress> decompress;
1070
1071 int64_t cur_ofs = start_ofs;
1072 int64_t cur_end = end_ofs;
1073
1074 rgw_obj part(bucket, ent.key);
1075
1076 map<string, bufferlist> attrs;
1077
1078 uint64_t obj_size;
1079 RGWObjectCtx obj_ctx(store);
1080 RGWAccessControlPolicy obj_policy(s->cct);
1081
1082 ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl;
1083
1084 obj_ctx.obj.set_atomic(part);
1085 store->set_prefetch_data(&obj_ctx, part);
1086
1087 RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
1088 RGWRados::Object::Read read_op(&op_target);
1089
1090 read_op.conds.if_match = ent.meta.etag.c_str();
1091 read_op.params.attrs = &attrs;
1092 read_op.params.obj_size = &obj_size;
1093
1094 op_ret = read_op.prepare();
1095 if (op_ret < 0)
1096 return op_ret;
1097 op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
1098 if (op_ret < 0)
1099 return op_ret;
1100 bool need_decompress;
1101 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
1102 if (op_ret < 0) {
1103 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
1104 return -EIO;
1105 }
1106
1107 if (need_decompress)
1108 {
1109 if (cs_info.orig_size != ent.meta.accounted_size) {
1110 // hmm.. something wrong, object not as expected, abort!
1111 ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size <<
1112 ", actual read size=" << ent.meta.size << dendl;
1113 return -EIO;
1114 }
1115 decompress.emplace(s->cct, &cs_info, partial_content, filter);
1116 filter = &*decompress;
1117 }
1118 else
1119 {
1120 if (obj_size != ent.meta.size) {
1121 // hmm.. something wrong, object not as expected, abort!
1122 ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl;
1123 return -EIO;
1124 }
1125 }
1126
1127 op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
1128 if (op_ret < 0)
1129 return op_ret;
1130
1131 /* We can use global user_acl because LOs cannot have segments
1132 * stored inside different accounts. */
1133 if (s->system_request) {
1134 ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl;
1135 } else if (s->auth.identity->is_admin_of(s->user->user_id)) {
1136 ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl;
1137 } else if (!verify_object_permission(s, part, s->user_acl.get(), bucket_acl,
1138 &obj_policy, bucket_policy, action)) {
1139 return -EPERM;
1140 }
1141
1142 if (ent.meta.size == 0) {
1143 return 0;
1144 }
1145
1146 perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
1147 filter->fixup_range(cur_ofs, cur_end);
1148 op_ret = read_op.iterate(cur_ofs, cur_end, filter);
1149 if (op_ret >= 0)
1150 op_ret = filter->flush();
1151 return op_ret;
1152 }
1153
/*
 * Walk every object whose name starts with obj_prefix in the given bucket
 * (the expansion of a Swift DLO manifest) in listing order, and optionally:
 *  - accumulate into *ptotal_len the number of bytes falling inside the
 *    inclusive byte range [ofs, end],
 *  - accumulate into *pobj_size the total size of all parts,
 *  - compute into *pobj_sum the MD5-of-etags checksum (Swift LO etag),
 *  - invoke cb once per part that overlaps [ofs, end], passing the
 *    part-relative sub-range to read.
 * Any of ptotal_len/pobj_size/pobj_sum/cb may be null.
 * Returns 0 on success, negative errno on listing/callback failure.
 */
static int iterate_user_manifest_parts(CephContext * const cct,
                                       RGWRados * const store,
                                       const off_t ofs,
                                       const off_t end,
                                       RGWBucketInfo *pbucket_info,
                                       const string& obj_prefix,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       uint64_t * const ptotal_len,
                                       uint64_t * const pobj_size,
                                       string * const pobj_sum,
                                       int (*cb)(rgw_bucket& bucket,
                                                 const rgw_bucket_dir_entry& ent,
                                                 RGWAccessControlPolicy * const bucket_acl,
                                                 const optional<Policy>& bucket_policy,
                                                 off_t start_ofs,
                                                 off_t end_ofs,
                                                 void *param),
                                       void * const cb_param)
{
  rgw_bucket& bucket = pbucket_info->bucket;
  /* obj_ofs: running offset of the current part inside the logical DLO;
   * len_count: bytes of [ofs, end] seen so far */
  uint64_t obj_ofs = 0, len_count = 0;
  bool found_start = false, found_end = false, handled_end = false;
  string delim;
  bool is_truncated;
  vector<rgw_bucket_dir_entry> objs;

  utime_t start_time = ceph_clock_now();

  RGWRados::Bucket target(store, *pbucket_info);
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = obj_prefix;
  list_op.params.delim = delim;

  MD5 etag_sum;
  do {
#define MAX_LIST_OBJS 100
    /* page through the listing MAX_LIST_OBJS entries at a time */
    int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
    if (r < 0) {
      return r;
    }

    for (rgw_bucket_dir_entry& ent : objs) {
      const uint64_t cur_total_len = obj_ofs;
      const uint64_t obj_size = ent.meta.accounted_size;
      uint64_t start_ofs = 0, end_ofs = obj_size;

      /* first part that reaches into the requested range: clip its head */
      if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
        start_ofs = ofs - obj_ofs;
        found_start = true;
      }

      obj_ofs += obj_size;
      if (pobj_sum) {
        /* Swift LO etag is the MD5 over the concatenated part etags */
        etag_sum.Update((const byte *)ent.meta.etag.c_str(),
                        ent.meta.etag.length());
      }

      /* last part overlapping the range: clip its tail ('end' is inclusive,
       * hence the +1) */
      if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
        end_ofs = end - cur_total_len + 1;
        found_end = true;
      }

      perfcounter->tinc(l_rgw_get_lat,
                        (ceph_clock_now() - start_time));

      if (found_start && !handled_end) {
        len_count += end_ofs - start_ofs;

        if (cb) {
          r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, cb_param);
          if (r < 0) {
            return r;
          }
        }
      }

      /* delay one iteration so the part that contains 'end' is still handled */
      handled_end = found_end;
      start_time = ceph_clock_now();
    }
  } while (is_truncated);

  if (ptotal_len) {
    *ptotal_len = len_count;
  }
  if (pobj_size) {
    *pobj_size = obj_ofs;
  }
  if (pobj_sum) {
    complete_etag(etag_sum, pobj_sum);
  }

  return 0;
}
1249
/* One segment of an expanded Swift SLO manifest: where the segment lives,
 * its size/etag as recorded in the manifest, and the access-control context
 * needed to read it later in iterate_slo_parts(). */
struct rgw_slo_part {
  RGWAccessControlPolicy *bucket_acl = nullptr; /* non-owning */
  Policy* bucket_policy = nullptr;              /* non-owning, may be null */
  rgw_bucket bucket;
  string obj_name;
  uint64_t size = 0;
  string etag;
};
1258
/*
 * Walk the SLO parts map (keyed by the part's starting offset within the
 * logical object) and invoke cb for every part overlapping the inclusive
 * byte range [ofs, end], passing the part-relative sub-range.
 * Returns 0 on success or when there are no parts; negative errno from cb.
 */
static int iterate_slo_parts(CephContext *cct,
                             RGWRados *store,
                             off_t ofs,
                             off_t end,
                             map<uint64_t, rgw_slo_part>& slo_parts,
                             int (*cb)(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy *bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       off_t start_ofs,
                                       off_t end_ofs,
                                       void *param),
                             void *cb_param)
{
  bool found_start = false, found_end = false;

  if (slo_parts.empty()) {
    return 0;
  }

  utime_t start_time = ceph_clock_now();

  /* start at the last part whose starting offset is <= ofs */
  map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
  if (iter != slo_parts.begin()) {
    --iter;
  }

  uint64_t obj_ofs = iter->first;

  for (; iter != slo_parts.end() && !found_end; ++iter) {
    rgw_slo_part& part = iter->second;
    /* synthesize a dir entry so the shared manifest callback can be reused */
    rgw_bucket_dir_entry ent;

    ent.key.name = part.obj_name;
    ent.meta.accounted_size = ent.meta.size = part.size;
    ent.meta.etag = part.etag;

    uint64_t cur_total_len = obj_ofs;
    uint64_t start_ofs = 0, end_ofs = ent.meta.size;

    /* clip the head of the first part that reaches into the range */
    if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
      start_ofs = ofs - obj_ofs;
      found_start = true;
    }

    obj_ofs += ent.meta.size;

    /* clip the tail of the part that contains 'end' (inclusive, hence +1) */
    if (!found_end && obj_ofs > (uint64_t)end) {
      end_ofs = end - cur_total_len + 1;
      found_end = true;
    }

    perfcounter->tinc(l_rgw_get_lat,
                      (ceph_clock_now() - start_time));

    if (found_start) {
      if (cb) {
        // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
        int r = cb(part.bucket, ent, part.bucket_acl,
                   (part.bucket_policy ?
                    optional<Policy>(*part.bucket_policy) : none),
                   start_ofs, end_ofs, cb_param);
        if (r < 0)
          return r;
      }
    }

    start_time = ceph_clock_now();
  }

  return 0;
}
1331
1332 static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
1333 const rgw_bucket_dir_entry& ent,
1334 RGWAccessControlPolicy * const bucket_acl,
1335 const optional<Policy>& bucket_policy,
1336 const off_t start_ofs,
1337 const off_t end_ofs,
1338 void * const param)
1339 {
1340 RGWGetObj *op = static_cast<RGWGetObj *>(param);
1341 return op->read_user_manifest_part(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs);
1342 }
1343
/*
 * Serve a GET on a Swift DLO (dynamic large object).
 *
 * prefix has the form "<bucket>/<object-prefix>"; all objects under that
 * prefix are concatenated in listing order and streamed as one logical
 * object. Three iteration passes are made: one to compute the overall size
 * and LO etag, one to compute the length of the requested range, and one to
 * actually send the data. Returns 0 on success, negative errno on error.
 */
int RGWGetObj::handle_user_manifest(const char *prefix)
{
  const boost::string_view prefix_view(prefix);
  ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix="
                   << prefix_view << dendl;

  const size_t pos = prefix_view.find('/');
  if (pos == string::npos) {
    return -EINVAL;
  }

  const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
  const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));

  rgw_bucket bucket;

  RGWAccessControlPolicy _bucket_acl(s->cct);
  RGWAccessControlPolicy *bucket_acl;
  optional<Policy> _bucket_policy;
  optional<Policy>* bucket_policy;
  RGWBucketInfo bucket_info;
  RGWBucketInfo *pbucket_info;

  /* the manifest may point at a different bucket than the request's;
   * in that case its info, ACL and policy must be fetched separately */
  if (bucket_name.compare(s->bucket.name) != 0) {
    map<string, bufferlist> bucket_attrs;
    RGWObjectCtx obj_ctx(store);
    int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   bucket_name, bucket_info, NULL,
                                   &bucket_attrs);
    if (r < 0) {
      ldout(s->cct, 0) << "could not get bucket info for bucket="
                       << bucket_name << dendl;
      return r;
    }
    bucket = bucket_info.bucket;
    pbucket_info = &bucket_info;
    bucket_acl = &_bucket_acl;
    r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
    if (r < 0) {
      ldout(s->cct, 0) << "failed to read bucket policy" << dendl;
      return r;
    }
    _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
                                              bucket_info.bucket.tenant);
    bucket_policy = &_bucket_policy;
  } else {
    /* same bucket: reuse what was already loaded for this request */
    bucket = s->bucket;
    pbucket_info = &s->bucket_info;
    bucket_acl = s->bucket_acl.get();
    bucket_policy = &s->iam_policy;
  }

  /* dry run to find out:
   * - total length (of the parts we are going to send to client),
   * - overall DLO's content size,
   * - md5 sum of overall DLO's content (for etag of Swift API). */
  int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, &s->obj_size, &lo_etag,
        nullptr /* cb */, nullptr /* cb arg */);
  if (r < 0) {
    return r;
  }

  /* clamp the requested range against the now-known overall size */
  r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
  if (r < 0) {
    return r;
  }

  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        &total_len, nullptr, nullptr,
        nullptr, nullptr);
  if (r < 0) {
    return r;
  }

  /* HEAD-style request: headers only, no body */
  if (!get_data) {
    bufferlist bl;
    send_response_data(bl, 0, 0);
    return 0;
  }

  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, nullptr, nullptr,
        get_obj_user_manifest_iterate_cb, (void *)this);
  if (r < 0) {
    return r;
  }

  /* empty result set: still emit the response headers */
  if (!total_len) {
    bufferlist bl;
    send_response_data(bl, 0, 0);
  }

  return 0;
}
1442
1443 int RGWGetObj::handle_slo_manifest(bufferlist& bl)
1444 {
1445 RGWSLOInfo slo_info;
1446 bufferlist::iterator bliter = bl.begin();
1447 try {
1448 ::decode(slo_info, bliter);
1449 } catch (buffer::error& err) {
1450 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
1451 return -EIO;
1452 }
1453 ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
1454
1455 vector<RGWAccessControlPolicy> allocated_acls;
1456 map<string, pair<RGWAccessControlPolicy *, optional<Policy>>> policies;
1457 map<string, rgw_bucket> buckets;
1458
1459 map<uint64_t, rgw_slo_part> slo_parts;
1460
1461 MD5 etag_sum;
1462 total_len = 0;
1463
1464 for (const auto& entry : slo_info.entries) {
1465 const string& path = entry.path;
1466
1467 /* If the path starts with slashes, strip them all. */
1468 const size_t pos_init = path.find_first_not_of('/');
1469 /* According to the documentation of std::string::find following check
1470 * is not necessary as we should get the std::string::npos propagation
1471 * here. This might be true with the accuracy to implementation's bugs.
1472 * See following question on SO:
1473 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
1474 */
1475 if (pos_init == string::npos) {
1476 return -EINVAL;
1477 }
1478
1479 const size_t pos_sep = path.find('/', pos_init);
1480 if (pos_sep == string::npos) {
1481 return -EINVAL;
1482 }
1483
1484 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
1485 string obj_name = path.substr(pos_sep + 1);
1486
1487 rgw_bucket bucket;
1488 RGWAccessControlPolicy *bucket_acl;
1489 Policy* bucket_policy;
1490
1491 if (bucket_name.compare(s->bucket.name) != 0) {
1492 const auto& piter = policies.find(bucket_name);
1493 if (piter != policies.end()) {
1494 bucket_acl = piter->second.first;
1495 bucket_policy = piter->second.second.get_ptr();
1496 bucket = buckets[bucket_name];
1497 } else {
1498 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
1499 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
1500
1501 RGWBucketInfo bucket_info;
1502 map<string, bufferlist> bucket_attrs;
1503 RGWObjectCtx obj_ctx(store);
1504 int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
1505 bucket_name, bucket_info, nullptr,
1506 &bucket_attrs);
1507 if (r < 0) {
1508 ldout(s->cct, 0) << "could not get bucket info for bucket="
1509 << bucket_name << dendl;
1510 return r;
1511 }
1512 bucket = bucket_info.bucket;
1513 bucket_acl = &_bucket_acl;
1514 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
1515 bucket);
1516 if (r < 0) {
1517 ldout(s->cct, 0) << "failed to read bucket ACL for bucket "
1518 << bucket << dendl;
1519 return r;
1520 }
1521 auto _bucket_policy = get_iam_policy_from_attr(
1522 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
1523 bucket_policy = _bucket_policy.get_ptr();
1524 buckets[bucket_name] = bucket;
1525 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
1526 }
1527 } else {
1528 bucket = s->bucket;
1529 bucket_acl = s->bucket_acl.get();
1530 bucket_policy = s->iam_policy.get_ptr();
1531 }
1532
1533 rgw_slo_part part;
1534 part.bucket_acl = bucket_acl;
1535 part.bucket_policy = bucket_policy;
1536 part.bucket = bucket;
1537 part.obj_name = obj_name;
1538 part.size = entry.size_bytes;
1539 part.etag = entry.etag;
1540 ldout(s->cct, 20) << "slo_part: ofs=" << ofs
1541 << " bucket=" << part.bucket
1542 << " obj=" << part.obj_name
1543 << " size=" << part.size
1544 << " etag=" << part.etag
1545 << dendl;
1546
1547 etag_sum.Update((const byte *)entry.etag.c_str(),
1548 entry.etag.length());
1549
1550 slo_parts[total_len] = part;
1551 total_len += part.size;
1552 }
1553
1554 complete_etag(etag_sum, &lo_etag);
1555
1556 s->obj_size = slo_info.total_size;
1557 ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl;
1558
1559 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
1560 if (r < 0) {
1561 return r;
1562 }
1563
1564 total_len = end - ofs + 1;
1565
1566 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
1567 get_obj_user_manifest_iterate_cb, (void *)this);
1568 if (r < 0) {
1569 return r;
1570 }
1571
1572 return 0;
1573 }
1574
1575 int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
1576 {
1577 /* garbage collection related handling */
1578 utime_t start_time = ceph_clock_now();
1579 if (start_time > gc_invalidate_time) {
1580 int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
1581 if (r < 0) {
1582 dout(0) << "WARNING: could not defer gc entry for obj" << dendl;
1583 }
1584 gc_invalidate_time = start_time;
1585 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1586 }
1587 return send_response_data(bl, bl_ofs, bl_len);
1588 }
1589
1590 bool RGWGetObj::prefetch_data()
1591 {
1592 /* HEAD request, stop prefetch*/
1593 if (!get_data) {
1594 return false;
1595 }
1596
1597 bool prefetch_first_chunk = true;
1598 range_str = s->info.env->get("HTTP_RANGE");
1599
1600 if (range_str) {
1601 int r = parse_range();
1602 /* error on parsing the range, stop prefetch and will fail in execute() */
1603 if (r < 0) {
1604 return false; /* range_parsed==false */
1605 }
1606 /* range get goes to shadow objects, stop prefetch */
1607 if (ofs >= s->cct->_conf->rgw_max_chunk_size) {
1608 prefetch_first_chunk = false;
1609 }
1610 }
1611
1612 return get_data && prefetch_first_chunk;
1613 }
1614
void RGWGetObj::pre_exec()
{
  /* common pre-execution hook shared by bucket/object operations */
  rgw_bucket_object_pre_exec(s);
}
1619
1620 static bool object_is_expired(map<string, bufferlist>& attrs) {
1621 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
1622 if (iter != attrs.end()) {
1623 utime_t delete_at;
1624 try {
1625 ::decode(delete_at, iter->second);
1626 } catch (buffer::error& err) {
1627 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
1628 return false;
1629 }
1630
1631 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
1632 return true;
1633 }
1634 }
1635
1636 return false;
1637 }
1638
/*
 * Main entry point for GET/HEAD object: prepares the read, dispatches
 * torrent / DLO / SLO special cases, sets up the decompression and
 * decryption filter chain, then streams the requested range. Errors jump
 * to done_err, which emits the error response.
 */
void RGWGetObj::execute()
{
  utime_t start_time = s->time;
  bufferlist bl;
  /* first GC defer refresh is due half a GC min-wait from now */
  gc_invalidate_time = ceph_clock_now();
  gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);

  bool need_decompress;
  int64_t ofs_x, end_x;

  RGWGetObj_CB cb(this);
  RGWGetDataCB* filter = (RGWGetDataCB*)&cb;
  boost::optional<RGWGetObj_Decompress> decompress;
  std::unique_ptr<RGWGetDataCB> decrypt;
  map<string, bufferlist>::iterator attr_iter;

  perfcounter->inc(l_rgw_get);

  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
  RGWRados::Object::Read read_op(&op_target);

  op_ret = get_params();
  if (op_ret < 0)
    goto done_err;

  op_ret = init_common();
  if (op_ret < 0)
    goto done_err;

  /* wire the conditional-GET and read parameters into the read op */
  read_op.conds.mod_ptr = mod_ptr;
  read_op.conds.unmod_ptr = unmod_ptr;
  read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
  read_op.conds.mod_zone_id = mod_zone_id;
  read_op.conds.mod_pg_ver = mod_pg_ver;
  read_op.conds.if_match = if_match;
  read_op.conds.if_nomatch = if_nomatch;
  read_op.params.attrs = &attrs;
  read_op.params.lastmod = &lastmod;
  read_op.params.obj_size = &s->obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    goto done_err;
  version_id = read_op.state.obj.key.instance;

  /* STAT ops don't need data, and do no i/o */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  /* start gettorrent */
  if (torrent.get_flag())
  {
    /* torrents would expose SSE-C ciphertext, so refuse */
    attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
    if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
      ldout(s->cct, 0) << "ERROR: torrents are not supported for objects "
                          "encrypted with SSE-C" << dendl;
      op_ret = -EINVAL;
      goto done_err;
    }
    torrent.init(s, store);
    op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    op_ret = send_response_data(bl, 0, total_len);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }
  /* end gettorrent */

  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    goto done_err;
  }
  if (need_decompress) {
    /* report/range against the uncompressed size, not the on-disk size */
    s->obj_size = cs_info.orig_size;
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }

  /* Swift DLO: delegate the whole response to the manifest handler */
  attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    op_ret = handle_user_manifest(attr_iter->second.c_str());
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret="
                       << op_ret << dendl;
      goto done_err;
    }
    return;
  }

  /* Swift SLO: likewise */
  attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    is_slo = true;
    op_ret = handle_slo_manifest(attr_iter->second);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }

  // for range requests with obj size 0
  if (range_str && !(s->obj_size)) {
    total_len = 0;
    op_ret = -ERANGE;
    goto done_err;
  }

  op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
  if (op_ret < 0)
    goto done_err;
  total_len = (ofs <= end ? end + 1 - ofs : 0);

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(attrs)) {
    op_ret = -ENOENT;
    goto done_err;
  }

  start = ofs;

  /* STAT ops don't need data, and do no i/o */
  /* NOTE(review): appears unreachable — the identical check right after
   * prepare() above already returned for STAT ops */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  /* install the decryption filter ahead of (i.e. wrapping) decompression */
  attr_iter = attrs.find(RGW_ATTR_MANIFEST);
  op_ret = this->get_decrypt_filter(&decrypt, filter,
                                    attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
  if (decrypt != nullptr) {
    filter = decrypt.get();
  }
  if (op_ret < 0) {
    goto done_err;
  }

  /* HEAD, or an empty effective range: headers only */
  if (!get_data || ofs > end) {
    send_response_data(bl, 0, 0);
    return;
  }

  perfcounter->inc(l_rgw_get_b, end - ofs);

  /* let the filter chain translate the logical range to a raw range */
  ofs_x = ofs;
  end_x = end;
  filter->fixup_range(ofs_x, end_x);
  op_ret = read_op.iterate(ofs_x, end_x, filter);

  if (op_ret >= 0)
    op_ret = filter->flush();

  perfcounter->tinc(l_rgw_get_lat,
                    (ceph_clock_now() - start_time));
  if (op_ret < 0) {
    goto done_err;
  }

  op_ret = send_response_data(bl, 0, 0);
  if (op_ret < 0) {
    goto done_err;
  }
  return;

done_err:
  send_response_data_error();
}
1818
1819 int RGWGetObj::init_common()
1820 {
1821 if (range_str) {
1822 /* range parsed error when prefetch */
1823 if (!range_parsed) {
1824 int r = parse_range();
1825 if (r < 0)
1826 return r;
1827 }
1828 }
1829 if (if_mod) {
1830 if (parse_time(if_mod, &mod_time) < 0)
1831 return -EINVAL;
1832 mod_ptr = &mod_time;
1833 }
1834
1835 if (if_unmod) {
1836 if (parse_time(if_unmod, &unmod_time) < 0)
1837 return -EINVAL;
1838 unmod_ptr = &unmod_time;
1839 }
1840
1841 return 0;
1842 }
1843
1844 int RGWListBuckets::verify_permission()
1845 {
1846 if (!verify_user_permission(s, RGW_PERM_READ)) {
1847 return -EACCES;
1848 }
1849
1850 return 0;
1851 }
1852
1853 int RGWGetUsage::verify_permission()
1854 {
1855 if (s->auth.identity->is_anonymous()) {
1856 return -EACCES;
1857 }
1858
1859 return 0;
1860 }
1861
/*
 * List the authenticated user's buckets in paginated chunks, aggregating
 * global and per-placement-policy usage stats along the way. The response
 * is streamed chunk by chunk via handle_listing_chunk(); begin/end framing
 * is always emitted, even on early error (via send_end).
 */
void RGWListBuckets::execute()
{
  bool done;
  bool started = false;
  uint64_t total_count = 0;

  /* page size for each rgw_read_user_buckets() call */
  const uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  op_ret = get_params();
  if (op_ret < 0) {
    goto send_end;
  }

  if (supports_account_metadata()) {
    op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
    if (op_ret < 0) {
      goto send_end;
    }
  }

  is_truncated = false;
  do {
    RGWUserBuckets buckets;
    uint64_t read_count;
    if (limit >= 0) {
      /* never read past the client-requested limit */
      read_count = min(limit - total_count, (uint64_t)max_buckets);
    } else {
      read_count = max_buckets;
    }

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, end_marker, read_count,
                                   should_get_stats(), &is_truncated,
                                   get_default_max());
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
                        << s->user->user_id << dendl;
      break;
    }

    /* We need to have stats for all our policies - even if a given policy
     * isn't actually used in a given account. In such situation its usage
     * stats would be simply full of zeros. */
    for (const auto& policy : store->get_zonegroup().placement_targets) {
      policies_stats.emplace(policy.second.name,
                             decltype(policies_stats)::mapped_type());
    }

    std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
    for (const auto& kv : m) {
      const auto& bucket = kv.second;

      global_stats.bytes_used += bucket.size;
      global_stats.bytes_used_rounded += bucket.size_rounded;
      global_stats.objects_count += bucket.count;

      /* operator[] still can create a new entry for storage policy seen
       * for first time. */
      auto& policy_stats = policies_stats[bucket.placement_rule];
      policy_stats.bytes_used += bucket.size;
      policy_stats.bytes_used_rounded += bucket.size_rounded;
      policy_stats.buckets_count++;
      policy_stats.objects_count += bucket.count;
    }
    global_stats.buckets_count += m.size();
    total_count += m.size();

    /* done when a short page came back or the client's limit is reached */
    done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));

    if (!started) {
      send_response_begin(buckets.count() > 0);
      started = true;
    }

    if (!m.empty()) {
      /* advance the pagination marker to the last bucket of this chunk */
      map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
      marker = riter->first;

      handle_listing_chunk(std::move(buckets));
    }
  } while (is_truncated && !done);

send_end:
  if (!started) {
    send_response_begin(false);
  }
  send_response_end();
}
1952
1953 void RGWGetUsage::execute()
1954 {
1955 uint64_t start_epoch = 0;
1956 uint64_t end_epoch = (uint64_t)-1;
1957 op_ret = get_params();
1958 if (op_ret < 0)
1959 return;
1960
1961 if (!start_date.empty()) {
1962 op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
1963 if (op_ret < 0) {
1964 ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl;
1965 return;
1966 }
1967 }
1968
1969 if (!end_date.empty()) {
1970 op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
1971 if (op_ret < 0) {
1972 ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl;
1973 return;
1974 }
1975 }
1976
1977 uint32_t max_entries = 1000;
1978
1979 bool is_truncated = true;
1980
1981 RGWUsageIter usage_iter;
1982
1983 while (is_truncated) {
1984 op_ret = store->read_usage(s->user->user_id, start_epoch, end_epoch, max_entries,
1985 &is_truncated, usage_iter, usage);
1986
1987 if (op_ret == -ENOENT) {
1988 op_ret = 0;
1989 is_truncated = false;
1990 }
1991
1992 if (op_ret < 0) {
1993 return;
1994 }
1995 }
1996
1997 op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
1998 if (op_ret < 0) {
1999 ldout(store->ctx(), 0) << "ERROR: failed to sync user stats: " << dendl;
2000 return;
2001 }
2002
2003 op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage);
2004 if (op_ret < 0) {
2005 cerr << "ERROR: failed to sync user stats: " << std::endl;
2006 return ;
2007 }
2008
2009 string user_str = s->user->user_id.to_str();
2010 op_ret = store->cls_user_get_header(user_str, &header);
2011 if (op_ret < 0) {
2012 ldout(store->ctx(), 0) << "ERROR: can't read user header: " << dendl;
2013 return;
2014 }
2015
2016 return;
2017 }
2018
2019 int RGWStatAccount::verify_permission()
2020 {
2021 if (!verify_user_permission(s, RGW_PERM_READ)) {
2022 return -EACCES;
2023 }
2024
2025 return 0;
2026 }
2027
2028 void RGWStatAccount::execute()
2029 {
2030 string marker;
2031 bool is_truncated = false;
2032 uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
2033
2034 do {
2035 RGWUserBuckets buckets;
2036
2037 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker,
2038 string(), max_buckets, true, &is_truncated);
2039 if (op_ret < 0) {
2040 /* hmm.. something wrong here.. the user was authenticated, so it
2041 should exist */
2042 ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
2043 << s->user->user_id << dendl;
2044 break;
2045 } else {
2046 /* We need to have stats for all our policies - even if a given policy
2047 * isn't actually used in a given account. In such situation its usage
2048 * stats would be simply full of zeros. */
2049 for (const auto& policy : store->get_zonegroup().placement_targets) {
2050 policies_stats.emplace(policy.second.name,
2051 decltype(policies_stats)::mapped_type());
2052 }
2053
2054 std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
2055 for (const auto& kv : m) {
2056 const auto& bucket = kv.second;
2057
2058 global_stats.bytes_used += bucket.size;
2059 global_stats.bytes_used_rounded += bucket.size_rounded;
2060 global_stats.objects_count += bucket.count;
2061
2062 /* operator[] still can create a new entry for storage policy seen
2063 * for first time. */
2064 auto& policy_stats = policies_stats[bucket.placement_rule];
2065 policy_stats.bytes_used += bucket.size;
2066 policy_stats.bytes_used_rounded += bucket.size_rounded;
2067 policy_stats.buckets_count++;
2068 policy_stats.objects_count += bucket.count;
2069 }
2070 global_stats.buckets_count += m.size();
2071
2072 }
2073 } while (is_truncated);
2074 }
2075
int RGWGetBucketVersioning::verify_permission()
{
  /* allowed for the bucket owner or anyone granted s3:GetBucketVersioning */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
}
2080
void RGWGetBucketVersioning::pre_exec()
{
  /* common pre-execution hook shared by bucket/object operations */
  rgw_bucket_object_pre_exec(s);
}
2085
void RGWGetBucketVersioning::execute()
{
  /* report both flags: a bucket can be versioned while versioning is
   * currently suspended (see RGWSetBucketVersioning::execute) */
  versioned = s->bucket_info.versioned();
  versioning_enabled = s->bucket_info.versioning_enabled();
}
2091
int RGWSetBucketVersioning::verify_permission()
{
  /* allowed for the bucket owner or anyone granted s3:PutBucketVersioning */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
}
2096
void RGWSetBucketVersioning::pre_exec()
{
  /* common pre-execution hook shared by bucket/object operations */
  rgw_bucket_object_pre_exec(s);
}
2101
2102 void RGWSetBucketVersioning::execute()
2103 {
2104 op_ret = get_params();
2105 if (op_ret < 0)
2106 return;
2107
2108 if (!store->is_meta_master()) {
2109 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2110 if (op_ret < 0) {
2111 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
2112 return;
2113 }
2114 }
2115
2116 op_ret = retry_raced_bucket_write(store, s, [this] {
2117 if (enable_versioning) {
2118 s->bucket_info.flags |= BUCKET_VERSIONED;
2119 s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
2120 } else {
2121 s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
2122 }
2123
2124 return store->put_bucket_instance_info(s->bucket_info, false, real_time(),
2125 &s->bucket_attrs);
2126 });
2127
2128 if (op_ret < 0) {
2129 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2130 << " returned err=" << op_ret << dendl;
2131 return;
2132 }
2133 }
2134
int RGWGetBucketWebsite::verify_permission()
{
  /* allowed for the bucket owner or anyone granted s3:GetBucketWebsite */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
}
2139
void RGWGetBucketWebsite::pre_exec()
{
  /* common pre-execution hook shared by bucket/object operations */
  rgw_bucket_object_pre_exec(s);
}
2144
void RGWGetBucketWebsite::execute()
{
  /* the website config itself lives in s->bucket_info.website_conf;
   * only its presence needs to be checked here */
  if (!s->bucket_info.has_website) {
    op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION;
  }
}
2151
int RGWSetBucketWebsite::verify_permission()
{
  /* allowed for the bucket owner or anyone granted s3:PutBucketWebsite */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
}
2156
void RGWSetBucketWebsite::pre_exec()
{
  /* common pre-execution hook shared by bucket/object operations */
  rgw_bucket_object_pre_exec(s);
}
2161
/*
 * Store a website configuration on the bucket. Non-master zones forward
 * the request to the metadata master first; the bucket-instance write is
 * retried on raced concurrent updates.
 */
void RGWSetBucketWebsite::execute()
{
  op_ret = get_params();

  if (op_ret < 0)
    return;

  if (!store->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  op_ret = retry_raced_bucket_write(store, s, [this] {
    s->bucket_info.has_website = true;
    s->bucket_info.website_conf = website_conf;
    op_ret = store->put_bucket_instance_info(s->bucket_info, false,
                                             real_time(), &s->bucket_attrs);
    return op_ret;
  });

  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }
}
2190
2191 int RGWDeleteBucketWebsite::verify_permission()
2192 {
2193 return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
2194 }
2195
void RGWDeleteBucketWebsite::pre_exec()
{
  // Delegate to the shared bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
2200
void RGWDeleteBucketWebsite::execute()
{
  // Clear the website configuration, retrying if a concurrent writer races
  // on the bucket instance info.
  // NOTE(review): unlike RGWSetBucketWebsite::execute(), this op does not
  // forward the change to the metadata master — confirm whether that is
  // intended in multisite deployments.
  op_ret = retry_raced_bucket_write(store, s, [this] {
    s->bucket_info.has_website = false;
    s->bucket_info.website_conf = RGWBucketWebsiteConf();
    op_ret = store->put_bucket_instance_info(s->bucket_info, false,
                                             real_time(), &s->bucket_attrs);
    return op_ret;
  });
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }
}
2215
2216 int RGWStatBucket::verify_permission()
2217 {
2218 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2219 if (!verify_bucket_permission(s, rgw::IAM::s3ListBucket)) {
2220 return -EACCES;
2221 }
2222
2223 return 0;
2224 }
2225
void RGWStatBucket::pre_exec()
{
  // Delegate to the shared bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
2230
void RGWStatBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  // Refresh stats for this one bucket through the user-buckets container.
  RGWUserBuckets buckets;
  bucket.bucket = s->bucket;
  buckets.add(bucket);
  map<string, RGWBucketEnt>& m = buckets.get_buckets();
  op_ret = store->update_containers_stats(m);
  // A positive return is treated as success below; a zero return is mapped
  // to an error so it is not mistaken for success.
  // NOTE(review): -EEXIST looks like an odd code for "no stats returned" —
  // confirm the intended error here.
  if (! op_ret)
    op_ret = -EEXIST;
  if (op_ret > 0) {
    op_ret = 0;
    map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
    if (iter != m.end()) {
      bucket = iter->second;
    } else {
      op_ret = -EINVAL;
    }
  }
}
2255
int RGWListBucket::verify_permission()
{
  // Parse listing parameters first: prefix/delimiter/max-keys feed the IAM
  // condition environment consulted by the permission check below.
  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }
  if (!prefix.empty())
    s->env.emplace("s3:prefix", prefix);

  if (!delimiter.empty())
    s->env.emplace("s3:delimiter", delimiter);

  s->env.emplace("s3:max-keys", std::to_string(max));

  // Listing object versions is a distinct IAM action from a plain listing.
  if (!verify_bucket_permission(s,
                                list_versions ?
                                rgw::IAM::s3ListBucketVersions :
                                rgw::IAM::s3ListBucket)) {
    return -EACCES;
  }

  return 0;
}
2279
2280 int RGWListBucket::parse_max_keys()
2281 {
2282 // Bound max value of max-keys to configured value for security
2283 // Bound min value of max-keys to '0'
2284 // Some S3 clients explicitly send max-keys=0 to detect if the bucket is
2285 // empty without listing any items.
2286 return parse_value_and_bound(max_keys, max, 0,
2287 s->cct->_conf->get_val<uint64_t>("rgw_max_listing_results"),
2288 default_max);
2289 }
2290
void RGWListBucket::pre_exec()
{
  // Delegate to the shared bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
2295
void RGWListBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  // Unordered listings cannot honor delimiter-based grouping.
  if (allow_unordered && !delimiter.empty()) {
    ldout(s->cct, 0) <<
      "ERROR: unordered bucket listing requested with a delimiter" << dendl;
    op_ret = -EINVAL;
    return;
  }

  // Optionally refresh container stats (a positive return from
  // update_containers_stats() is treated as success).
  if (need_container_stats()) {
    map<string, RGWBucketEnt> m;
    m[s->bucket.name] = RGWBucketEnt();
    m.begin()->second.bucket = s->bucket;
    op_ret = store->update_containers_stats(m);
    if (op_ret > 0) {
      bucket = m.begin()->second;
    }
  }

  // Perform the actual listing, optionally restricted to a single shard.
  RGWRados::Bucket target(store, s->bucket_info);
  if (shard_id >= 0) {
    target.set_shard_id(shard_id);
  }
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = prefix;
  list_op.params.delim = delimiter;
  list_op.params.marker = marker;
  list_op.params.end_marker = end_marker;
  list_op.params.list_versions = list_versions;
  list_op.params.allow_unordered = allow_unordered;

  op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
  if (op_ret >= 0) {
    next_marker = list_op.get_next_marker();
  }
}
2338
2339 int RGWGetBucketLogging::verify_permission()
2340 {
2341 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
2342 }
2343
2344 int RGWGetBucketLocation::verify_permission()
2345 {
2346 return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
2347 }
2348
int RGWCreateBucket::verify_permission()
{
  /* This check is mostly needed for S3 that doesn't support account ACL.
   * Swift doesn't allow to delegate any permission to an anonymous user,
   * so it will become an early exit in such case. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (!verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  // Cross-tenant bucket creation is rejected.
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
                      << " (user_id.tenant=" << s->user->user_id.tenant
                      << " requested=" << s->bucket_tenant << ")"
                      << dendl;
    return -EACCES;
  }
  // A negative max_buckets disables bucket creation for this user entirely.
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  // A non-zero max_buckets enforces the per-user bucket quota by counting
  // the user's existing buckets; zero skips the check.
  if (s->user->max_buckets) {
    RGWUserBuckets buckets;
    string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, string(), s->user->max_buckets,
                                   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if ((int)buckets.count() >= s->user->max_buckets) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
2391
/* Forward a (typically mutating) request to the metadata master zone's RGW.
 * @objv, if non-null, carries an object version to forward; @forward_info,
 * if non-null, overrides the request info taken from @s. On success the
 * master's response is optionally parsed into @jp. Returns 0 on success or
 * a negative error code. */
static int forward_request_to_master(struct req_state *s, obj_version *objv,
                                     RGWRados *store, bufferlist& in_data,
                                     JSONParser *jp, req_info *forward_info)
{
  if (!store->rest_master_conn) {
    ldout(s->cct, 0) << "rest connection is invalid" << dendl;
    return -EINVAL;
  }
  ldout(s->cct, 0) << "sending request to master zonegroup" << dendl;
  bufferlist response;
  string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
  int ret = store->rest_master_conn->forward(uid_str, (forward_info ? *forward_info : s->info),
                                             objv, MAX_REST_RESPONSE, &in_data, &response);
  if (ret < 0)
    return ret;

  ldout(s->cct, 20) << "response: " << response.c_str() << dendl;
  // Only parse when the caller supplied a parser; a parse failure is
  // reported as -EINVAL.
  if (jp && !jp->parse(response.c_str(), response.length())) {
    ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl;
    return -EINVAL;
  }

  return 0;
}
2417
void RGWCreateBucket::pre_exec()
{
  // Delegate to the shared bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
2422
2423 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2424 map<string, bufferlist>& out_attrs,
2425 map<string, bufferlist>& out_rmattrs)
2426 {
2427 for (const auto& kv : orig_attrs) {
2428 const string& name = kv.first;
2429
2430 /* Check if the attr is user-defined metadata item. */
2431 if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
2432 RGW_ATTR_META_PREFIX) == 0) {
2433 /* For the objects all existing meta attrs have to be removed. */
2434 out_rmattrs[name] = kv.second;
2435 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2436 out_attrs[name] = kv.second;
2437 }
2438 }
2439 }
2440
/* Fuse resource metadata basing on original attributes in @orig_attrs, set
 * of _custom_ attribute names to remove in @rmattr_names and attributes in
 * @out_attrs. Place results in @out_attrs.
 *
 * NOTE: it's supposed that all special attrs already present in @out_attrs
 * will be preserved without any change. Special attributes are those which
 * names start with RGW_ATTR_META_PREFIX. They're complement to custom ones
 * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta and so on. */
static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
                                  const set<string>& rmattr_names,
                                  map<string, bufferlist>& out_attrs)
{
  for (const auto& kv : orig_attrs) {
    const string& name = kv.first;

    /* Check if the attr is user-defined metadata item. */
    if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
                     RGW_ATTR_META_PREFIX) == 0) {
      /* For the buckets all existing meta attrs are preserved,
         except those that are listed in rmattr_names. */
      if (rmattr_names.find(name) != std::end(rmattr_names)) {
        const auto aiter = out_attrs.find(name);

        if (aiter != std::end(out_attrs)) {
          out_attrs.erase(aiter);
        }
      } else {
        /* emplace() won't alter the map if the key is already present.
         * This behaviour is fully intentional here. */
        out_attrs.emplace(kv);
      }
    } else if (out_attrs.find(name) == std::end(out_attrs)) {
      out_attrs[name] = kv.second;
    }
  }
}
2477
2478
2479 static void populate_with_generic_attrs(const req_state * const s,
2480 map<string, bufferlist>& out_attrs)
2481 {
2482 for (const auto& kv : s->generic_attrs) {
2483 bufferlist& attrbl = out_attrs[kv.first];
2484 const string& val = kv.second;
2485 attrbl.clear();
2486 attrbl.append(val.c_str(), val.size() + 1);
2487 }
2488 }
2489
2490
2491 static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
2492 const std::set<std::string>& rmattr_names,
2493 RGWQuotaInfo& quota,
2494 bool * quota_extracted = nullptr)
2495 {
2496 bool extracted = false;
2497
2498 /* Put new limit on max objects. */
2499 auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
2500 std::string err;
2501 if (std::end(add_attrs) != iter) {
2502 quota.max_objects =
2503 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
2504 if (!err.empty()) {
2505 return -EINVAL;
2506 }
2507 add_attrs.erase(iter);
2508 extracted = true;
2509 }
2510
2511 /* Put new limit on bucket (container) size. */
2512 iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
2513 if (iter != add_attrs.end()) {
2514 quota.max_size =
2515 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
2516 if (!err.empty()) {
2517 return -EINVAL;
2518 }
2519 add_attrs.erase(iter);
2520 extracted = true;
2521 }
2522
2523 for (const auto& name : rmattr_names) {
2524 /* Remove limit on max objects. */
2525 if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
2526 quota.max_objects = -1;
2527 extracted = true;
2528 }
2529
2530 /* Remove limit on max bucket size. */
2531 if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
2532 quota.max_size = -1;
2533 extracted = true;
2534 }
2535 }
2536
2537 /* Swift requries checking on raw usage instead of the 4 KiB rounded one. */
2538 quota.check_on_raw = true;
2539 quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
2540
2541 if (quota_extracted) {
2542 *quota_extracted = extracted;
2543 }
2544
2545 return 0;
2546 }
2547
2548
/* Extract Swift static-website settings from the custom attributes in
 * @add_attrs into @ws_conf, removing each consumed attribute. Attribute
 * names listed in @rmattr_names reset the corresponding setting to an
 * empty string. */
static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
                               const std::set<std::string>& rmattr_names,
                               RGWBucketWebsiteConf& ws_conf)
{
  std::string lstval;

  /* Let's define a mapping between each custom attribute and the memory where
   * attribute's value should be stored. The memory location is expressed by
   * a non-const reference. */
  const auto mapping = {
    std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
    std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
    std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
    std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
    std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
  };

  for (const auto& kv : mapping) {
    const char * const key = kv.first;
    // std::make_pair unwraps std::ref, so kv.second is a mutable string&
    // aliasing a ws_conf member (or lstval above).
    auto& target = kv.second;

    auto iter = add_attrs.find(key);

    if (std::end(add_attrs) != iter) {
      /* The "target" is a reference to ws_conf. */
      target = iter->second.c_str();
      add_attrs.erase(iter);
    }

    if (rmattr_names.count(key)) {
      target = std::string();
    }
  }

  // The listings flag travels as a string attribute; only update the
  // boolean when a value was actually supplied.
  if (! lstval.empty()) {
    ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
  }
}
2587
2588
void RGWCreateBucket::execute()
{
  RGWAccessControlPolicy old_policy(s->cct);
  buffer::list aclbl;
  buffer::list corsbl;
  bool existed;
  string bucket_name;
  rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
  rgw_raw_obj obj(store->get_zone_params().domain_root, bucket_name);
  obj_version objv, *pobjv = NULL;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  // Reject a LocationConstraint naming an unknown zonegroup API.
  if (!location_constraint.empty() &&
      !store->has_zonegroup_api(location_constraint)) {
    ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
                     << " can't be found." << dendl;
    op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
    s->err.message = "The specified location-constraint is not valid";
    return;
  }

  // On non-master zonegroups the constraint must match the local zonegroup.
  if (!store->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
      store->get_zonegroup().api_name != location_constraint) {
    ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
                     << " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")"
                     << dendl;
    op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
    s->err.message = "The specified location-constraint is not valid";
    return;
  }

  // The requested placement rule must exist in this zonegroup.
  const auto& zonegroup = store->get_zonegroup();
  if (!placement_rule.empty() &&
      !zonegroup.placement_targets.count(placement_rule)) {
    ldout(s->cct, 0) << "placement target (" << placement_rule << ")"
                     << " doesn't exist in the placement targets of zonegroup"
                     << " (" << store->get_zonegroup().api_name << ")" << dendl;
    op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
    s->err.message = "The specified placement target does not exist";
    return;
  }

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                  s->bucket_info, NULL, &s->bucket_attrs);
  if (op_ret < 0 && op_ret != -ENOENT)
    return;
  s->bucket_exists = (op_ret != -ENOENT);

  s->bucket_owner.set_id(s->user->user_id);
  s->bucket_owner.set_name(s->user->display_name);
  if (s->bucket_exists) {
    // An existing bucket must already be owned by the requester; otherwise
    // report a name conflict.
    int r = get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
                                        s->bucket_attrs, &old_policy);
    if (r >= 0)  {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket;
  uint32_t *pmaster_num_shards;
  real_time creation_time;

  // Non-master zones create the bucket on the metadata master first and
  // adopt the versions/creation time the master hands back.
  if (!store->is_meta_master()) {
    JSONParser jp;
    op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
    if (op_ret < 0) {
      return;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);
    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl;
    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = NULL;
    pmaster_num_shards = NULL;
  }

  // System requests may pin an explicit zonegroup; default to the local one.
  string zonegroup_id;

  if (s->system_request) {
    zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
    if (zonegroup_id.empty()) {
      zonegroup_id = store->get_zonegroup().get_id();
    }
  } else {
    zonegroup_id = store->get_zonegroup().get_id();
  }

  if (s->bucket_exists) {
    // Re-creating an existing bucket with a different placement rule is a
    // conflict.
    string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user), zonegroup_id,
                                            placement_rule,
                                            &selected_placement_rule, nullptr);
    // NOTE(review): the return value of select_bucket_placement() is not
    // checked before the comparison below — confirm a failure cannot leave
    // selected_placement_rule in a state that spuriously matches.
    if (selected_placement_rule != s->bucket_info.placement_rule) {
      op_ret = -EEXIST;
      return;
    }
  }

  /* Encode special metadata first as we're using std::map::emplace under
   * the hood. This method will add the new items only if the map doesn't
   * contain such keys yet. */
  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  if (has_cors) {
    cors_config.encode(corsbl);
    emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
  }

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;
  if (need_metadata_upload()) {
    /* It's supposed that following functions WILL NOT change any special
     * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
    op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
    if (op_ret < 0) {
      return;
    }
    prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
    populate_with_generic_attrs(s, attrs);

    op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
    if (op_ret < 0) {
      return;
    } else {
      pquota_info = &quota_info;
    }

    /* Web site of Swift API. */
    filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
    s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
  }

  s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  s->bucket.name = s->bucket_name;

  /* Handle updates of the metadata for Swift's object versioning. */
  if (swift_ver_location) {
    s->bucket_info.swift_ver_location = *swift_ver_location;
    s->bucket_info.swift_versioning = (! swift_ver_location->empty());
  }

  op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
                                placement_rule, s->bucket_info.swift_ver_location,
                                pquota_info, attrs,
                                info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;

  if (op_ret && op_ret != -EEXIST)
    return;

  existed = (op_ret == -EEXIST);

  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      return;
    }
    s->bucket = info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket,
                           info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
                               s->bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    op_ret = -ERR_BUCKET_EXISTS;
  }

  if (need_metadata_upload() && existed) {
    /* OK, it looks we lost race with another request. As it's required to
     * handle metadata fusion and upload, the whole operation becomes very
     * similar in nature to PutMetadataBucket. However, as the attrs may
     * changed in the meantime, we have to refresh. */
    short tries = 0;
    do {
      RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
      RGWBucketInfo binfo;
      map<string, bufferlist> battrs;

      op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
                                      binfo, nullptr, &battrs);
      if (op_ret < 0) {
        return;
      } else if (binfo.owner.compare(s->user->user_id) != 0) {
        /* New bucket doesn't belong to the account we're operating on. */
        op_ret = -EEXIST;
        return;
      } else {
        s->bucket_info = binfo;
        s->bucket_attrs = battrs;
      }

      attrs.clear();

      op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
      if (op_ret < 0) {
        return;
      }
      prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
      populate_with_generic_attrs(s, attrs);
      op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
      if (op_ret < 0) {
        return;
      }

      /* Handle updates of the metadata for Swift's object versioning. */
      if (swift_ver_location) {
        s->bucket_info.swift_ver_location = *swift_ver_location;
        s->bucket_info.swift_versioning = (! swift_ver_location->empty());
      }

      /* Web site of Swift API. */
      filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
      s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

      /* This will also set the quota on the bucket. */
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                    &s->bucket_info.objv_tracker);
    } while (op_ret == -ECANCELED && tries++ < 20);

    /* Restore the proper return code. */
    if (op_ret >= 0) {
      op_ret = -ERR_BUCKET_EXISTS;
    }
  }
}
2851
2852 int RGWDeleteBucket::verify_permission()
2853 {
2854 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucket)) {
2855 return -EACCES;
2856 }
2857
2858 return 0;
2859 }
2860
void RGWDeleteBucket::pre_exec()
{
  // Delegate to the shared bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
2865
void RGWDeleteBucket::execute()
{
  op_ret = -EINVAL;

  if (s->bucket_name.empty())
    return;

  if (!s->bucket_exists) {
    ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }
  RGWObjVersionTracker ot;
  ot.read_version = s->bucket_info.ep_objv;

  // System requests may pin the exact entry-point version (tag + ver) that
  // the delete must match.
  if (s->system_request) {
    string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
    string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
    if (!tag.empty()) {
      ot.read_version.tag = tag;
      uint64_t ver;
      string err;
      ver = strict_strtol(ver_str.c_str(), 10, &err);
      if (!err.empty()) {
        ldout(s->cct, 0) << "failed to parse ver param" << dendl;
        op_ret = -EINVAL;
        return;
      }
      ot.read_version.ver = ver;
    }
  }

  // Best-effort stats sync; a failure is logged but does not abort the
  // delete.
  op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
  if ( op_ret < 0) {
     ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
  }

  // Only empty buckets may be deleted.
  op_ret = store->check_bucket_empty(s->bucket_info);
  if (op_ret < 0) {
    return;
  }

  // Non-master zones apply the delete on the metadata master first.
  if (!store->is_meta_master()) {
    bufferlist in_data;
    op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       NULL);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        /* adjust error, we want to return with NoSuchBucket and not
         * NoSuchKey */
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return;
    }
  }

  string prefix, delimiter;

  // Swift's "path" argument scopes the multipart abort below to a pseudo
  // directory.
  if (s->prot_flags & RGW_REST_SWIFT) {
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }

  // Abort any in-flight multipart uploads before removing the bucket.
  op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);

  if (op_ret < 0) {
    return;
  }

  op_ret = store->delete_bucket(s->bucket_info, ot, false);

  if (op_ret == -ECANCELED) {
    // lost a race, either with mdlog sync or another delete bucket operation.
    // in either case, we've already called rgw_unlink_bucket()
    op_ret = 0;
    return;
  }

  // On successful removal, drop the bucket from the owner's bucket list;
  // unlink failure is logged but not fatal.
  if (op_ret == 0) {
    op_ret = rgw_unlink_bucket(store, s->bucket_info.owner, s->bucket.tenant,
                               s->bucket.name, false);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  }

  if (op_ret < 0) {
    return;
  }


}
2967
int RGWPutObj::verify_permission()
{
  // For a copy-source PUT, first verify read access on the source object.
  if (! copy_source.empty()) {

    RGWAccessControlPolicy cs_acl(s->cct);
    optional<Policy> policy;
    map<string, bufferlist> cs_attrs;
    rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
    rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);

    rgw_obj obj(cs_bucket, cs_object);
    store->set_atomic(s->obj_ctx, obj);
    store->set_prefetch_data(s->obj_ctx, obj);

    /* check source object permissions */
    if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl,
                        policy, cs_bucket, cs_object) < 0) {
      return -EACCES;
    }

    /* admin request overrides permission checks */
    if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
      if (policy) {
        // With a source bucket policy: an explicit Deny wins; a Pass falls
        // through to the source object ACL.
        auto e = policy->eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                           RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  // Destination: the bucket policy is consulted first (Allow and Deny are
  // both decisive); otherwise fall back to the plain bucket ACL check.
  if (s->iam_policy) {
    auto e = s->iam_policy->eval(s->env, *s->auth.identity,
                                 rgw::IAM::s3PutObject,
                                 rgw_obj(s->bucket, s->object));
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    }
  }

  if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
3027
// Expose the processor's multipart descriptor via out-param; the pointee
// remains owned by this processor.
void RGWPutObjProcessor_Multipart::get_mp(RGWMPObj** _mp){
  *_mp = &mp;
}
3031
int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand)
{
  // The multipart descriptor is keyed by object name + uploadId, or by a
  // caller-supplied random suffix when one is given.
  string oid = obj_str;
  upload_id = s->info.args.get("uploadId");
  if (!oid_rand) {
    mp.init(oid, upload_id);
  } else {
    mp.init(oid, upload_id, *oid_rand);
  }

  // partNumber is mandatory and must be numeric.
  part_num = s->info.args.get("partNumber");
  if (part_num.empty()) {
    ldout(s->cct, 10) << "part number is empty" << dendl;
    return -EINVAL;
  }

  string err;
  uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);

  if (!err.empty()) {
    ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
    return -EINVAL;
  }

  // Part data objects are laid out under "<oid>.<uploadId>" (or
  // "<oid>.<rand>" when a random suffix was supplied).
  string upload_prefix = oid + ".";

  if (!oid_rand) {
    upload_prefix.append(upload_id);
  } else {
    upload_prefix.append(*oid_rand);
  }

  rgw_obj target_obj;
  target_obj.init(bucket, oid);

  manifest.set_prefix(upload_prefix);

  manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, num);

  int r = manifest_gen.create_begin(store->ctx(), &manifest, s->bucket_info.placement_rule, bucket, target_obj);
  if (r < 0) {
    return r;
  }

  // Head object for this part; index placement is hashed on the base
  // object name (index_hash_source).
  cur_obj = manifest_gen.get_cur_obj(store);
  rgw_raw_obj_to_obj(bucket, cur_obj, &head_obj);
  head_obj.index_hash_source = obj_str;

  r = prepare_init(store, NULL);
  if (r < 0) {
    return r;
  }

  return 0;
}
3087
int RGWPutObjProcessor_Multipart::do_complete(size_t accounted_size,
                                              const string& etag,
                                              real_time *mtime, real_time set_mtime,
                                              map<string, bufferlist>& attrs,
                                              real_time delete_at,
                                              const char *if_match,
                                              const char *if_nomatch, const string *user_data, rgw_zone_set *zones_trace)
{
  // Flush any pending buffered writes before committing part metadata.
  complete_writing_data();

  // Versioning is disabled for part head objects; it applies only to the
  // final assembled object.
  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, head_obj);
  op_target.set_versioning_disabled(true);
  RGWRados::Object::Write head_obj_op(&op_target);

  head_obj_op.meta.set_mtime = set_mtime;
  head_obj_op.meta.mtime = mtime;
  head_obj_op.meta.owner = s->owner.get_id();
  head_obj_op.meta.delete_at = delete_at;
  head_obj_op.meta.zones_trace = zones_trace;
  head_obj_op.meta.modify_tail = true;

  int r = head_obj_op.write_meta(obj_len, accounted_size, attrs);
  if (r < 0)
    return r;

  // Record the part in the upload's meta-object omap. For v2 upload ids the
  // omap key is zero-padded ("part.%08d") so keys sort in numeric order.
  bufferlist bl;
  RGWUploadPartInfo info;
  string p = "part.";
  bool sorted_omap = is_v2_upload_id(upload_id);

  if (sorted_omap) {
    string err;
    int part_num_int = strict_strtol(part_num.c_str(), 10, &err);
    if (!err.empty()) {
      dout(10) << "bad part number specified: " << part_num << dendl;
      return -EINVAL;
    }
    char buf[32];
    snprintf(buf, sizeof(buf), "%08d", part_num_int);
    p.append(buf);
  } else {
    p.append(part_num);
  }
  info.num = atoi(part_num.c_str());
  info.etag = etag;
  info.size = obj_len;
  info.accounted_size = accounted_size;
  info.modified = real_clock::now();
  info.manifest = manifest;

  // Carry the part's compression info (if any) so completion can stitch
  // the final object correctly.
  bool compressed;
  r = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
  if (r < 0) {
    dout(1) << "cannot get compression info" << dendl;
    return r;
  }

  ::encode(info, bl);

  string multipart_meta_obj = mp.get_meta();

  rgw_obj meta_obj;
  meta_obj.init_ns(bucket, multipart_meta_obj, mp_ns);
  meta_obj.set_in_extra_data(true);

  rgw_raw_obj raw_meta_obj;

  store->obj_to_raw(s->bucket_info.placement_rule, meta_obj, &raw_meta_obj);

  r = store->omap_set(raw_meta_obj, p, bl);

  return r;
}
3161
3162 RGWPutObjProcessor *RGWPutObj::select_processor(RGWObjectCtx& obj_ctx, bool *is_multipart)
3163 {
3164 RGWPutObjProcessor *processor;
3165
3166 bool multipart = s->info.args.exists("uploadId");
3167
3168 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3169
3170 if (!multipart) {
3171 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3172 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_olh_epoch(olh_epoch);
3173 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_version_id(version_id);
3174 } else {
3175 processor = new RGWPutObjProcessor_Multipart(obj_ctx, s->bucket_info, part_size, s);
3176 }
3177
3178 if (is_multipart) {
3179 *is_multipart = multipart;
3180 }
3181
3182 return processor;
3183 }
3184
void RGWPutObj::dispose_processor(RGWPutObjDataProcessor *processor)
{
  // Processors are heap-allocated in select_processor(); reclaim here.
  delete processor;
}
3189
void RGWPutObj::pre_exec()
{
  // Delegate to the shared bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
3194
/* Data callback adapter: funnels chunks read from a copy source back into
 * RGWPutObj::get_data_cb(). */
class RGWPutObj_CB : public RGWGetDataCB
{
  RGWPutObj *op;  // parent op; not owned
public:
  RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
  ~RGWPutObj_CB() override {}

  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
    return op->get_data_cb(bl, bl_ofs, bl_len);
  }
};
3206
3207 int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3208 {
3209 bufferlist bl_tmp;
3210 bl.copy(bl_ofs, bl_len, bl_tmp);
3211
3212 bl_aux.append(bl_tmp);
3213
3214 return bl_len;
3215 }
3216
3217 int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3218 {
3219 RGWPutObj_CB cb(this);
3220 RGWGetDataCB* filter = &cb;
3221 boost::optional<RGWGetObj_Decompress> decompress;
3222 std::unique_ptr<RGWGetDataCB> decrypt;
3223 RGWCompressionInfo cs_info;
3224 map<string, bufferlist> attrs;
3225 map<string, bufferlist>::iterator attr_iter;
3226 int ret = 0;
3227
3228 uint64_t obj_size;
3229 int64_t new_ofs, new_end;
3230
3231 new_ofs = fst;
3232 new_end = lst;
3233
3234 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3235 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3236
3237 RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3238 RGWRados::Object::Read read_op(&op_target);
3239 read_op.params.obj_size = &obj_size;
3240 read_op.params.attrs = &attrs;
3241
3242 ret = read_op.prepare();
3243 if (ret < 0)
3244 return ret;
3245
3246 bool need_decompress;
3247 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3248 if (op_ret < 0) {
3249 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
3250 return -EIO;
3251 }
3252
3253 bool partial_content = true;
3254 if (need_decompress)
3255 {
3256 obj_size = cs_info.orig_size;
3257 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3258 filter = &*decompress;
3259 }
3260
3261 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3262 op_ret = this->get_decrypt_filter(&decrypt,
3263 filter,
3264 attrs,
3265 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3266 if (decrypt != nullptr) {
3267 filter = decrypt.get();
3268 }
3269 if (op_ret < 0) {
3270 return ret;
3271 }
3272
3273 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3274 if (ret < 0)
3275 return ret;
3276
3277 filter->fixup_range(new_ofs, new_end);
3278 ret = read_op.iterate(new_ofs, new_end, filter);
3279
3280 if (ret >= 0)
3281 ret = filter->flush();
3282
3283 bl.claim_append(bl_aux);
3284
3285 return ret;
3286 }
3287
3288 // special handling for compression type = "random" with multipart uploads
3289 static CompressorRef get_compressor_plugin(const req_state *s,
3290 const std::string& compression_type)
3291 {
3292 if (compression_type != "random") {
3293 return Compressor::create(s->cct, compression_type);
3294 }
3295
3296 bool is_multipart{false};
3297 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3298
3299 if (!is_multipart) {
3300 return Compressor::create(s->cct, compression_type);
3301 }
3302
3303 // use a hash of the multipart upload id so all parts use the same plugin
3304 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3305 if (alg == Compressor::COMP_ALG_NONE) {
3306 return nullptr;
3307 }
3308 return Compressor::create(s->cct, alg);
3309 }
3310
/* Main PUT-object entry point.
 *
 * Streams the request body (or a server-side copy-source range) through
 * an optional encryption-or-compression filter chain into a
 * RGWPutObjProcessor, then finalizes the object: ETag/MD5 verification,
 * ACL, DLO/SLO manifests, user metadata, tags, and (optionally) torrent
 * data. Uses `goto done` so the processor is always disposed and the
 * latency perf counter always updated on every exit path (except the
 * early !bucket_exists return, where no processor exists yet). */
void RGWPutObj::execute()
{
  RGWPutObjProcessor *processor = NULL;
  RGWPutObjDataProcessor *filter = nullptr;
  std::unique_ptr<RGWPutObjDataProcessor> encrypt;
  char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
  char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  MD5 hash;
  bufferlist bl, aclbl, bs;
  int len;
  map<string, string>::iterator iter;
  bool multipart;

  off_t fst;
  off_t lst;
  const auto& compression_type = store->get_zone_params().get_compression_type(
      s->bucket_info.placement_rule);
  CompressorRef plugin;
  boost::optional<RGWPutObj_Compress> compressor;

  /* DLO/SLO manifests get their ETag computed over the manifest below,
   * so the streaming MD5 is skipped unless the client supplied one. */
  bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
  perfcounter->inc(l_rgw_put);
  op_ret = -EINVAL;
  if (s->object.empty()) {
    goto done;
  }

  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_system_versioning_params() returned ret="
                      << op_ret << dendl;
    goto done;
  }

  /* Decode the client-supplied Content-MD5 (base64) for later
   * verification against the computed digest. */
  if (supplied_md5_b64) {
    need_calc_md5 = true;

    ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
    op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
                          supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
    ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
    if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
      op_ret = -ERR_INVALID_DIGEST;
      goto done;
    }

    buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
    ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
  }

  if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
                            we also check sizes at the end anyway */
    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                                user_quota, bucket_quota, s->content_length);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl;
      goto done;
    }
    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
      goto done;
    }
  }

  /* A supplied ETag (e.g. from a copy) pre-seeds supplied_md5 for the
   * final comparison. */
  if (supplied_etag) {
    strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
    supplied_md5[sizeof(supplied_md5) - 1] = '\0';
  }

  processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);

  // no filters by default
  filter = processor;

  /* Handle object versioning of Swift API. */
  if (! multipart) {
    rgw_obj obj(s->bucket, s->object);
    op_ret = store->swift_versioning_copy(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                                          s->bucket_owner.get_id(),
                                          s->bucket_info,
                                          obj);
    if (op_ret < 0) {
      goto done;
    }
  }

  op_ret = processor->prepare(store, NULL);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret
                      << dendl;
    goto done;
  }

  /* Server-side copy without an explicit range: copy the whole source
   * object, so derive the end offset from the source's size. */
  if ((! copy_source.empty()) && !copy_source_range) {
    rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
    rgw_obj obj(copy_source_bucket_info.bucket, obj_key.name);

    RGWObjState *astate;
    op_ret = store->get_obj_state(static_cast<RGWObjectCtx *>(s->obj_ctx),
                                  copy_source_bucket_info, obj, &astate, true, false);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
      goto done;
    }
    if (!astate->exists){
      op_ret = -ENOENT;
      goto done;
    }
    lst = astate->accounted_size - 1;
  } else {
    lst = copy_source_range_lst;
  }

  fst = copy_source_range_fst;

  /* Encryption and compression are mutually exclusive: if an encrypt
   * filter is installed, compression is skipped entirely. */
  op_ret = get_encrypt_filter(&encrypt, filter);
  if (op_ret < 0) {
    goto done;
  }
  if (encrypt != nullptr) {
    filter = encrypt.get();
  } else {
    //no encryption, we can try compression
    if (compression_type != "none") {
      plugin = get_compressor_plugin(s, compression_type);
      if (!plugin) {
        ldout(s->cct, 1) << "Cannot load plugin for compression type "
                         << compression_type << dendl;
      } else {
        compressor.emplace(s->cct, plugin, filter);
        filter = &*compressor;
      }
    }
  }

  /* Main data pump: read from the client (or copy source) and push each
   * chunk through the filter chain into the processor. */
  do {
    bufferlist data;
    if (fst > lst)
      break;
    if (copy_source.empty()) {
      len = get_data(data);
    } else {
      uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
      op_ret = get_data(fst, cur_lst, data);
      if (op_ret < 0)
        goto done;
      len = data.length();
      s->content_length += len;
      fst += len;
    }
    if (len < 0) {
      op_ret = len;
      goto done;
    }

    if (need_calc_md5) {
      hash.Update((const byte *)data.c_str(), data.length());
    }

    /* update torrent */
    torrent.update(data);

    /* do we need this operation to be synchronous? if we're dealing with an object with immutable
     * head, e.g., multipart object we need to make sure we're the first one writing to this object
     */
    bool need_to_wait = (ofs == 0) && multipart;

    bufferlist orig_data;

    /* keep a copy so the first chunk can be replayed if the write races */
    if (need_to_wait) {
      orig_data = data;
    }

    op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait);
    if (op_ret < 0) {
      if (op_ret != -EEXIST) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
                          << op_ret << dendl;
        goto done;
      }
      /* need_to_wait == true and op_ret == -EEXIST */
      ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl;

      /* restore original data */
      data.swap(orig_data);

      /* restart processing with different oid suffix */

      dispose_processor(processor);
      processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
      filter = processor;

      string oid_rand;
      char buf[33];
      gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
      oid_rand.append(buf);

      op_ret = processor->prepare(store, &oid_rand);
      if (op_ret < 0) {
        ldout(s->cct, 0) << "ERROR: processor->prepare() returned "
                         << op_ret << dendl;
        goto done;
      }

      /* rebuild the filter chain on top of the fresh processor */
      op_ret = get_encrypt_filter(&encrypt, filter);
      if (op_ret < 0) {
        goto done;
      }
      if (encrypt != nullptr) {
        filter = encrypt.get();
      } else {
        if (compressor) {
          compressor.emplace(s->cct, plugin, filter);
          filter = &*compressor;
        }
      }
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        goto done;
      }
    }

    ofs += len;
  } while (len > 0);

  /* flush any data buffered inside the filter chain */
  {
    bufferlist flush;
    op_ret = put_data_and_throttle(filter, flush, ofs, false);
    if (op_ret < 0) {
      goto done;
    }
  }

  /* For non-chunked uploads the byte count must match Content-Length;
   * a short read means the client went away. */
  if (!chunked_upload && ofs != s->content_length) {
    op_ret = -ERR_REQUEST_TIMEOUT;
    goto done;
  }
  s->obj_size = ofs;

  perfcounter->inc(l_rgw_put_b, s->obj_size);

  op_ret = do_aws4_auth_completion();
  if (op_ret < 0) {
    goto done;
  }

  /* re-check quota against the actual uploaded size */
  op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                              user_quota, bucket_quota, s->obj_size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
    goto done;
  }

  hash.Final(m);

  /* record compression metadata so reads can decompress */
  if (compressor && compressor->is_compressed()) {
    bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
    ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION
                      << " with type=" << cs_info.compression_type
                      << ", orig_size=" << cs_info.orig_size
                      << ", blocks=" << cs_info.blocks.size() << dendl;
  }

  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  etag = calc_md5;

  if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
    op_ret = -ERR_BAD_DIGEST;
    goto done;
  }

  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  /* DLO: the ETag is recomputed over the manifest, not the data */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      goto done;
    }
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  /* SLO: store the manifest and fold its raw bytes into the ETag */
  if (slo_info) {
    bufferlist manifest_bl;
    ::encode(*slo_info, manifest_bl);
    emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));

    hash.Update((byte *)slo_info->raw_data, slo_info->raw_data_len);
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  if (supplied_etag && etag.compare(supplied_etag) != 0) {
    op_ret = -ERR_UNPROCESSABLE_ENTITY;
    goto done;
  }
  bl.append(etag.c_str(), etag.size() + 1);
  emplace_attr(RGW_ATTR_ETAG, std::move(bl));

  populate_with_generic_attrs(s, attrs);
  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    goto done;
  }
  encode_delete_at_attr(delete_at, attrs);
  encode_obj_tags_attr(obj_tags.get(), attrs);

  /* Add a custom metadata to expose the information whether an object
   * is an SLO or not. Appending the attribute must be performed AFTER
   * processing any input from user in order to prohibit overwriting. */
  if (slo_info) {
    bufferlist slo_userindicator_bl;
    slo_userindicator_bl.append("True", 4);
    emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
  }

  op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
                               (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
                               (user_data.empty() ? nullptr : &user_data));

  /* produce torrent */
  if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
  {
    torrent.init(s, store);
    torrent.set_create_date(mtime);
    op_ret = torrent.complete();
    if (0 != op_ret)
    {
      ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
      goto done;
    }
  }

done:
  /* centralized cleanup: all exit paths (after processor allocation)
   * funnel through here */
  dispose_processor(processor);
  perfcounter->tinc(l_rgw_put_lat,
                    (ceph_clock_now() - s->time));
}
3678
/* No-op: the actual IAM/ACL checks for POST uploads are performed inside
 * execute(), after the POST form (and thus the target object name) has
 * been parsed. */
int RGWPostObj::verify_permission()
{
  return 0;
}
3683 /*
3684 RGWPutObjProcessor *RGWPostObj::select_processor(RGWObjectCtx& obj_ctx)
3685 {
3686 RGWPutObjProcessor *processor;
3687
3688 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3689
3690 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3691
3692 return processor;
3693 }
3694
3695 void RGWPostObj::dispose_processor(RGWPutObjDataProcessor *processor)
3696 {
3697 delete processor;
3698 }
3699 */
/* Pre-execution hook shared by bucket/object operations. */
void RGWPostObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3704
3705 void RGWPostObj::execute()
3706 {
3707 RGWPutObjDataProcessor *filter = nullptr;
3708 boost::optional<RGWPutObj_Compress> compressor;
3709 CompressorRef plugin;
3710 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3711
3712 /* Read in the data from the POST form. */
3713 op_ret = get_params();
3714 if (op_ret < 0) {
3715 return;
3716 }
3717
3718 op_ret = verify_params();
3719 if (op_ret < 0) {
3720 return;
3721 }
3722
3723 if (s->iam_policy) {
3724 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
3725 rgw::IAM::s3PutObject,
3726 rgw_obj(s->bucket, s->object));
3727 if (e == Effect::Deny) {
3728 op_ret = -EACCES;
3729 return;
3730 } else if (e == Effect::Pass && !verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3731 op_ret = -EACCES;
3732 return;
3733 }
3734 } else if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3735 op_ret = -EACCES;
3736 return;
3737 }
3738
3739 /* Start iteration over data fields. It's necessary as Swift's FormPost
3740 * is capable to handle multiple files in single form. */
3741 do {
3742 std::unique_ptr<RGWPutObjDataProcessor> encrypt;
3743 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3744 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3745 MD5 hash;
3746 ceph::buffer::list bl, aclbl;
3747 int len = 0;
3748
3749 op_ret = store->check_quota(s->bucket_owner.get_id(),
3750 s->bucket,
3751 user_quota,
3752 bucket_quota,
3753 s->content_length);
3754 if (op_ret < 0) {
3755 return;
3756 }
3757
3758 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3759 if (op_ret < 0) {
3760 return;
3761 }
3762
3763 if (supplied_md5_b64) {
3764 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3765 ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3766 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3767 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3768 ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
3769 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3770 op_ret = -ERR_INVALID_DIGEST;
3771 return;
3772 }
3773
3774 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
3775 ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
3776 }
3777
3778 RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx),
3779 s->bucket_info,
3780 s->bucket,
3781 get_current_filename(),
3782 /* part size */
3783 s->cct->_conf->rgw_obj_stripe_size,
3784 s->req_id,
3785 s->bucket_info.versioning_enabled());
3786 processor.set_olh_epoch(0);
3787 /* No filters by default. */
3788 filter = &processor;
3789
3790 op_ret = processor.prepare(store, nullptr);
3791 if (op_ret < 0) {
3792 return;
3793 }
3794
3795 op_ret = get_encrypt_filter(&encrypt, filter);
3796 if (op_ret < 0) {
3797 return;
3798 }
3799 if (encrypt != nullptr) {
3800 filter = encrypt.get();
3801 } else {
3802 const auto& compression_type = store->get_zone_params().get_compression_type(
3803 s->bucket_info.placement_rule);
3804 if (compression_type != "none") {
3805 plugin = Compressor::create(s->cct, compression_type);
3806 if (!plugin) {
3807 ldout(s->cct, 1) << "Cannot load plugin for compression type "
3808 << compression_type << dendl;
3809 } else {
3810 compressor.emplace(s->cct, plugin, filter);
3811 filter = &*compressor;
3812 }
3813 }
3814 }
3815
3816 bool again;
3817 do {
3818 ceph::bufferlist data;
3819 len = get_data(data, again);
3820
3821 if (len < 0) {
3822 op_ret = len;
3823 return;
3824 }
3825
3826 if (!len) {
3827 break;
3828 }
3829
3830 hash.Update((const byte *)data.c_str(), data.length());
3831 op_ret = put_data_and_throttle(filter, data, ofs, false);
3832
3833 ofs += len;
3834
3835 if (ofs > max_len) {
3836 op_ret = -ERR_TOO_LARGE;
3837 return;
3838 }
3839 } while (again);
3840
3841 {
3842 bufferlist flush;
3843 op_ret = put_data_and_throttle(filter, flush, ofs, false);
3844 }
3845
3846 if (len < min_len) {
3847 op_ret = -ERR_TOO_SMALL;
3848 return;
3849 }
3850
3851 s->obj_size = ofs;
3852
3853 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
3854 op_ret = -ERR_BAD_DIGEST;
3855 return;
3856 }
3857
3858 op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
3859 user_quota, bucket_quota, s->obj_size);
3860 if (op_ret < 0) {
3861 return;
3862 }
3863
3864 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3865 if (op_ret < 0) {
3866 return;
3867 }
3868
3869 hash.Final(m);
3870 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
3871
3872 etag = calc_md5;
3873 bl.append(etag.c_str(), etag.size() + 1);
3874 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
3875
3876 policy.encode(aclbl);
3877 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3878
3879 const std::string content_type = get_current_content_type();
3880 if (! content_type.empty()) {
3881 ceph::bufferlist ct_bl;
3882 ct_bl.append(content_type.c_str(), content_type.size() + 1);
3883 emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
3884 }
3885
3886 if (compressor && compressor->is_compressed()) {
3887 ceph::bufferlist tmp;
3888 RGWCompressionInfo cs_info;
3889 cs_info.compression_type = plugin->get_type_name();
3890 cs_info.orig_size = s->obj_size;
3891 cs_info.blocks = move(compressor->get_compression_blocks());
3892 ::encode(cs_info, tmp);
3893 emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
3894 }
3895
3896 op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(),
3897 attrs, (delete_at ? *delete_at : real_time()));
3898 } while (is_next_file_to_upload());
3899 }
3900
3901
3902 void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
3903 const set<string>& rmattr_names,
3904 map<int, string>& temp_url_keys)
3905 {
3906 map<string, bufferlist>::iterator iter;
3907
3908 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
3909 if (iter != add_attrs.end()) {
3910 temp_url_keys[0] = iter->second.c_str();
3911 add_attrs.erase(iter);
3912 }
3913
3914 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
3915 if (iter != add_attrs.end()) {
3916 temp_url_keys[1] = iter->second.c_str();
3917 add_attrs.erase(iter);
3918 }
3919
3920 for (const string& name : rmattr_names) {
3921 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
3922 temp_url_keys[0] = string();
3923 }
3924 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
3925 temp_url_keys[1] = string();
3926 }
3927 }
3928 }
3929
/* Gather everything verify_permission()/execute() will need: request
 * params, the account's current xattrs, the merged attribute set to
 * store, plus the TempURL keys and quota settings extracted from it.
 * Returns 0 on success or a negative error code. */
int RGWPutMetadataAccount::init_processing()
{
  /* First, go to the base class. At the time of writing the method was
   * responsible only for initializing the quota. This isn't necessary
   * here as we are touching metadata only. I'm putting this call only
   * for the future. */
  op_ret = RGWOp::init_processing();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }

  /* Load the account's currently stored xattrs; needed below to merge
   * adds/removals against the existing state. */
  op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
                                     &acct_op_tracker);
  if (op_ret < 0) {
    return op_ret;
  }

  if (has_policy) {
    bufferlist acl_bl;
    policy.encode(acl_bl);
    attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return op_ret;
  }
  prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* Try extract the TempURL-related stuff now to allow verify_permission
   * evaluate whether we need FULL_CONTROL or not. */
  filter_out_temp_url(attrs, rmattr_names, temp_url_keys);

  /* The same with quota except a client needs to be reseller admin. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
                                 &new_quota_extracted);
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
3978
3979 int RGWPutMetadataAccount::verify_permission()
3980 {
3981 if (s->auth.identity->is_anonymous()) {
3982 return -EACCES;
3983 }
3984
3985 if (!verify_user_permission(s, RGW_PERM_WRITE)) {
3986 return -EACCES;
3987 }
3988
3989 /* Altering TempURL keys requires FULL_CONTROL. */
3990 if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
3991 return -EPERM;
3992 }
3993
3994 /* We are failing this intensionally to allow system user/reseller admin
3995 * override in rgw_process.cc. This is the way to specify a given RGWOp
3996 * expect extra privileges. */
3997 if (new_quota_extracted) {
3998 return -EACCES;
3999 }
4000
4001 return 0;
4002 }
4003
4004 void RGWPutMetadataAccount::execute()
4005 {
4006 /* Params have been extracted earlier. See init_processing(). */
4007 RGWUserInfo new_uinfo;
4008 op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
4009 &acct_op_tracker);
4010 if (op_ret < 0) {
4011 return;
4012 }
4013
4014 /* Handle the TempURL-related stuff. */
4015 if (!temp_url_keys.empty()) {
4016 for (auto& pair : temp_url_keys) {
4017 new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
4018 }
4019 }
4020
4021 /* Handle the quota extracted at the verify_permission step. */
4022 if (new_quota_extracted) {
4023 new_uinfo.user_quota = std::move(new_quota);
4024 }
4025
4026 /* We are passing here the current (old) user info to allow the function
4027 * optimize-out some operations. */
4028 op_ret = rgw_store_user_info(store, new_uinfo, s->user,
4029 &acct_op_tracker, real_time(), false, &attrs);
4030 }
4031
4032 int RGWPutMetadataBucket::verify_permission()
4033 {
4034 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
4035 return -EACCES;
4036 }
4037
4038 return 0;
4039 }
4040
/* Pre-execution hook shared by bucket/object operations. */
void RGWPutMetadataBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4045
/* Update bucket metadata (Swift container POST): merges ACL, CORS,
 * generic/user attrs, quota, versioning location and website config into
 * the bucket's attribute set, retrying the write if it races with a
 * concurrent bucket-info update (retry_raced_bucket_write). */
void RGWPutMetadataBucket::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return;
  }

  /* A placement rule cannot be changed after bucket creation. */
  if (!placement_rule.empty() &&
      placement_rule != s->bucket_info.placement_rule) {
    op_ret = -EEXIST;
    return;
  }

  op_ret = retry_raced_bucket_write(store, s, [this] {
    /* Encode special metadata first as we're using std::map::emplace under
     * the hood. This method will add the new items only if the map doesn't
     * contain such keys yet. */
    if (has_policy) {
      if (s->dialect.compare("swift") == 0) {
        /* Swift semantics: merge the incoming ACL with the existing one,
         * honoring the read/write mask, before storing. */
        auto old_policy = \
          static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
        auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
        new_policy->filter_merge(policy_rw_mask, old_policy);
        policy = *new_policy;
      }
      buffer::list bl;
      policy.encode(bl);
      emplace_attr(RGW_ATTR_ACL, std::move(bl));
    }

    if (has_cors) {
      buffer::list bl;
      cors_config.encode(bl);
      emplace_attr(RGW_ATTR_CORS, std::move(bl));
    }

    /* It's supposed that following functions WILL NOT change any
     * special attributes (like RGW_ATTR_ACL) if they are already
     * present in attrs. */
    prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
    populate_with_generic_attrs(s, attrs);

    /* According to the Swift's behaviour and its container_quota
     * WSGI middleware implementation: anyone with write permissions
     * is able to set the bucket quota. This stays in contrast to
     * account quotas that can be set only by clients holding
     * reseller admin privileges. */
    op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
    if (op_ret < 0) {
      return op_ret;
    }

    if (swift_ver_location) {
      s->bucket_info.swift_ver_location = *swift_ver_location;
      s->bucket_info.swift_versioning = (!swift_ver_location->empty());
    }

    /* Web site of Swift API. */
    filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
    s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

    /* Setting attributes also stores the provided bucket info. Due
     * to this fact, the new quota settings can be serialized with
     * the same call. */
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                  &s->bucket_info.objv_tracker);
    return op_ret;
  });
}
4120
4121 int RGWPutMetadataObject::verify_permission()
4122 {
4123 // This looks to be something specific to Swift. We could add
4124 // operations like swift:PutMetadataObject to the Policy Engine.
4125 if (!verify_object_permission_no_policy(s, RGW_PERM_WRITE)) {
4126 return -EACCES;
4127 }
4128
4129 return 0;
4130 }
4131
/* Pre-execution hook shared by bucket/object operations. */
void RGWPutMetadataObject::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4136
/* Replace an object's user metadata (Swift object POST): reads the
 * object's current attrs, computes the add/remove delta against the
 * request metadata, and writes the result with set_attrs(). */
void RGWPutMetadataObject::execute()
{
  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs, orig_attrs, rmattrs;

  store->set_atomic(s->obj_ctx, obj);

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  /* check if obj exists, read orig attrs */
  op_ret = get_obj_attrs(store, s, obj, orig_attrs);
  if (op_ret < 0) {
    return;
  }

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(orig_attrs)) {
    op_ret = -ENOENT;
    return;
  }

  /* Filter currently existing attributes. */
  prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
  populate_with_generic_attrs(s, attrs);
  encode_delete_at_attr(delete_at, attrs);

  /* DLO manifest header, if present, is stored as an attribute too */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
}
4182
4183 int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
4184 {
4185 RGWSLOInfo slo_info;
4186 bufferlist::iterator bliter = bl.begin();
4187 try {
4188 ::decode(slo_info, bliter);
4189 } catch (buffer::error& err) {
4190 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
4191 return -EIO;
4192 }
4193
4194 try {
4195 deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
4196 new RGWBulkDelete::Deleter(store, s));
4197 } catch (std::bad_alloc) {
4198 return -ENOMEM;
4199 }
4200
4201 list<RGWBulkDelete::acct_path_t> items;
4202 for (const auto& iter : slo_info.entries) {
4203 const string& path_str = iter.path;
4204
4205 const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
4206 if (boost::string_view::npos == sep_pos) {
4207 return -EINVAL;
4208 }
4209
4210 RGWBulkDelete::acct_path_t path;
4211
4212 path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
4213 path.obj_key = url_decode(path_str.substr(sep_pos + 1));
4214
4215 items.push_back(path);
4216 }
4217
4218 /* Request removal of the manifest object itself. */
4219 RGWBulkDelete::acct_path_t path;
4220 path.bucket_name = s->bucket_name;
4221 path.obj_key = s->object;
4222 items.push_back(path);
4223
4224 int ret = deleter->delete_chunk(items);
4225 if (ret < 0) {
4226 return ret;
4227 }
4228
4229 return 0;
4230 }
4231
4232 int RGWDeleteObj::verify_permission()
4233 {
4234 if (s->iam_policy) {
4235 auto r = s->iam_policy->eval(s->env, *s->auth.identity,
4236 s->object.instance.empty() ?
4237 rgw::IAM::s3DeleteObject :
4238 rgw::IAM::s3DeleteObjectVersion,
4239 ARN(s->bucket, s->object.name));
4240 if (r == Effect::Allow)
4241 return 0;
4242 else if (r == Effect::Deny)
4243 return -EACCES;
4244 }
4245
4246 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
4247 return -EACCES;
4248 }
4249
4250 return 0;
4251 }
4252
/* Pre-execution hook shared by bucket/object operations. */
void RGWDeleteObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4257
/* Delete an object (or SLO manifest + segments). Handles three paths:
 * multipart/SLO deletion via handle_slo_manifest(), Swift versioning
 * restore (which replaces the delete by restoring a prior version), and
 * the regular delete (possibly producing a delete marker on versioned
 * buckets). */
void RGWDeleteObj::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs;


  if (!s->object.empty()) {
    if (need_object_expiration() || multipart_delete) {
      /* check if obj exists, read orig attrs */
      op_ret = get_obj_attrs(store, s, obj, attrs);
      if (op_ret < 0) {
        return;
      }
    }

    /* Swift "multipart-manifest=delete": the target must be an SLO
     * manifest; its segments are bulk-deleted along with it. */
    if (multipart_delete) {
      const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);

      if (slo_attr != attrs.end()) {
        op_ret = handle_slo_manifest(slo_attr->second);
        if (op_ret < 0) {
          ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
        }
      } else {
        op_ret = -ERR_NOT_SLO_MANIFEST;
      }

      return;
    }

    RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
    obj_ctx->obj.set_atomic(obj);

    /* Swift object versioning may satisfy the delete by restoring the
     * newest archived version instead of removing the object. */
    bool ver_restored = false;
    op_ret = store->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
                                             s->bucket_info, obj, ver_restored);
    if (op_ret < 0) {
      return;
    }

    if (!ver_restored) {
      /* Swift's versioning mechanism hasn't found any previous version of
       * the object that could be restored. This means we should proceed
       * with the regular delete path. */
      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
      RGWRados::Object::Delete del_op(&del_target);

      op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
                                            &del_op.params.marker_version_id);
      if (op_ret < 0) {
        return;
      }

      del_op.params.bucket_owner = s->bucket_owner.get_id();
      del_op.params.versioning_status = s->bucket_info.versioning_status();
      del_op.params.obj_owner = s->owner;
      del_op.params.unmod_since = unmod_since;
      del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */

      op_ret = del_op.delete_obj();
      if (op_ret >= 0) {
        /* surface versioning results (delete marker / version id) to the
         * response layer */
        delete_marker = del_op.result.delete_marker;
        version_id = del_op.result.version_id;
      }

      /* Check whether the object has expired. Swift API documentation
       * stands that we should return 404 Not Found in such case. */
      if (need_object_expiration() && object_is_expired(attrs)) {
        op_ret = -ENOENT;
        return;
      }
    }

    /* a raced/cancelled delete still counts as success for the client */
    if (op_ret == -ECANCELED) {
      op_ret = 0;
    }
    if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
      op_ret = 0;
    }
  } else {
    op_ret = -EINVAL;
  }
}
4351
4352 bool RGWCopyObj::parse_copy_location(const boost::string_view& url_src,
4353 string& bucket_name,
4354 rgw_obj_key& key)
4355 {
4356 boost::string_view name_str;
4357 boost::string_view params_str;
4358
4359 size_t pos = url_src.find('?');
4360 if (pos == string::npos) {
4361 name_str = url_src;
4362 } else {
4363 name_str = url_src.substr(0, pos);
4364 params_str = url_src.substr(pos + 1);
4365 }
4366
4367 boost::string_view dec_src{name_str};
4368 if (dec_src[0] == '/')
4369 dec_src.remove_prefix(1);
4370
4371 pos = dec_src.find('/');
4372 if (pos ==string::npos)
4373 return false;
4374
4375 boost::string_view bn_view{dec_src.substr(0, pos)};
4376 bucket_name = std::string{bn_view.data(), bn_view.size()};
4377
4378 boost::string_view kn_view{dec_src.substr(pos + 1)};
4379 key.name = std::string{kn_view.data(), kn_view.size()};
4380
4381 if (key.name.empty()) {
4382 return false;
4383 }
4384
4385 if (! params_str.empty()) {
4386 RGWHTTPArgs args;
4387 args.set(params_str.to_string());
4388 args.parse();
4389
4390 key.instance = args.get("versionId", NULL);
4391 }
4392
4393 return true;
4394 }
4395
int RGWCopyObj::verify_permission()
{
  /* Authorize both halves of a copy: read access on the source object
   * (policy first, then ACL) and write access on the destination bucket.
   * Also resolves the source/destination bucket infos used by execute(). */
  RGWAccessControlPolicy src_acl(s->cct);
  optional<Policy> src_policy;
  op_ret = get_params();
  if (op_ret < 0)
    return op_ret;

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return op_ret;
  }
  map<string, bufferlist> src_attrs;

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  if (s->bucket_instance_id.empty()) {
    op_ret = store->get_bucket_info(obj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
  } else {
    /* will only happen in intra region sync where the source and dest bucket is the same */
    op_ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
  }
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_BUCKET;
    }
    return op_ret;
  }

  src_bucket = src_bucket_info.bucket;

  /* get buckets info (source and dest) */
  if (s->local_source && source_zone.empty()) {
    rgw_obj src_obj(src_bucket, src_object);
    store->set_atomic(s->obj_ctx, src_obj);
    store->set_prefetch_data(s->obj_ctx, src_obj);

    /* check source object permissions */
    op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl,
                             src_policy, src_bucket, src_object);
    if (op_ret < 0) {
      return op_ret;
    }

    /* admin request overrides permission checks */
    if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
      if (src_policy) {
        /* bucket policy takes precedence; the source ACL is only consulted
         * when the policy neither allows nor denies (Effect::Pass) */
        auto e = src_policy->eval(s->env, *s->auth.identity,
                                  src_object.instance.empty() ?
                                  rgw::IAM::s3GetObject :
                                  rgw::IAM::s3GetObjectVersion,
                                  ARN(src_obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !src_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                              RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!src_acl.verify_permission(*s->auth.identity,
                                            s->perm_mask,
                                            RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  RGWAccessControlPolicy dest_bucket_policy(s->cct);
  map<string, bufferlist> dest_attrs;

  if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
                                                           or intra region sync */
    dest_bucket_info = src_bucket_info;
    dest_attrs = src_attrs;
  } else {
    op_ret = store->get_bucket_info(obj_ctx, dest_tenant_name, dest_bucket_name,
                                    dest_bucket_info, nullptr, &dest_attrs);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return op_ret;
    }
  }

  dest_bucket = dest_bucket_info.bucket;

  rgw_obj dest_obj(dest_bucket, dest_object);
  store->set_atomic(s->obj_ctx, dest_obj);

  /* check dest bucket permissions */
  op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
                              &dest_bucket_policy, dest_bucket);
  if (op_ret < 0) {
    return op_ret;
  }

  /* admin request overrides permission checks */
  /* NOTE(review): the admin check reads the owner from dest_policy (the
   * destination *object* ACL member, which init_dest_policy() has not yet
   * populated at this point) rather than dest_bucket_policy -- confirm this
   * is intentional. */
  if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id()) &&
      ! dest_bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_WRITE)) {
    return -EACCES;
  }

  op_ret = init_dest_policy();
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4507
4508
int RGWCopyObj::init_common()
{
  /* Parse the conditional-copy time headers and stage the destination ACL
   * plus the request metadata into 'attrs'. Returns 0 on success or a
   * negative errno (also recorded in op_ret). */
  if (if_mod) {
    if (parse_time(if_mod, &mod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    mod_ptr = &mod_time;
  }

  if (if_unmod) {
    if (parse_time(if_unmod, &unmod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    unmod_ptr = &unmod_time;
  }

  /* the destination ACL travels as an xattr on the new object */
  bufferlist aclbl;
  dest_policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return op_ret;
  }
  populate_with_generic_attrs(s, attrs);

  return 0;
}
4539
4540 static void copy_obj_progress_cb(off_t ofs, void *param)
4541 {
4542 RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
4543 op->progress_cb(ofs);
4544 }
4545
4546 void RGWCopyObj::progress_cb(off_t ofs)
4547 {
4548 if (!s->cct->_conf->rgw_copy_obj_progress)
4549 return;
4550
4551 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
4552 return;
4553
4554 send_partial_response(ofs);
4555
4556 last_ofs = ofs;
4557 }
4558
void RGWCopyObj::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
4563
void RGWCopyObj::execute()
{
  /* Perform the actual copy after verify_permission() resolved the
   * source/destination bucket infos. Result is reported via op_ret. */
  if (init_common() < 0)
    return;

  rgw_obj src_obj(src_bucket, src_object);
  rgw_obj dst_obj(dest_bucket, dest_object);

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  obj_ctx.obj.set_atomic(src_obj);
  obj_ctx.obj.set_atomic(dst_obj);

  /* carry the (optional) delete-at time over to the new object */
  encode_delete_at_attr(delete_at, attrs);

  bool high_precision_time = (s->system_request);

  /* Handle object versioning of Swift API. In case of copying to remote this
   * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
  op_ret = store->swift_versioning_copy(obj_ctx,
                                        dest_bucket_info.owner,
                                        dest_bucket_info,
                                        dst_obj);
  if (op_ret < 0) {
    return;
  }

  op_ret = store->copy_obj(obj_ctx,
                           s->user->user_id,
                           client_id,
                           op_id,
                           &s->info,
                           source_zone,
                           dst_obj,
                           src_obj,
                           dest_bucket_info,
                           src_bucket_info,
                           &src_mtime,
                           &mtime,
                           mod_ptr,
                           unmod_ptr,
                           high_precision_time,
                           if_match,
                           if_nomatch,
                           attrs_mod,
                           copy_if_newer,
                           attrs, RGW_OBJ_CATEGORY_MAIN,
                           olh_epoch,
                           (delete_at ? *delete_at : real_time()),
                           (version_id.empty() ? NULL : &version_id),
                           &s->req_id, /* use req_id as tag */
                           &etag,
                           copy_obj_progress_cb, (void *)this
    );
}
4618
4619 int RGWGetACLs::verify_permission()
4620 {
4621 bool perm;
4622 if (!s->object.empty()) {
4623 perm = verify_object_permission(s,
4624 s->object.instance.empty() ?
4625 rgw::IAM::s3GetObjectAcl :
4626 rgw::IAM::s3GetObjectVersionAcl);
4627 } else {
4628 if (!s->bucket_exists) {
4629 return -ERR_NO_SUCH_BUCKET;
4630 }
4631 perm = verify_bucket_permission(s, rgw::IAM::s3GetBucketAcl);
4632 }
4633 if (!perm)
4634 return -EACCES;
4635
4636 return 0;
4637 }
4638
void RGWGetACLs::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
4643
4644 void RGWGetACLs::execute()
4645 {
4646 stringstream ss;
4647 RGWAccessControlPolicy* const acl = \
4648 (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get());
4649 RGWAccessControlPolicy_S3* const s3policy = \
4650 static_cast<RGWAccessControlPolicy_S3*>(acl);
4651 s3policy->to_xml(ss);
4652 acls = ss.str();
4653 }
4654
4655
4656
4657 int RGWPutACLs::verify_permission()
4658 {
4659 bool perm;
4660 if (!s->object.empty()) {
4661 perm = verify_object_permission(s,
4662 s->object.instance.empty() ?
4663 rgw::IAM::s3PutObjectAcl :
4664 rgw::IAM::s3PutObjectVersionAcl);
4665 } else {
4666 perm = verify_bucket_permission(s, rgw::IAM::s3PutBucketAcl);
4667 }
4668 if (!perm)
4669 return -EACCES;
4670
4671 return 0;
4672 }
4673
4674 int RGWGetLC::verify_permission()
4675 {
4676 bool perm;
4677 perm = verify_bucket_permission(s, rgw::IAM::s3GetLifecycleConfiguration);
4678 if (!perm)
4679 return -EACCES;
4680
4681 return 0;
4682 }
4683
4684 int RGWPutLC::verify_permission()
4685 {
4686 bool perm;
4687 perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
4688 if (!perm)
4689 return -EACCES;
4690
4691 return 0;
4692 }
4693
4694 int RGWDeleteLC::verify_permission()
4695 {
4696 bool perm;
4697 perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
4698 if (!perm)
4699 return -EACCES;
4700
4701 return 0;
4702 }
4703
void RGWPutACLs::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
4708
void RGWGetLC::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
4713
void RGWPutLC::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
4718
void RGWDeleteLC::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
4723
void RGWPutACLs::execute()
{
  /* Parse (or synthesize, for canned ACLs / ACL headers) an S3 ACL XML
   * document, validate it, rebuild it against canonical user info and
   * persist it as an xattr on the object or bucket. */
  bufferlist bl;

  RGWAccessControlPolicy_S3 *policy = NULL;
  RGWACLXMLParser_S3 parser(s->cct);
  RGWAccessControlPolicy_S3 new_policy(s->cct);
  stringstream ss;
  char *new_data = NULL;
  rgw_obj obj;

  op_ret = 0; /* XXX redundant? */

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }

  /* the owner of the existing ACL is preserved in the rebuilt policy */
  RGWAccessControlPolicy* const existing_policy = \
    (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());

  owner = existing_policy->get_owner();

  op_ret = get_params();
  if (op_ret < 0) {
    if (op_ret == -ERANGE) {
      ldout(s->cct, 4) << "The size of request xml data is larger than the max limitation, data size = "
                       << s->length << dendl;
      op_ret = -ERR_MALFORMED_XML;
      s->err.message = "The XML you provided was larger than the maximum " +
                       std::to_string(s->cct->_conf->rgw_max_put_param_size) +
                       " bytes allowed.";
    }
    return;
  }

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  /* a canned ACL and an explicit XML body are mutually exclusive */
  if (!s->canned_acl.empty() && len) {
    op_ret = -EINVAL;
    return;
  }

  /* canned ACL / ACL headers: generate the XML body ourselves, replacing
   * any request body (data/len are owned by this op) */
  if (!s->canned_acl.empty() || s->has_acl_header) {
    op_ret = get_policy_from_state(store, s, ss);
    if (op_ret < 0)
      return;

    new_data = strdup(ss.str().c_str());
    free(data);
    data = new_data;
    len = ss.str().size();
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    return;
  }
  policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
  if (!policy) {
    op_ret = -EINVAL;
    return;
  }

  /* cap the number of grants an ACL may carry */
  const RGWAccessControlList& req_acl = policy->get_acl();
  const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
#define ACL_GRANTS_MAX_NUM 100
  int max_num = s->cct->_conf->rgw_acl_grants_max_num;
  if (max_num < 0) {
    max_num = ACL_GRANTS_MAX_NUM;
  }

  int grants_num = req_grant_map.size();
  if (grants_num > max_num) {
    ldout(s->cct, 4) << "An acl can have up to "
                     << max_num
                     << " grants, request acl grants num: "
                     << grants_num << dendl;
    op_ret = -ERR_MALFORMED_ACL_ERROR;
    s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
                     + std::to_string(max_num)
                     + " grants allowed in an acl.";
    return;
  }

  // forward bucket acl requests to meta master zone
  if (s->object.empty() && !store->is_meta_master()) {
    bufferlist in_data;
    // include acl data unless it was generated from a canned_acl
    if (s->canned_acl.empty()) {
      in_data.append(data, len);
    }
    op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old AccessControlPolicy";
    policy->to_xml(*_dout);
    *_dout << dendl;
  }

  /* resolve grantees to canonical users, keeping the original owner */
  op_ret = policy->rebuild(store, &owner, new_policy);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New AccessControlPolicy:";
    new_policy.to_xml(*_dout);
    *_dout << dendl;
  }

  new_policy.encode(bl);
  map<string, bufferlist> attrs;

  if (!s->object.empty()) {
    obj = rgw_obj(s->bucket, s->object);
    store->set_atomic(s->obj_ctx, obj);
    //if instance is empty, we should modify the latest object
    op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
  } else {
    attrs = s->bucket_attrs;
    attrs[RGW_ATTR_ACL] = bl;
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  }
  if (op_ret == -ECANCELED) {
    op_ret = 0; /* lost a race, but it's ok because acls are immutable */
  }
}
4857
4858 static void get_lc_oid(struct req_state *s, string& oid)
4859 {
4860 string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
4861 int max_objs = (s->cct->_conf->rgw_lc_max_objs > HASH_PRIME)?HASH_PRIME:s->cct->_conf->rgw_lc_max_objs;
4862 int index = ceph_str_hash_linux(shard_id.c_str(), shard_id.size()) % HASH_PRIME % max_objs;
4863 oid = lc_oid_prefix;
4864 char buf[32];
4865 snprintf(buf, 32, ".%d", index);
4866 oid.append(buf);
4867 return;
4868 }
4869
void RGWPutLC::execute()
{
  /* Store a bucket lifecycle configuration: verify the mandatory
   * Content-MD5 header against the body, parse and rebuild the XML,
   * persist it as a bucket xattr, then register the bucket in its
   * lifecycle shard under a cls lock. */
  bufferlist bl;

  RGWLifecycleConfiguration_S3 *config = NULL;
  RGWLCXMLParser_S3 parser(s->cct);
  RGWLifecycleConfiguration_S3 new_config(s->cct);

  /* S3 requires Content-MD5 on lifecycle PUTs */
  content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
  if (content_md5 == nullptr) {
    op_ret = -ERR_INVALID_REQUEST;
    s->err.message = "Missing required header for this request: Content-MD5";
    ldout(s->cct, 5) << s->err.message << dendl;
    return;
  }

  std::string content_md5_bin;
  try {
    content_md5_bin = rgw::from_base64(boost::string_view(content_md5));
  } catch (...) {
    s->err.message = "Request header Content-MD5 contains character "
                     "that is not base64 encoded.";
    ldout(s->cct, 5) << s->err.message << dendl;
    op_ret = -ERR_BAD_DIGEST;
    return;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0)
    return;

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  /* verify the body against the client-provided digest */
  MD5 data_hash;
  unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
  data_hash.Update(reinterpret_cast<const byte*>(data), len);
  data_hash.Final(data_hash_res);

  if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
    op_ret = -ERR_BAD_DIGEST;
    s->err.message = "The Content-MD5 you specified did not match what we received.";
    ldout(s->cct, 5) << s->err.message
                     << " Specified content md5: " << content_md5
                     << ", calculated content md5: " << data_hash_res
                     << dendl;
    return;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }
  config = static_cast<RGWLifecycleConfiguration_S3 *>(parser.find_first("LifecycleConfiguration"));
  if (!config) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old LifecycleConfiguration:";
    config->to_xml(*_dout);
    *_dout << dendl;
  }

  /* validate/normalize the parsed configuration */
  op_ret = config->rebuild(store, new_config);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New LifecycleConfiguration:";
    new_config.to_xml(*_dout);
    *_dout << dendl;
  }

  /* persist the configuration as a bucket xattr */
  new_config.encode(bl);
  map<string, bufferlist> attrs;
  attrs = s->bucket_attrs;
  attrs[RGW_ATTR_LC] = bl;
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  if (op_ret < 0)
    return;
  /* register the bucket in the lifecycle shard, serialized against the LC
   * worker by a cls exclusive lock.
   * NOTE(review): this entry key includes the tenant while get_lc_oid()
   * hashes name:bucket_id without it -- confirm intentional. */
  string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
  string oid;
  get_lc_oid(s, oid);
  pair<string, int> entry(shard_id, lc_uninitial);
  int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
  rados::cls::lock::Lock l(lc_index_lock_name);
  utime_t time(max_lock_secs, 0);
  l.set_duration(time);
  l.set_cookie(cookie);
  librados::IoCtx *ctx = store->get_lc_pool_ctx();
  do {
    op_ret = l.lock_exclusive(ctx, oid);
    if (op_ret == -EBUSY) {
      /* lock held elsewhere (likely the LC worker): retry until acquired */
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
      sleep(5);
      continue;
    }
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock " << oid << op_ret << dendl;
      break;
    }
    op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry);
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to set entry " << oid << op_ret << dendl;
    }
    break;
  } while (1);
  l.unlock(ctx, oid);
  return;
}
4986
void RGWDeleteLC::execute()
{
  /* Remove the bucket's lifecycle configuration: rewrite the bucket attrs
   * without the RGW_ATTR_LC xattr, then deregister the bucket from its
   * lifecycle shard under the cls lock. */
  bufferlist bl;
  map<string, bufferlist> orig_attrs, attrs;
  map<string, bufferlist>::iterator iter;
  rgw_raw_obj obj;
  store->get_bucket_instance_obj(s->bucket, obj);
  store->set_prefetch_data(s->obj_ctx, obj);
  op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
  if (op_ret < 0)
    return;

  /* copy every attr except the lifecycle one */
  for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
    const string& name = iter->first;
    dout(10) << "DeleteLC : attr: " << name << dendl;
    if (name.compare(0, (sizeof(RGW_ATTR_LC) - 1), RGW_ATTR_LC) != 0) {
      if (attrs.find(name) == attrs.end()) {
        attrs[name] = iter->second;
      }
    }
  }
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  /* deregister the bucket from its lifecycle shard */
  string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
  pair<string, int> entry(shard_id, lc_uninitial);
  string oid;
  get_lc_oid(s, oid);
  int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
  librados::IoCtx *ctx = store->get_lc_pool_ctx();
  rados::cls::lock::Lock l(lc_index_lock_name);
  utime_t time(max_lock_secs, 0);
  l.set_duration(time);
  do {
    op_ret = l.lock_exclusive(ctx, oid);
    if (op_ret == -EBUSY) {
      /* lock held elsewhere (likely the LC worker): retry until acquired */
      dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
      sleep(5);
      continue;
    }
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock " << oid << op_ret << dendl;
      break;
    }
    op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry);
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWDeleteLC() failed to set entry " << oid << op_ret << dendl;
    }
    break;
  } while (1);
  l.unlock(ctx, oid);
  return;
}
5038
int RGWGetCORS::verify_permission()
{
  /* Bucket owner, or anyone granted s3:GetBucketCORS by bucket policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
}
5043
5044 void RGWGetCORS::execute()
5045 {
5046 op_ret = read_bucket_cors();
5047 if (op_ret < 0)
5048 return ;
5049
5050 if (!cors_exist) {
5051 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
5052 op_ret = -ENOENT;
5053 return;
5054 }
5055 }
5056
int RGWPutCORS::verify_permission()
{
  /* Bucket owner, or anyone granted s3:PutBucketCORS by bucket policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
5061
void RGWPutCORS::execute()
{
  rgw_raw_obj obj;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  /* CORS metadata changes must go through the meta master zone */
  if (!store->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  /* store the CORS document as a bucket xattr, retrying on races with
   * concurrent bucket-metadata writers */
  op_ret = retry_raced_bucket_write(store, s, [this] {
      map<string, bufferlist> attrs = s->bucket_attrs;
      attrs[RGW_ATTR_CORS] = cors_bl;
      return rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
    });
}
5084
int RGWDeleteCORS::verify_permission()
{
  // No separate delete permission
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
5090
void RGWDeleteCORS::execute()
{
  /* Remove the bucket's CORS configuration by rewriting the bucket attrs
   * without the RGW_ATTR_CORS xattr. */
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  bufferlist bl;
  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  /* retried on races with concurrent bucket-metadata writers */
  op_ret = retry_raced_bucket_write(store, s, [this] {
      rgw_raw_obj obj;
      store->get_bucket_instance_obj(s->bucket, obj);
      store->set_prefetch_data(s->obj_ctx, obj);
      map<string, bufferlist> orig_attrs, attrs, rmattrs;
      map<string, bufferlist>::iterator iter;

      op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
      if (op_ret < 0)
        return op_ret;

      /* only remove meta attrs */
      for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
        const string& name = iter->first;
        dout(10) << "DeleteCORS : attr: " << name << dendl;
        if (name.compare(0, (sizeof(RGW_ATTR_CORS) - 1), RGW_ATTR_CORS) == 0) {
          rmattrs[name] = iter->second;
        } else if (attrs.find(name) == attrs.end()) {
          attrs[name] = iter->second;
        }
      }
      return rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                  &s->bucket_info.objv_tracker);
    });
}
5128
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
  /* Fill the CORS response header values from the rule matched earlier. */
  get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
5132
5133 int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
5134 rule = cc->host_name_rule(origin);
5135 if (!rule) {
5136 dout(10) << "There is no cors rule present for " << origin << dendl;
5137 return -ENOENT;
5138 }
5139
5140 if (!validate_cors_rule_method(rule, req_meth)) {
5141 return -ENOENT;
5142 }
5143
5144 if (!validate_cors_rule_header(rule, req_hdrs)) {
5145 return -ENOENT;
5146 }
5147
5148 return 0;
5149 }
5150
void RGWOptionsCORS::execute()
{
  /* Handle a CORS preflight (OPTIONS) request: require Origin and
   * Access-Control-Request-Method headers, then match them against the
   * bucket's CORS configuration. */
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  origin = s->info.env->get("HTTP_ORIGIN");
  if (!origin) {
    dout(0) <<
    "Preflight request without mandatory Origin header"
    << dendl;
    op_ret = -EINVAL;
    return;
  }
  req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    dout(0) <<
    "Preflight request without mandatory Access-control-request-method header"
    << dendl;
    op_ret = -EINVAL;
    return;
  }
  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
  op_ret = validate_cors_request(&bucket_cors);
  if (!rule) {
    /* no matching rule: clear the echo-back fields so the response carries
     * no CORS headers */
    origin = req_meth = NULL;
    return;
  }
  return;
}
5186
int RGWGetRequestPayment::verify_permission()
{
  /* Bucket owner, or anyone granted s3:GetBucketRequestPayment by policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
}
5191
void RGWGetRequestPayment::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
5196
void RGWGetRequestPayment::execute()
{
  /* Read the requester-pays flag straight from the cached bucket info. */
  requester_pays = s->bucket_info.requester_pays;
}
5201
int RGWSetRequestPayment::verify_permission()
{
  /* Bucket owner, or anyone granted s3:PutBucketRequestPayment by policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
}
5206
void RGWSetRequestPayment::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
5211
void RGWSetRequestPayment::execute()
{
  /* Persist the new requester-pays flag on the bucket instance info. */
  op_ret = get_params();

  if (op_ret < 0)
    return;

  s->bucket_info.requester_pays = requester_pays;
  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
                                           &s->bucket_attrs);
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
                     << " returned err=" << op_ret << dendl;
    return;
  }
}
5228
5229 int RGWInitMultipart::verify_permission()
5230 {
5231 if (s->iam_policy) {
5232 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5233 rgw::IAM::s3PutObject,
5234 rgw_obj(s->bucket, s->object));
5235 if (e == Effect::Allow) {
5236 return 0;
5237 } else if (e == Effect::Deny) {
5238 return -EACCES;
5239 }
5240 }
5241
5242 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5243 return -EACCES;
5244 }
5245
5246 return 0;
5247 }
5248
void RGWInitMultipart::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
5253
void RGWInitMultipart::execute()
{
  /* Start a multipart upload: generate a unique upload id and create the
   * multipart meta object that carries the ACL and request metadata until
   * the upload is completed. */
  bufferlist aclbl;
  map<string, bufferlist> attrs;
  rgw_obj obj;

  if (get_params() < 0)
    return;

  if (s->object.empty())
    return;

  policy.encode(aclbl);
  attrs[RGW_ATTR_ACL] = aclbl;

  populate_with_generic_attrs(s, attrs);

  /* select encryption mode */
  op_ret = prepare_encryption(attrs);
  if (op_ret != 0)
    return;

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  /* Retry with a fresh random id on the (unlikely) collision with an
   * existing meta object -- the write uses PUT_OBJ_CREATE_EXCL. */
  do {
    char buf[33];
    gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
    upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
    upload_id.append(buf);

    string tmp_obj_name;
    RGWMPObj mp(s->object.name, upload_id);
    tmp_obj_name = mp.get_meta();

    obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
    // the meta object will be indexed with 0 size, we c
    obj.set_in_extra_data(true);
    obj.index_hash_source = s->object.name;

    RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
    op_target.set_versioning_disabled(true); /* no versioning for multipart meta */

    RGWRados::Object::Write obj_op(&op_target);

    obj_op.meta.owner = s->owner.get_id();
    obj_op.meta.category = RGW_OBJ_CATEGORY_MULTIMETA;
    obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;

    op_ret = obj_op.write_meta(0, 0, attrs);
  } while (op_ret == -EEXIST);
}
5308
5309 static int get_multipart_info(RGWRados *store, struct req_state *s,
5310 string& meta_oid,
5311 RGWAccessControlPolicy *policy,
5312 map<string, bufferlist>& attrs)
5313 {
5314 map<string, bufferlist>::iterator iter;
5315 bufferlist header;
5316
5317 rgw_obj obj;
5318 obj.init_ns(s->bucket, meta_oid, mp_ns);
5319 obj.set_in_extra_data(true);
5320
5321 int op_ret = get_obj_attrs(store, s, obj, attrs);
5322 if (op_ret < 0) {
5323 if (op_ret == -ENOENT) {
5324 return -ERR_NO_SUCH_UPLOAD;
5325 }
5326 return op_ret;
5327 }
5328
5329 if (policy) {
5330 for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
5331 string name = iter->first;
5332 if (name.compare(RGW_ATTR_ACL) == 0) {
5333 bufferlist& bl = iter->second;
5334 bufferlist::iterator bli = bl.begin();
5335 try {
5336 ::decode(*policy, bli);
5337 } catch (buffer::error& err) {
5338 ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
5339 return -EIO;
5340 }
5341 break;
5342 }
5343 }
5344 }
5345
5346 return 0;
5347 }
5348
5349 int RGWCompleteMultipart::verify_permission()
5350 {
5351 if (s->iam_policy) {
5352 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5353 rgw::IAM::s3PutObject,
5354 rgw_obj(s->bucket, s->object));
5355 if (e == Effect::Allow) {
5356 return 0;
5357 } else if (e == Effect::Deny) {
5358 return -EACCES;
5359 }
5360 }
5361
5362 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5363 return -EACCES;
5364 }
5365
5366 return 0;
5367 }
5368
void RGWCompleteMultipart::pre_exec()
{
  /* Standard pre-execution hook shared by bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
5373
// Complete a multipart upload: parse the client's CompleteMultipartUpload
// XML, validate the listed parts against what was actually uploaded,
// stitch the part manifests into one object manifest, and atomically
// write the final head object. A cls exclusive lock on the upload's meta
// object serializes racing completions/retries.
void RGWCompleteMultipart::execute()
{
  RGWMultiCompleteUpload *parts;
  map<int, string>::iterator iter;
  RGWMultiXMLParser parser;
  string meta_oid;
  map<uint32_t, RGWUploadPartInfo> obj_parts;
  map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
  map<string, bufferlist> attrs;
  off_t ofs = 0;
  MD5 hash;
  char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
  // final etag is hex MD5 plus "-<nparts>" suffix; extra room for it.
  char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
  bufferlist etag_bl;
  rgw_obj meta_obj;
  rgw_obj target_obj;
  RGWMPObj mp;
  RGWObjManifest manifest;
  uint64_t olh_epoch = 0;
  string version_id;

  op_ret = get_params();
  if (op_ret < 0)
    return;
  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return;
  }

  // An empty request body is malformed XML by definition.
  if (!data || !len) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (!parser.init()) {
    op_ret = -EIO;
    return;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
  if (!parts || parts->parts.empty()) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  // Reject requests listing more parts than the configured limit.
  if ((int)parts->parts.size() >
      s->cct->_conf->rgw_multipart_part_upload_limit) {
    op_ret = -ERANGE;
    return;
  }

  mp.init(s->object.name, upload_id);
  meta_oid = mp.get_meta();

  int total_parts = 0;
  int handled_parts = 0;
  int max_parts = 1000;
  int marker = 0;
  bool truncated;
  RGWCompressionInfo cs_info;
  bool compressed = false;
  uint64_t accounted_size = 0;

  uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;

  list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */

  bool versioned_object = s->bucket_info.versioning_enabled();

  iter = parts->parts.begin();

  meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
  meta_obj.set_in_extra_data(true);
  meta_obj.index_hash_source = s->object.name;

  /*take a cls lock on meta_obj to prevent racing completions (or retries)
    from deleting the parts*/
  rgw_pool meta_pool;
  rgw_raw_obj raw_obj;
  int max_lock_secs_mp =
    s->cct->_conf->get_val<int64_t>("rgw_mp_lock_max_time");
  utime_t dur(max_lock_secs_mp, 0);

  store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
  store->get_obj_data_pool((s->bucket_info).placement_rule,
			   meta_obj,&meta_pool);
  store->open_pool_ctx(meta_pool, serializer.ioctx);

  op_ret = serializer.try_lock(raw_obj.oid, dur);
  if (op_ret < 0) {
    // Another completion holds the lock; report as a conflict-ish error.
    dout(0) << "RGWCompleteMultipart::execute() failed to acquire lock " << dendl;
    op_ret = -ERR_INTERNAL_ERROR;
    s->err.message = "This multipart completion is already in progress";
    return;
  }

  op_ret = get_obj_attrs(store, s, meta_obj, attrs);

  if (op_ret < 0) {
    ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
		     << " ret=" << op_ret << dendl;
    return;
  }

  // Page through the uploaded parts, checking each against the client's
  // list in lock-step (same numbers, same etags, min-size rule).
  do {
    op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
				  marker, obj_parts, &marker, &truncated);
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_UPLOAD;
    }
    if (op_ret < 0)
      return;

    total_parts += obj_parts.size();
    if (!truncated && total_parts != (int)parts->parts.size()) {
      ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts
		       << " expected: " << parts->parts.size() << dendl;
      op_ret = -ERR_INVALID_PART;
      return;
    }

    for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
      uint64_t part_size = obj_iter->second.accounted_size;
      // Every part except the last must meet the minimum part size.
      if (handled_parts < (int)parts->parts.size() - 1 &&
          part_size < min_part_size) {
        op_ret = -ERR_TOO_SMALL;
        return;
      }

      char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
      if (iter->first != (int)obj_iter->first) {
        ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: "
			 << iter->first << " next uploaded: "
			 << obj_iter->first << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }
      string part_etag = rgw_string_unquote(iter->second);
      if (part_etag.compare(obj_iter->second.etag) != 0) {
        ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first
			 << " etag: " << iter->second << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      }

      // The final etag is MD5 over the concatenated binary part etags.
      hex_to_buf(obj_iter->second.etag.c_str(), petag,
		CEPH_CRYPTO_MD5_DIGESTSIZE);
      hash.Update((const byte *)petag, sizeof(petag));

      RGWUploadPartInfo& obj_part = obj_iter->second;

      /* update manifest for part */
      string oid = mp.get_part(obj_iter->second.num);
      rgw_obj src_obj;
      src_obj.init_ns(s->bucket, oid, mp_ns);

      if (obj_part.manifest.empty()) {
        ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj="
			 << src_obj << dendl;
        op_ret = -ERR_INVALID_PART;
        return;
      } else {
        manifest.append(obj_part.manifest, store);
      }

      // Merge per-part compression metadata, shifting block offsets so
      // they are relative to the assembled object.
      if (obj_part.cs_info.compression_type != "none") {
        if (compressed && cs_info.compression_type != obj_part.cs_info.compression_type) {
          ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload ("
                           << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
          op_ret = -ERR_INVALID_PART;
          return;
        }
        int64_t new_ofs; // offset in compression data for new part
        if (cs_info.blocks.size() > 0)
          new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
        else
          new_ofs = 0;
        for (const auto& block : obj_part.cs_info.blocks) {
          compression_block cb;
          cb.old_ofs = block.old_ofs + cs_info.orig_size;
          cb.new_ofs = new_ofs;
          cb.len = block.len;
          cs_info.blocks.push_back(cb);
          new_ofs = cb.new_ofs + cb.len;
        }
        if (!compressed)
          cs_info.compression_type = obj_part.cs_info.compression_type;
        cs_info.orig_size += obj_part.cs_info.orig_size;
        compressed = true;
      }

      // Queue the part's index entry for removal once the head is written.
      rgw_obj_index_key remove_key;
      src_obj.key.get_index_key(&remove_key);

      remove_objs.push_back(remove_key);

      ofs += obj_part.size;
      accounted_size += obj_part.accounted_size;
    }
  } while (truncated);
  hash.Final((byte *)final_etag);

  // S3-style multipart etag: hex(md5-of-etags) + "-" + part count.
  buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
  snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2],  sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
           "-%lld", (long long)parts->parts.size());
  etag = final_etag_str;
  ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl;

  etag_bl.append(final_etag_str, strlen(final_etag_str) + 1);

  attrs[RGW_ATTR_ETAG] = etag_bl;

  if (compressed) {
    // write compression attribute to full object
    bufferlist tmp;
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
  }

  target_obj.init(s->bucket, s->object.name);
  if (versioned_object) {
    store->gen_rand_obj_instance_name(&target_obj);
  }

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  obj_ctx.obj.set_atomic(target_obj);

  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
  RGWRados::Object::Write obj_op(&op_target);

  obj_op.meta.manifest = &manifest;
  obj_op.meta.remove_objs = &remove_objs;

  obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
  obj_op.meta.owner = s->owner.get_id();
  obj_op.meta.flags = PUT_OBJ_CREATE;
  obj_op.meta.modify_tail = true;
  obj_op.meta.completeMultipart = true;
  obj_op.meta.olh_epoch = olh_epoch;
  op_ret = obj_op.write_meta(ofs, accounted_size, attrs);
  if (op_ret < 0)
    return;

  // remove the upload obj
  int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
			    s->bucket_info, meta_obj, 0);
  if (r >= 0)  {
    /* serializer's exclusive lock is released */
    serializer.clear_locked();
  } else {
    // Lock stays marked held; complete() will release it explicitly.
    ldout(store->ctx(), 0) << "WARNING: failed to remove object "
			   << meta_obj << dendl;
  }
}
5634
5635 int RGWCompleteMultipart::MPSerializer::try_lock(
5636 const std::string& _oid,
5637 utime_t dur)
5638 {
5639 oid = _oid;
5640 op.assert_exists();
5641 lock.set_duration(dur);
5642 lock.lock_exclusive(&op);
5643 int ret = ioctx.operate(oid, &op);
5644 if (! ret) {
5645 locked = true;
5646 }
5647 return ret;
5648 }
5649
5650 void RGWCompleteMultipart::complete()
5651 {
5652 /* release exclusive lock iff not already */
5653 if (unlikely(serializer.locked)) {
5654 int r = serializer.unlock();
5655 if (r < 0) {
5656 ldout(store->ctx(), 0) << "WARNING: failed to unlock "
5657 << serializer.oid << dendl;
5658 }
5659 }
5660 send_response();
5661 }
5662
5663 int RGWAbortMultipart::verify_permission()
5664 {
5665 if (s->iam_policy) {
5666 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5667 rgw::IAM::s3AbortMultipartUpload,
5668 rgw_obj(s->bucket, s->object));
5669 if (e == Effect::Allow) {
5670 return 0;
5671 } else if (e == Effect::Deny) {
5672 return -EACCES;
5673 }
5674 }
5675
5676 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5677 return -EACCES;
5678 }
5679
5680 return 0;
5681 }
5682
void RGWAbortMultipart::pre_exec()
{
  // Delegate to the common bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
5687
5688 void RGWAbortMultipart::execute()
5689 {
5690 op_ret = -EINVAL;
5691 string upload_id;
5692 string meta_oid;
5693 upload_id = s->info.args.get("uploadId");
5694 map<string, bufferlist> attrs;
5695 rgw_obj meta_obj;
5696 RGWMPObj mp;
5697
5698 if (upload_id.empty() || s->object.empty())
5699 return;
5700
5701 mp.init(s->object.name, upload_id);
5702 meta_oid = mp.get_meta();
5703
5704 op_ret = get_multipart_info(store, s, meta_oid, NULL, attrs);
5705 if (op_ret < 0)
5706 return;
5707
5708 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
5709 op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
5710 }
5711
5712 int RGWListMultipart::verify_permission()
5713 {
5714 if (!verify_object_permission(s, rgw::IAM::s3ListMultipartUploadParts))
5715 return -EACCES;
5716
5717 return 0;
5718 }
5719
void RGWListMultipart::pre_exec()
{
  // Delegate to the common bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
5724
5725 void RGWListMultipart::execute()
5726 {
5727 map<string, bufferlist> xattrs;
5728 string meta_oid;
5729 RGWMPObj mp;
5730
5731 op_ret = get_params();
5732 if (op_ret < 0)
5733 return;
5734
5735 mp.init(s->object.name, upload_id);
5736 meta_oid = mp.get_meta();
5737
5738 op_ret = get_multipart_info(store, s, meta_oid, &policy, xattrs);
5739 if (op_ret < 0)
5740 return;
5741
5742 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
5743 marker, parts, NULL, &truncated);
5744 }
5745
5746 int RGWListBucketMultiparts::verify_permission()
5747 {
5748 if (!verify_bucket_permission(s,
5749 rgw::IAM::s3ListBucketMultipartUploads))
5750 return -EACCES;
5751
5752 return 0;
5753 }
5754
void RGWListBucketMultiparts::pre_exec()
{
  // Delegate to the common bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
5759
5760 void RGWListBucketMultiparts::execute()
5761 {
5762 vector<rgw_bucket_dir_entry> objs;
5763 string marker_meta;
5764
5765 op_ret = get_params();
5766 if (op_ret < 0)
5767 return;
5768
5769 if (s->prot_flags & RGW_REST_SWIFT) {
5770 string path_args;
5771 path_args = s->info.args.get("path");
5772 if (!path_args.empty()) {
5773 if (!delimiter.empty() || !prefix.empty()) {
5774 op_ret = -EINVAL;
5775 return;
5776 }
5777 prefix = path_args;
5778 delimiter="/";
5779 }
5780 }
5781 marker_meta = marker.get_meta();
5782
5783 op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
5784 max_uploads, &objs, &common_prefixes, &is_truncated);
5785 if (op_ret < 0) {
5786 return;
5787 }
5788
5789 if (!objs.empty()) {
5790 vector<rgw_bucket_dir_entry>::iterator iter;
5791 RGWMultipartUploadEntry entry;
5792 for (iter = objs.begin(); iter != objs.end(); ++iter) {
5793 rgw_obj_key key(iter->key);
5794 if (!entry.mp.from_meta(key.name))
5795 continue;
5796 entry.obj = *iter;
5797 uploads.push_back(entry);
5798 }
5799 next_marker = entry;
5800 }
5801 }
5802
5803 void RGWGetHealthCheck::execute()
5804 {
5805 if (!g_conf->rgw_healthcheck_disabling_path.empty() &&
5806 (::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
5807 /* Disabling path specified & existent in the filesystem. */
5808 op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
5809 } else {
5810 op_ret = 0; /* 200 OK */
5811 }
5812 }
5813
5814 int RGWDeleteMultiObj::verify_permission()
5815 {
5816 acl_allowed = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
5817 if (!acl_allowed && !s->iam_policy)
5818 return -EACCES;
5819
5820 return 0;
5821 }
5822
void RGWDeleteMultiObj::pre_exec()
{
  // Delegate to the common bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
5827
// S3 multi-object delete (POST ?delete): parse the XML body, evaluate
// per-object bucket policy, delete each key, and stream one result per
// object back to the client. Uses gotos so both exit paths free(data).
void RGWDeleteMultiObj::execute()
{
  RGWMultiDelDelete *multi_delete;
  vector<rgw_obj_key>::iterator iter;
  RGWMultiDelXMLParser parser;
  int num_processed = 0;
  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);

  op_ret = get_params();
  if (op_ret < 0) {
    goto error;
  }

  if (!data) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    goto error;
  }

  multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
  if (!multi_delete) {
    op_ret = -EINVAL;
    goto error;
  }

  // Quiet mode: the response reports failures only.
  if (multi_delete->is_quiet())
    quiet = true;

  begin_response();
  if (multi_delete->objects.empty()) {
    goto done;
  }

  // Process at most max_to_delete keys; each is individually authorized
  // against the bucket policy when one is attached.
  for (iter = multi_delete->objects.begin();
        iter != multi_delete->objects.end() && num_processed < max_to_delete;
        ++iter, num_processed++) {
    rgw_obj obj(bucket, *iter);
    if (s->iam_policy) {
      // Versioned deletes are a distinct IAM action.
      auto e = s->iam_policy->eval(s->env,
				   *s->auth.identity,
				   iter->instance.empty() ?
				   rgw::IAM::s3DeleteObject :
				   rgw::IAM::s3DeleteObjectVersion,
				   obj);
      if ((e == Effect::Deny) ||
	  (e == Effect::Pass && !acl_allowed)) {
	send_partial_response(*iter, false, "", -EACCES);
	continue;
      }
    }

    obj_ctx->obj.set_atomic(obj);

    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = s->bucket_owner.get_id();
    del_op.params.versioning_status = s->bucket_info.versioning_status();
    del_op.params.obj_owner = s->owner;

    op_ret = del_op.delete_obj();
    if (op_ret == -ENOENT) {
      // Deleting an absent key counts as success (S3 semantics).
      op_ret = 0;
    }

    send_partial_response(*iter, del_op.result.delete_marker,
			  del_op.result.version_id, op_ret);
  }

  /*  set the return code to zero, errors at this point will be
  dumped to the response */
  op_ret = 0;

done:
  // will likely segfault if begin_response() has not been called
  end_response();
  free(data);
  return;

error:
  send_status();
  free(data);
  return;

}
5922
5923 bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
5924 map<string, bufferlist>& battrs,
5925 ACLOwner& bucket_owner /* out */)
5926 {
5927 RGWAccessControlPolicy bacl(store->ctx());
5928 int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
5929 if (ret < 0) {
5930 return false;
5931 }
5932
5933 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
5934
5935 bucket_owner = bacl.get_owner();
5936
5937 /* We can use global user_acl because each BulkDelete request is allowed
5938 * to work on entities from a single account only. */
5939 return verify_bucket_permission(s, binfo.bucket, s->user_acl.get(),
5940 &bacl, policy, rgw::IAM::s3DeleteBucket);
5941 }
5942
// Delete one bulk-delete path. A path with an object key deletes that
// object; a bucket-only path deletes the bucket itself (unlinking it and
// forwarding to the metadata master when this zone is not it). Failures
// are tallied into num_unfound/failures rather than aborting the batch;
// returns true only on success.
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
{
  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  RGWBucketInfo binfo;
  map<string, bufferlist> battrs;
  ACLOwner bowner;

  int ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   path.bucket_name, binfo, nullptr,
                                   &battrs);
  if (ret < 0) {
    goto binfo_fail;
  }

  if (!verify_permission(binfo, battrs, bowner)) {
    ret = -EACCES;
    goto auth_fail;
  }

  if (!path.obj_key.empty()) {
    // Object deletion branch.
    rgw_obj obj(binfo.bucket, path.obj_key);
    obj_ctx.obj.set_atomic(obj);

    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = binfo.owner;
    del_op.params.versioning_status = binfo.versioning_status();
    del_op.params.obj_owner = bowner;

    ret = del_op.delete_obj();
    if (ret < 0) {
      goto delop_fail;
    }
  } else {
    // Bucket deletion branch: delete, unlink from the owner, and tell
    // the metadata master (unless we are it).
    RGWObjVersionTracker ot;
    ot.read_version = binfo.ep_objv;

    ret = store->delete_bucket(binfo, ot);
    if (0 == ret) {
      // Unlink failure is logged but does not fail the delete.
      ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
                              binfo.bucket.name, false);
      if (ret < 0) {
        ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret
                         << dendl;
      }
    }
    if (ret < 0) {
      goto delop_fail;
    }

    if (!store->is_meta_master()) {
      bufferlist in_data;
      ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                      nullptr);
      if (ret < 0) {
        if (ret == -ENOENT) {
          /* adjust error, we want to return with NoSuchBucket and not
           * NoSuchKey */
          ret = -ERR_NO_SUCH_BUCKET;
        }
        goto delop_fail;
      }
    }
  }

  num_deleted++;
  return true;


binfo_fail:
  // Bucket lookup failed: a missing bucket only bumps num_unfound; other
  // errors are recorded as per-path failures.
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl;
    num_unfound++;
  } else {
    ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret
			    << dendl;

    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

auth_fail:
  ldout(store->ctx(), 20) << "wrong auth for " << path << dendl;
  {
    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

delop_fail:
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find entry " << path << dendl;
    num_unfound++;
  } else {
    fail_desc_t failed_item = {
      .err  = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;
}
6054
6055 bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
6056 {
6057 ldout(store->ctx(), 20) << "in delete_chunk" << dendl;
6058 for (auto path : paths) {
6059 ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl;
6060 delete_single(path);
6061 }
6062
6063 return true;
6064 }
6065
int RGWBulkDelete::verify_permission()
{
  // Authorization is deferred: Deleter::verify_permission() checks each
  // target bucket as paths are processed in execute().
  return 0;
}
6070
void RGWBulkDelete::pre_exec()
{
  // Delegate to the common bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
6075
6076 void RGWBulkDelete::execute()
6077 {
6078 deleter = std::unique_ptr<Deleter>(new Deleter(store, s));
6079
6080 bool is_truncated = false;
6081 do {
6082 list<RGWBulkDelete::acct_path_t> items;
6083
6084 int ret = get_data(items, &is_truncated);
6085 if (ret < 0) {
6086 return;
6087 }
6088
6089 ret = deleter->delete_chunk(items);
6090 } while (!op_ret && is_truncated);
6091
6092 return;
6093 }
6094
6095
6096 constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
6097
6098 int RGWBulkUploadOp::verify_permission()
6099 {
6100 if (s->auth.identity->is_anonymous()) {
6101 return -EACCES;
6102 }
6103
6104 if (! verify_user_permission(s, RGW_PERM_WRITE)) {
6105 return -EACCES;
6106 }
6107
6108 if (s->user->user_id.tenant != s->bucket_tenant) {
6109 ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
6110 << " (user_id.tenant=" << s->user->user_id.tenant
6111 << " requested=" << s->bucket_tenant << ")"
6112 << dendl;
6113 return -EACCES;
6114 }
6115
6116 if (s->user->max_buckets < 0) {
6117 return -EPERM;
6118 }
6119
6120 return 0;
6121 }
6122
void RGWBulkUploadOp::pre_exec()
{
  // Delegate to the common bucket/object pre-exec hook.
  rgw_bucket_object_pre_exec(s);
}
6127
6128 boost::optional<std::pair<std::string, rgw_obj_key>>
6129 RGWBulkUploadOp::parse_path(const boost::string_ref& path)
6130 {
6131 /* We need to skip all slashes at the beginning in order to preserve
6132 * compliance with Swift. */
6133 const size_t start_pos = path.find_first_not_of('/');
6134
6135 if (boost::string_ref::npos != start_pos) {
6136 /* Seperator is the first slash after the leading ones. */
6137 const size_t sep_pos = path.substr(start_pos).find('/');
6138
6139 if (boost::string_ref::npos != sep_pos) {
6140 const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
6141 const auto obj_name = path.substr(sep_pos + 1);
6142
6143 return std::make_pair(bucket_name.to_string(),
6144 rgw_obj_key(obj_name.to_string()));
6145 } else {
6146 /* It's guaranteed here that bucket name is at least one character
6147 * long and is different than slash. */
6148 return std::make_pair(path.substr(start_pos).to_string(),
6149 rgw_obj_key());
6150 }
6151 }
6152
6153 return none;
6154 }
6155
6156 std::pair<std::string, std::string>
6157 RGWBulkUploadOp::handle_upload_path(struct req_state *s)
6158 {
6159 std::string bucket_path, file_prefix;
6160 if (! s->init_state.url_bucket.empty()) {
6161 file_prefix = bucket_path = s->init_state.url_bucket + "/";
6162 if (! s->object.empty()) {
6163 std::string& object_name = s->object.name;
6164
6165 /* As rgw_obj_key::empty() already verified emptiness of s->object.name,
6166 * we can safely examine its last element. */
6167 if (object_name.back() == '/') {
6168 file_prefix.append(object_name);
6169 } else {
6170 file_prefix.append(object_name).append("/");
6171 }
6172 }
6173 }
6174 return std::make_pair(bucket_path, file_prefix);
6175 }
6176
6177 int RGWBulkUploadOp::handle_dir_verify_permission()
6178 {
6179 if (s->user->max_buckets > 0) {
6180 RGWUserBuckets buckets;
6181 std::string marker;
6182 bool is_truncated = false;
6183 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
6184 marker, std::string(), s->user->max_buckets,
6185 false, &is_truncated);
6186 if (op_ret < 0) {
6187 return op_ret;
6188 }
6189
6190 if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) {
6191 return -ERR_TOO_MANY_BUCKETS;
6192 }
6193 }
6194
6195 return 0;
6196 }
6197
6198 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
6199 {
6200 /* the request of container or object level will contain bucket name.
6201 * only at account level need to append the bucket name */
6202 if (info.script_uri.find(bucket_name) != std::string::npos) {
6203 return;
6204 }
6205
6206 ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
6207 info.script_uri.append("/").append(bucket_name);
6208 info.request_uri_aws4 = info.request_uri = info.script_uri;
6209 info.effective_uri = "/" + bucket_name;
6210 }
6211
// Handle a directory entry from the uploaded archive: create the named
// bucket for the requesting user. Mirrors ordinary bucket creation —
// ownership check on an existing bucket, forwarding to the metadata
// master in multisite setups, placement-rule coherence, then
// create_bucket + link, treating EEXIST as a retry/race to recover from.
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
{
  ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl;

  op_ret = handle_dir_verify_permission();
  if (op_ret < 0) {
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object_junk;
  std::tie(bucket_name, object_junk) =  *parse_path(path);

  rgw_raw_obj obj(store->get_zone_params().domain_root,
                  rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
                                  binfo, NULL, &battrs);
  if (op_ret < 0 && op_ret != -ENOENT) {
    return op_ret;
  }
  const bool bucket_exists = (op_ret != -ENOENT);

  if (bucket_exists) {
    // An existing bucket owned by somebody else is a name conflict.
    RGWAccessControlPolicy old_policy(s->cct);
    int r = get_bucket_policy_from_attr(s->cct, store, binfo,
                                        battrs, &old_policy);
    if (r >= 0)  {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return op_ret;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket = nullptr;
  uint32_t *pmaster_num_shards = nullptr;
  real_time creation_time;
  obj_version objv, ep_objv, *pobjv = nullptr;

  if (! store->is_meta_master()) {
    // Not the metadata master: create the bucket there first and adopt
    // the versions/shard count it hands back.
    JSONParser jp;
    ceph::bufferlist in_data;
    req_info info = s->info;
    forward_req_info(s->cct, info, bucket_name);
    op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
    if (op_ret < 0) {
      return op_ret;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);

    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver="
                      << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time
                      << dendl;

    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = nullptr;
    pmaster_num_shards = nullptr;
  }


  std::string placement_rule;
  if (bucket_exists) {
    // Re-creating an existing bucket must land on the same placement
    // rule it already has; otherwise report a conflict.
    std::string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user),
                                            store->get_zonegroup().get_id(),
                                            placement_rule,
                                            &selected_placement_rule,
                                            nullptr);
    if (selected_placement_rule != binfo.placement_rule) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl;
      return op_ret;
    }
  }

  /* Create metadata: ACLs. */
  std::map<std::string, ceph::bufferlist> attrs;
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;

  rgw_bucket bucket;
  bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  bucket.name = bucket_name;


  RGWBucketInfo out_info;
  op_ret = store->create_bucket(*(s->user),
                                bucket,
                                store->get_zonegroup().get_id(),
                                placement_rule, binfo.swift_ver_location,
                                pquota_info, attrs,
                                out_info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret
                    << ", bucket=" << bucket << dendl;

  if (op_ret && op_ret != -EEXIST) {
    return op_ret;
  }

  const bool existed = (op_ret == -EEXIST);
  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (out_info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl;
      return op_ret;
    }
    bucket = out_info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
                           out_info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id,
                               bucket.tenant, bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret="
                       << op_ret << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    ldout(s->cct, 20) << "bulk upload: containers already exists"
                      << dendl;
    op_ret = -ERR_BUCKET_EXISTS;
  }

  return op_ret;
}
6371
6372
6373 bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
6374 const rgw_obj& obj,
6375 std::map<std::string, ceph::bufferlist>& battrs,
6376 ACLOwner& bucket_owner /* out */)
6377 {
6378 RGWAccessControlPolicy bacl(store->ctx());
6379 op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
6380 if (op_ret < 0) {
6381 ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket"
6382 << dendl;
6383 return false;
6384 }
6385
6386 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
6387
6388 bucket_owner = bacl.get_owner();
6389 if (policy) {
6390 auto e = policy->eval(s->env, *s->auth.identity,
6391 rgw::IAM::s3PutObject, obj);
6392 if (e == Effect::Allow) {
6393 return true;
6394 } else if (e == Effect::Deny) {
6395 return false;
6396 }
6397 }
6398
6399 return verify_bucket_permission_no_policy(s, s->user_acl.get(),
6400 &bacl, RGW_PERM_WRITE);
6401 }
6402
// Handle a regular-file entry from the uploaded archive: authorize the
// write, check quotas, then stream `size` bytes from `body` through the
// put-obj processor (optionally via a compressor filter), computing the
// MD5 etag along the way, and finally commit object metadata (etag, ACL,
// compression info).
int RGWBulkUploadOp::handle_file(const boost::string_ref path,
                                 const size_t size,
                                 AlignedStreamGetter& body)
{

  ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size
                    << dendl;

  RGWPutObjDataProcessor *filter = nullptr;
  boost::optional<RGWPutObj_Compress> compressor;

  // Enforce the global single-PUT size cap up front.
  if (size > static_cast<const size_t>(s->cct->_conf->rgw_max_put_size)) {
    op_ret = -ERR_TOO_LARGE;
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object;
  std::tie(bucket_name, object) = *parse_path(path);

  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  ACLOwner bowner;
  op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                  bucket_name, binfo, nullptr, &battrs);
  if (op_ret == -ENOENT) {
    ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name
                      << dendl;
  } else if (op_ret < 0) {
    return op_ret;
  }

  if (! handle_file_verify_permission(binfo,
				      rgw_obj(binfo.bucket, object),
				      battrs, bowner)) {
    ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl;
    op_ret = -EACCES;
    return op_ret;
  }

  // Pre-flight quota/shard checks using the declared size.
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
                              user_quota, bucket_quota, size);
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  RGWPutObjProcessor_Atomic processor(obj_ctx,
                                      binfo,
                                      binfo.bucket,
                                      object.name,
                                      /* part size */
                                      s->cct->_conf->rgw_obj_stripe_size,
                                      s->req_id,
                                      binfo.versioning_enabled());

  /* No filters by default. */
  filter = &processor;

  op_ret = processor.prepare(store, nullptr);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret="
                      << op_ret << dendl;
    return op_ret;
  }

  // Insert a compression filter when the placement's zone config asks
  // for one and the plugin loads.
  const auto& compression_type = store->get_zone_params().get_compression_type(
      binfo.placement_rule);
  CompressorRef plugin;
  if (compression_type != "none") {
    plugin = Compressor::create(s->cct, compression_type);
    if (! plugin) {
      ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type "
                       << compression_type << dendl;
    } else {
      compressor.emplace(s->cct, plugin, filter);
      filter = &*compressor;
    }
  }

  /* Upload file content. */
  ssize_t len = 0;
  size_t ofs = 0;
  MD5 hash;
  do {
    ceph::bufferlist data;
    len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);

    ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl;
    if (len < 0) {
      op_ret = len;
      return op_ret;
    } else if (len > 0) {
      hash.Update((const byte *)data.c_str(), data.length());
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
			  << op_ret << dendl;
        return op_ret;
      }

      ofs += len;
    }

  } while (len > 0);

  // The tar header promised `size` bytes; anything else is an error.
  if (ofs != size) {
    ldout(s->cct, 10) << "bulk upload: real file size different from declared"
                      << dendl;
    op_ret = -EINVAL;
  }

  // Re-check quotas with the actual byte count streamed.
  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
			      user_quota, bucket_quota, size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path
                      << dendl;
    return op_ret;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    return op_ret;
  }

  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  hash.Final(m);
  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  /* Create metadata: ETAG. */
  std::map<std::string, ceph::bufferlist> attrs;
  std::string etag = calc_md5;
  ceph::bufferlist etag_bl;
  etag_bl.append(etag.c_str(), etag.size() + 1);
  attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));

  /* Create metadata: ACLs. */
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  /* Create metadata: compression info. */
  if (compressor && compressor->is_compressed()) {
    ceph::bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = std::move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
  }

  /* Complete the transaction. */
  op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs,
                              ceph::real_time() /* delete_at */);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret="
                      << op_ret << dendl;
  }

  return op_ret;
}
6573
6574 void RGWBulkUploadOp::execute()
6575 {
6576 ceph::bufferlist buffer(64 * 1024);
6577
6578 ldout(s->cct, 20) << "bulk upload: start" << dendl;
6579
6580 /* Create an instance of stream-abstracting class. Having this indirection
6581 * allows for easy introduction of decompressors like gzip and bzip2. */
6582 auto stream = create_stream();
6583 if (! stream) {
6584 return;
6585 }
6586
6587 /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See:
6588 * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
6589 std::string bucket_path, file_prefix;
6590 std::tie(bucket_path, file_prefix) = handle_upload_path(s);
6591
6592 auto status = rgw::tar::StatusIndicator::create();
6593 do {
6594 op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
6595 if (op_ret < 0) {
6596 ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl;
6597 return;
6598 }
6599
6600 /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
6601 * must be tracked to detect out end-of-archive. It occurs when both of
6602 * them are empty (zeroed). Tracing this particular inter-block dependency
6603 * is responsibility of the rgw::tar::StatusIndicator class. */
6604 boost::optional<rgw::tar::HeaderView> header;
6605 std::tie(status, header) = rgw::tar::interpret_block(status, buffer);
6606
6607 if (! status.empty() && header) {
6608 /* This specific block isn't empty (entirely zeroed), so we can parse
6609 * it as a TAR header and dispatch. At the moment we do support only
6610 * regular files and directories. Everything else (symlinks, devices)
6611 * will be ignored but won't cease the whole upload. */
6612 switch (header->get_filetype()) {
6613 case rgw::tar::FileType::NORMAL_FILE: {
6614 ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl;
6615
6616 boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
6617 file_prefix + header->get_filename().to_string();
6618 auto body = AlignedStreamGetter(0, header->get_filesize(),
6619 rgw::tar::BLOCK_SIZE, *stream);
6620 op_ret = handle_file(filename,
6621 header->get_filesize(),
6622 body);
6623 if (! op_ret) {
6624 /* Only regular files counts. */
6625 num_created++;
6626 } else {
6627 failures.emplace_back(op_ret, filename.to_string());
6628 }
6629 break;
6630 }
6631 case rgw::tar::FileType::DIRECTORY: {
6632 ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl;
6633
6634 boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
6635 op_ret = handle_dir(dirname);
6636 if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
6637 failures.emplace_back(op_ret, dirname.to_string());
6638 }
6639 break;
6640 }
6641 default: {
6642 /* Not recognized. Skip. */
6643 op_ret = 0;
6644 break;
6645 }
6646 }
6647
6648 /* In case of any problems with sub-request authorization Swift simply
6649 * terminates whole upload immediately. */
6650 if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
6651 terminal_errors)) {
6652 ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret
6653 << dendl;
6654 break;
6655 }
6656 } else {
6657 ldout(s->cct, 2) << "bulk upload: an empty block" << dendl;
6658 op_ret = 0;
6659 }
6660
6661 buffer.clear();
6662 } while (! status.eof());
6663
6664 return;
6665 }
6666
6667 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
6668 {
6669 const size_t aligned_legnth = length + (-length % alignment);
6670 ceph::bufferlist junk;
6671
6672 DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
6673 }
6674
6675 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
6676 ceph::bufferlist& dst)
6677 {
6678 const size_t max_to_read = std::min(want, length - position);
6679 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
6680 if (len > 0) {
6681 position += len;
6682 }
6683 return len;
6684 }
6685
6686 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
6687 ceph::bufferlist& dst)
6688 {
6689 const auto len = DecoratedStreamGetter::get_exactly(want, dst);
6690 if (len > 0) {
6691 position += len;
6692 }
6693 return len;
6694 }
6695
6696 int RGWSetAttrs::verify_permission()
6697 {
6698 // This looks to be part of the RGW-NFS machinery and has no S3 or
6699 // Swift equivalent.
6700 bool perm;
6701 if (!s->object.empty()) {
6702 perm = verify_object_permission_no_policy(s, RGW_PERM_WRITE);
6703 } else {
6704 perm = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
6705 }
6706 if (!perm)
6707 return -EACCES;
6708
6709 return 0;
6710 }
6711
void RGWSetAttrs::pre_exec()
{
  // Common pre-execution hook shared by bucket/object ops; delegates to
  // the file-wide helper with the current request state.
  rgw_bucket_object_pre_exec(s);
}
6716
6717 void RGWSetAttrs::execute()
6718 {
6719 op_ret = get_params();
6720 if (op_ret < 0)
6721 return;
6722
6723 rgw_obj obj(s->bucket, s->object);
6724
6725 if (!s->object.empty()) {
6726 store->set_atomic(s->obj_ctx, obj);
6727 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr);
6728 } else {
6729 for (auto& iter : attrs) {
6730 s->bucket_attrs[iter.first] = std::move(iter.second);
6731 }
6732 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs,
6733 &s->bucket_info.objv_tracker);
6734 }
6735 }
6736
void RGWGetObjLayout::pre_exec()
{
  // Common pre-execution hook shared by bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
6741
6742 void RGWGetObjLayout::execute()
6743 {
6744 rgw_obj obj(s->bucket, s->object);
6745 RGWRados::Object target(store,
6746 s->bucket_info,
6747 *static_cast<RGWObjectCtx *>(s->obj_ctx),
6748 rgw_obj(s->bucket, s->object));
6749 RGWRados::Object::Read stat_op(&target);
6750
6751 op_ret = stat_op.prepare();
6752 if (op_ret < 0) {
6753 return;
6754 }
6755
6756 head_obj = stat_op.state.head_obj;
6757
6758 op_ret = target.get_manifest(&manifest);
6759 }
6760
6761
6762 int RGWConfigBucketMetaSearch::verify_permission()
6763 {
6764 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6765 return -EACCES;
6766 }
6767
6768 return 0;
6769 }
6770
void RGWConfigBucketMetaSearch::pre_exec()
{
  // Common pre-execution hook shared by bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
6775
6776 void RGWConfigBucketMetaSearch::execute()
6777 {
6778 op_ret = get_params();
6779 if (op_ret < 0) {
6780 ldout(s->cct, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
6781 return;
6782 }
6783
6784 s->bucket_info.mdsearch_config = mdsearch_config;
6785
6786 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6787 if (op_ret < 0) {
6788 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6789 return;
6790 }
6791 }
6792
6793 int RGWGetBucketMetaSearch::verify_permission()
6794 {
6795 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6796 return -EACCES;
6797 }
6798
6799 return 0;
6800 }
6801
void RGWGetBucketMetaSearch::pre_exec()
{
  // Common pre-execution hook shared by bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
6806
6807 int RGWDelBucketMetaSearch::verify_permission()
6808 {
6809 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6810 return -EACCES;
6811 }
6812
6813 return 0;
6814 }
6815
void RGWDelBucketMetaSearch::pre_exec()
{
  // Common pre-execution hook shared by bucket/object ops.
  rgw_bucket_object_pre_exec(s);
}
6820
6821 void RGWDelBucketMetaSearch::execute()
6822 {
6823 s->bucket_info.mdsearch_config.clear();
6824
6825 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6826 if (op_ret < 0) {
6827 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6828 return;
6829 }
6830 }
6831
6832
// Virtual destructor defined out-of-line; the base handler owns nothing
// that needs explicit cleanup.
RGWHandler::~RGWHandler()
{
}
6836
int RGWHandler::init(RGWRados *_store,
                     struct req_state *_s,
                     rgw::io::BasicClient *cio)
{
  // Stash the store and per-request state. `cio` is accepted for interface
  // compatibility but unused here — presumably subclasses override init()
  // and make use of it.
  store = _store;
  s = _s;

  return 0;
}
6846
6847 int RGWHandler::do_init_permissions()
6848 {
6849 int ret = rgw_build_bucket_policies(store, s);
6850 s->env = rgw_build_iam_environment(store, s);
6851
6852 if (ret < 0) {
6853 ldout(s->cct, 10) << "read_permissions on " << s->bucket << " ret=" << ret << dendl;
6854 if (ret == -ENODATA)
6855 ret = -EACCES;
6856 }
6857
6858 return ret;
6859 }
6860
6861 int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
6862 {
6863 if (only_bucket) {
6864 /* already read bucket info */
6865 return 0;
6866 }
6867 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
6868
6869 if (ret < 0) {
6870 ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":"
6871 << s->object << " only_bucket=" << only_bucket
6872 << " ret=" << ret << dendl;
6873 if (ret == -ENODATA)
6874 ret = -EACCES;
6875 }
6876
6877 return ret;
6878 }
6879
// Delegate error translation to the protocol (S3/Swift) handler that
// instantiated this op.
int RGWOp::error_handler(int err_no, string *error_content) {
  return dialect_handler->error_handler(err_no, error_content);
}
6883
// Base-class fallback: returns the error unchanged and leaves
// error_content untouched.
int RGWHandler::error_handler(int err_no, string *error_content) {
  // This is the do-nothing error handler
  return err_no;
}
6888
6889
6890 void RGWPutBucketPolicy::send_response()
6891 {
6892 if (op_ret) {
6893 set_req_state_err(s, op_ret);
6894 }
6895 dump_errno(s);
6896 end_header(s);
6897 }
6898
6899 int RGWPutBucketPolicy::verify_permission()
6900 {
6901 if (!verify_bucket_permission(s, rgw::IAM::s3PutBucketPolicy)) {
6902 return -EACCES;
6903 }
6904
6905 return 0;
6906 }
6907
6908 int RGWPutBucketPolicy::get_params()
6909 {
6910 const auto max_size = s->cct->_conf->rgw_max_put_param_size;
6911 // At some point when I have more time I want to make a version of
6912 // rgw_rest_read_all_input that doesn't use malloc.
6913 op_ret = rgw_rest_read_all_input(s, &data, &len, max_size, false);
6914 // And throws exceptions.
6915 return op_ret;
6916 }
6917
6918 void RGWPutBucketPolicy::execute()
6919 {
6920 op_ret = get_params();
6921 if (op_ret < 0) {
6922 return;
6923 }
6924
6925 bufferlist in_data = bufferlist::static_from_mem(data, len);
6926
6927 if (!store->is_meta_master()) {
6928 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
6929 if (op_ret < 0) {
6930 ldout(s->cct, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
6931 return;
6932 }
6933 }
6934
6935 try {
6936 const Policy p(s->cct, s->bucket_tenant, in_data);
6937 op_ret = retry_raced_bucket_write(store, s, [&p, this] {
6938 auto attrs = s->bucket_attrs;
6939 attrs[RGW_ATTR_IAM_POLICY].clear();
6940 attrs[RGW_ATTR_IAM_POLICY].append(p.text);
6941 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
6942 &s->bucket_info.objv_tracker);
6943 return op_ret;
6944 });
6945 } catch (rgw::IAM::PolicyParseException& e) {
6946 ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
6947 op_ret = -EINVAL;
6948 }
6949 }
6950
6951 void RGWGetBucketPolicy::send_response()
6952 {
6953 if (op_ret) {
6954 set_req_state_err(s, op_ret);
6955 }
6956 dump_errno(s);
6957 end_header(s, this, "application/json");
6958 dump_body(s, policy);
6959 }
6960
6961 int RGWGetBucketPolicy::verify_permission()
6962 {
6963 if (!verify_bucket_permission(s, rgw::IAM::s3GetBucketPolicy)) {
6964 return -EACCES;
6965 }
6966
6967 return 0;
6968 }
6969
6970 void RGWGetBucketPolicy::execute()
6971 {
6972 auto attrs = s->bucket_attrs;
6973 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
6974 if (aiter == attrs.end()) {
6975 ldout(s->cct, 0) << __func__ << " can't find bucket IAM POLICY attr"
6976 << " bucket_name = " << s->bucket_name << dendl;
6977 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6978 s->err.message = "The bucket policy does not exist";
6979 return;
6980 } else {
6981 policy = attrs[RGW_ATTR_IAM_POLICY];
6982
6983 if (policy.length() == 0) {
6984 ldout(s->cct, 10) << "The bucket policy does not exist, bucket: " << s->bucket_name << dendl;
6985 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6986 s->err.message = "The bucket policy does not exist";
6987 return;
6988 }
6989 }
6990 }
6991
6992 void RGWDeleteBucketPolicy::send_response()
6993 {
6994 if (op_ret) {
6995 set_req_state_err(s, op_ret);
6996 }
6997 dump_errno(s);
6998 end_header(s);
6999 }
7000
7001 int RGWDeleteBucketPolicy::verify_permission()
7002 {
7003 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucketPolicy)) {
7004 return -EACCES;
7005 }
7006
7007 return 0;
7008 }
7009
7010 void RGWDeleteBucketPolicy::execute()
7011 {
7012 op_ret = retry_raced_bucket_write(store, s, [this] {
7013 auto attrs = s->bucket_attrs;
7014 attrs.erase(RGW_ATTR_IAM_POLICY);
7015 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
7016 &s->bucket_info.objv_tracker);
7017 return op_ret;
7018 });
7019 }
7020
void RGWGetClusterStat::execute()
{
  // Query cluster-wide statistics through the raw librados handle,
  // filling the stats_op result structure.
  op_ret = this->store->get_rados_handle()->cluster_stat(stats_op);
}
7025
7026