]> git.proxmox.com Git - ceph.git/blob - ceph/src/rgw/rgw_op.cc
update source to 12.2.11
[ceph.git] / ceph / src / rgw / rgw_op.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <system_error>
7 #include <unistd.h>
8
9 #include <sstream>
10
11 #include <boost/algorithm/string/predicate.hpp>
12 #include <boost/bind.hpp>
13 #include <boost/optional.hpp>
14 #include <boost/utility/in_place_factory.hpp>
15 #include <boost/utility/string_view.hpp>
16
17 #include "common/Clock.h"
18 #include "common/armor.h"
19 #include "common/backport14.h"
20 #include "common/errno.h"
21 #include "common/mime.h"
22 #include "common/utf8.h"
23 #include "common/ceph_json.h"
24
25 #include "rgw_rados.h"
26 #include "rgw_op.h"
27 #include "rgw_rest.h"
28 #include "rgw_acl.h"
29 #include "rgw_acl_s3.h"
30 #include "rgw_acl_swift.h"
31 #include "rgw_user.h"
32 #include "rgw_bucket.h"
33 #include "rgw_log.h"
34 #include "rgw_multi.h"
35 #include "rgw_multi_del.h"
36 #include "rgw_cors.h"
37 #include "rgw_cors_s3.h"
38 #include "rgw_rest_conn.h"
39 #include "rgw_rest_s3.h"
40 #include "rgw_tar.h"
41 #include "rgw_client_io.h"
42 #include "rgw_compression.h"
43 #include "rgw_role.h"
44 #include "rgw_tag_s3.h"
45 #include "cls/lock/cls_lock_client.h"
46 #include "cls/rgw/cls_rgw_client.h"
47
48
49 #include "include/assert.h"
50
51 #include "compressor/Compressor.h"
52
53 #include "rgw_acl_swift.h"
54
55 #define dout_context g_ceph_context
56 #define dout_subsys ceph_subsys_rgw
57
58 using namespace std;
59 using namespace librados;
60 using ceph::crypto::MD5;
61 using boost::optional;
62 using boost::none;
63
64 using rgw::IAM::ARN;
65 using rgw::IAM::Effect;
66 using rgw::IAM::Policy;
67
68 using rgw::IAM::Policy;
69
/* Object-name namespaces for multipart-upload parts and shadow objects. */
static string mp_ns = RGW_OBJ_NS_MULTIPART;
static string shadow_ns = RGW_OBJ_NS_SHADOW;

/* Forward declarations for helpers that relay requests to the master zone. */
static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
                                     bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);

/* Filter applied when listing objects in the multipart meta namespace. */
static MultipartMetaFilter mp_filter;
78
/*
 * Parse the HTTP Range header value (member range_str) into the member
 * offsets ofs/end, following RFC 2616 byte-range-spec semantics.
 * Sets partial_content and range_parsed when a range takes effect.
 *
 * Returns 0 when a range was parsed, when the header does not look like
 * a byte range at all (treated as "no range"), or when an invalid range
 * is deliberately ignored via rgw_ignore_get_invalid_range; returns
 * -ERANGE for an invalid range otherwise.
 */
int RGWGetObj::parse_range(void)
{
  int r = -ERANGE;
  string rs(range_str);
  string ofs_str;
  string end_str;

  ignore_invalid_range = s->cct->_conf->rgw_ignore_get_invalid_range;
  partial_content = false;

  size_t pos = rs.find("bytes=");
  if (pos == string::npos) {
    /* no exact "bytes=" prefix: tolerate whitespace around the unit,
     * e.g. "bytes = 0-99"; anything else is treated as "no range" */
    pos = 0;
    while (isspace(rs[pos]))
      pos++;
    int end = pos;     /* NB: local 'end' shadows the member inside this block */
    while (isalpha(rs[end]))
      end++;
    if (strncasecmp(rs.c_str(), "bytes", end - pos) != 0)
      return 0;
    while (isspace(rs[end]))
      end++;
    if (rs[end] != '=')
      return 0;
    rs = rs.substr(end + 1);
  } else {
    rs = rs.substr(pos + 6); /* size of("bytes=") */
  }
  pos = rs.find('-');
  if (pos == string::npos)
    goto done;

  partial_content = true;

  ofs_str = rs.substr(0, pos);
  end_str = rs.substr(pos + 1);
  if (end_str.length()) {
    /* member 'end': last byte position of the requested range */
    end = atoll(end_str.c_str());
    if (end < 0)
      goto done;
  }

  if (ofs_str.length()) {
    ofs = atoll(ofs_str.c_str());
  } else { // RFC2616 suffix-byte-range-spec
    /* "-N" means the final N bytes; encoded as a negative ofs */
    ofs = -end;
    end = -1;
  }

  /* a first-byte-pos beyond the last-byte-pos is not satisfiable */
  if (end >= 0 && end < ofs)
    goto done;

  range_parsed = true;
  return 0;

done:
  if (ignore_invalid_range) {
    /* pretend no Range header was sent and serve the whole object */
    partial_content = false;
    ofs = 0;
    end = -1;
    range_parsed = false; // allow retry
    r = 0;
  }

  return r;
}
145
146 static int decode_policy(CephContext *cct,
147 bufferlist& bl,
148 RGWAccessControlPolicy *policy)
149 {
150 bufferlist::iterator iter = bl.begin();
151 try {
152 policy->decode(iter);
153 } catch (buffer::error& err) {
154 ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
155 return -EIO;
156 }
157 if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
158 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
159 ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
160 s3policy->to_xml(*_dout);
161 *_dout << dendl;
162 }
163 return 0;
164 }
165
166
167 static int get_user_policy_from_attr(CephContext * const cct,
168 RGWRados * const store,
169 map<string, bufferlist>& attrs,
170 RGWAccessControlPolicy& policy /* out */)
171 {
172 auto aiter = attrs.find(RGW_ATTR_ACL);
173 if (aiter != attrs.end()) {
174 int ret = decode_policy(cct, aiter->second, &policy);
175 if (ret < 0) {
176 return ret;
177 }
178 } else {
179 return -ENOENT;
180 }
181
182 return 0;
183 }
184
185 static int get_bucket_instance_policy_from_attr(CephContext *cct,
186 RGWRados *store,
187 RGWBucketInfo& bucket_info,
188 map<string, bufferlist>& bucket_attrs,
189 RGWAccessControlPolicy *policy,
190 rgw_raw_obj& obj)
191 {
192 map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
193
194 if (aiter != bucket_attrs.end()) {
195 int ret = decode_policy(cct, aiter->second, policy);
196 if (ret < 0)
197 return ret;
198 } else {
199 ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
200 RGWUserInfo uinfo;
201 /* object exists, but policy is broken */
202 int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
203 if (r < 0)
204 return r;
205
206 policy->create_default(bucket_info.owner, uinfo.display_name);
207 }
208 return 0;
209 }
210
211 static int get_obj_policy_from_attr(CephContext *cct,
212 RGWRados *store,
213 RGWObjectCtx& obj_ctx,
214 RGWBucketInfo& bucket_info,
215 map<string, bufferlist>& bucket_attrs,
216 RGWAccessControlPolicy *policy,
217 rgw_obj& obj)
218 {
219 bufferlist bl;
220 int ret = 0;
221
222 RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
223 RGWRados::Object::Read rop(&op_target);
224
225 ret = rop.get_attr(RGW_ATTR_ACL, bl);
226 if (ret >= 0) {
227 ret = decode_policy(cct, bl, policy);
228 if (ret < 0)
229 return ret;
230 } else if (ret == -ENODATA) {
231 /* object exists, but policy is broken */
232 ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
233 RGWUserInfo uinfo;
234 ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
235 if (ret < 0)
236 return ret;
237
238 policy->create_default(bucket_info.owner, uinfo.display_name);
239 }
240 return ret;
241 }
242
243
244 /**
245 * Get the AccessControlPolicy for an object off of disk.
246 * policy: must point to a valid RGWACL, and will be filled upon return.
247 * bucket: name of the bucket containing the object.
248 * object: name of the object to get the ACL for.
249 * Returns: 0 on success, -ERR# otherwise.
250 */
251 static int get_bucket_policy_from_attr(CephContext *cct,
252 RGWRados *store,
253 RGWBucketInfo& bucket_info,
254 map<string, bufferlist>& bucket_attrs,
255 RGWAccessControlPolicy *policy)
256 {
257 rgw_raw_obj instance_obj;
258 store->get_bucket_instance_obj(bucket_info.bucket, instance_obj);
259 return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs,
260 policy, instance_obj);
261 }
262
263 static optional<Policy> get_iam_policy_from_attr(CephContext* cct,
264 RGWRados* store,
265 map<string, bufferlist>& attrs,
266 const string& tenant) {
267 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
268 if (i != attrs.end()) {
269 return Policy(cct, tenant, i->second);
270 } else {
271 return none;
272 }
273 }
274
275 static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
276 {
277 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
278 RGWRados::Object::Read read_op(&op_target);
279
280 read_op.params.attrs = &attrs;
281
282 return read_op.prepare();
283 }
284
285 static int modify_obj_attr(RGWRados *store, struct req_state *s, rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
286 {
287 map<string, bufferlist> attrs;
288 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
289 RGWRados::Object::Read read_op(&op_target);
290
291 read_op.params.attrs = &attrs;
292
293 int r = read_op.prepare();
294 if (r < 0) {
295 return r;
296 }
297 store->set_atomic(s->obj_ctx, read_op.state.obj);
298 attrs[attr_name] = attr_val;
299 return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL);
300 }
301
302 static int get_system_obj_attrs(RGWRados *store, struct req_state *s, rgw_raw_obj& obj, map<string, bufferlist>& attrs,
303 uint64_t *obj_size, RGWObjVersionTracker *objv_tracker)
304 {
305 RGWRados::SystemObject src(store, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
306 RGWRados::SystemObject::Read rop(&src);
307
308 rop.stat_params.attrs = &attrs;
309 rop.stat_params.obj_size = obj_size;
310
311 int ret = rop.stat(objv_tracker);
312 return ret;
313 }
314
315 static int read_bucket_policy(RGWRados *store,
316 struct req_state *s,
317 RGWBucketInfo& bucket_info,
318 map<string, bufferlist>& bucket_attrs,
319 RGWAccessControlPolicy *policy,
320 rgw_bucket& bucket)
321 {
322 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
323 ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
324 return -ERR_USER_SUSPENDED;
325 }
326
327 if (bucket.name.empty()) {
328 return 0;
329 }
330
331 int ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
332 if (ret == -ENOENT) {
333 ret = -ERR_NO_SUCH_BUCKET;
334 }
335
336 return ret;
337 }
338
339 static int read_obj_policy(RGWRados *store,
340 struct req_state *s,
341 RGWBucketInfo& bucket_info,
342 map<string, bufferlist>& bucket_attrs,
343 RGWAccessControlPolicy* acl,
344 optional<Policy>& policy,
345 rgw_bucket& bucket,
346 rgw_obj_key& object)
347 {
348 string upload_id;
349 upload_id = s->info.args.get("uploadId");
350 rgw_obj obj;
351
352 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
353 ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
354 return -ERR_USER_SUSPENDED;
355 }
356
357 if (!upload_id.empty()) {
358 /* multipart upload */
359 RGWMPObj mp(object.name, upload_id);
360 string oid = mp.get_meta();
361 obj.init_ns(bucket, oid, mp_ns);
362 obj.set_in_extra_data(true);
363 } else {
364 obj = rgw_obj(bucket, object);
365 }
366 policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);
367
368 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
369 int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
370 bucket_info, bucket_attrs, acl, obj);
371 if (ret == -ENOENT) {
372 /* object does not exist checking the bucket's ACL to make sure
373 that we send a proper error code */
374 RGWAccessControlPolicy bucket_policy(s->cct);
375 ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
376 if (ret < 0) {
377 return ret;
378 }
379
380 const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
381 if (bucket_owner.compare(s->user->user_id) != 0 &&
382 ! s->auth.identity->is_admin_of(bucket_owner) &&
383 ! bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
384 RGW_PERM_READ)) {
385 ret = -EACCES;
386 } else {
387 ret = -ENOENT;
388 }
389 }
390
391 return ret;
392 }
393
394 /**
395 * Get the AccessControlPolicy for an user, bucket or object off of disk.
396 * s: The req_state to draw information from.
397 * only_bucket: If true, reads the user and bucket ACLs rather than the object ACL.
398 * Returns: 0 on success, -ERR# otherwise.
399 */
400 int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
401 {
402 int ret = 0;
403 rgw_obj_key obj;
404 RGWUserInfo bucket_owner_info;
405 RGWObjectCtx obj_ctx(store);
406
407 string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
408 if (!bi.empty()) {
409 ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id);
410 if (ret < 0) {
411 return ret;
412 }
413 }
414
415 if(s->dialect.compare("s3") == 0) {
416 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_S3>(s->cct);
417 } else if(s->dialect.compare("swift") == 0) {
418 /* We aren't allocating the account policy for those operations using
419 * the Swift's infrastructure that don't really need req_state::user.
420 * Typical example here is the implementation of /info. */
421 if (!s->user->user_id.empty()) {
422 s->user_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
423 }
424 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
425 } else {
426 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
427 }
428
429 /* check if copy source is within the current domain */
430 if (!s->src_bucket_name.empty()) {
431 RGWBucketInfo source_info;
432
433 if (s->bucket_instance_id.empty()) {
434 ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL);
435 } else {
436 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL);
437 }
438 if (ret == 0) {
439 string& zonegroup = source_info.zonegroup;
440 s->local_source = store->get_zonegroup().equals(zonegroup);
441 }
442 }
443
444 struct {
445 rgw_user uid;
446 std::string display_name;
447 } acct_acl_user = {
448 s->user->user_id,
449 s->user->display_name,
450 };
451
452 if (!s->bucket_name.empty()) {
453 s->bucket_exists = true;
454 if (s->bucket_instance_id.empty()) {
455 ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, s->bucket_info, NULL, &s->bucket_attrs);
456 } else {
457 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, s->bucket_info, NULL, &s->bucket_attrs);
458 }
459 if (ret < 0) {
460 if (ret != -ENOENT) {
461 string bucket_log;
462 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
463 ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl;
464 return ret;
465 }
466 s->bucket_exists = false;
467 }
468 s->bucket = s->bucket_info.bucket;
469
470 if (s->bucket_exists) {
471 ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs,
472 s->bucket_acl.get(), s->bucket);
473 acct_acl_user = {
474 s->bucket_info.owner,
475 s->bucket_acl->get_owner().get_display_name(),
476 };
477 } else {
478 s->bucket_acl->create_default(s->user->user_id, s->user->display_name);
479 ret = -ERR_NO_SUCH_BUCKET;
480 }
481
482 s->bucket_owner = s->bucket_acl->get_owner();
483
484 RGWZoneGroup zonegroup;
485 int r = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
486 if (!r) {
487 if (!zonegroup.endpoints.empty()) {
488 s->zonegroup_endpoint = zonegroup.endpoints.front();
489 } else {
490 // use zonegroup's master zone endpoints
491 auto z = zonegroup.zones.find(zonegroup.master_zone);
492 if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
493 s->zonegroup_endpoint = z->second.endpoints.front();
494 }
495 }
496 s->zonegroup_name = zonegroup.get_name();
497 }
498 if (r < 0 && ret == 0) {
499 ret = r;
500 }
501
502 if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) {
503 ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl;
504 /* we now need to make sure that the operation actually requires copy source, that is
505 * it's a copy operation
506 */
507 if (store->get_zonegroup().is_master_zonegroup() && s->system_request) {
508 /*If this is the master, don't redirect*/
509 } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
510 /* If op is get bucket location, don't redirect */
511 } else if (!s->local_source ||
512 (s->op != OP_PUT && s->op != OP_COPY) ||
513 s->object.empty()) {
514 return -ERR_PERMANENT_REDIRECT;
515 }
516 }
517 }
518
519 /* handle user ACL only for those APIs which support it */
520 if (s->user_acl) {
521 map<string, bufferlist> uattrs;
522
523 ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs);
524 if (!ret) {
525 ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
526 }
527 if (-ENOENT == ret) {
528 /* In already existing clusters users won't have ACL. In such case
529 * assuming that only account owner has the rights seems to be
530 * reasonable. That allows to have only one verification logic.
531 * NOTE: there is small compatibility kludge for global, empty tenant:
532 * 1. if we try to reach an existing bucket, its owner is considered
533 * as account owner.
534 * 2. otherwise account owner is identity stored in s->user->user_id. */
535 s->user_acl->create_default(acct_acl_user.uid,
536 acct_acl_user.display_name);
537 ret = 0;
538 } else {
539 ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id="
540 << s->user->user_id
541 << ", ret="
542 << ret
543 << ")" << dendl;
544 return ret;
545 }
546 }
547
548 try {
549 s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
550 s->bucket_tenant);
551 } catch (const std::exception& e) {
552 // Really this is a can't happen condition. We parse the policy
553 // when it's given to us, so perhaps we should abort or otherwise
554 // raise bloody murder.
555 lderr(s->cct) << "Error reading IAM Policy: " << e.what() << dendl;
556 ret = -EACCES;
557 }
558
559 return ret;
560 }
561
562 /**
563 * Get the AccessControlPolicy for a bucket or object off of disk.
564 * s: The req_state to draw information from.
565 * only_bucket: If true, reads the bucket ACL rather than the object ACL.
566 * Returns: 0 on success, -ERR# otherwise.
567 */
568 int rgw_build_object_policies(RGWRados *store, struct req_state *s,
569 bool prefetch_data)
570 {
571 int ret = 0;
572
573 if (!s->object.empty()) {
574 if (!s->bucket_exists) {
575 return -ERR_NO_SUCH_BUCKET;
576 }
577 s->object_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
578
579 rgw_obj obj(s->bucket, s->object);
580
581 store->set_atomic(s->obj_ctx, obj);
582 if (prefetch_data) {
583 store->set_prefetch_data(s->obj_ctx, obj);
584 }
585 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
586 s->object_acl.get(), s->iam_policy, s->bucket,
587 s->object);
588 }
589
590 return ret;
591 }
592
593 rgw::IAM::Environment rgw_build_iam_environment(RGWRados* store,
594 struct req_state* s)
595 {
596 rgw::IAM::Environment e;
597 const auto& m = s->info.env->get_map();
598 auto t = ceph::real_clock::now();
599 e.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t)));
600 e.emplace("aws:EpochTime", ceph::to_iso_8601(t));
601 // TODO: This is fine for now, but once we have STS we'll need to
602 // look and see. Also this won't work with the IdentityApplier
603 // model, since we need to know the actual credential.
604 e.emplace("aws:PrincipalType", "User");
605
606 auto i = m.find("HTTP_REFERER");
607 if (i != m.end()) {
608 e.emplace("aws:Referer", i->second);
609 }
610
611 if (rgw_transport_is_secure(s->cct, *s->info.env)) {
612 e.emplace("aws:SecureTransport", "true");
613 }
614
615 const auto remote_addr_param = s->cct->_conf->rgw_remote_addr_param;
616 if (remote_addr_param.length()) {
617 i = m.find(remote_addr_param);
618 } else {
619 i = m.find("REMOTE_ADDR");
620 }
621 if (i != m.end()) {
622 const string* ip = &(i->second);
623 string temp;
624 if (remote_addr_param == "HTTP_X_FORWARDED_FOR") {
625 const auto comma = ip->find(',');
626 if (comma != string::npos) {
627 temp.assign(*ip, 0, comma);
628 ip = &temp;
629 }
630 }
631 e.emplace("aws:SourceIp", *ip);
632 }
633
634 i = m.find("HTTP_USER_AGENT"); {
635 if (i != m.end())
636 e.emplace("aws:UserAgent", i->second);
637 }
638
639 if (s->user) {
640 // What to do about aws::userid? One can have multiple access
641 // keys so that isn't really suitable. Do we have a durable
642 // identifier that can persist through name changes?
643 e.emplace("aws:username", s->user->user_id.id);
644 }
645 return e;
646 }
647
648 void rgw_bucket_object_pre_exec(struct req_state *s)
649 {
650 if (s->expect_cont)
651 dump_continue(s);
652
653 dump_bucket_from_state(s);
654 }
655
656 // So! Now and then when we try to update bucket information, the
657 // bucket has changed during the course of the operation. (Or we have
658 // a cache consistency problem that Watch/Notify isn't ruling out
659 // completely.)
660 //
661 // When this happens, we need to update the bucket info and try
662 // again. We have, however, to try the right *part* again. We can't
663 // simply re-send, since that will obliterate the previous update.
664 //
665 // Thus, callers of this function should include everything that
666 // merges information to be changed into the bucket information as
667 // well as the call to set it.
668 //
669 // The called function must return an integer, negative on error. In
670 // general, they should just return op_ret.
671 namespace {
672 template<typename F>
673 int retry_raced_bucket_write(RGWRados* g, req_state* s, const F& f) {
674 auto r = f();
675 for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) {
676 r = g->try_refresh_bucket_info(s->bucket_info, nullptr,
677 &s->bucket_attrs);
678 if (r >= 0) {
679 r = f();
680 }
681 }
682 return r;
683 }
684 }
685
686
687 int RGWGetObj::verify_permission()
688 {
689 obj = rgw_obj(s->bucket, s->object);
690 store->set_atomic(s->obj_ctx, obj);
691 if (get_data) {
692 store->set_prefetch_data(s->obj_ctx, obj);
693 }
694
695 if (torrent.get_flag()) {
696 if (obj.key.instance.empty()) {
697 action = rgw::IAM::s3GetObjectTorrent;
698 } else {
699 action = rgw::IAM::s3GetObjectVersionTorrent;
700 }
701 } else {
702 if (obj.key.instance.empty()) {
703 action = rgw::IAM::s3GetObject;
704 } else {
705 action = rgw::IAM::s3GetObjectVersion;
706 }
707 }
708
709 if (!verify_object_permission(s, action)) {
710 return -EACCES;
711 }
712
713 return 0;
714 }
715
716
717 int RGWOp::verify_op_mask()
718 {
719 uint32_t required_mask = op_mask();
720
721 ldout(s->cct, 20) << "required_mask= " << required_mask
722 << " user.op_mask=" << s->user->op_mask << dendl;
723
724 if ((s->user->op_mask & required_mask) != required_mask) {
725 return -EPERM;
726 }
727
728 if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) {
729 ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl;
730 return -EPERM;
731 }
732
733 return 0;
734 }
735
736 int RGWGetObjTags::verify_permission()
737 {
738 if (!verify_object_permission(s,
739 s->object.instance.empty() ?
740 rgw::IAM::s3GetObjectTagging:
741 rgw::IAM::s3GetObjectVersionTagging))
742 return -EACCES;
743
744 return 0;
745 }
746
/* Standard pre-exec: 100-continue (if expected) + bucket header dump. */
void RGWGetObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
751
752 void RGWGetObjTags::execute()
753 {
754 rgw_obj obj;
755 map<string,bufferlist> attrs;
756
757 obj = rgw_obj(s->bucket, s->object);
758
759 store->set_atomic(s->obj_ctx, obj);
760
761 op_ret = get_obj_attrs(store, s, obj, attrs);
762 if (op_ret < 0) {
763 ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << obj
764 << " ret=" << op_ret << dendl;
765 return;
766 }
767
768 auto tags = attrs.find(RGW_ATTR_TAGS);
769 if(tags != attrs.end()){
770 has_tags = true;
771 tags_bl.append(tags->second);
772 }
773 send_response_data(tags_bl);
774 }
775
776 int RGWPutObjTags::verify_permission()
777 {
778 if (!verify_object_permission(s,
779 s->object.instance.empty() ?
780 rgw::IAM::s3PutObjectTagging:
781 rgw::IAM::s3PutObjectVersionTagging))
782 return -EACCES;
783 return 0;
784 }
785
786 void RGWPutObjTags::execute()
787 {
788 op_ret = get_params();
789 if (op_ret < 0)
790 return;
791
792 if (s->object.empty()){
793 op_ret= -EINVAL; // we only support tagging on existing objects
794 return;
795 }
796
797 rgw_obj obj;
798 obj = rgw_obj(s->bucket, s->object);
799 store->set_atomic(s->obj_ctx, obj);
800 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
801 if (op_ret == -ECANCELED){
802 op_ret = -ERR_TAG_CONFLICT;
803 }
804 }
805
/* Standard pre-exec: 100-continue (if expected) + bucket header dump. */
void RGWDeleteObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
810
811
812 int RGWDeleteObjTags::verify_permission()
813 {
814 if (!s->object.empty()) {
815 if (!verify_object_permission(s,
816 s->object.instance.empty() ?
817 rgw::IAM::s3DeleteObjectTagging:
818 rgw::IAM::s3DeleteObjectVersionTagging))
819 return -EACCES;
820 }
821 return 0;
822 }
823
824 void RGWDeleteObjTags::execute()
825 {
826 if (s->object.empty())
827 return;
828
829 rgw_obj obj;
830 obj = rgw_obj(s->bucket, s->object);
831 store->set_atomic(s->obj_ctx, obj);
832 map <string, bufferlist> attrs;
833 map <string, bufferlist> rmattr;
834 bufferlist bl;
835 rmattr[RGW_ATTR_TAGS] = bl;
836 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr);
837 }
838
839 int RGWOp::do_aws4_auth_completion()
840 {
841 ldout(s->cct, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
842 if (s->auth.completer) {
843 if (!s->auth.completer->complete()) {
844 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
845 } else {
846 dout(10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
847 }
848
849 /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
850 * call passes, so we disable second one. This is old behaviour, sorry!
851 * Plan for tomorrow: seek and destroy. */
852 s->auth.completer = nullptr;
853 }
854
855 return 0;
856 }
857
858 int RGWOp::init_quota()
859 {
860 /* no quota enforcement for system requests */
861 if (s->system_request)
862 return 0;
863
864 /* init quota related stuff */
865 if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
866 return 0;
867 }
868
869 /* only interested in object related ops */
870 if (s->object.empty()) {
871 return 0;
872 }
873
874 RGWUserInfo owner_info;
875 RGWUserInfo *uinfo;
876
877 if (s->user->user_id == s->bucket_owner.get_id()) {
878 uinfo = s->user;
879 } else {
880 int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
881 if (r < 0)
882 return r;
883 uinfo = &owner_info;
884 }
885
886 if (s->bucket_info.quota.enabled) {
887 bucket_quota = s->bucket_info.quota;
888 } else if (uinfo->bucket_quota.enabled) {
889 bucket_quota = uinfo->bucket_quota;
890 } else {
891 bucket_quota = store->get_bucket_quota();
892 }
893
894 if (uinfo->user_quota.enabled) {
895 user_quota = uinfo->user_quota;
896 } else {
897 user_quota = store->get_user_quota();
898 }
899
900 return 0;
901 }
902
903 static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
904 uint8_t flags = 0;
905
906 if (!req_meth) {
907 dout(5) << "req_meth is null" << dendl;
908 return false;
909 }
910
911 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
912 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
913 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
914 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
915 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
916
917 if (rule->get_allowed_methods() & flags) {
918 dout(10) << "Method " << req_meth << " is supported" << dendl;
919 } else {
920 dout(5) << "Method " << req_meth << " is not supported" << dendl;
921 return false;
922 }
923
924 return true;
925 }
926
927 static bool validate_cors_rule_header(RGWCORSRule *rule, const char *req_hdrs) {
928 if (req_hdrs) {
929 vector<string> hdrs;
930 get_str_vec(req_hdrs, hdrs);
931 for (const auto& hdr : hdrs) {
932 if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) {
933 dout(5) << "Header " << hdr << " is not registered in this rule" << dendl;
934 return false;
935 }
936 }
937 }
938 return true;
939 }
940
941 int RGWOp::read_bucket_cors()
942 {
943 bufferlist bl;
944
945 map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
946 if (aiter == s->bucket_attrs.end()) {
947 ldout(s->cct, 20) << "no CORS configuration attr found" << dendl;
948 cors_exist = false;
949 return 0; /* no CORS configuration found */
950 }
951
952 cors_exist = true;
953
954 bl = aiter->second;
955
956 bufferlist::iterator iter = bl.begin();
957 try {
958 bucket_cors.decode(iter);
959 } catch (buffer::error& err) {
960 ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
961 return -EIO;
962 }
963 if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
964 RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
965 ldout(s->cct, 15) << "Read RGWCORSConfiguration";
966 s3cors->to_xml(*_dout);
967 *_dout << dendl;
968 }
969 return 0;
970 }
971
972 /** CORS 6.2.6.
973 * If any of the header field-names is not a ASCII case-insensitive match for
974 * any of the values in list of headers do not set any additional headers and
975 * terminate this set of steps.
976 * */
977 static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
978 if (req_hdrs) {
979 list<string> hl;
980 get_str_list(req_hdrs, hl);
981 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
982 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
983 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
984 } else {
985 if (hdrs.length() > 0) hdrs.append(",");
986 hdrs.append((*it));
987 }
988 }
989 }
990 rule->format_exp_headers(exp_hdrs);
991 *max_age = rule->get_max_age();
992 }
993
/**
 * Generate the CORS header response
 *
 * This is described in the CORS standard, section 6.2.
 *
 * Returns true — and fills origin/method/headers/exp_headers/max_age —
 * only when the request carries an Origin matching a configured CORS
 * rule and the (requested or actual) method is allowed by that rule.
 */
bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
{
  /* CORS 6.2.1. */
  const char *orig = s->info.env->get("HTTP_ORIGIN");
  if (!orig) {
    return false;
  }

  /* Custom: */
  origin = orig;
  op_ret = read_bucket_cors();
  if (op_ret < 0) {
    return false;
  }

  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    return false;
  }

  /* CORS 6.2.2. */
  RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
  if (!rule)
    return false;

  /*
   * Set the Allowed-Origin header to a asterisk if this is allowed in the rule
   * and no Authorization was send by the client
   *
   * The origin parameter specifies a URI that may access the resource.  The browser must enforce this.
   * For requests without credentials, the server may specify "*" as a wildcard,
   * thereby allowing any origin to access the resource.
   */
  const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
  if (!authorization && rule->has_wildcard_origin())
    origin = "*";

  /* CORS 6.2.3. */
  const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    /* not a preflight: validate the actual request method instead */
    req_meth = s->info.method;
  }

  if (req_meth) {
    method = req_meth;
    /* CORS 6.2.5. */
    if (!validate_cors_rule_method(rule, req_meth)) {
      return false;
    }
  }

  /* CORS 6.2.4. */
  const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");

  /* CORS 6.2.6. */
  get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);

  return true;
}
1058
1059 int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
1060 const rgw_bucket_dir_entry& ent,
1061 RGWAccessControlPolicy * const bucket_acl,
1062 const optional<Policy>& bucket_policy,
1063 const off_t start_ofs,
1064 const off_t end_ofs)
1065 {
1066 ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl;
1067 RGWGetObj_CB cb(this);
1068 RGWGetDataCB* filter = &cb;
1069 boost::optional<RGWGetObj_Decompress> decompress;
1070
1071 int64_t cur_ofs = start_ofs;
1072 int64_t cur_end = end_ofs;
1073
1074 rgw_obj part(bucket, ent.key);
1075
1076 map<string, bufferlist> attrs;
1077
1078 uint64_t obj_size;
1079 RGWObjectCtx obj_ctx(store);
1080 RGWAccessControlPolicy obj_policy(s->cct);
1081
1082 ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl;
1083
1084 obj_ctx.obj.set_atomic(part);
1085 store->set_prefetch_data(&obj_ctx, part);
1086
1087 RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
1088 RGWRados::Object::Read read_op(&op_target);
1089
1090 read_op.conds.if_match = ent.meta.etag.c_str();
1091 read_op.params.attrs = &attrs;
1092 read_op.params.obj_size = &obj_size;
1093
1094 op_ret = read_op.prepare();
1095 if (op_ret < 0)
1096 return op_ret;
1097 op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
1098 if (op_ret < 0)
1099 return op_ret;
1100 bool need_decompress;
1101 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
1102 if (op_ret < 0) {
1103 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
1104 return -EIO;
1105 }
1106
1107 if (need_decompress)
1108 {
1109 if (cs_info.orig_size != ent.meta.accounted_size) {
1110 // hmm.. something wrong, object not as expected, abort!
1111 ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size <<
1112 ", actual read size=" << ent.meta.size << dendl;
1113 return -EIO;
1114 }
1115 decompress.emplace(s->cct, &cs_info, partial_content, filter);
1116 filter = &*decompress;
1117 }
1118 else
1119 {
1120 if (obj_size != ent.meta.size) {
1121 // hmm.. something wrong, object not as expected, abort!
1122 ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl;
1123 return -EIO;
1124 }
1125 }
1126
1127 op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
1128 if (op_ret < 0)
1129 return op_ret;
1130
1131 /* We can use global user_acl because LOs cannot have segments
1132 * stored inside different accounts. */
1133 if (s->system_request) {
1134 ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl;
1135 } else if (s->auth.identity->is_admin_of(s->user->user_id)) {
1136 ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl;
1137 } else if (!verify_object_permission(s, part, s->user_acl.get(), bucket_acl,
1138 &obj_policy, bucket_policy, action)) {
1139 return -EPERM;
1140 }
1141
1142 if (ent.meta.size == 0) {
1143 return 0;
1144 }
1145
1146 perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
1147 filter->fixup_range(cur_ofs, cur_end);
1148 op_ret = read_op.iterate(cur_ofs, cur_end, filter);
1149 if (op_ret >= 0)
1150 op_ret = filter->flush();
1151 return op_ret;
1152 }
1153
/* Walk all objects under obj_prefix (the parts of a Swift DLO) in listing
 * order, and for the parts overlapping the byte range [ofs, end]:
 *  - accumulate the range's length into *ptotal_len (if non-null),
 *  - accumulate the full logical size into *pobj_size (if non-null),
 *  - compute the Swift-style md5-of-etags into *pobj_sum (if non-null),
 *  - invoke cb with per-part [start_ofs, end_ofs) (if non-null).
 * Returns 0 on success or the first negative error from listing/cb. */
static int iterate_user_manifest_parts(CephContext * const cct,
                                       RGWRados * const store,
                                       const off_t ofs,
                                       const off_t end,
                                       RGWBucketInfo *pbucket_info,
                                       const string& obj_prefix,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       uint64_t * const ptotal_len,
                                       uint64_t * const pobj_size,
                                       string * const pobj_sum,
                                       int (*cb)(rgw_bucket& bucket,
                                                 const rgw_bucket_dir_entry& ent,
                                                 RGWAccessControlPolicy * const bucket_acl,
                                                 const optional<Policy>& bucket_policy,
                                                 off_t start_ofs,
                                                 off_t end_ofs,
                                                 void *param),
                                       void * const cb_param)
{
  rgw_bucket& bucket = pbucket_info->bucket;
  uint64_t obj_ofs = 0, len_count = 0;
  bool found_start = false, found_end = false, handled_end = false;
  string delim;
  bool is_truncated;
  vector<rgw_bucket_dir_entry> objs;

  utime_t start_time = ceph_clock_now();

  RGWRados::Bucket target(store, *pbucket_info);
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = obj_prefix;
  list_op.params.delim = delim;

  MD5 etag_sum;
  do {
#define MAX_LIST_OBJS 100
    int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
    if (r < 0) {
      return r;
    }

    for (rgw_bucket_dir_entry& ent : objs) {
      /* obj_ofs is the running offset of this part within the logical
       * DLO; cur_total_len is that offset before adding this part. */
      const uint64_t cur_total_len = obj_ofs;
      const uint64_t obj_size = ent.meta.accounted_size;
      uint64_t start_ofs = 0, end_ofs = obj_size;

      /* First part overlapping the requested range: trim its head. */
      if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
        start_ofs = ofs - obj_ofs;
        found_start = true;
      }

      obj_ofs += obj_size;
      if (pobj_sum) {
        /* Swift DLO etag = md5 over the concatenated part etags. */
        etag_sum.Update((const byte *)ent.meta.etag.c_str(),
                        ent.meta.etag.length());
      }

      /* Last part overlapping the range: trim its tail (end inclusive). */
      if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
        end_ofs = end - cur_total_len + 1;
        found_end = true;
      }

      perfcounter->tinc(l_rgw_get_lat,
                        (ceph_clock_now() - start_time));

      /* handled_end lags found_end by one part, so the part on which
       * the range ends is still processed before we stop counting. */
      if (found_start && !handled_end) {
        len_count += end_ofs - start_ofs;

        if (cb) {
          r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, cb_param);
          if (r < 0) {
            return r;
          }
        }
      }

      handled_end = found_end;
      start_time = ceph_clock_now();
    }
  } while (is_truncated);

  if (ptotal_len) {
    *ptotal_len = len_count;
  }
  if (pobj_size) {
    *pobj_size = obj_ofs;
  }
  if (pobj_sum) {
    complete_etag(etag_sum, pobj_sum);
  }

  return 0;
}
1249
/* One entry of an expanded SLO manifest: where the part lives, its size,
 * and the (non-owning) access-control context of its bucket. */
struct rgw_slo_part {
  RGWAccessControlPolicy *bucket_acl = nullptr; // borrowed, not owned
  Policy* bucket_policy = nullptr;              // borrowed, may be null
  rgw_bucket bucket;
  string obj_name;
  uint64_t size = 0;
  string etag;  // etag recorded in the manifest for this part
};
1258
/* Walk the SLO parts overlapping the byte range [ofs, end] and invoke cb
 * for each, translating the global range into per-part offsets.
 * slo_parts is keyed by each part's starting offset within the whole
 * logical object. Returns 0 or the first negative error from cb. */
static int iterate_slo_parts(CephContext *cct,
                             RGWRados *store,
                             off_t ofs,
                             off_t end,
                             map<uint64_t, rgw_slo_part>& slo_parts,
                             int (*cb)(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy *bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       off_t start_ofs,
                                       off_t end_ofs,
                                       void *param),
                             void *cb_param)
{
  bool found_start = false, found_end = false;

  if (slo_parts.empty()) {
    return 0;
  }

  utime_t start_time = ceph_clock_now();

  /* Start from the last part whose starting offset is <= ofs. */
  map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
  if (iter != slo_parts.begin()) {
    --iter;
  }

  uint64_t obj_ofs = iter->first;

  for (; iter != slo_parts.end() && !found_end; ++iter) {
    rgw_slo_part& part = iter->second;
    rgw_bucket_dir_entry ent;

    /* Synthesize a dir entry so the same callback signature as the DLO
     * path can be reused. */
    ent.key.name = part.obj_name;
    ent.meta.accounted_size = ent.meta.size = part.size;
    ent.meta.etag = part.etag;

    uint64_t cur_total_len = obj_ofs;
    uint64_t start_ofs = 0, end_ofs = ent.meta.size;

    /* First part overlapping the range: trim its head. */
    if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
      start_ofs = ofs - obj_ofs;
      found_start = true;
    }

    obj_ofs += ent.meta.size;

    /* Last part overlapping the range: trim its tail (end inclusive). */
    if (!found_end && obj_ofs > (uint64_t)end) {
      end_ofs = end - cur_total_len + 1;
      found_end = true;
    }

    perfcounter->tinc(l_rgw_get_lat,
                      (ceph_clock_now() - start_time));

    if (found_start) {
      if (cb) {
        // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
        int r = cb(part.bucket, ent, part.bucket_acl,
                   (part.bucket_policy ?
                    optional<Policy>(*part.bucket_policy) : none),
                   start_ofs, end_ofs, cb_param);
        if (r < 0)
          return r;
      }
    }

    start_time = ceph_clock_now();
  }

  return 0;
}
1331
1332 static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
1333 const rgw_bucket_dir_entry& ent,
1334 RGWAccessControlPolicy * const bucket_acl,
1335 const optional<Policy>& bucket_policy,
1336 const off_t start_ofs,
1337 const off_t end_ofs,
1338 void * const param)
1339 {
1340 RGWGetObj *op = static_cast<RGWGetObj *>(param);
1341 return op->read_user_manifest_part(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs);
1342 }
1343
/* Serve a Swift DLO: prefix is "<bucket>/<object-prefix>"; the response
 * body is the concatenation of every object matching that prefix, in
 * listing order.  Returns 0 on success, negative errno on failure. */
int RGWGetObj::handle_user_manifest(const char *prefix)
{
  const boost::string_view prefix_view(prefix);
  ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix="
                   << prefix_view << dendl;

  const size_t pos = prefix_view.find('/');
  if (pos == string::npos) {
    return -EINVAL;
  }

  const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
  const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));

  rgw_bucket bucket;

  RGWAccessControlPolicy _bucket_acl(s->cct);
  RGWAccessControlPolicy *bucket_acl;
  optional<Policy> _bucket_policy;
  optional<Policy>* bucket_policy;
  RGWBucketInfo bucket_info;
  RGWBucketInfo *pbucket_info;

  if (bucket_name.compare(s->bucket.name) != 0) {
    /* Parts live in a different bucket: load its info, ACL and policy. */
    map<string, bufferlist> bucket_attrs;
    RGWObjectCtx obj_ctx(store);
    int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   bucket_name, bucket_info, NULL,
                                   &bucket_attrs);
    if (r < 0) {
      ldout(s->cct, 0) << "could not get bucket info for bucket="
                       << bucket_name << dendl;
      return r;
    }
    bucket = bucket_info.bucket;
    pbucket_info = &bucket_info;
    bucket_acl = &_bucket_acl;
    r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
    if (r < 0) {
      ldout(s->cct, 0) << "failed to read bucket policy" << dendl;
      return r;
    }
    _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
                                              bucket_info.bucket.tenant);
    bucket_policy = &_bucket_policy;
  } else {
    /* Same bucket as the manifest head object: reuse loaded state. */
    bucket = s->bucket;
    pbucket_info = &s->bucket_info;
    bucket_acl = s->bucket_acl.get();
    bucket_policy = &s->iam_policy;
  }

  /* dry run to find out:
   * - total length (of the parts we are going to send to client),
   * - overall DLO's content size,
   * - md5 sum of overall DLO's content (for etag of Swift API). */
  int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, &s->obj_size, &lo_etag,
        nullptr /* cb */, nullptr /* cb arg */);
  if (r < 0) {
    return r;
  }

  /* Clamp the requested range against the now-known full size. */
  r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
  if (r < 0) {
    return r;
  }

  /* Second pass: compute the length of just the ranged portion. */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        &total_len, nullptr, nullptr,
        nullptr, nullptr);
  if (r < 0) {
    return r;
  }

  if (!get_data) {
    /* HEAD request: headers only. */
    bufferlist bl;
    send_response_data(bl, 0, 0);
    return 0;
  }

  /* Third pass: actually stream the parts to the client. */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, nullptr, nullptr,
        get_obj_user_manifest_iterate_cb, (void *)this);
  if (r < 0) {
    return r;
  }

  /* An empty part set still needs a (zero-length) response. */
  if (!total_len) {
    bufferlist bl;
    send_response_data(bl, 0, 0);
  }

  return 0;
}
1442
1443 int RGWGetObj::handle_slo_manifest(bufferlist& bl)
1444 {
1445 RGWSLOInfo slo_info;
1446 bufferlist::iterator bliter = bl.begin();
1447 try {
1448 ::decode(slo_info, bliter);
1449 } catch (buffer::error& err) {
1450 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
1451 return -EIO;
1452 }
1453 ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
1454
1455 vector<RGWAccessControlPolicy> allocated_acls;
1456 map<string, pair<RGWAccessControlPolicy *, optional<Policy>>> policies;
1457 map<string, rgw_bucket> buckets;
1458
1459 map<uint64_t, rgw_slo_part> slo_parts;
1460
1461 MD5 etag_sum;
1462 total_len = 0;
1463
1464 for (const auto& entry : slo_info.entries) {
1465 const string& path = entry.path;
1466
1467 /* If the path starts with slashes, strip them all. */
1468 const size_t pos_init = path.find_first_not_of('/');
1469 /* According to the documentation of std::string::find following check
1470 * is not necessary as we should get the std::string::npos propagation
1471 * here. This might be true with the accuracy to implementation's bugs.
1472 * See following question on SO:
1473 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
1474 */
1475 if (pos_init == string::npos) {
1476 return -EINVAL;
1477 }
1478
1479 const size_t pos_sep = path.find('/', pos_init);
1480 if (pos_sep == string::npos) {
1481 return -EINVAL;
1482 }
1483
1484 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
1485 string obj_name = path.substr(pos_sep + 1);
1486
1487 rgw_bucket bucket;
1488 RGWAccessControlPolicy *bucket_acl;
1489 Policy* bucket_policy;
1490
1491 if (bucket_name.compare(s->bucket.name) != 0) {
1492 const auto& piter = policies.find(bucket_name);
1493 if (piter != policies.end()) {
1494 bucket_acl = piter->second.first;
1495 bucket_policy = piter->second.second.get_ptr();
1496 bucket = buckets[bucket_name];
1497 } else {
1498 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
1499 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
1500
1501 RGWBucketInfo bucket_info;
1502 map<string, bufferlist> bucket_attrs;
1503 RGWObjectCtx obj_ctx(store);
1504 int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
1505 bucket_name, bucket_info, nullptr,
1506 &bucket_attrs);
1507 if (r < 0) {
1508 ldout(s->cct, 0) << "could not get bucket info for bucket="
1509 << bucket_name << dendl;
1510 return r;
1511 }
1512 bucket = bucket_info.bucket;
1513 bucket_acl = &_bucket_acl;
1514 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
1515 bucket);
1516 if (r < 0) {
1517 ldout(s->cct, 0) << "failed to read bucket ACL for bucket "
1518 << bucket << dendl;
1519 return r;
1520 }
1521 auto _bucket_policy = get_iam_policy_from_attr(
1522 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
1523 bucket_policy = _bucket_policy.get_ptr();
1524 buckets[bucket_name] = bucket;
1525 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
1526 }
1527 } else {
1528 bucket = s->bucket;
1529 bucket_acl = s->bucket_acl.get();
1530 bucket_policy = s->iam_policy.get_ptr();
1531 }
1532
1533 rgw_slo_part part;
1534 part.bucket_acl = bucket_acl;
1535 part.bucket_policy = bucket_policy;
1536 part.bucket = bucket;
1537 part.obj_name = obj_name;
1538 part.size = entry.size_bytes;
1539 part.etag = entry.etag;
1540 ldout(s->cct, 20) << "slo_part: ofs=" << ofs
1541 << " bucket=" << part.bucket
1542 << " obj=" << part.obj_name
1543 << " size=" << part.size
1544 << " etag=" << part.etag
1545 << dendl;
1546
1547 etag_sum.Update((const byte *)entry.etag.c_str(),
1548 entry.etag.length());
1549
1550 slo_parts[total_len] = part;
1551 total_len += part.size;
1552 }
1553
1554 complete_etag(etag_sum, &lo_etag);
1555
1556 s->obj_size = slo_info.total_size;
1557 ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl;
1558
1559 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
1560 if (r < 0) {
1561 return r;
1562 }
1563
1564 total_len = end - ofs + 1;
1565
1566 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
1567 get_obj_user_manifest_iterate_cb, (void *)this);
1568 if (r < 0) {
1569 return r;
1570 }
1571
1572 return 0;
1573 }
1574
1575 int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
1576 {
1577 /* garbage collection related handling */
1578 utime_t start_time = ceph_clock_now();
1579 if (start_time > gc_invalidate_time) {
1580 int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
1581 if (r < 0) {
1582 dout(0) << "WARNING: could not defer gc entry for obj" << dendl;
1583 }
1584 gc_invalidate_time = start_time;
1585 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1586 }
1587 return send_response_data(bl, bl_ofs, bl_len);
1588 }
1589
1590 bool RGWGetObj::prefetch_data()
1591 {
1592 /* HEAD request, stop prefetch*/
1593 if (!get_data) {
1594 return false;
1595 }
1596
1597 bool prefetch_first_chunk = true;
1598 range_str = s->info.env->get("HTTP_RANGE");
1599
1600 if (range_str) {
1601 int r = parse_range();
1602 /* error on parsing the range, stop prefetch and will fail in execute() */
1603 if (r < 0) {
1604 return false; /* range_parsed==false */
1605 }
1606 /* range get goes to shadow objects, stop prefetch */
1607 if (ofs >= s->cct->_conf->rgw_max_chunk_size) {
1608 prefetch_first_chunk = false;
1609 }
1610 }
1611
1612 return get_data && prefetch_first_chunk;
1613 }
1614
void RGWGetObj::pre_exec()
{
  /* Shared pre-dispatch bookkeeping for bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
1619
1620 static bool object_is_expired(map<string, bufferlist>& attrs) {
1621 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
1622 if (iter != attrs.end()) {
1623 utime_t delete_at;
1624 try {
1625 ::decode(delete_at, iter->second);
1626 } catch (buffer::error& err) {
1627 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
1628 return false;
1629 }
1630
1631 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
1632 return true;
1633 }
1634 }
1635
1636 return false;
1637 }
1638
/* Serve a GET/HEAD object request: prepare the conditional read, then
 * dispatch to torrent handling, DLO/SLO manifest assembly, or a plain
 * (optionally decompressed/decrypted) ranged read.  Errors funnel
 * through done_err so an error response is always emitted. */
void RGWGetObj::execute()
{
  utime_t start_time = s->time;
  bufferlist bl;
  gc_invalidate_time = ceph_clock_now();
  gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);

  bool need_decompress;
  int64_t ofs_x, end_x;

  RGWGetObj_CB cb(this);
  RGWGetDataCB* filter = (RGWGetDataCB*)&cb;
  boost::optional<RGWGetObj_Decompress> decompress;
  std::unique_ptr<RGWGetDataCB> decrypt;
  map<string, bufferlist>::iterator attr_iter;

  perfcounter->inc(l_rgw_get);

  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
  RGWRados::Object::Read read_op(&op_target);

  op_ret = get_params();
  if (op_ret < 0)
    goto done_err;

  op_ret = init_common();
  if (op_ret < 0)
    goto done_err;

  /* Conditional-GET preconditions and output slots for prepare(). */
  read_op.conds.mod_ptr = mod_ptr;
  read_op.conds.unmod_ptr = unmod_ptr;
  read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
  read_op.conds.mod_zone_id = mod_zone_id;
  read_op.conds.mod_pg_ver = mod_pg_ver;
  read_op.conds.if_match = if_match;
  read_op.conds.if_nomatch = if_nomatch;
  read_op.params.attrs = &attrs;
  read_op.params.lastmod = &lastmod;
  read_op.params.obj_size = &s->obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    goto done_err;
  version_id = read_op.state.obj.key.instance;

  /* STAT ops don't need data, and do no i/o */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  /* start gettorrent */
  if (torrent.get_flag())
  {
    /* Torrents hand object data out-of-band, which cannot work for
     * SSE-C objects (the server can't decrypt them), so reject those. */
    attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
    if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
      ldout(s->cct, 0) << "ERROR: torrents are not supported for objects "
          "encrypted with SSE-C" << dendl;
      op_ret = -EINVAL;
      goto done_err;
    }
    torrent.init(s, store);
    op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    op_ret = send_response_data(bl, 0, total_len);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }
  /* end gettorrent */

  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    goto done_err;
  }
  if (need_decompress) {
    /* Report the uncompressed size to the client; decompress inline. */
    s->obj_size = cs_info.orig_size;
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }

  /* DLO: the head object only carries a manifest attr; the payload is
   * assembled from the parts it points at. */
  attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    op_ret = handle_user_manifest(attr_iter->second.c_str());
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret="
                       << op_ret << dendl;
      goto done_err;
    }
    return;
  }

  attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    is_slo = true;
    op_ret = handle_slo_manifest(attr_iter->second);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }

  // for range requests with obj size 0
  if (range_str && !(s->obj_size)) {
    total_len = 0;
    op_ret = -ERANGE;
    goto done_err;
  }

  op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
  if (op_ret < 0)
    goto done_err;
  total_len = (ofs <= end ? end + 1 - ofs : 0);

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(attrs)) {
    op_ret = -ENOENT;
    goto done_err;
  }

  start = ofs;

  /* STAT ops don't need data, and do no i/o */
  /* NOTE(review): this second STAT check appears unreachable — STAT ops
   * already returned right after prepare() above; confirm before
   * removing. */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  attr_iter = attrs.find(RGW_ATTR_MANIFEST);
  op_ret = this->get_decrypt_filter(&decrypt, filter,
                                    attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
  if (decrypt != nullptr) {
    filter = decrypt.get();
  }
  if (op_ret < 0) {
    goto done_err;
  }

  if (!get_data || ofs > end) {
    /* HEAD, or an empty effective range: headers only. */
    send_response_data(bl, 0, 0);
    return;
  }

  perfcounter->inc(l_rgw_get_b, end - ofs);

  /* Let the filter chain translate the logical range into the physical
   * (e.g. compressed) range before reading. */
  ofs_x = ofs;
  end_x = end;
  filter->fixup_range(ofs_x, end_x);
  op_ret = read_op.iterate(ofs_x, end_x, filter);

  if (op_ret >= 0)
    op_ret = filter->flush();

  perfcounter->tinc(l_rgw_get_lat,
                    (ceph_clock_now() - start_time));
  if (op_ret < 0) {
    goto done_err;
  }

  /* Final zero-length call signals end of the response body. */
  op_ret = send_response_data(bl, 0, 0);
  if (op_ret < 0) {
    goto done_err;
  }
  return;

done_err:
  send_response_data_error();
}
1818
1819 int RGWGetObj::init_common()
1820 {
1821 if (range_str) {
1822 /* range parsed error when prefetch */
1823 if (!range_parsed) {
1824 int r = parse_range();
1825 if (r < 0)
1826 return r;
1827 }
1828 }
1829 if (if_mod) {
1830 if (parse_time(if_mod, &mod_time) < 0)
1831 return -EINVAL;
1832 mod_ptr = &mod_time;
1833 }
1834
1835 if (if_unmod) {
1836 if (parse_time(if_unmod, &unmod_time) < 0)
1837 return -EINVAL;
1838 unmod_ptr = &unmod_time;
1839 }
1840
1841 return 0;
1842 }
1843
1844 int RGWListBuckets::verify_permission()
1845 {
1846 if (!verify_user_permission(s, RGW_PERM_READ)) {
1847 return -EACCES;
1848 }
1849
1850 return 0;
1851 }
1852
1853 int RGWGetUsage::verify_permission()
1854 {
1855 if (s->auth.identity->is_anonymous()) {
1856 return -EACCES;
1857 }
1858
1859 return 0;
1860 }
1861
/* List the authenticated user's buckets in chunks, streaming each chunk
 * to the client and aggregating global and per-placement-policy stats. */
void RGWListBuckets::execute()
{
  bool done;
  bool started = false;
  uint64_t total_count = 0;

  const uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  op_ret = get_params();
  if (op_ret < 0) {
    goto send_end;
  }

  if (supports_account_metadata()) {
    op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
    if (op_ret < 0) {
      goto send_end;
    }
  }

  is_truncated = false;
  do {
    RGWUserBuckets buckets;
    uint64_t read_count;
    if (limit >= 0) {
      /* Never read more than the caller's remaining limit. */
      read_count = min(limit - total_count, (uint64_t)max_buckets);
    } else {
      read_count = max_buckets;
    }

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, end_marker, read_count,
                                   should_get_stats(), &is_truncated,
                                   get_default_max());
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
                        << s->user->user_id << dendl;
      break;
    }

    /* We need to have stats for all our policies - even if a given policy
     * isn't actually used in a given account. In such situation its usage
     * stats would be simply full of zeros. */
    for (const auto& policy : store->get_zonegroup().placement_targets) {
      policies_stats.emplace(policy.second.name,
                             decltype(policies_stats)::mapped_type());
    }

    std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
    for (const auto& kv : m) {
      const auto& bucket = kv.second;

      global_stats.bytes_used += bucket.size;
      global_stats.bytes_used_rounded += bucket.size_rounded;
      global_stats.objects_count += bucket.count;

      /* operator[] still can create a new entry for storage policy seen
       * for first time. */
      auto& policy_stats = policies_stats[bucket.placement_rule];
      policy_stats.bytes_used += bucket.size;
      policy_stats.bytes_used_rounded += bucket.size_rounded;
      policy_stats.buckets_count++;
      policy_stats.objects_count += bucket.count;
    }
    global_stats.buckets_count += m.size();
    total_count += m.size();

    /* Done on a short chunk or once the caller's limit is reached. */
    done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));

    if (!started) {
      send_response_begin(buckets.count() > 0);
      started = true;
    }

    if (!m.empty()) {
      /* Advance the pagination marker to the last bucket seen. */
      map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
      marker = riter->first;

      handle_listing_chunk(std::move(buckets));
    }
  } while (is_truncated && !done);

send_end:
  if (!started) {
    send_response_begin(false);
  }
  send_response_end();
}
1952
1953 void RGWGetUsage::execute()
1954 {
1955 uint64_t start_epoch = 0;
1956 uint64_t end_epoch = (uint64_t)-1;
1957 op_ret = get_params();
1958 if (op_ret < 0)
1959 return;
1960
1961 if (!start_date.empty()) {
1962 op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
1963 if (op_ret < 0) {
1964 ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl;
1965 return;
1966 }
1967 }
1968
1969 if (!end_date.empty()) {
1970 op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
1971 if (op_ret < 0) {
1972 ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl;
1973 return;
1974 }
1975 }
1976
1977 uint32_t max_entries = 1000;
1978
1979 bool is_truncated = true;
1980
1981 RGWUsageIter usage_iter;
1982
1983 while (is_truncated) {
1984 op_ret = store->read_usage(s->user->user_id, start_epoch, end_epoch, max_entries,
1985 &is_truncated, usage_iter, usage);
1986
1987 if (op_ret == -ENOENT) {
1988 op_ret = 0;
1989 is_truncated = false;
1990 }
1991
1992 if (op_ret < 0) {
1993 return;
1994 }
1995 }
1996
1997 op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
1998 if (op_ret < 0) {
1999 ldout(store->ctx(), 0) << "ERROR: failed to sync user stats: " << dendl;
2000 return;
2001 }
2002
2003 op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage);
2004 if (op_ret < 0) {
2005 cerr << "ERROR: failed to sync user stats: " << std::endl;
2006 return ;
2007 }
2008
2009 string user_str = s->user->user_id.to_str();
2010 op_ret = store->cls_user_get_header(user_str, &header);
2011 if (op_ret < 0) {
2012 ldout(store->ctx(), 0) << "ERROR: can't read user header: " << dendl;
2013 return;
2014 }
2015
2016 return;
2017 }
2018
2019 int RGWStatAccount::verify_permission()
2020 {
2021 if (!verify_user_permission(s, RGW_PERM_READ)) {
2022 return -EACCES;
2023 }
2024
2025 return 0;
2026 }
2027
2028 void RGWStatAccount::execute()
2029 {
2030 string marker;
2031 bool is_truncated = false;
2032 uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;
2033
2034 do {
2035 RGWUserBuckets buckets;
2036
2037 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker,
2038 string(), max_buckets, true, &is_truncated);
2039 if (op_ret < 0) {
2040 /* hmm.. something wrong here.. the user was authenticated, so it
2041 should exist */
2042 ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
2043 << s->user->user_id << dendl;
2044 break;
2045 } else {
2046 /* We need to have stats for all our policies - even if a given policy
2047 * isn't actually used in a given account. In such situation its usage
2048 * stats would be simply full of zeros. */
2049 for (const auto& policy : store->get_zonegroup().placement_targets) {
2050 policies_stats.emplace(policy.second.name,
2051 decltype(policies_stats)::mapped_type());
2052 }
2053
2054 std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
2055 for (const auto& kv : m) {
2056 const auto& bucket = kv.second;
2057
2058 global_stats.bytes_used += bucket.size;
2059 global_stats.bytes_used_rounded += bucket.size_rounded;
2060 global_stats.objects_count += bucket.count;
2061
2062 /* operator[] still can create a new entry for storage policy seen
2063 * for first time. */
2064 auto& policy_stats = policies_stats[bucket.placement_rule];
2065 policy_stats.bytes_used += bucket.size;
2066 policy_stats.bytes_used_rounded += bucket.size_rounded;
2067 policy_stats.buckets_count++;
2068 policy_stats.objects_count += bucket.count;
2069 }
2070 global_stats.buckets_count += m.size();
2071
2072 }
2073 } while (is_truncated);
2074 }
2075
int RGWGetBucketVersioning::verify_permission()
{
  /* Allowed for the bucket owner or via s3:GetBucketVersioning policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
}
2080
void RGWGetBucketVersioning::pre_exec()
{
  /* Shared pre-dispatch bookkeeping for bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
2085
void RGWGetBucketVersioning::execute()
{
  /* The state is already loaded with bucket_info; just expose the two
   * flags (ever-versioned vs. currently enabled) for the response. */
  versioned = s->bucket_info.versioned();
  versioning_enabled = s->bucket_info.versioning_enabled();
}
2091
int RGWSetBucketVersioning::verify_permission()
{
  /* Allowed for the bucket owner or via s3:PutBucketVersioning policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
}
2096
void RGWSetBucketVersioning::pre_exec()
{
  /* Shared pre-dispatch bookkeeping for bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
2101
2102 void RGWSetBucketVersioning::execute()
2103 {
2104 op_ret = get_params();
2105 if (op_ret < 0)
2106 return;
2107
2108 if (!store->is_meta_master()) {
2109 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2110 if (op_ret < 0) {
2111 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
2112 return;
2113 }
2114 }
2115
2116 op_ret = retry_raced_bucket_write(store, s, [this] {
2117 if (enable_versioning) {
2118 s->bucket_info.flags |= BUCKET_VERSIONED;
2119 s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
2120 } else {
2121 s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
2122 }
2123
2124 return store->put_bucket_instance_info(s->bucket_info, false, real_time(),
2125 &s->bucket_attrs);
2126 });
2127
2128 if (op_ret < 0) {
2129 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2130 << " returned err=" << op_ret << dendl;
2131 return;
2132 }
2133 }
2134
int RGWGetBucketWebsite::verify_permission()
{
  /* Allowed for the bucket owner or via s3:GetBucketWebsite policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
}
2139
void RGWGetBucketWebsite::pre_exec()
{
  /* Shared pre-dispatch bookkeeping for bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
2144
void RGWGetBucketWebsite::execute()
{
  /* The website conf is already part of the loaded bucket_info; only
   * flag the error when no configuration exists. */
  if (!s->bucket_info.has_website) {
    op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION;
  }
}
2151
int RGWSetBucketWebsite::verify_permission()
{
  /* Allowed for the bucket owner or via s3:PutBucketWebsite policy. */
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
}
2156
void RGWSetBucketWebsite::pre_exec()
{
  /* Shared pre-dispatch bookkeeping for bucket/object operations. */
  rgw_bucket_object_pre_exec(s);
}
2161
void RGWSetBucketWebsite::execute()
{
  /* Parse the website configuration out of the request. */
  op_ret = get_params();

  if (op_ret < 0)
    return;

  /* In a multisite setup only the metadata master mutates bucket metadata;
   * other zones forward the request and abort locally on failure. */
  if (!store->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  /* Persist the new configuration; retry_raced_bucket_write() re-runs the
   * lambda on racing writes to the bucket instance object. Note the lambda
   * also stores its result in op_ret, so op_ret stays meaningful on retry. */
  op_ret = retry_raced_bucket_write(store, s, [this] {
      s->bucket_info.has_website = true;
      s->bucket_info.website_conf = website_conf;
      op_ret = store->put_bucket_instance_info(s->bucket_info, false,
                                               real_time(), &s->bucket_attrs);
      return op_ret;
    });

  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }
}
2190
/* DELETE bucket website: permitted for the bucket owner, or when the bucket
 * policy explicitly grants s3:DeleteBucketWebsite. */
int RGWDeleteBucketWebsite::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
}
2195
void RGWDeleteBucketWebsite::pre_exec()
{
  /* Common pre-execution hook shared by bucket/object ops (debug logging). */
  rgw_bucket_object_pre_exec(s);
}
2200
void RGWDeleteBucketWebsite::execute()
{
  /* NOTE(review): unlike RGWSetBucketWebsite::execute(), this path does not
   * forward the request to the metadata master first — confirm whether the
   * omission is intentional in a multisite deployment. */

  /* Drop the website flag and reset the configuration to defaults,
   * retrying on racing writes to the bucket instance object. */
  op_ret = retry_raced_bucket_write(store, s, [this] {
      s->bucket_info.has_website = false;
      s->bucket_info.website_conf = RGWBucketWebsiteConf();
      op_ret = store->put_bucket_instance_info(s->bucket_info, false,
                                               real_time(), &s->bucket_attrs);
      return op_ret;
    });
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
    return;
  }
}
2215
2216 int RGWStatBucket::verify_permission()
2217 {
2218 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2219 if (!verify_bucket_permission(s, rgw::IAM::s3ListBucket)) {
2220 return -EACCES;
2221 }
2222
2223 return 0;
2224 }
2225
void RGWStatBucket::pre_exec()
{
  /* Common pre-execution hook shared by bucket/object ops (debug logging). */
  rgw_bucket_object_pre_exec(s);
}
2230
void RGWStatBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  /* Fetch usage stats for this single bucket through the multi-bucket
   * stats API: seed a one-entry map and let the store fill it in. */
  RGWUserBuckets buckets;
  bucket.bucket = s->bucket;
  buckets.add(bucket);
  map<string, RGWBucketEnt>& m = buckets.get_buckets();
  op_ret = store->update_containers_stats(m);
  /* A zero return means no bucket was found/updated.
   * NOTE(review): -EEXIST reads oddly as a "not found" error here —
   * confirm whether -ENOENT was intended. */
  if (! op_ret)
    op_ret = -EEXIST;
  if (op_ret > 0) {
    /* Positive return: stats were gathered; pull our entry back out. */
    op_ret = 0;
    map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
    if (iter != m.end()) {
      bucket = iter->second;
    } else {
      op_ret = -EINVAL;
    }
  }
}
2255
2256 int RGWListBucket::verify_permission()
2257 {
2258 op_ret = get_params();
2259 if (op_ret < 0) {
2260 return op_ret;
2261 }
2262 if (!prefix.empty())
2263 s->env.emplace("s3:prefix", prefix);
2264
2265 if (!delimiter.empty())
2266 s->env.emplace("s3:delimiter", delimiter);
2267
2268 s->env.emplace("s3:max-keys", std::to_string(max));
2269
2270 if (!verify_bucket_permission(s,
2271 list_versions ?
2272 rgw::IAM::s3ListBucketVersions :
2273 rgw::IAM::s3ListBucket)) {
2274 return -EACCES;
2275 }
2276
2277 return 0;
2278 }
2279
/* Parse the request's max-keys string into this->max, clamping it into
 * [0, rgw_max_listing_results] and falling back to default_max when absent. */
int RGWListBucket::parse_max_keys()
{
  // Bound max value of max-keys to configured value for security
  // Bound min value of max-keys to '0'
  // Some S3 clients explicitly send max-keys=0 to detect if the bucket is
  // empty without listing any items.
  return parse_value_and_bound(max_keys, max, 0,
                        s->cct->_conf->get_val<uint64_t>("rgw_max_listing_results"),
                        default_max);
}
2290
void RGWListBucket::pre_exec()
{
  /* Common pre-execution hook shared by bucket/object ops (debug logging). */
  rgw_bucket_object_pre_exec(s);
}
2295
void RGWListBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  /* Delimiter-based rollup requires an ordered listing; reject the
   * combination with allow_unordered. */
  if (allow_unordered && !delimiter.empty()) {
    ldout(s->cct, 0) <<
      "ERROR: unordered bucket listing requested with a delimiter" << dendl;
    op_ret = -EINVAL;
    return;
  }

  /* Swift-style container stats requested alongside the listing: gather
   * them via the one-entry-map stats API (best-effort; only a positive
   * return updates `bucket`). */
  if (need_container_stats()) {
    map<string, RGWBucketEnt> m;
    m[s->bucket.name] = RGWBucketEnt();
    m.begin()->second.bucket = s->bucket;
    op_ret = store->update_containers_stats(m);
    if (op_ret > 0) {
      bucket = m.begin()->second;
    }
  }

  RGWRados::Bucket target(store, s->bucket_info);
  if (shard_id >= 0) {
    /* Restrict the listing to a single index shard when requested. */
    target.set_shard_id(shard_id);
  }
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = prefix;
  list_op.params.delim = delimiter;
  list_op.params.marker = marker;
  list_op.params.end_marker = end_marker;
  list_op.params.list_versions = list_versions;
  list_op.params.allow_unordered = allow_unordered;

  op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
  if (op_ret >= 0) {
    /* Remember where to resume for a follow-up (truncated) listing. */
    next_marker = list_op.get_next_marker();
  }
}
2338
/* GET bucket logging: permitted for the bucket owner, or when the bucket
 * policy explicitly grants s3:GetBucketLogging. */
int RGWGetBucketLogging::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
}
2343
/* GET bucket location: permitted for the bucket owner, or when the bucket
 * policy explicitly grants s3:GetBucketLocation. */
int RGWGetBucketLocation::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
}
2348
int RGWCreateBucket::verify_permission()
{
  /* This check is mostly needed for S3 that doesn't support account ACL.
   * Swift doesn't allow to delegate any permission to an anonymous user,
   * so it will become an early exit in such case. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (!verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Cross-tenant bucket creation is rejected. */
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
                      << " (user_id.tenant=" << s->user->user_id.tenant
                      << " requested=" << s->bucket_tenant << ")"
                      << dendl;
    return -EACCES;
  }
  /* Negative max_buckets: refuse outright. */
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  /* Non-zero max_buckets: enforce the per-user bucket quota by counting
   * the user's existing buckets (zero is treated as "no limit"). */
  if (s->user->max_buckets) {
    RGWUserBuckets buckets;
    string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, string(), s->user->max_buckets,
                                   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if ((int)buckets.count() >= s->user->max_buckets) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
2391
/* Forward the current request (body in @in_data) to the metadata master
 * zone's endpoint. @objv, when non-null, is passed along to the master
 * (presumably to pin the object version — confirm against RGWRESTConn).
 * On success and when @jp is non-null, the master's response is parsed
 * into it as JSON. @forward_info, when given, replaces s->info as the
 * request description to forward. */
static int forward_request_to_master(struct req_state *s, obj_version *objv,
                                     RGWRados *store, bufferlist& in_data,
                                     JSONParser *jp, req_info *forward_info)
{
  if (!store->rest_master_conn) {
    ldout(s->cct, 0) << "rest connection is invalid" << dendl;
    return -EINVAL;
  }
  ldout(s->cct, 0) << "sending request to master zonegroup" << dendl;
  bufferlist response;
  string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
  int ret = store->rest_master_conn->forward(uid_str, (forward_info ? *forward_info : s->info),
                                             objv, MAX_REST_RESPONSE, &in_data, &response);
  if (ret < 0)
    return ret;

  ldout(s->cct, 20) << "response: " << response.c_str() << dendl;
  if (jp && !jp->parse(response.c_str(), response.length())) {
    ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl;
    return -EINVAL;
  }

  return 0;
}
2417
void RGWCreateBucket::pre_exec()
{
  /* Common pre-execution hook shared by bucket/object ops (debug logging). */
  rgw_bucket_object_pre_exec(s);
}
2422
2423 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2424 map<string, bufferlist>& out_attrs,
2425 map<string, bufferlist>& out_rmattrs)
2426 {
2427 for (const auto& kv : orig_attrs) {
2428 const string& name = kv.first;
2429
2430 /* Check if the attr is user-defined metadata item. */
2431 if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
2432 RGW_ATTR_META_PREFIX) == 0) {
2433 /* For the objects all existing meta attrs have to be removed. */
2434 out_rmattrs[name] = kv.second;
2435 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2436 out_attrs[name] = kv.second;
2437 }
2438 }
2439 }
2440
/* Fuse resource metadata based on original attributes in @orig_attrs, the set
 * of _custom_ attribute names to remove in @rmattr_names and attributes in
 * @out_attrs. Place results in @out_attrs.
 *
 * NOTE: it's supposed that all special attrs already present in @out_attrs
 * will be preserved without any change. Special attributes are those whose
 * names start with RGW_ATTR_META_PREFIX. They're complement to custom ones
 * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta and so on. */
static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
                                  const set<string>& rmattr_names,
                                  map<string, bufferlist>& out_attrs)
{
  for (const auto& kv : orig_attrs) {
    const string& name = kv.first;

    /* Check if the attr is user-defined metadata item. */
    if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
                     RGW_ATTR_META_PREFIX) == 0) {
      /* For the buckets all existing meta attrs are preserved,
         except those that are listed in rmattr_names. */
      if (rmattr_names.find(name) != std::end(rmattr_names)) {
        const auto aiter = out_attrs.find(name);

        if (aiter != std::end(out_attrs)) {
          out_attrs.erase(aiter);
        }
      } else {
        /* emplace() won't alter the map if the key is already present.
         * This behaviour is fully intentional here. */
        out_attrs.emplace(kv);
      }
    } else if (out_attrs.find(name) == std::end(out_attrs)) {
      out_attrs[name] = kv.second;
    }
  }
}
2477
2478
2479 static void populate_with_generic_attrs(const req_state * const s,
2480 map<string, bufferlist>& out_attrs)
2481 {
2482 for (const auto& kv : s->generic_attrs) {
2483 bufferlist& attrbl = out_attrs[kv.first];
2484 const string& val = kv.second;
2485 attrbl.clear();
2486 attrbl.append(val.c_str(), val.size() + 1);
2487 }
2488 }
2489
2490
2491 static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
2492 const std::set<std::string>& rmattr_names,
2493 RGWQuotaInfo& quota,
2494 bool * quota_extracted = nullptr)
2495 {
2496 bool extracted = false;
2497
2498 /* Put new limit on max objects. */
2499 auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
2500 std::string err;
2501 if (std::end(add_attrs) != iter) {
2502 quota.max_objects =
2503 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
2504 if (!err.empty()) {
2505 return -EINVAL;
2506 }
2507 add_attrs.erase(iter);
2508 extracted = true;
2509 }
2510
2511 /* Put new limit on bucket (container) size. */
2512 iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
2513 if (iter != add_attrs.end()) {
2514 quota.max_size =
2515 static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
2516 if (!err.empty()) {
2517 return -EINVAL;
2518 }
2519 add_attrs.erase(iter);
2520 extracted = true;
2521 }
2522
2523 for (const auto& name : rmattr_names) {
2524 /* Remove limit on max objects. */
2525 if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
2526 quota.max_objects = -1;
2527 extracted = true;
2528 }
2529
2530 /* Remove limit on max bucket size. */
2531 if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
2532 quota.max_size = -1;
2533 extracted = true;
2534 }
2535 }
2536
2537 /* Swift requries checking on raw usage instead of the 4 KiB rounded one. */
2538 quota.check_on_raw = true;
2539 quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
2540
2541 if (quota_extracted) {
2542 *quota_extracted = extracted;
2543 }
2544
2545 return 0;
2546 }
2547
2548
/* Extract Swift static-website attributes from @add_attrs into @ws_conf,
 * erasing them from the map; names listed in @rmattr_names reset the
 * corresponding field to an empty value. */
static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
                               const std::set<std::string>& rmattr_names,
                               RGWBucketWebsiteConf& ws_conf)
{
  std::string lstval;

  /* Let's define a mapping between each custom attribute and the memory where
   * attribute's value should be stored. The memory location is expressed by
   * a non-const reference. NOTE: std::make_pair unwraps std::reference_wrapper,
   * so each pair's second member is a plain std::string&. */
  const auto mapping = {
    std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
    std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
    std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
    std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
    std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
  };

  for (const auto& kv : mapping) {
    const char * const key = kv.first;
    auto& target = kv.second; /* std::string& into ws_conf (or lstval) */

    auto iter = add_attrs.find(key);

    if (std::end(add_attrs) != iter) {
      /* The "target" is a reference to ws_conf. */
      target = iter->second.c_str();
      add_attrs.erase(iter);
    }

    if (rmattr_names.count(key)) {
      target = std::string();
    }
  }

  /* The "listings enabled" boolean travels as a textual attribute value. */
  if (! lstval.empty()) {
    ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
  }
}
2587
2588
2589 void RGWCreateBucket::execute()
2590 {
2591 RGWAccessControlPolicy old_policy(s->cct);
2592 buffer::list aclbl;
2593 buffer::list corsbl;
2594 bool existed;
2595 string bucket_name;
2596 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
2597 rgw_raw_obj obj(store->get_zone_params().domain_root, bucket_name);
2598 obj_version objv, *pobjv = NULL;
2599
2600 op_ret = get_params();
2601 if (op_ret < 0)
2602 return;
2603
2604 if (!location_constraint.empty() &&
2605 !store->has_zonegroup_api(location_constraint)) {
2606 ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
2607 << " can't be found." << dendl;
2608 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2609 s->err.message = "The specified location-constraint is not valid";
2610 return;
2611 }
2612
2613 if (!store->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
2614 store->get_zonegroup().api_name != location_constraint) {
2615 ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
2616 << " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")"
2617 << dendl;
2618 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2619 s->err.message = "The specified location-constraint is not valid";
2620 return;
2621 }
2622
2623 const auto& zonegroup = store->get_zonegroup();
2624 if (!placement_rule.empty() &&
2625 !zonegroup.placement_targets.count(placement_rule)) {
2626 ldout(s->cct, 0) << "placement target (" << placement_rule << ")"
2627 << " doesn't exist in the placement targets of zonegroup"
2628 << " (" << store->get_zonegroup().api_name << ")" << dendl;
2629 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2630 s->err.message = "The specified placement target does not exist";
2631 return;
2632 }
2633
2634 /* we need to make sure we read bucket info, it's not read before for this
2635 * specific request */
2636 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
2637 op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
2638 s->bucket_info, NULL, &s->bucket_attrs);
2639 if (op_ret < 0 && op_ret != -ENOENT)
2640 return;
2641 s->bucket_exists = (op_ret != -ENOENT);
2642
2643 s->bucket_owner.set_id(s->user->user_id);
2644 s->bucket_owner.set_name(s->user->display_name);
2645 if (s->bucket_exists) {
2646 int r = get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
2647 s->bucket_attrs, &old_policy);
2648 if (r >= 0) {
2649 if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
2650 op_ret = -EEXIST;
2651 return;
2652 }
2653 }
2654 }
2655
2656 RGWBucketInfo master_info;
2657 rgw_bucket *pmaster_bucket;
2658 uint32_t *pmaster_num_shards;
2659 real_time creation_time;
2660
2661 if (!store->is_meta_master()) {
2662 JSONParser jp;
2663 op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
2664 if (op_ret < 0) {
2665 return;
2666 }
2667
2668 JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
2669 JSONDecoder::decode_json("object_ver", objv, &jp);
2670 JSONDecoder::decode_json("bucket_info", master_info, &jp);
2671 ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
2672 ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl;
2673 pmaster_bucket= &master_info.bucket;
2674 creation_time = master_info.creation_time;
2675 pmaster_num_shards = &master_info.num_shards;
2676 pobjv = &objv;
2677 } else {
2678 pmaster_bucket = NULL;
2679 pmaster_num_shards = NULL;
2680 }
2681
2682 string zonegroup_id;
2683
2684 if (s->system_request) {
2685 zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
2686 if (zonegroup_id.empty()) {
2687 zonegroup_id = store->get_zonegroup().get_id();
2688 }
2689 } else {
2690 zonegroup_id = store->get_zonegroup().get_id();
2691 }
2692
2693 if (s->bucket_exists) {
2694 string selected_placement_rule;
2695 rgw_bucket bucket;
2696 bucket.tenant = s->bucket_tenant;
2697 bucket.name = s->bucket_name;
2698 op_ret = store->select_bucket_placement(*(s->user), zonegroup_id,
2699 placement_rule,
2700 &selected_placement_rule, nullptr);
2701 if (selected_placement_rule != s->bucket_info.placement_rule) {
2702 op_ret = -EEXIST;
2703 return;
2704 }
2705 }
2706
2707 /* Encode special metadata first as we're using std::map::emplace under
2708 * the hood. This method will add the new items only if the map doesn't
2709 * contain such keys yet. */
2710 policy.encode(aclbl);
2711 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
2712
2713 if (has_cors) {
2714 cors_config.encode(corsbl);
2715 emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
2716 }
2717
2718 RGWQuotaInfo quota_info;
2719 const RGWQuotaInfo * pquota_info = nullptr;
2720 if (need_metadata_upload()) {
2721 /* It's supposed that following functions WILL NOT change any special
2722 * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
2723 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
2724 if (op_ret < 0) {
2725 return;
2726 }
2727 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
2728 populate_with_generic_attrs(s, attrs);
2729
2730 op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
2731 if (op_ret < 0) {
2732 return;
2733 } else {
2734 pquota_info = &quota_info;
2735 }
2736
2737 /* Web site of Swift API. */
2738 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
2739 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
2740 }
2741
2742 s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
2743 s->bucket.name = s->bucket_name;
2744
2745 /* Handle updates of the metadata for Swift's object versioning. */
2746 if (swift_ver_location) {
2747 s->bucket_info.swift_ver_location = *swift_ver_location;
2748 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
2749 }
2750
2751 op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
2752 placement_rule, s->bucket_info.swift_ver_location,
2753 pquota_info, attrs,
2754 info, pobjv, &ep_objv, creation_time,
2755 pmaster_bucket, pmaster_num_shards, true);
2756 /* continue if EEXIST and create_bucket will fail below. this way we can
2757 * recover from a partial create by retrying it. */
2758 ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;
2759
2760 if (op_ret && op_ret != -EEXIST)
2761 return;
2762
2763 existed = (op_ret == -EEXIST);
2764
2765 if (existed) {
2766 /* bucket already existed, might have raced with another bucket creation, or
2767 * might be partial bucket creation that never completed. Read existing bucket
2768 * info, verify that the reported bucket owner is the current user.
2769 * If all is ok then update the user's list of buckets.
2770 * Otherwise inform client about a name conflict.
2771 */
2772 if (info.owner.compare(s->user->user_id) != 0) {
2773 op_ret = -EEXIST;
2774 return;
2775 }
2776 s->bucket = info.bucket;
2777 }
2778
2779 op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket,
2780 info.creation_time, false);
2781 if (op_ret && !existed && op_ret != -EEXIST) {
2782 /* if it exists (or previously existed), don't remove it! */
2783 op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
2784 s->bucket.name);
2785 if (op_ret < 0) {
2786 ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
2787 << dendl;
2788 }
2789 } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
2790 op_ret = -ERR_BUCKET_EXISTS;
2791 }
2792
2793 if (need_metadata_upload() && existed) {
2794 /* OK, it looks we lost race with another request. As it's required to
2795 * handle metadata fusion and upload, the whole operation becomes very
2796 * similar in nature to PutMetadataBucket. However, as the attrs may
2797 * changed in the meantime, we have to refresh. */
2798 short tries = 0;
2799 do {
2800 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
2801 RGWBucketInfo binfo;
2802 map<string, bufferlist> battrs;
2803
2804 op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
2805 binfo, nullptr, &battrs);
2806 if (op_ret < 0) {
2807 return;
2808 } else if (binfo.owner.compare(s->user->user_id) != 0) {
2809 /* New bucket doesn't belong to the account we're operating on. */
2810 op_ret = -EEXIST;
2811 return;
2812 } else {
2813 s->bucket_info = binfo;
2814 s->bucket_attrs = battrs;
2815 }
2816
2817 attrs.clear();
2818
2819 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
2820 if (op_ret < 0) {
2821 return;
2822 }
2823 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
2824 populate_with_generic_attrs(s, attrs);
2825 op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
2826 if (op_ret < 0) {
2827 return;
2828 }
2829
2830 /* Handle updates of the metadata for Swift's object versioning. */
2831 if (swift_ver_location) {
2832 s->bucket_info.swift_ver_location = *swift_ver_location;
2833 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
2834 }
2835
2836 /* Web site of Swift API. */
2837 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
2838 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
2839
2840 /* This will also set the quota on the bucket. */
2841 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
2842 &s->bucket_info.objv_tracker);
2843 } while (op_ret == -ECANCELED && tries++ < 20);
2844
2845 /* Restore the proper return code. */
2846 if (op_ret >= 0) {
2847 op_ret = -ERR_BUCKET_EXISTS;
2848 }
2849 }
2850 }
2851
2852 int RGWDeleteBucket::verify_permission()
2853 {
2854 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucket)) {
2855 return -EACCES;
2856 }
2857
2858 return 0;
2859 }
2860
void RGWDeleteBucket::pre_exec()
{
  /* Common pre-execution hook shared by bucket/object ops (debug logging). */
  rgw_bucket_object_pre_exec(s);
}
2865
void RGWDeleteBucket::execute()
{
  op_ret = -EINVAL;

  if (s->bucket_name.empty())
    return;

  if (!s->bucket_exists) {
    ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }
  RGWObjVersionTracker ot;
  ot.read_version = s->bucket_info.ep_objv;

  if (s->system_request) {
    /* System (sync) requests may pin the exact entry-point version they
     * expect to delete via the tag/ver params. */
    string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
    string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
    if (!tag.empty()) {
      ot.read_version.tag = tag;
      uint64_t ver;
      string err;
      /* NOTE(review): strict_strtol() parses a signed long which is then
       * widened to uint64_t — confirm strict_strtoll wasn't intended for
       * large version numbers. */
      ver = strict_strtol(ver_str.c_str(), 10, &err);
      if (!err.empty()) {
        ldout(s->cct, 0) << "failed to parse ver param" << dendl;
        op_ret = -EINVAL;
        return;
      }
      ot.read_version.ver = ver;
    }
  }

  /* Best-effort: flush the bucket's stats into the user's stats before
   * the bucket disappears; failure only warns. */
  op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
  if ( op_ret < 0) {
     ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
  }

  /* Only empty buckets may be removed. */
  op_ret = store->check_bucket_empty(s->bucket_info);
  if (op_ret < 0) {
    return;
  }

  /* Non-master zones forward the delete to the metadata master first. */
  if (!store->is_meta_master()) {
    bufferlist in_data;
    op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       NULL);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        /* adjust error, we want to return with NoSuchBucket and not
         * NoSuchKey */
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return;
    }
  }

  string prefix, delimiter;

  if (s->prot_flags & RGW_REST_SWIFT) {
    /* Swift "path" argument narrows which multipart uploads get aborted
     * below. (prefix/delimiter are empty here, so the inner check can
     * only pass.) */
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }

  /* Abort in-flight multipart uploads so their parts don't leak. */
  op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);

  if (op_ret < 0) {
    return;
  }

  op_ret = store->delete_bucket(s->bucket_info, ot, false);

  if (op_ret == -ECANCELED) {
    // lost a race, either with mdlog sync or another delete bucket operation.
    // in either case, we've already called rgw_unlink_bucket()
    op_ret = 0;
    return;
  }

  if (op_ret == 0) {
    /* Remove the bucket from the owner's bucket list; failure is only a
     * warning since the bucket itself is already gone. */
    op_ret = rgw_unlink_bucket(store, s->bucket_info.owner, s->bucket.tenant,
                               s->bucket.name, false);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  }

  /* NOTE(review): this trailing check is a no-op (both branches just
   * return) — kept as-is for fidelity. */
  if (op_ret < 0) {
    return;
  }


}
2967
int RGWPutObj::verify_permission()
{
  if (! copy_source.empty()) {
    /* This PUT carries x-amz-copy-source: the caller must also be able to
     * read the source object. */
    RGWAccessControlPolicy cs_acl(s->cct);
    optional<Policy> policy;
    map<string, bufferlist> cs_attrs;
    rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
    rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);

    rgw_obj obj(cs_bucket, cs_object);
    store->set_atomic(s->obj_ctx, obj);
    store->set_prefetch_data(s->obj_ctx, obj);

    /* check source object permissions */
    if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl,
                        policy, cs_bucket, cs_object) < 0) {
      return -EACCES;
    }

    /* admin request overrides permission checks */
    if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
      if (policy) {
        /* Source bucket has a policy: Deny rejects outright; Pass falls
         * back to the source object's ACL. Versioned reads map to
         * s3:GetObjectVersion. */
        auto e = policy->eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                                RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                           RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  /* Destination: an IAM policy Allow short-circuits the ACL check below;
   * Deny rejects; Pass falls through to the bucket ACL. */
  if (s->iam_policy) {
    auto e = s->iam_policy->eval(s->env, *s->auth.identity,
                                 rgw::IAM::s3PutObject,
                                 rgw_obj(s->bucket, s->object));
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    }
  }

  if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
3027
/* Expose the multipart-upload descriptor through the out-parameter. */
void RGWPutObjProcessor_Multipart::get_mp(RGWMPObj** _mp){
  *_mp = &mp;
}
3031
/* Prepare a single multipart part upload: validate uploadId/partNumber,
 * set up the part's manifest (prefix and stripe rule) and head object,
 * then run the common init. Returns 0 on success or a negative errno. */
int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand)
{
  /* Initialize the multipart descriptor; oid_rand, when given, replaces
   * the uploadId as the per-attempt suffix. */
  string oid = obj_str;
  upload_id = s->info.args.get("uploadId");
  if (!oid_rand) {
    mp.init(oid, upload_id);
  } else {
    mp.init(oid, upload_id, *oid_rand);
  }

  part_num = s->info.args.get("partNumber");
  if (part_num.empty()) {
    ldout(s->cct, 10) << "part number is empty" << dendl;
    return -EINVAL;
  }

  string err;
  uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);

  if (!err.empty()) {
    ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
    return -EINVAL;
  }

  /* All rados objects of this part share the "<oid>.<suffix>" prefix. */
  string upload_prefix = oid + ".";

  if (!oid_rand) {
    upload_prefix.append(upload_id);
  } else {
    upload_prefix.append(*oid_rand);
  }

  rgw_obj target_obj;
  target_obj.init(bucket, oid);

  manifest.set_prefix(upload_prefix);

  /* Stripe-size rule keyed by this part's number. */
  manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, num);

  int r = manifest_gen.create_begin(store->ctx(), &manifest, s->bucket_info.placement_rule, bucket, target_obj);
  if (r < 0) {
    return r;
  }

  /* The part's head object; index_hash_source is set to the final object
   * name — presumably so all parts shard with the target object (confirm
   * against RGWRados index-shard selection). */
  cur_obj = manifest_gen.get_cur_obj(store);
  rgw_raw_obj_to_obj(bucket, cur_obj, &head_obj);
  head_obj.index_hash_source = obj_str;

  r = prepare_init(store, NULL);
  if (r < 0) {
    return r;
  }

  return 0;
}
3087
/* Finish writing one multipart part: persist the part's head object and
 * record its RGWUploadPartInfo (etag, sizes, manifest, compression info)
 * in the upload's meta object omap, keyed by part number. The if_match /
 * if_nomatch / user_data parameters are unused for parts. */
int RGWPutObjProcessor_Multipart::do_complete(size_t accounted_size,
                                              const string& etag,
                                              real_time *mtime, real_time set_mtime,
                                              map<string, bufferlist>& attrs,
                                              real_time delete_at,
                                              const char *if_match,
                                              const char *if_nomatch, const string *user_data, rgw_zone_set *zones_trace)
{
  complete_writing_data();

  /* Write the part's head object metadata; versioning is explicitly
   * disabled since parts are internal objects. */
  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, head_obj);
  op_target.set_versioning_disabled(true);
  RGWRados::Object::Write head_obj_op(&op_target);

  head_obj_op.meta.set_mtime = set_mtime;
  head_obj_op.meta.mtime = mtime;
  head_obj_op.meta.owner = s->owner.get_id();
  head_obj_op.meta.delete_at = delete_at;
  head_obj_op.meta.zones_trace = zones_trace;
  head_obj_op.meta.modify_tail = true;

  int r = head_obj_op.write_meta(obj_len, accounted_size, attrs);
  if (r < 0)
    return r;

  bufferlist bl;
  RGWUploadPartInfo info;
  string p = "part.";
  bool sorted_omap = is_v2_upload_id(upload_id);

  if (sorted_omap) {
    /* v2 upload ids: zero-pad the part number to 8 digits so the omap
     * keys sort in numeric order. */
    string err;
    int part_num_int = strict_strtol(part_num.c_str(), 10, &err);
    if (!err.empty()) {
      dout(10) << "bad part number specified: " << part_num << dendl;
      return -EINVAL;
    }
    char buf[32];
    snprintf(buf, sizeof(buf), "%08d", part_num_int);
    p.append(buf);
  } else {
    p.append(part_num);
  }
  info.num = atoi(part_num.c_str());
  info.etag = etag;
  info.size = obj_len;
  info.accounted_size = accounted_size;
  info.modified = real_clock::now();
  info.manifest = manifest;

  bool compressed;
  r = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
  if (r < 0) {
    dout(1) << "cannot get compression info" << dendl;
    return r;
  }

  ::encode(info, bl);

  /* Record the part under the upload's meta object (kept in the
   * extra-data pool) for later assembly by complete-multipart. */
  string multipart_meta_obj = mp.get_meta();

  rgw_obj meta_obj;
  meta_obj.init_ns(bucket, multipart_meta_obj, mp_ns);
  meta_obj.set_in_extra_data(true);

  rgw_raw_obj raw_meta_obj;

  store->obj_to_raw(s->bucket_info.placement_rule, meta_obj, &raw_meta_obj);

  r = store->omap_set(raw_meta_obj, p, bl);

  return r;
}
3161
3162 RGWPutObjProcessor *RGWPutObj::select_processor(RGWObjectCtx& obj_ctx, bool *is_multipart)
3163 {
3164 RGWPutObjProcessor *processor;
3165
3166 bool multipart = s->info.args.exists("uploadId");
3167
3168 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3169
3170 if (!multipart) {
3171 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3172 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_olh_epoch(olh_epoch);
3173 (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_version_id(version_id);
3174 } else {
3175 processor = new RGWPutObjProcessor_Multipart(obj_ctx, s->bucket_info, part_size, s);
3176 }
3177
3178 if (is_multipart) {
3179 *is_multipart = multipart;
3180 }
3181
3182 return processor;
3183 }
3184
/* Destroy a processor previously obtained from select_processor(). */
void RGWPutObj::dispose_processor(RGWPutObjDataProcessor *processor)
{
  delete processor;
}
3189
void RGWPutObj::pre_exec()
{
  /* Common pre-execution hook shared by bucket/object ops (debug logging). */
  rgw_bucket_object_pre_exec(s);
}
3194
/* Get-data callback adapter: forwards each chunk read from the copy
 * source into the owning RGWPutObj via get_data_cb(). */
class RGWPutObj_CB : public RGWGetDataCB
{
  RGWPutObj *op;  /* non-owning back-pointer to the op being served */
public:
  RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
  ~RGWPutObj_CB() override {}

  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
    return op->get_data_cb(bl, bl_ofs, bl_len);
  }
};
3206
3207 int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3208 {
3209 bufferlist bl_tmp;
3210 bl.copy(bl_ofs, bl_len, bl_tmp);
3211
3212 bl_aux.append(bl_tmp);
3213
3214 return bl_len;
3215 }
3216
3217 int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3218 {
3219 RGWPutObj_CB cb(this);
3220 RGWGetDataCB* filter = &cb;
3221 boost::optional<RGWGetObj_Decompress> decompress;
3222 std::unique_ptr<RGWGetDataCB> decrypt;
3223 RGWCompressionInfo cs_info;
3224 map<string, bufferlist> attrs;
3225 map<string, bufferlist>::iterator attr_iter;
3226 int ret = 0;
3227
3228 uint64_t obj_size;
3229 int64_t new_ofs, new_end;
3230
3231 new_ofs = fst;
3232 new_end = lst;
3233
3234 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3235 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3236
3237 RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3238 RGWRados::Object::Read read_op(&op_target);
3239 read_op.params.obj_size = &obj_size;
3240 read_op.params.attrs = &attrs;
3241
3242 ret = read_op.prepare();
3243 if (ret < 0)
3244 return ret;
3245
3246 bool need_decompress;
3247 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3248 if (op_ret < 0) {
3249 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
3250 return -EIO;
3251 }
3252
3253 bool partial_content = true;
3254 if (need_decompress)
3255 {
3256 obj_size = cs_info.orig_size;
3257 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3258 filter = &*decompress;
3259 }
3260
3261 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3262 op_ret = this->get_decrypt_filter(&decrypt,
3263 filter,
3264 attrs,
3265 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3266 if (decrypt != nullptr) {
3267 filter = decrypt.get();
3268 }
3269 if (op_ret < 0) {
3270 return ret;
3271 }
3272
3273 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3274 if (ret < 0)
3275 return ret;
3276
3277 filter->fixup_range(new_ofs, new_end);
3278 ret = read_op.iterate(new_ofs, new_end, filter);
3279
3280 if (ret >= 0)
3281 ret = filter->flush();
3282
3283 bl.claim_append(bl_aux);
3284
3285 return ret;
3286 }
3287
3288 // special handling for compression type = "random" with multipart uploads
3289 static CompressorRef get_compressor_plugin(const req_state *s,
3290 const std::string& compression_type)
3291 {
3292 if (compression_type != "random") {
3293 return Compressor::create(s->cct, compression_type);
3294 }
3295
3296 bool is_multipart{false};
3297 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3298
3299 if (!is_multipart) {
3300 return Compressor::create(s->cct, compression_type);
3301 }
3302
3303 // use a hash of the multipart upload id so all parts use the same plugin
3304 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3305 if (alg == Compressor::COMP_ALG_NONE) {
3306 return nullptr;
3307 }
3308 return Compressor::create(s->cct, alg);
3309 }
3310
/* Main PUT-object state machine.  Validates the request (quota, MD5,
 * versioning params), selects an atomic or multipart processor, wires an
 * optional encryption-or-compression filter in front of it, streams the
 * body (or a copy-source range) through the filter chain, then finalizes
 * attrs (etag, ACL, manifests, tags) and completes the write.  Errors
 * jump to `done:` so the processor is always disposed. */
void RGWPutObj::execute()
{
  RGWPutObjProcessor *processor = NULL;
  RGWPutObjDataProcessor *filter = nullptr;
  std::unique_ptr<RGWPutObjDataProcessor> encrypt;
  char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
  char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  MD5 hash;
  bufferlist bl, aclbl, bs;
  int len;
  map<string, string>::iterator iter;
  bool multipart;

  off_t fst;
  off_t lst;
  const auto& compression_type = store->get_zone_params().get_compression_type(
      s->bucket_info.placement_rule);
  CompressorRef plugin;
  boost::optional<RGWPutObj_Compress> compressor;

  /* manifest uploads compute their etag over the manifest, not the body */
  bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
  perfcounter->inc(l_rgw_put);
  op_ret = -EINVAL;
  if (s->object.empty()) {
    goto done;
  }

  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_system_versioning_params() returned ret="
                      << op_ret << dendl;
    goto done;
  }

  /* Content-MD5 supplied by the client: decode it now so the body digest
   * can be verified after upload. */
  if (supplied_md5_b64) {
    need_calc_md5 = true;

    ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
    op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
                          supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
    ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
    if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
      op_ret = -ERR_INVALID_DIGEST;
      goto done;
    }

    buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
    ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
  }

  if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
                            we also check sizes at the end anyway */
    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                                user_quota, bucket_quota, s->content_length);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl;
      goto done;
    }
    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
      goto done;
    }
  }

  /* If-Match etag check uses the same hex buffer */
  if (supplied_etag) {
    strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
    supplied_md5[sizeof(supplied_md5) - 1] = '\0';
  }

  processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);

  // no filters by default
  filter = processor;

  /* Handle object versioning of Swift API. */
  if (! multipart) {
    rgw_obj obj(s->bucket, s->object);
    op_ret = store->swift_versioning_copy(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                                          s->bucket_owner.get_id(),
                                          s->bucket_info,
                                          obj);
    if (op_ret < 0) {
      goto done;
    }
  }

  op_ret = processor->prepare(store, NULL);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret
                      << dendl;
    goto done;
  }

  /* server-side copy without an explicit range: copy the whole source */
  if ((! copy_source.empty()) && !copy_source_range) {
    rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
    rgw_obj obj(copy_source_bucket_info.bucket, obj_key.name);

    RGWObjState *astate;
    op_ret = store->get_obj_state(static_cast<RGWObjectCtx *>(s->obj_ctx),
                                  copy_source_bucket_info, obj, &astate, true, false);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
      goto done;
    }
    if (!astate->exists){
      op_ret = -ENOENT;
      goto done;
    }
    lst = astate->accounted_size - 1;
  } else {
    lst = copy_source_range_lst;
  }

  fst = copy_source_range_fst;

  /* encryption and compression are mutually exclusive: an encrypted body
   * is incompressible, so only try compression when no encrypt filter */
  op_ret = get_encrypt_filter(&encrypt, filter);
  if (op_ret < 0) {
    goto done;
  }
  if (encrypt != nullptr) {
    filter = encrypt.get();
  } else {
    //no encryption, we can try compression
    if (compression_type != "none") {
      plugin = get_compressor_plugin(s, compression_type);
      if (!plugin) {
        ldout(s->cct, 1) << "Cannot load plugin for compression type "
            << compression_type << dendl;
      } else {
        compressor.emplace(s->cct, plugin, filter);
        filter = &*compressor;
      }
    }
  }

  /* main data pump: read from client (or copy source) and push through
   * the filter chain until EOF / end of range */
  do {
    bufferlist data;
    if (fst > lst)
      break;
    if (copy_source.empty()) {
      len = get_data(data);
    } else {
      uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
      op_ret = get_data(fst, cur_lst, data);
      if (op_ret < 0)
        goto done;
      len = data.length();
      s->content_length += len;
      fst += len;
    }
    if (len < 0) {
      op_ret = len;
      goto done;
    }

    if (need_calc_md5) {
      hash.Update((const byte *)data.c_str(), data.length());
    }

    /* update torrent */
    torrent.update(data);

    /* do we need this operation to be synchronous? if we're dealing with an object with immutable
     * head, e.g., multipart object we need to make sure we're the first one writing to this object
     */
    bool need_to_wait = (ofs == 0) && multipart;

    bufferlist orig_data;

    /* keep a copy: a synchronous first write may race and need a retry */
    if (need_to_wait) {
      orig_data = data;
    }

    op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait);
    if (op_ret < 0) {
      if (op_ret != -EEXIST) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
                          << op_ret << dendl;
        goto done;
      }
      /* need_to_wait == true and op_ret == -EEXIST */
      ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl;

      /* restore original data */
      data.swap(orig_data);

      /* restart processing with different oid suffix */

      dispose_processor(processor);
      processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
      filter = processor;

      string oid_rand;
      char buf[33];
      gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
      oid_rand.append(buf);

      op_ret = processor->prepare(store, &oid_rand);
      if (op_ret < 0) {
        ldout(s->cct, 0) << "ERROR: processor->prepare() returned "
                         << op_ret << dendl;
        goto done;
      }

      /* re-wire the filter chain on top of the fresh processor */
      op_ret = get_encrypt_filter(&encrypt, filter);
      if (op_ret < 0) {
        goto done;
      }
      if (encrypt != nullptr) {
        filter = encrypt.get();
      } else {
        if (compressor) {
          compressor.emplace(s->cct, plugin, filter);
          filter = &*compressor;
        }
      }
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        goto done;
      }
    }

    ofs += len;
  } while (len > 0);

  /* flush whatever the filters still buffer */
  {
    bufferlist flush;
    op_ret = put_data_and_throttle(filter, flush, ofs, false);
    if (op_ret < 0) {
      goto done;
    }
  }

  if (!chunked_upload && ofs != s->content_length) {
    op_ret = -ERR_REQUEST_TIMEOUT;
    goto done;
  }
  s->obj_size = ofs;

  perfcounter->inc(l_rgw_put_b, s->obj_size);

  op_ret = do_aws4_auth_completion();
  if (op_ret < 0) {
    goto done;
  }

  /* re-check quota with the real (now known) object size */
  op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                              user_quota, bucket_quota, s->obj_size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
    goto done;
  }

  hash.Final(m);

  /* record compression metadata so GET can decompress */
  if (compressor && compressor->is_compressed()) {
    bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
    ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION
        << " with type=" << cs_info.compression_type
        << ", orig_size=" << cs_info.orig_size
        << ", blocks=" << cs_info.blocks.size() << dendl;
  }

  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  etag = calc_md5;

  /* verify client-supplied Content-MD5 against what we actually received */
  if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
    op_ret = -ERR_BAD_DIGEST;
    goto done;
  }

  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  /* DLO (Swift dynamic large object): etag is computed over the manifest */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      goto done;
    }
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  /* SLO (Swift static large object): manifest stored as an attr */
  if (slo_info) {
    bufferlist manifest_bl;
    ::encode(*slo_info, manifest_bl);
    emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));

    hash.Update((byte *)slo_info->raw_data, slo_info->raw_data_len);
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  if (supplied_etag && etag.compare(supplied_etag) != 0) {
    op_ret = -ERR_UNPROCESSABLE_ENTITY;
    goto done;
  }
  bl.append(etag.c_str(), etag.size() + 1);
  emplace_attr(RGW_ATTR_ETAG, std::move(bl));

  populate_with_generic_attrs(s, attrs);
  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    goto done;
  }
  encode_delete_at_attr(delete_at, attrs);
  encode_obj_tags_attr(obj_tags.get(), attrs);

  /* Add a custom metadata to expose the information whether an object
   * is an SLO or not. Appending the attribute must be performed AFTER
   * processing any input from user in order to prohibit overwriting. */
  if (slo_info) {
    bufferlist slo_userindicator_bl;
    slo_userindicator_bl.append("True", 4);
    emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
  }

  op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
                               (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
                               (user_data.empty() ? nullptr : &user_data));

  /* produce torrent */
  if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
  {
    torrent.init(s, store);
    torrent.set_create_date(mtime);
    op_ret = torrent.complete();
    if (0 != op_ret)
    {
      ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
      goto done;
    }
  }

done:
  dispose_processor(processor);
  perfcounter->tinc(l_rgw_put_lat,
                    (ceph_clock_now() - s->time));
}
3678
/* Permission checks for POST-object happen in execute() (after the form
 * is parsed and the IAM policy can be evaluated), so nothing to do here. */
int RGWPostObj::verify_permission()
{
  return 0;
}
3683 /*
3684 RGWPutObjProcessor *RGWPostObj::select_processor(RGWObjectCtx& obj_ctx)
3685 {
3686 RGWPutObjProcessor *processor;
3687
3688 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3689
3690 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3691
3692 return processor;
3693 }
3694
3695 void RGWPostObj::dispose_processor(RGWPutObjDataProcessor *processor)
3696 {
3697 delete processor;
3698 }
3699 */
/* Standard pre-execution hook: emit the bucket/object debug trace. */
void RGWPostObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3704
3705 void RGWPostObj::execute()
3706 {
3707 RGWPutObjDataProcessor *filter = nullptr;
3708 boost::optional<RGWPutObj_Compress> compressor;
3709 CompressorRef plugin;
3710 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3711
3712 /* Read in the data from the POST form. */
3713 op_ret = get_params();
3714 if (op_ret < 0) {
3715 return;
3716 }
3717
3718 op_ret = verify_params();
3719 if (op_ret < 0) {
3720 return;
3721 }
3722
3723 if (s->iam_policy) {
3724 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
3725 rgw::IAM::s3PutObject,
3726 rgw_obj(s->bucket, s->object));
3727 if (e == Effect::Deny) {
3728 op_ret = -EACCES;
3729 return;
3730 } else if (e == Effect::Pass && !verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3731 op_ret = -EACCES;
3732 return;
3733 }
3734 } else if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3735 op_ret = -EACCES;
3736 return;
3737 }
3738
3739 /* Start iteration over data fields. It's necessary as Swift's FormPost
3740 * is capable to handle multiple files in single form. */
3741 do {
3742 std::unique_ptr<RGWPutObjDataProcessor> encrypt;
3743 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3744 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3745 MD5 hash;
3746 ceph::buffer::list bl, aclbl;
3747 int len = 0;
3748
3749 op_ret = store->check_quota(s->bucket_owner.get_id(),
3750 s->bucket,
3751 user_quota,
3752 bucket_quota,
3753 s->content_length);
3754 if (op_ret < 0) {
3755 return;
3756 }
3757
3758 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3759 if (op_ret < 0) {
3760 return;
3761 }
3762
3763 if (supplied_md5_b64) {
3764 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3765 ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3766 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3767 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3768 ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
3769 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3770 op_ret = -ERR_INVALID_DIGEST;
3771 return;
3772 }
3773
3774 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
3775 ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
3776 }
3777
3778 RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx),
3779 s->bucket_info,
3780 s->bucket,
3781 get_current_filename(),
3782 /* part size */
3783 s->cct->_conf->rgw_obj_stripe_size,
3784 s->req_id,
3785 s->bucket_info.versioning_enabled());
3786 processor.set_olh_epoch(0);
3787 /* No filters by default. */
3788 filter = &processor;
3789
3790 op_ret = processor.prepare(store, nullptr);
3791 if (op_ret < 0) {
3792 return;
3793 }
3794
3795 op_ret = get_encrypt_filter(&encrypt, filter);
3796 if (op_ret < 0) {
3797 return;
3798 }
3799 if (encrypt != nullptr) {
3800 filter = encrypt.get();
3801 } else {
3802 const auto& compression_type = store->get_zone_params().get_compression_type(
3803 s->bucket_info.placement_rule);
3804 if (compression_type != "none") {
3805 plugin = Compressor::create(s->cct, compression_type);
3806 if (!plugin) {
3807 ldout(s->cct, 1) << "Cannot load plugin for compression type "
3808 << compression_type << dendl;
3809 } else {
3810 compressor.emplace(s->cct, plugin, filter);
3811 filter = &*compressor;
3812 }
3813 }
3814 }
3815
3816 bool again;
3817 do {
3818 ceph::bufferlist data;
3819 len = get_data(data, again);
3820
3821 if (len < 0) {
3822 op_ret = len;
3823 return;
3824 }
3825
3826 if (!len) {
3827 break;
3828 }
3829
3830 hash.Update((const byte *)data.c_str(), data.length());
3831 op_ret = put_data_and_throttle(filter, data, ofs, false);
3832
3833 ofs += len;
3834
3835 if (ofs > max_len) {
3836 op_ret = -ERR_TOO_LARGE;
3837 return;
3838 }
3839 } while (again);
3840
3841 {
3842 bufferlist flush;
3843 op_ret = put_data_and_throttle(filter, flush, ofs, false);
3844 }
3845
3846 if (len < min_len) {
3847 op_ret = -ERR_TOO_SMALL;
3848 return;
3849 }
3850
3851 s->obj_size = ofs;
3852
3853 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
3854 op_ret = -ERR_BAD_DIGEST;
3855 return;
3856 }
3857
3858 op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
3859 user_quota, bucket_quota, s->obj_size);
3860 if (op_ret < 0) {
3861 return;
3862 }
3863
3864 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3865 if (op_ret < 0) {
3866 return;
3867 }
3868
3869 hash.Final(m);
3870 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
3871
3872 etag = calc_md5;
3873 bl.append(etag.c_str(), etag.size() + 1);
3874 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
3875
3876 policy.encode(aclbl);
3877 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3878
3879 const std::string content_type = get_current_content_type();
3880 if (! content_type.empty()) {
3881 ceph::bufferlist ct_bl;
3882 ct_bl.append(content_type.c_str(), content_type.size() + 1);
3883 emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
3884 }
3885
3886 if (compressor && compressor->is_compressed()) {
3887 ceph::bufferlist tmp;
3888 RGWCompressionInfo cs_info;
3889 cs_info.compression_type = plugin->get_type_name();
3890 cs_info.orig_size = s->obj_size;
3891 cs_info.blocks = move(compressor->get_compression_blocks());
3892 ::encode(cs_info, tmp);
3893 emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
3894 }
3895
3896 op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(),
3897 attrs, (delete_at ? *delete_at : real_time()));
3898 } while (is_next_file_to_upload());
3899 }
3900
3901
3902 void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
3903 const set<string>& rmattr_names,
3904 map<int, string>& temp_url_keys)
3905 {
3906 map<string, bufferlist>::iterator iter;
3907
3908 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
3909 if (iter != add_attrs.end()) {
3910 temp_url_keys[0] = iter->second.c_str();
3911 add_attrs.erase(iter);
3912 }
3913
3914 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
3915 if (iter != add_attrs.end()) {
3916 temp_url_keys[1] = iter->second.c_str();
3917 add_attrs.erase(iter);
3918 }
3919
3920 for (const string& name : rmattr_names) {
3921 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
3922 temp_url_keys[0] = string();
3923 }
3924 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
3925 temp_url_keys[1] = string();
3926 }
3927 }
3928 }
3929
/* Gather and pre-process everything verify_permission() and execute()
 * will need: request params, the account's current attrs, the new attr
 * set (ACL, request metadata, generic attrs), and the special TempURL /
 * quota items extracted out of the generic attrs.
 * Returns 0 on success, negative error code otherwise. */
int RGWPutMetadataAccount::init_processing()
{
  /* First, go to the base class. At the time of writing the method was
   * responsible only for initializing the quota. This isn't necessary
   * here as we are touching metadata only. I'm putting this call only
   * for the future. */
  op_ret = RGWOp::init_processing();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }

  /* current attrs are needed to compute the add/remove delta below */
  op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
                                     &acct_op_tracker);
  if (op_ret < 0) {
    return op_ret;
  }

  if (has_policy) {
    bufferlist acl_bl;
    policy.encode(acl_bl);
    attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return op_ret;
  }
  prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* Try extract the TempURL-related stuff now to allow verify_permission
   * evaluate whether we need FULL_CONTROL or not. */
  filter_out_temp_url(attrs, rmattr_names, temp_url_keys);

  /* The same with quota except a client needs to be reseller admin. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
                                 &new_quota_extracted);
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
3978
/* Authorize the account-metadata update.  Requires an authenticated user
 * with WRITE permission; TempURL key changes additionally require
 * FULL_CONTROL, and quota changes are reserved for system/reseller-admin
 * override (see below).  Returns 0 or a negative error code. */
int RGWPutMetadataAccount::verify_permission()
{
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  if (!verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  /* Altering TempURL keys requires FULL_CONTROL. */
  if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
    return -EPERM;
  }

  /* We are failing this intentionally to allow system user/reseller admin
   * override in rgw_process.cc. This is the way to specify a given RGWOp
   * expects extra privileges. */
  if (new_quota_extracted) {
    return -EACCES;
  }

  return 0;
}
4003
/* Apply the account-metadata update prepared in init_processing():
 * re-read the user info, fold in TempURL keys and (if extracted) the new
 * quota, then persist user info together with the new attr set. */
void RGWPutMetadataAccount::execute()
{
  /* Params have been extracted earlier. See init_processing(). */
  RGWUserInfo new_uinfo;
  op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
                                    &acct_op_tracker);
  if (op_ret < 0) {
    return;
  }

  /* Handle the TempURL-related stuff. */
  if (!temp_url_keys.empty()) {
    for (auto& pair : temp_url_keys) {
      new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
    }
  }

  /* Handle the quota extracted at the verify_permission step. */
  if (new_quota_extracted) {
    new_uinfo.user_quota = std::move(new_quota);
  }

  /* We are passing here the current (old) user info to allow the function
   * optimize-out some operations. */
  op_ret = rgw_store_user_info(store, new_uinfo, s->user,
                               &acct_op_tracker, real_time(), false, &attrs);
}
4031
/* Bucket-metadata updates require WRITE on the bucket (ACL check only;
 * no bucket-policy evaluation for this Swift-style op). */
int RGWPutMetadataBucket::verify_permission()
{
  if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
4040
/* Standard pre-execution hook: emit the bucket/object debug trace. */
void RGWPutMetadataBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4045
/* Update bucket metadata (Swift POST container semantics): ACL, CORS,
 * generic/user attrs, container quota, versioning location and static
 * website settings.  The whole mutation runs inside
 * retry_raced_bucket_write() so a concurrent bucket-info writer triggers
 * a re-read and retry instead of clobbering. */
void RGWPutMetadataBucket::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return;
  }

  /* changing the placement rule of an existing bucket is not supported */
  if (!placement_rule.empty() &&
      placement_rule != s->bucket_info.placement_rule) {
    op_ret = -EEXIST;
    return;
  }

  op_ret = retry_raced_bucket_write(store, s, [this] {
      /* Encode special metadata first as we're using std::map::emplace under
       * the hood. This method will add the new items only if the map doesn't
       * contain such keys yet. */
      if (has_policy) {
	if (s->dialect.compare("swift") == 0) {
	  auto old_policy = \
	    static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
	  auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
	  new_policy->filter_merge(policy_rw_mask, old_policy);
	  policy = *new_policy;
	}
	buffer::list bl;
	policy.encode(bl);
	emplace_attr(RGW_ATTR_ACL, std::move(bl));
      }

      if (has_cors) {
	buffer::list bl;
	cors_config.encode(bl);
	emplace_attr(RGW_ATTR_CORS, std::move(bl));
      }

      /* It's supposed that following functions WILL NOT change any
       * special attributes (like RGW_ATTR_ACL) if they are already
       * present in attrs. */
      prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
      populate_with_generic_attrs(s, attrs);

      /* According to the Swift's behaviour and its container_quota
       * WSGI middleware implementation: anyone with write permissions
       * is able to set the bucket quota. This stays in contrast to
       * account quotas that can be set only by clients holding
       * reseller admin privileges. */
      op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
      if (op_ret < 0) {
	return op_ret;
      }

      if (swift_ver_location) {
	s->bucket_info.swift_ver_location = *swift_ver_location;
	s->bucket_info.swift_versioning = (!swift_ver_location->empty());
      }

      /* Web site of Swift API. */
      filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
      s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

      /* Setting attributes also stores the provided bucket info. Due
       * to this fact, the new quota settings can be serialized with
       * the same call. */
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
				    &s->bucket_info.objv_tracker);
      return op_ret;
    });
}
4120
/* Object-metadata updates require WRITE on the object (ACL check only). */
int RGWPutMetadataObject::verify_permission()
{
  // This looks to be something specific to Swift. We could add
  // operations like swift:PutMetadataObject to the Policy Engine.
  if (!verify_object_permission_no_policy(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
4131
/* Standard pre-execution hook: emit the bucket/object debug trace. */
void RGWPutMetadataObject::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4136
/* Replace an object's metadata (Swift POST object semantics): read the
 * current attrs, compute the add/remove delta against the request, fold
 * in generic attrs, delete-at and an optional DLO manifest, then write
 * the new attr set back. */
void RGWPutMetadataObject::execute()
{
  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs, orig_attrs, rmattrs;

  store->set_atomic(s->obj_ctx, obj);

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  /* check if obj exists, read orig attrs */
  op_ret = get_obj_attrs(store, s, obj, orig_attrs);
  if (op_ret < 0) {
    return;
  }

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(orig_attrs)) {
    op_ret = -ENOENT;
    return;
  }

  /* Filter currently existing attributes. */
  prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
  populate_with_generic_attrs(s, attrs);
  encode_delete_at_attr(delete_at, attrs);

  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
}
4182
4183 int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
4184 {
4185 RGWSLOInfo slo_info;
4186 bufferlist::iterator bliter = bl.begin();
4187 try {
4188 ::decode(slo_info, bliter);
4189 } catch (buffer::error& err) {
4190 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
4191 return -EIO;
4192 }
4193
4194 try {
4195 deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
4196 new RGWBulkDelete::Deleter(store, s));
4197 } catch (std::bad_alloc) {
4198 return -ENOMEM;
4199 }
4200
4201 list<RGWBulkDelete::acct_path_t> items;
4202 for (const auto& iter : slo_info.entries) {
4203 const string& path_str = iter.path;
4204
4205 const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
4206 if (boost::string_view::npos == sep_pos) {
4207 return -EINVAL;
4208 }
4209
4210 RGWBulkDelete::acct_path_t path;
4211
4212 path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
4213 path.obj_key = url_decode(path_str.substr(sep_pos + 1));
4214
4215 items.push_back(path);
4216 }
4217
4218 /* Request removal of the manifest object itself. */
4219 RGWBulkDelete::acct_path_t path;
4220 path.bucket_name = s->bucket_name;
4221 path.obj_key = s->object;
4222 items.push_back(path);
4223
4224 int ret = deleter->delete_chunk(items);
4225 if (ret < 0) {
4226 return ret;
4227 }
4228
4229 return 0;
4230 }
4231
4232 int RGWDeleteObj::verify_permission()
4233 {
4234 if (s->iam_policy) {
4235 auto r = s->iam_policy->eval(s->env, *s->auth.identity,
4236 s->object.instance.empty() ?
4237 rgw::IAM::s3DeleteObject :
4238 rgw::IAM::s3DeleteObjectVersion,
4239 ARN(s->bucket, s->object.name));
4240 if (r == Effect::Allow)
4241 return true;
4242 else if (r == Effect::Deny)
4243 return false;
4244 }
4245
4246 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
4247 return -EACCES;
4248 }
4249
4250 return 0;
4251 }
4252
/* Standard pre-execution hook: emit the bucket/object debug trace. */
void RGWDeleteObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4257
/* Delete an object.  Handles three flavors in order: SLO multipart
 * delete (dispatch to handle_slo_manifest), Swift version restore (the
 * delete "uncovers" the previous version), and the regular delete path
 * (which may create a delete marker on versioned buckets). */
void RGWDeleteObj::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs;


  if (!s->object.empty()) {
    if (need_object_expiration() || multipart_delete) {
      /* check if obj exists, read orig attrs */
      op_ret = get_obj_attrs(store, s, obj, attrs);
      if (op_ret < 0) {
        return;
      }
    }

    /* multipart_delete == Swift "?multipart-manifest=delete": only valid
     * against an actual SLO manifest object */
    if (multipart_delete) {
      const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);

      if (slo_attr != attrs.end()) {
        op_ret = handle_slo_manifest(slo_attr->second);
        if (op_ret < 0) {
          ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
        }
      } else {
        op_ret = -ERR_NOT_SLO_MANIFEST;
      }

      return;
    }

    RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
    obj_ctx->obj.set_atomic(obj);

    bool ver_restored = false;
    op_ret = store->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
                                             s->bucket_info, obj, ver_restored);
    if (op_ret < 0) {
      return;
    }

    if (!ver_restored) {
      /* Swift's versioning mechanism hasn't found any previous version of
       * the object that could be restored. This means we should proceed
       * with the regular delete path. */
      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
      RGWRados::Object::Delete del_op(&del_target);

      op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
                                            &del_op.params.marker_version_id);
      if (op_ret < 0) {
        return;
      }

      del_op.params.bucket_owner = s->bucket_owner.get_id();
      del_op.params.versioning_status = s->bucket_info.versioning_status();
      del_op.params.obj_owner = s->owner;
      del_op.params.unmod_since = unmod_since;
      del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */

      op_ret = del_op.delete_obj();
      if (op_ret >= 0) {
        delete_marker = del_op.result.delete_marker;
        version_id = del_op.result.version_id;
      }

      /* Check whether the object has expired. Swift API documentation
       * stands that we should return 404 Not Found in such case. */
      if (need_object_expiration() && object_is_expired(attrs)) {
        op_ret = -ENOENT;
        return;
      }
    }

    /* -ECANCELED: lost a race but the object is gone either way;
     * precondition failures may be suppressed by the caller's flag */
    if (op_ret == -ECANCELED) {
      op_ret = 0;
    }
    if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
      op_ret = 0;
    }
  } else {
    op_ret = -EINVAL;
  }
}
4351
4352 bool RGWCopyObj::parse_copy_location(const boost::string_view& url_src,
4353 string& bucket_name,
4354 rgw_obj_key& key)
4355 {
4356 boost::string_view name_str;
4357 boost::string_view params_str;
4358
4359 size_t pos = url_src.find('?');
4360 if (pos == string::npos) {
4361 name_str = url_src;
4362 } else {
4363 name_str = url_src.substr(0, pos);
4364 params_str = url_src.substr(pos + 1);
4365 }
4366
4367 boost::string_view dec_src{name_str};
4368 if (dec_src[0] == '/')
4369 dec_src.remove_prefix(1);
4370
4371 pos = dec_src.find('/');
4372 if (pos ==string::npos)
4373 return false;
4374
4375 boost::string_view bn_view{dec_src.substr(0, pos)};
4376 bucket_name = std::string{bn_view.data(), bn_view.size()};
4377
4378 boost::string_view kn_view{dec_src.substr(pos + 1)};
4379 key.name = std::string{kn_view.data(), kn_view.size()};
4380
4381 if (key.name.empty()) {
4382 return false;
4383 }
4384
4385 if (! params_str.empty()) {
4386 RGWHTTPArgs args;
4387 args.set(params_str.to_string());
4388 args.parse();
4389
4390 key.instance = args.get("versionId", NULL);
4391 }
4392
4393 return true;
4394 }
4395
/**
 * Authorize a copy: the requester needs READ on the source object
 * (policy or ACL) and WRITE on the destination bucket.  Also resolves
 * source/destination bucket info as a side effect.
 *
 * @return 0 on success, negative errno (-EACCES, -ERR_NO_SUCH_BUCKET, ...)
 *         on failure.
 */
int RGWCopyObj::verify_permission()
{
  RGWAccessControlPolicy src_acl(s->cct);
  optional<Policy> src_policy;
  op_ret = get_params();
  if (op_ret < 0)
    return op_ret;

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return op_ret;
  }
  map<string, bufferlist> src_attrs;

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  if (s->bucket_instance_id.empty()) {
    op_ret = store->get_bucket_info(obj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
  } else {
    /* will only happen in intra region sync where the source and dest bucket is the same */
    op_ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
  }
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_BUCKET;
    }
    return op_ret;
  }

  src_bucket = src_bucket_info.bucket;

  /* get buckets info (source and dest) */
  // Source-side checks only apply when the source is local to this zone;
  // cross-zone sync fetches the object remotely instead.
  if (s->local_source && source_zone.empty()) {
    rgw_obj src_obj(src_bucket, src_object);
    store->set_atomic(s->obj_ctx, src_obj);
    store->set_prefetch_data(s->obj_ctx, src_obj);

    /* check source object permissions */
    op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl,
			     src_policy, src_bucket, src_object);
    if (op_ret < 0) {
      return op_ret;
    }

    /* admin request overrides permission checks */
    if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
      if (src_policy) {
        // Policy Deny is final; Pass falls through to the object ACL.
        auto e = src_policy->eval(s->env, *s->auth.identity,
				  src_object.instance.empty() ?
				  rgw::IAM::s3GetObject :
				  rgw::IAM::s3GetObjectVersion,
				  ARN(src_obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
		   !src_acl.verify_permission(*s->auth.identity, s->perm_mask,
					      RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!src_acl.verify_permission(*s->auth.identity,
					    s->perm_mask,
					    RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  RGWAccessControlPolicy dest_bucket_policy(s->cct);
  map<string, bufferlist> dest_attrs;

  if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
                                                           or intra region sync */
    dest_bucket_info = src_bucket_info;
    dest_attrs = src_attrs;
  } else {
    op_ret = store->get_bucket_info(obj_ctx, dest_tenant_name, dest_bucket_name,
                                    dest_bucket_info, nullptr, &dest_attrs);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return op_ret;
    }
  }

  dest_bucket = dest_bucket_info.bucket;

  rgw_obj dest_obj(dest_bucket, dest_object);
  store->set_atomic(s->obj_ctx, dest_obj);

  /* check dest bucket permissions */
  op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
                              &dest_bucket_policy, dest_bucket);
  if (op_ret < 0) {
    return op_ret;
  }

  /* admin request overrides permission checks */
  // NOTE(review): the owner checked here is taken from `dest_policy` (the
  // destination *object* ACL member), not `dest_bucket_policy` — presumably
  // intentional, confirm against upstream.
  if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id()) &&
      ! dest_bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_WRITE)) {
    return -EACCES;
  }

  op_ret = init_dest_policy();
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4507
4508
/**
 * Shared copy-op setup: parse conditional headers, encode the destination
 * ACL, and collect request metadata into the destination attr map.
 *
 * @return 0 on success; sets and returns op_ret (-EINVAL) on a bad date.
 */
int RGWCopyObj::init_common()
{
  // If-Modified-Since: an unparsable date is a client error.
  if (if_mod) {
    if (parse_time(if_mod, &mod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    mod_ptr = &mod_time;
  }

  // If-Unmodified-Since, same handling.
  if (if_unmod) {
    if (parse_time(if_unmod, &unmod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    unmod_ptr = &unmod_time;
  }

  // Store the destination ACL as an xattr on the copied object.
  bufferlist aclbl;
  dest_policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  // Gather user metadata from request headers plus generic attrs
  // (content type etc.) for the destination object.
  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return op_ret;
  }
  populate_with_generic_attrs(s, attrs);

  return 0;
}
4539
// C-style trampoline passed to RGWRados::copy_obj(): forwards progress
// notifications to the owning RGWCopyObj instance carried in `param`.
static void copy_obj_progress_cb(off_t ofs, void *param)
{
  RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
  op->progress_cb(ofs);
}
4545
4546 void RGWCopyObj::progress_cb(off_t ofs)
4547 {
4548 if (!s->cct->_conf->rgw_copy_obj_progress)
4549 return;
4550
4551 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
4552 return;
4553
4554 send_partial_response(ofs);
4555
4556 last_ofs = ofs;
4557 }
4558
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWCopyObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4563
/**
 * Perform the object copy: mark both objects atomic, handle Swift
 * versioning of the destination, then delegate to RGWRados::copy_obj().
 * Result is reported through op_ret.
 */
void RGWCopyObj::execute()
{
  if (init_common() < 0)
    return;

  rgw_obj src_obj(src_bucket, src_object);
  rgw_obj dst_obj(dest_bucket, dest_object);

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  obj_ctx.obj.set_atomic(src_obj);
  obj_ctx.obj.set_atomic(dst_obj);

  // Propagate an X-Delete-At style expiration into the dest attrs.
  encode_delete_at_attr(delete_at, attrs);

  bool high_precision_time = (s->system_request);

  /* Handle object versioning of Swift API. In case of copying to remote this
   * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
  op_ret = store->swift_versioning_copy(obj_ctx,
                                        dest_bucket_info.owner,
                                        dest_bucket_info,
                                        dst_obj);
  if (op_ret < 0) {
    return;
  }

  // All the conditional-copy inputs (mod_ptr/unmod_ptr/if_match/...)
  // were prepared by init_common()/get_params(); copy_obj() enforces them.
  op_ret = store->copy_obj(obj_ctx,
                           s->user->user_id,
                           client_id,
                           op_id,
                           &s->info,
                           source_zone,
                           dst_obj,
                           src_obj,
                           dest_bucket_info,
                           src_bucket_info,
                           &src_mtime,
                           &mtime,
                           mod_ptr,
                           unmod_ptr,
                           high_precision_time,
                           if_match,
                           if_nomatch,
                           attrs_mod,
                           copy_if_newer,
                           attrs, RGW_OBJ_CATEGORY_MAIN,
                           olh_epoch,
                           (delete_at ? *delete_at : real_time()),
                           (version_id.empty() ? NULL : &version_id),
                           &s->req_id, /* use req_id as tag */
                           &etag,
                           copy_obj_progress_cb, (void *)this
    );
}
4618
4619 int RGWGetACLs::verify_permission()
4620 {
4621 bool perm;
4622 if (!s->object.empty()) {
4623 perm = verify_object_permission(s,
4624 s->object.instance.empty() ?
4625 rgw::IAM::s3GetObjectAcl :
4626 rgw::IAM::s3GetObjectVersionAcl);
4627 } else {
4628 perm = verify_bucket_permission(s, rgw::IAM::s3GetBucketAcl);
4629 }
4630 if (!perm)
4631 return -EACCES;
4632
4633 return 0;
4634 }
4635
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWGetACLs::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4640
4641 void RGWGetACLs::execute()
4642 {
4643 stringstream ss;
4644 RGWAccessControlPolicy* const acl = \
4645 (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get());
4646 RGWAccessControlPolicy_S3* const s3policy = \
4647 static_cast<RGWAccessControlPolicy_S3*>(acl);
4648 s3policy->to_xml(ss);
4649 acls = ss.str();
4650 }
4651
4652
4653
4654 int RGWPutACLs::verify_permission()
4655 {
4656 bool perm;
4657 if (!s->object.empty()) {
4658 perm = verify_object_permission(s,
4659 s->object.instance.empty() ?
4660 rgw::IAM::s3PutObjectAcl :
4661 rgw::IAM::s3PutObjectVersionAcl);
4662 } else {
4663 perm = verify_bucket_permission(s, rgw::IAM::s3PutBucketAcl);
4664 }
4665 if (!perm)
4666 return -EACCES;
4667
4668 return 0;
4669 }
4670
4671 int RGWGetLC::verify_permission()
4672 {
4673 bool perm;
4674 perm = verify_bucket_permission(s, rgw::IAM::s3GetLifecycleConfiguration);
4675 if (!perm)
4676 return -EACCES;
4677
4678 return 0;
4679 }
4680
4681 int RGWPutLC::verify_permission()
4682 {
4683 bool perm;
4684 perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
4685 if (!perm)
4686 return -EACCES;
4687
4688 return 0;
4689 }
4690
/**
 * Authorize deleting the bucket lifecycle configuration.
 *
 * Deliberately checks s3:PutLifecycleConfiguration — AWS S3 has no
 * separate delete action; DeleteBucketLifecycle is gated by the Put
 * permission.
 *
 * @return 0 if permitted, -EACCES otherwise.
 */
int RGWDeleteLC::verify_permission()
{
  bool perm;
  perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
  if (!perm)
    return -EACCES;

  return 0;
}
4700
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWPutACLs::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4705
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWGetLC::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4710
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWPutLC::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4715
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWDeleteLC::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4720
/**
 * Apply a new ACL to the bucket or object.  The ACL may come from the
 * request body (XML), from a canned-ACL query arg, or from x-amz-grant-*
 * headers; the latter two are converted to XML first.  Bucket ACL writes
 * are forwarded to the meta master zone in multisite setups.
 */
void RGWPutACLs::execute()
{
  bufferlist bl;

  RGWAccessControlPolicy_S3 *policy = NULL;
  RGWACLXMLParser_S3 parser(s->cct);
  RGWAccessControlPolicy_S3 new_policy(s->cct);
  stringstream ss;
  char *new_data = NULL;
  rgw_obj obj;

  op_ret = 0; /* XXX redundant? */

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }


  // The owner of the existing ACL is preserved; a PUT cannot change it.
  RGWAccessControlPolicy* const existing_policy = \
    (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());

  owner = existing_policy->get_owner();

  op_ret = get_params();
  if (op_ret < 0) {
    if (op_ret == -ERANGE) {
      ldout(s->cct, 4) << "The size of request xml data is larger than the max limitation, data size = "
                       << s->length << dendl;
      op_ret = -ERR_MALFORMED_XML;
      s->err.message = "The XML you provided was larger than the maximum " +
                       std::to_string(s->cct->_conf->rgw_max_put_param_size) +
                       " bytes allowed.";
    }
    return;
  }

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  // A canned ACL and an XML body are mutually exclusive.
  if (!s->canned_acl.empty() && len) {
    op_ret = -EINVAL;
    return;
  }

  // Canned ACL / grant headers: synthesize the XML body ourselves.
  // `data` is heap-owned by this op (malloc/strdup), hence the manual
  // free + replace.
  if (!s->canned_acl.empty() || s->has_acl_header) {
    op_ret = get_policy_from_state(store, s, ss);
    if (op_ret < 0)
      return;

    new_data = strdup(ss.str().c_str());
    free(data);
    data = new_data;
    len = ss.str().size();
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    return;
  }
  policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
  if (!policy) {
    op_ret = -EINVAL;
    return;
  }

  // Enforce the grant-count limit (config, defaulting to 100 when the
  // config value is negative).
  const RGWAccessControlList& req_acl = policy->get_acl();
  const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
#define ACL_GRANTS_MAX_NUM 100
  int max_num = s->cct->_conf->rgw_acl_grants_max_num;
  if (max_num < 0) {
    max_num = ACL_GRANTS_MAX_NUM;
  }

  int grants_num = req_grant_map.size();
  if (grants_num > max_num) {
    ldout(s->cct, 4) << "An acl can have up to "
                     << max_num
                     << " grants, request acl grants num: "
                     << grants_num << dendl;
    op_ret = -ERR_MALFORMED_ACL_ERROR;
    s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
                     + std::to_string(max_num)
                     + " grants allowed in an acl.";
    return;
  }

  // forward bucket acl requests to meta master zone
  if (s->object.empty() && !store->is_meta_master()) {
    bufferlist in_data;
    // include acl data unless it was generated from a canned_acl
    if (s->canned_acl.empty()) {
      in_data.append(data, len);
    }
    op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old AccessControlPolicy";
    policy->to_xml(*_dout);
    *_dout << dendl;
  }

  // rebuild() validates grantees against existing users and normalizes
  // the policy under the preserved owner.
  op_ret = policy->rebuild(store, &owner, new_policy);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New AccessControlPolicy:";
    new_policy.to_xml(*_dout);
    *_dout << dendl;
  }

  new_policy.encode(bl);
  map<string, bufferlist> attrs;

  if (!s->object.empty()) {
    obj = rgw_obj(s->bucket, s->object);
    store->set_atomic(s->obj_ctx, obj);
    //if instance is empty, we should modify the latest object
    op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
  } else {
    attrs = s->bucket_attrs;
    attrs[RGW_ATTR_ACL] = bl;
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  }
  if (op_ret == -ECANCELED) {
    op_ret = 0; /* lost a race, but it's ok because acls are immutable */
  }
}
4854
4855 static void get_lc_oid(struct req_state *s, string& oid)
4856 {
4857 string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
4858 int max_objs = (s->cct->_conf->rgw_lc_max_objs > HASH_PRIME)?HASH_PRIME:s->cct->_conf->rgw_lc_max_objs;
4859 int index = ceph_str_hash_linux(shard_id.c_str(), shard_id.size()) % HASH_PRIME % max_objs;
4860 oid = lc_oid_prefix;
4861 char buf[32];
4862 snprintf(buf, 32, ".%d", index);
4863 oid.append(buf);
4864 return;
4865 }
4866
/**
 * Install a new lifecycle configuration on the bucket.
 *
 * Validates the mandatory Content-MD5 header against the body, parses
 * and rebuilds the XML configuration, stores it in the bucket attrs,
 * then registers the bucket in the sharded lifecycle processing list
 * under a cls lock.
 */
void RGWPutLC::execute()
{
  bufferlist bl;

  RGWLifecycleConfiguration_S3 *config = NULL;
  RGWLCXMLParser_S3 parser(s->cct);
  RGWLifecycleConfiguration_S3 new_config(s->cct);

  // S3 requires Content-MD5 on PutBucketLifecycleConfiguration.
  content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
  if (content_md5 == nullptr) {
    op_ret = -ERR_INVALID_REQUEST;
    s->err.message = "Missing required header for this request: Content-MD5";
    ldout(s->cct, 5) << s->err.message << dendl;
    return;
  }

  std::string content_md5_bin;
  try {
    content_md5_bin = rgw::from_base64(boost::string_view(content_md5));
  } catch (...) {
    s->err.message = "Request header Content-MD5 contains character "
                     "that is not base64 encoded.";
    ldout(s->cct, 5) << s->err.message << dendl;
    op_ret = -ERR_BAD_DIGEST;
    return;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0)
    return;

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  // Verify the body digest matches the client-supplied Content-MD5.
  MD5 data_hash;
  unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
  data_hash.Update(reinterpret_cast<const byte*>(data), len);
  data_hash.Final(data_hash_res);

  if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
    op_ret = -ERR_BAD_DIGEST;
    s->err.message = "The Content-MD5 you specified did not match what we received.";
    ldout(s->cct, 5) << s->err.message
                     << " Specified content md5: " << content_md5
                     << ", calculated content md5: " << data_hash_res
                     << dendl;
    return;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }
  config = static_cast<RGWLifecycleConfiguration_S3 *>(parser.find_first("LifecycleConfiguration"));
  if (!config) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old LifecycleConfiguration:";
    config->to_xml(*_dout);
    *_dout << dendl;
  }

  // rebuild() validates/normalizes the parsed rules into new_config.
  op_ret = config->rebuild(store, new_config);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New LifecycleConfiguration:";
    new_config.to_xml(*_dout);
    *_dout << dendl;
  }

  // Persist the configuration in the bucket instance attrs.
  new_config.encode(bl);
  map<string, bufferlist> attrs;
  attrs = s->bucket_attrs;
  attrs[RGW_ATTR_LC] = bl;
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  if (op_ret < 0)
    return;

  // Register this bucket in its lifecycle shard under an exclusive cls
  // lock, retrying while another writer holds it.
  string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
  string oid;
  get_lc_oid(s, oid);
  pair<string, int> entry(shard_id, lc_uninitial);
  int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
  rados::cls::lock::Lock l(lc_index_lock_name);
  utime_t time(max_lock_secs, 0);
  l.set_duration(time);
  l.set_cookie(cookie);
  librados::IoCtx *ctx = store->get_lc_pool_ctx();
  do {
    op_ret = l.lock_exclusive(ctx, oid);
    if (op_ret == -EBUSY) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
      sleep(5);
      continue;
    }
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock " << oid << op_ret << dendl;
      break;
    }
    op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry);
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to set entry " << oid << op_ret << dendl;
    }
    break;
  }while(1);
  l.unlock(ctx, oid);
  return;
}
4983
/**
 * Remove the bucket's lifecycle configuration.
 *
 * Rewrites the bucket instance attrs without RGW_ATTR_LC, then removes
 * the bucket's entry from its lifecycle shard under an exclusive cls
 * lock (retrying while another writer holds it).
 */
void RGWDeleteLC::execute()
{
  bufferlist bl;
  map<string, bufferlist> orig_attrs, attrs;
  map<string, bufferlist>::iterator iter;
  rgw_raw_obj obj;
  store->get_bucket_instance_obj(s->bucket, obj);
  store->set_prefetch_data(s->obj_ctx, obj);
  op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
  if (op_ret < 0)
    return;

  // Copy every attr except the lifecycle one; writing this filtered map
  // back effectively deletes RGW_ATTR_LC.
  for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
    const string& name = iter->first;
    dout(10) << "DeleteLC : attr: " << name << dendl;
    if (name.compare(0, (sizeof(RGW_ATTR_LC) - 1), RGW_ATTR_LC) != 0) {
      if (attrs.find(name) == attrs.end()) {
        attrs[name] = iter->second;
      }
    }
  }
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);

  // Drop the bucket from the lifecycle processing shard.
  string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
  pair<string, int> entry(shard_id, lc_uninitial);
  string oid;
  get_lc_oid(s, oid);
  int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
  librados::IoCtx *ctx = store->get_lc_pool_ctx();
  rados::cls::lock::Lock l(lc_index_lock_name);
  utime_t time(max_lock_secs, 0);
  l.set_duration(time);
  do {
    op_ret = l.lock_exclusive(ctx, oid);
    if (op_ret == -EBUSY) {
      dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
      sleep(5);
      continue;
    }
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock " << oid << op_ret << dendl;
      break;
    }
    op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry);
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWDeleteLC() failed to set entry " << oid << op_ret << dendl;
    }
    break;
  }while(1);
  l.unlock(ctx, oid);
  return;
}
5035
// Reading CORS is allowed for the bucket owner or via s3:GetBucketCORS.
int RGWGetCORS::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
}
5040
5041 void RGWGetCORS::execute()
5042 {
5043 op_ret = read_bucket_cors();
5044 if (op_ret < 0)
5045 return ;
5046
5047 if (!cors_exist) {
5048 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
5049 op_ret = -ENOENT;
5050 return;
5051 }
5052 }
5053
// Writing CORS is allowed for the bucket owner or via s3:PutBucketCORS.
int RGWPutCORS::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
5058
5059 void RGWPutCORS::execute()
5060 {
5061 rgw_raw_obj obj;
5062
5063 op_ret = get_params();
5064 if (op_ret < 0)
5065 return;
5066
5067 if (!store->is_meta_master()) {
5068 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
5069 if (op_ret < 0) {
5070 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
5071 return;
5072 }
5073 }
5074
5075 op_ret = retry_raced_bucket_write(store, s, [this] {
5076 map<string, bufferlist> attrs = s->bucket_attrs;
5077 attrs[RGW_ATTR_CORS] = cors_bl;
5078 return rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
5079 });
5080 }
5081
int RGWDeleteCORS::verify_permission()
{
  // No separate delete permission
  // (matches AWS: DeleteBucketCors is gated by s3:PutBucketCORS).
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
5087
/**
 * Remove the bucket's CORS configuration by rewriting the bucket
 * instance attrs without RGW_ATTR_CORS.  The read-modify-write is
 * retried on races with concurrent bucket-metadata writers.
 */
void RGWDeleteCORS::execute()
{
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  bufferlist bl;
  if (!cors_exist) {
    // Nothing to delete: surface as 404.
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  op_ret = retry_raced_bucket_write(store, s, [this] {
      rgw_raw_obj obj;
      store->get_bucket_instance_obj(s->bucket, obj);
      store->set_prefetch_data(s->obj_ctx, obj);
      map<string, bufferlist> orig_attrs, attrs, rmattrs;
      map<string, bufferlist>::iterator iter;

      op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
      if (op_ret < 0)
	return op_ret;

      /* only remove meta attrs */
      // Partition attrs: CORS attrs into rmattrs (dropped), everything
      // else copied into the new attr map that gets written back.
      for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
	const string& name = iter->first;
	dout(10) << "DeleteCORS : attr: " << name << dendl;
	if (name.compare(0, (sizeof(RGW_ATTR_CORS) - 1), RGW_ATTR_CORS) == 0) {
	  rmattrs[name] = iter->second;
	} else if (attrs.find(name) == attrs.end()) {
	  attrs[name] = iter->second;
	}
      }
      return rgw_bucket_set_attrs(store, s->bucket_info, attrs,
				  &s->bucket_info.objv_tracker);
    });
}
5125
// Fill in the CORS preflight response headers (allowed headers, exposed
// headers, max-age) from the rule matched by validate_cors_request().
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
  get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
5129
5130 int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
5131 rule = cc->host_name_rule(origin);
5132 if (!rule) {
5133 dout(10) << "There is no cors rule present for " << origin << dendl;
5134 return -ENOENT;
5135 }
5136
5137 if (!validate_cors_rule_method(rule, req_meth)) {
5138 return -ENOENT;
5139 }
5140
5141 if (!validate_cors_rule_header(rule, req_hdrs)) {
5142 return -ENOENT;
5143 }
5144
5145 return 0;
5146 }
5147
/**
 * Handle a CORS preflight (OPTIONS) request: require the Origin and
 * Access-Control-Request-Method headers, then validate them against the
 * bucket's CORS configuration.  On no matching rule, `rule` stays null
 * and origin/req_meth are cleared so the response layer sends a plain
 * (non-CORS) response.
 */
void RGWOptionsCORS::execute()
{
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  origin = s->info.env->get("HTTP_ORIGIN");
  if (!origin) {
    dout(0) <<
      "Preflight request without mandatory Origin header"
	    << dendl;
    op_ret = -EINVAL;
    return;
  }
  req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    dout(0) <<
      "Preflight request without mandatory Access-control-request-method header"
	    << dendl;
    op_ret = -EINVAL;
    return;
  }
  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  // Request headers are optional; validate everything against the config.
  req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
  op_ret = validate_cors_request(&bucket_cors);
  if (!rule) {
    // No rule matched the Origin: reset so no CORS headers are emitted.
    origin = req_meth = NULL;
    return;
  }
  return;
}
5183
// Reading requester-pays state: bucket owner or s3:GetBucketRequestPayment.
int RGWGetRequestPayment::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
}
5188
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWGetRequestPayment::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5193
// Expose the bucket's current requester-pays flag to the response layer.
void RGWGetRequestPayment::execute()
{
  requester_pays = s->bucket_info.requester_pays;
}
5198
// Changing requester-pays state: bucket owner or s3:PutBucketRequestPayment.
int RGWSetRequestPayment::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
}
5203
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWSetRequestPayment::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5208
/**
 * Update the bucket's requester-pays flag (parsed by get_params() into
 * `requester_pays`) and persist the modified bucket instance info.
 */
void RGWSetRequestPayment::execute()
{
  op_ret = get_params();

  if (op_ret < 0)
    return;

  s->bucket_info.requester_pays = requester_pays;
  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
					   &s->bucket_attrs);
  if (op_ret < 0) {
    ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
		     << " returned err=" << op_ret << dendl;
    return;
  }
}
5225
5226 int RGWInitMultipart::verify_permission()
5227 {
5228 if (s->iam_policy) {
5229 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5230 rgw::IAM::s3PutObject,
5231 rgw_obj(s->bucket, s->object));
5232 if (e == Effect::Allow) {
5233 return 0;
5234 } else if (e == Effect::Deny) {
5235 return -EACCES;
5236 }
5237 }
5238
5239 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5240 return -EACCES;
5241 }
5242
5243 return 0;
5244 }
5245
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWInitMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5250
/**
 * Initiate a multipart upload: generate a unique upload id and create
 * the upload's meta object (in the multipart namespace, extra-data
 * pool) carrying the ACL, encryption and request metadata.  Retries
 * with a fresh id on the unlikely event of an id collision (-EEXIST).
 */
void RGWInitMultipart::execute()
{
  bufferlist aclbl;
  map<string, bufferlist> attrs;
  rgw_obj obj;

  if (get_params() < 0)
    return;

  if (s->object.empty())
    return;

  // ACL for the eventual object, stored on the meta object for now.
  policy.encode(aclbl);
  attrs[RGW_ATTR_ACL] = aclbl;

  populate_with_generic_attrs(s, attrs);

  /* select encryption mode */
  op_ret = prepare_encryption(attrs);
  if (op_ret != 0)
    return;

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  do {
    // Random v2 upload id; PUT_OBJ_CREATE_EXCL below detects collisions.
    char buf[33];
    gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
    upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
    upload_id.append(buf);

    string tmp_obj_name;
    RGWMPObj mp(s->object.name, upload_id);
    tmp_obj_name = mp.get_meta();

    obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
    // The meta object is indexed with 0 size in the extra-data pool;
    // hash on the final object name so it shards with the real object.
    obj.set_in_extra_data(true);
    obj.index_hash_source = s->object.name;

    RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
    op_target.set_versioning_disabled(true); /* no versioning for multipart meta */

    RGWRados::Object::Write obj_op(&op_target);

    obj_op.meta.owner = s->owner.get_id();
    obj_op.meta.category = RGW_OBJ_CATEGORY_MULTIMETA;
    obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;

    // Exclusive create: -EEXIST means the random id collided; retry.
    op_ret = obj_op.write_meta(0, 0, attrs);
  } while (op_ret == -EEXIST);
}
5305
5306 static int get_multipart_info(RGWRados *store, struct req_state *s,
5307 string& meta_oid,
5308 RGWAccessControlPolicy *policy,
5309 map<string, bufferlist>& attrs)
5310 {
5311 map<string, bufferlist>::iterator iter;
5312 bufferlist header;
5313
5314 rgw_obj obj;
5315 obj.init_ns(s->bucket, meta_oid, mp_ns);
5316 obj.set_in_extra_data(true);
5317
5318 int op_ret = get_obj_attrs(store, s, obj, attrs);
5319 if (op_ret < 0) {
5320 if (op_ret == -ENOENT) {
5321 return -ERR_NO_SUCH_UPLOAD;
5322 }
5323 return op_ret;
5324 }
5325
5326 if (policy) {
5327 for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
5328 string name = iter->first;
5329 if (name.compare(RGW_ATTR_ACL) == 0) {
5330 bufferlist& bl = iter->second;
5331 bufferlist::iterator bli = bl.begin();
5332 try {
5333 ::decode(*policy, bli);
5334 } catch (buffer::error& err) {
5335 ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
5336 return -EIO;
5337 }
5338 break;
5339 }
5340 }
5341 }
5342
5343 return 0;
5344 }
5345
5346 int RGWCompleteMultipart::verify_permission()
5347 {
5348 if (s->iam_policy) {
5349 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5350 rgw::IAM::s3PutObject,
5351 rgw_obj(s->bucket, s->object));
5352 if (e == Effect::Allow) {
5353 return 0;
5354 } else if (e == Effect::Deny) {
5355 return -EACCES;
5356 }
5357 }
5358
5359 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5360 return -EACCES;
5361 }
5362
5363 return 0;
5364 }
5365
// Standard pre-execution hook: log the bucket/object this op targets.
void RGWCompleteMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5370
5371 void RGWCompleteMultipart::execute()
5372 {
5373 RGWMultiCompleteUpload *parts;
5374 map<int, string>::iterator iter;
5375 RGWMultiXMLParser parser;
5376 string meta_oid;
5377 map<uint32_t, RGWUploadPartInfo> obj_parts;
5378 map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
5379 map<string, bufferlist> attrs;
5380 off_t ofs = 0;
5381 MD5 hash;
5382 char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
5383 char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
5384 bufferlist etag_bl;
5385 rgw_obj meta_obj;
5386 rgw_obj target_obj;
5387 RGWMPObj mp;
5388 RGWObjManifest manifest;
5389 uint64_t olh_epoch = 0;
5390 string version_id;
5391
5392 op_ret = get_params();
5393 if (op_ret < 0)
5394 return;
5395 op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
5396 if (op_ret < 0) {
5397 return;
5398 }
5399
5400 if (!data || !len) {
5401 op_ret = -ERR_MALFORMED_XML;
5402 return;
5403 }
5404
5405 if (!parser.init()) {
5406 op_ret = -EIO;
5407 return;
5408 }
5409
5410 if (!parser.parse(data, len, 1)) {
5411 op_ret = -ERR_MALFORMED_XML;
5412 return;
5413 }
5414
5415 parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
5416 if (!parts || parts->parts.empty()) {
5417 op_ret = -ERR_MALFORMED_XML;
5418 return;
5419 }
5420
5421 if ((int)parts->parts.size() >
5422 s->cct->_conf->rgw_multipart_part_upload_limit) {
5423 op_ret = -ERANGE;
5424 return;
5425 }
5426
5427 mp.init(s->object.name, upload_id);
5428 meta_oid = mp.get_meta();
5429
5430 int total_parts = 0;
5431 int handled_parts = 0;
5432 int max_parts = 1000;
5433 int marker = 0;
5434 bool truncated;
5435 RGWCompressionInfo cs_info;
5436 bool compressed = false;
5437 uint64_t accounted_size = 0;
5438
5439 uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;
5440
5441 list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */
5442
5443 bool versioned_object = s->bucket_info.versioning_enabled();
5444
5445 iter = parts->parts.begin();
5446
5447 meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
5448 meta_obj.set_in_extra_data(true);
5449 meta_obj.index_hash_source = s->object.name;
5450
5451 /*take a cls lock on meta_obj to prevent racing completions (or retries)
5452 from deleting the parts*/
5453 rgw_pool meta_pool;
5454 rgw_raw_obj raw_obj;
5455 int max_lock_secs_mp =
5456 s->cct->_conf->get_val<int64_t>("rgw_mp_lock_max_time");
5457 utime_t dur(max_lock_secs_mp, 0);
5458
5459 store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
5460 store->get_obj_data_pool((s->bucket_info).placement_rule,
5461 meta_obj,&meta_pool);
5462 store->open_pool_ctx(meta_pool, serializer.ioctx);
5463
5464 op_ret = serializer.try_lock(raw_obj.oid, dur);
5465 if (op_ret < 0) {
5466 dout(0) << "RGWCompleteMultipart::execute() failed to acquire lock " << dendl;
5467 op_ret = -ERR_INTERNAL_ERROR;
5468 s->err.message = "This multipart completion is already in progress";
5469 return;
5470 }
5471
5472 op_ret = get_obj_attrs(store, s, meta_obj, attrs);
5473
5474 if (op_ret < 0) {
5475 ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
5476 << " ret=" << op_ret << dendl;
5477 return;
5478 }
5479
5480 do {
5481 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
5482 marker, obj_parts, &marker, &truncated);
5483 if (op_ret == -ENOENT) {
5484 op_ret = -ERR_NO_SUCH_UPLOAD;
5485 }
5486 if (op_ret < 0)
5487 return;
5488
5489 total_parts += obj_parts.size();
5490 if (!truncated && total_parts != (int)parts->parts.size()) {
5491 ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts
5492 << " expected: " << parts->parts.size() << dendl;
5493 op_ret = -ERR_INVALID_PART;
5494 return;
5495 }
5496
5497 for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
5498 uint64_t part_size = obj_iter->second.accounted_size;
5499 if (handled_parts < (int)parts->parts.size() - 1 &&
5500 part_size < min_part_size) {
5501 op_ret = -ERR_TOO_SMALL;
5502 return;
5503 }
5504
5505 char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
5506 if (iter->first != (int)obj_iter->first) {
5507 ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: "
5508 << iter->first << " next uploaded: "
5509 << obj_iter->first << dendl;
5510 op_ret = -ERR_INVALID_PART;
5511 return;
5512 }
5513 string part_etag = rgw_string_unquote(iter->second);
5514 if (part_etag.compare(obj_iter->second.etag) != 0) {
5515 ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first
5516 << " etag: " << iter->second << dendl;
5517 op_ret = -ERR_INVALID_PART;
5518 return;
5519 }
5520
5521 hex_to_buf(obj_iter->second.etag.c_str(), petag,
5522 CEPH_CRYPTO_MD5_DIGESTSIZE);
5523 hash.Update((const byte *)petag, sizeof(petag));
5524
5525 RGWUploadPartInfo& obj_part = obj_iter->second;
5526
5527 /* update manifest for part */
5528 string oid = mp.get_part(obj_iter->second.num);
5529 rgw_obj src_obj;
5530 src_obj.init_ns(s->bucket, oid, mp_ns);
5531
5532 if (obj_part.manifest.empty()) {
5533 ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj="
5534 << src_obj << dendl;
5535 op_ret = -ERR_INVALID_PART;
5536 return;
5537 } else {
5538 manifest.append(obj_part.manifest, store);
5539 }
5540
5541 if (obj_part.cs_info.compression_type != "none") {
5542 if (compressed && cs_info.compression_type != obj_part.cs_info.compression_type) {
5543 ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload ("
5544 << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
5545 op_ret = -ERR_INVALID_PART;
5546 return;
5547 }
5548 int64_t new_ofs; // offset in compression data for new part
5549 if (cs_info.blocks.size() > 0)
5550 new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
5551 else
5552 new_ofs = 0;
5553 for (const auto& block : obj_part.cs_info.blocks) {
5554 compression_block cb;
5555 cb.old_ofs = block.old_ofs + cs_info.orig_size;
5556 cb.new_ofs = new_ofs;
5557 cb.len = block.len;
5558 cs_info.blocks.push_back(cb);
5559 new_ofs = cb.new_ofs + cb.len;
5560 }
5561 if (!compressed)
5562 cs_info.compression_type = obj_part.cs_info.compression_type;
5563 cs_info.orig_size += obj_part.cs_info.orig_size;
5564 compressed = true;
5565 }
5566
5567 rgw_obj_index_key remove_key;
5568 src_obj.key.get_index_key(&remove_key);
5569
5570 remove_objs.push_back(remove_key);
5571
5572 ofs += obj_part.size;
5573 accounted_size += obj_part.accounted_size;
5574 }
5575 } while (truncated);
5576 hash.Final((byte *)final_etag);
5577
5578 buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
5579 snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
5580 "-%lld", (long long)parts->parts.size());
5581 etag = final_etag_str;
5582 ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl;
5583
5584 etag_bl.append(final_etag_str, strlen(final_etag_str) + 1);
5585
5586 attrs[RGW_ATTR_ETAG] = etag_bl;
5587
5588 if (compressed) {
5589 // write compression attribute to full object
5590 bufferlist tmp;
5591 ::encode(cs_info, tmp);
5592 attrs[RGW_ATTR_COMPRESSION] = tmp;
5593 }
5594
5595 target_obj.init(s->bucket, s->object.name);
5596 if (versioned_object) {
5597 store->gen_rand_obj_instance_name(&target_obj);
5598 }
5599
5600 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
5601
5602 obj_ctx.obj.set_atomic(target_obj);
5603
5604 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
5605 RGWRados::Object::Write obj_op(&op_target);
5606
5607 obj_op.meta.manifest = &manifest;
5608 obj_op.meta.remove_objs = &remove_objs;
5609
5610 obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
5611 obj_op.meta.owner = s->owner.get_id();
5612 obj_op.meta.flags = PUT_OBJ_CREATE;
5613 obj_op.meta.modify_tail = true;
5614 obj_op.meta.completeMultipart = true;
5615 obj_op.meta.olh_epoch = olh_epoch;
5616 op_ret = obj_op.write_meta(ofs, accounted_size, attrs);
5617 if (op_ret < 0)
5618 return;
5619
5620 // remove the upload obj
5621 int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
5622 s->bucket_info, meta_obj, 0);
5623 if (r >= 0) {
5624 /* serializer's exclusive lock is released */
5625 serializer.clear_locked();
5626 } else {
5627 ldout(store->ctx(), 0) << "WARNING: failed to remove object "
5628 << meta_obj << dendl;
5629 }
5630 }
5631
5632 int RGWCompleteMultipart::MPSerializer::try_lock(
5633 const std::string& _oid,
5634 utime_t dur)
5635 {
5636 oid = _oid;
5637 op.assert_exists();
5638 lock.set_duration(dur);
5639 lock.lock_exclusive(&op);
5640 int ret = ioctx.operate(oid, &op);
5641 if (! ret) {
5642 locked = true;
5643 }
5644 return ret;
5645 }
5646
5647 void RGWCompleteMultipart::complete()
5648 {
5649 /* release exclusive lock iff not already */
5650 if (unlikely(serializer.locked)) {
5651 int r = serializer.unlock();
5652 if (r < 0) {
5653 ldout(store->ctx(), 0) << "WARNING: failed to unlock "
5654 << serializer.oid << dendl;
5655 }
5656 }
5657 send_response();
5658 }
5659
5660 int RGWAbortMultipart::verify_permission()
5661 {
5662 if (s->iam_policy) {
5663 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5664 rgw::IAM::s3AbortMultipartUpload,
5665 rgw_obj(s->bucket, s->object));
5666 if (e == Effect::Allow) {
5667 return 0;
5668 } else if (e == Effect::Deny) {
5669 return -EACCES;
5670 }
5671 }
5672
5673 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5674 return -EACCES;
5675 }
5676
5677 return 0;
5678 }
5679
/* Standard per-op prelude shared by bucket/object operations. */
void RGWAbortMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5684
5685 void RGWAbortMultipart::execute()
5686 {
5687 op_ret = -EINVAL;
5688 string upload_id;
5689 string meta_oid;
5690 upload_id = s->info.args.get("uploadId");
5691 map<string, bufferlist> attrs;
5692 rgw_obj meta_obj;
5693 RGWMPObj mp;
5694
5695 if (upload_id.empty() || s->object.empty())
5696 return;
5697
5698 mp.init(s->object.name, upload_id);
5699 meta_oid = mp.get_meta();
5700
5701 op_ret = get_multipart_info(store, s, meta_oid, NULL, attrs);
5702 if (op_ret < 0)
5703 return;
5704
5705 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
5706 op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
5707 }
5708
5709 int RGWListMultipart::verify_permission()
5710 {
5711 if (!verify_object_permission(s, rgw::IAM::s3ListMultipartUploadParts))
5712 return -EACCES;
5713
5714 return 0;
5715 }
5716
/* Standard per-op prelude shared by bucket/object operations. */
void RGWListMultipart::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5721
5722 void RGWListMultipart::execute()
5723 {
5724 map<string, bufferlist> xattrs;
5725 string meta_oid;
5726 RGWMPObj mp;
5727
5728 op_ret = get_params();
5729 if (op_ret < 0)
5730 return;
5731
5732 mp.init(s->object.name, upload_id);
5733 meta_oid = mp.get_meta();
5734
5735 op_ret = get_multipart_info(store, s, meta_oid, &policy, xattrs);
5736 if (op_ret < 0)
5737 return;
5738
5739 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
5740 marker, parts, NULL, &truncated);
5741 }
5742
5743 int RGWListBucketMultiparts::verify_permission()
5744 {
5745 if (!verify_bucket_permission(s,
5746 rgw::IAM::s3ListBucketMultipartUploads))
5747 return -EACCES;
5748
5749 return 0;
5750 }
5751
/* Standard per-op prelude shared by bucket/object operations. */
void RGWListBucketMultiparts::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5756
5757 void RGWListBucketMultiparts::execute()
5758 {
5759 vector<rgw_bucket_dir_entry> objs;
5760 string marker_meta;
5761
5762 op_ret = get_params();
5763 if (op_ret < 0)
5764 return;
5765
5766 if (s->prot_flags & RGW_REST_SWIFT) {
5767 string path_args;
5768 path_args = s->info.args.get("path");
5769 if (!path_args.empty()) {
5770 if (!delimiter.empty() || !prefix.empty()) {
5771 op_ret = -EINVAL;
5772 return;
5773 }
5774 prefix = path_args;
5775 delimiter="/";
5776 }
5777 }
5778 marker_meta = marker.get_meta();
5779
5780 op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
5781 max_uploads, &objs, &common_prefixes, &is_truncated);
5782 if (op_ret < 0) {
5783 return;
5784 }
5785
5786 if (!objs.empty()) {
5787 vector<rgw_bucket_dir_entry>::iterator iter;
5788 RGWMultipartUploadEntry entry;
5789 for (iter = objs.begin(); iter != objs.end(); ++iter) {
5790 rgw_obj_key key(iter->key);
5791 if (!entry.mp.from_meta(key.name))
5792 continue;
5793 entry.obj = *iter;
5794 uploads.push_back(entry);
5795 }
5796 next_marker = entry;
5797 }
5798 }
5799
5800 void RGWGetHealthCheck::execute()
5801 {
5802 if (!g_conf->rgw_healthcheck_disabling_path.empty() &&
5803 (::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
5804 /* Disabling path specified & existent in the filesystem. */
5805 op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
5806 } else {
5807 op_ret = 0; /* 200 OK */
5808 }
5809 }
5810
5811 int RGWDeleteMultiObj::verify_permission()
5812 {
5813 acl_allowed = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
5814 if (!acl_allowed && !s->iam_policy)
5815 return -EACCES;
5816
5817 return 0;
5818 }
5819
/* Standard per-op prelude shared by bucket/object operations. */
void RGWDeleteMultiObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
5824
/* Handle an S3 multi-object delete: parse the XML body, then delete each
 * listed key (up to max_to_delete), streaming a per-object result into the
 * response. Uses goto for the two exits because `data` must be freed on
 * every path and the response framing differs before/after begin_response().
 */
void RGWDeleteMultiObj::execute()
{
  RGWMultiDelDelete *multi_delete;
  vector<rgw_obj_key>::iterator iter;
  RGWMultiDelXMLParser parser;
  int num_processed = 0;
  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);

  op_ret = get_params();
  if (op_ret < 0) {
    goto error;
  }

  /* An empty request body cannot carry a <Delete> document. */
  if (!data) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    goto error;
  }

  multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
  if (!multi_delete) {
    op_ret = -EINVAL;
    goto error;
  }

  /* Quiet mode: the response only lists failures, not successes. */
  if (multi_delete->is_quiet())
    quiet = true;

  begin_response();
  if (multi_delete->objects.empty()) {
    goto done;
  }

  for (iter = multi_delete->objects.begin();
        iter != multi_delete->objects.end() && num_processed < max_to_delete;
        ++iter, num_processed++) {
    rgw_obj obj(bucket, *iter);
    if (s->iam_policy) {
      /* Per-object IAM check: versioned deletes map to a distinct action.
       * Pass only suffices when the bucket ACL allowed write up front. */
      auto e = s->iam_policy->eval(s->env,
				   *s->auth.identity,
				   iter->instance.empty() ?
				   rgw::IAM::s3DeleteObject :
				   rgw::IAM::s3DeleteObjectVersion,
				   obj);
      if ((e == Effect::Deny) ||
	  (e == Effect::Pass && !acl_allowed)) {
	send_partial_response(*iter, false, "", -EACCES);
	continue;
      }
    }

    obj_ctx->obj.set_atomic(obj);

    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = s->bucket_owner.get_id();
    del_op.params.versioning_status = s->bucket_info.versioning_status();
    del_op.params.obj_owner = s->owner;

    op_ret = del_op.delete_obj();
    /* Deleting an absent key is reported as success, matching S3. */
    if (op_ret == -ENOENT) {
      op_ret = 0;
    }

    send_partial_response(*iter, del_op.result.delete_marker,
			  del_op.result.version_id, op_ret);
  }

  /* set the return code to zero, errors at this point will be
     dumped to the response */
  op_ret = 0;

done:
  // will likely segfault if begin_response() has not been called
  end_response();
  free(data);
  return;

error:
  send_status();
  free(data);
  return;

}
5919
5920 bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
5921 map<string, bufferlist>& battrs,
5922 ACLOwner& bucket_owner /* out */)
5923 {
5924 RGWAccessControlPolicy bacl(store->ctx());
5925 int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
5926 if (ret < 0) {
5927 return false;
5928 }
5929
5930 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
5931
5932 bucket_owner = bacl.get_owner();
5933
5934 /* We can use global user_acl because each BulkDelete request is allowed
5935 * to work on entities from a single account only. */
5936 return verify_bucket_permission(s, binfo.bucket, s->user_acl.get(),
5937 &bacl, policy, rgw::IAM::s3DeleteBucket);
5938 }
5939
/* Delete one bulk-delete path. A path with an object key deletes that
 * object; a bare bucket path deletes (and unlinks) the bucket itself.
 * Failures are not propagated as errors: they are tallied into
 * num_unfound (ENOENT) or appended to `failures` so the caller can report
 * them per path. Returns true only on full success. */
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
{
  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  RGWBucketInfo binfo;
  map<string, bufferlist> battrs;
  ACLOwner bowner;

  int ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   path.bucket_name, binfo, nullptr,
                                   &battrs);
  if (ret < 0) {
    goto binfo_fail;
  }

  if (!verify_permission(binfo, battrs, bowner)) {
    ret = -EACCES;
    goto auth_fail;
  }

  if (!path.obj_key.empty()) {
    /* Object deletion branch. */
    rgw_obj obj(binfo.bucket, path.obj_key);
    obj_ctx.obj.set_atomic(obj);

    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = binfo.owner;
    del_op.params.versioning_status = binfo.versioning_status();
    del_op.params.obj_owner = bowner;

    ret = del_op.delete_obj();
    if (ret < 0) {
      goto delop_fail;
    }
  } else {
    /* Bucket deletion branch: remove the bucket, unlink it from its
     * owner's bucket list, then replicate the removal to the metadata
     * master when this zone is not it. */
    RGWObjVersionTracker ot;
    ot.read_version = binfo.ep_objv;

    ret = store->delete_bucket(binfo, ot);
    if (0 == ret) {
      ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
                              binfo.bucket.name, false);
      if (ret < 0) {
        /* Unlink failure leaves a stale list entry; log and continue. */
        ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret
                         << dendl;
      }
    }
    if (ret < 0) {
      goto delop_fail;
    }

    if (!store->is_meta_master()) {
      bufferlist in_data;
      ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                      nullptr);
      if (ret < 0) {
        if (ret == -ENOENT) {
          /* adjust error, we want to return with NoSuchBucket and not
           * NoSuchKey */
          ret = -ERR_NO_SUCH_BUCKET;
        }
        goto delop_fail;
      }
    }
  }

  num_deleted++;
  return true;


binfo_fail:
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl;
    num_unfound++;
  } else {
    ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret
                            << dendl;

    fail_desc_t failed_item = {
      .err = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

auth_fail:
  ldout(store->ctx(), 20) << "wrong auth for " << path << dendl;
  {
    fail_desc_t failed_item = {
      .err = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

delop_fail:
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find entry " << path << dendl;
    num_unfound++;
  } else {
    fail_desc_t failed_item = {
      .err = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;
}
6051
6052 bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
6053 {
6054 ldout(store->ctx(), 20) << "in delete_chunk" << dendl;
6055 for (auto path : paths) {
6056 ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl;
6057 delete_single(path);
6058 }
6059
6060 return true;
6061 }
6062
/* Authorization is deferred: each path in the bulk request is checked
 * individually by Deleter::verify_permission() during execute(). */
int RGWBulkDelete::verify_permission()
{
  return 0;
}
6067
/* Standard per-op prelude shared by bucket/object operations. */
void RGWBulkDelete::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6072
/* Drive chunked bulk deletion: repeatedly read a batch of paths from the
 * request body and hand it to the Deleter until the input is exhausted. */
void RGWBulkDelete::execute()
{
  deleter = std::unique_ptr<Deleter>(new Deleter(store, s));

  bool is_truncated = false;
  do {
    list<RGWBulkDelete::acct_path_t> items;

    int ret = get_data(items, &is_truncated);
    if (ret < 0) {
      /* NOTE(review): returns without setting op_ret here — presumably
       * get_data() or earlier stages have set it; confirm. */
      return;
    }

    /* NOTE(review): ret is assigned but never examined; delete_chunk()
     * always returns true and per-path failures are accumulated inside
     * the deleter — confirm this is intentional. */
    ret = deleter->delete_chunk(items);
  } while (!op_ret && is_truncated);

  return;
}
6091
6092
/* Out-of-line definition of the in-class static constexpr member
 * (required for odr-use before C++17 inline variables). */
constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
6094
6095 int RGWBulkUploadOp::verify_permission()
6096 {
6097 if (s->auth.identity->is_anonymous()) {
6098 return -EACCES;
6099 }
6100
6101 if (! verify_user_permission(s, RGW_PERM_WRITE)) {
6102 return -EACCES;
6103 }
6104
6105 if (s->user->user_id.tenant != s->bucket_tenant) {
6106 ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
6107 << " (user_id.tenant=" << s->user->user_id.tenant
6108 << " requested=" << s->bucket_tenant << ")"
6109 << dendl;
6110 return -EACCES;
6111 }
6112
6113 if (s->user->max_buckets < 0) {
6114 return -EPERM;
6115 }
6116
6117 return 0;
6118 }
6119
/* Standard per-op prelude shared by bucket/object operations. */
void RGWBulkUploadOp::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6124
6125 boost::optional<std::pair<std::string, rgw_obj_key>>
6126 RGWBulkUploadOp::parse_path(const boost::string_ref& path)
6127 {
6128 /* We need to skip all slashes at the beginning in order to preserve
6129 * compliance with Swift. */
6130 const size_t start_pos = path.find_first_not_of('/');
6131
6132 if (boost::string_ref::npos != start_pos) {
6133 /* Seperator is the first slash after the leading ones. */
6134 const size_t sep_pos = path.substr(start_pos).find('/');
6135
6136 if (boost::string_ref::npos != sep_pos) {
6137 const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
6138 const auto obj_name = path.substr(sep_pos + 1);
6139
6140 return std::make_pair(bucket_name.to_string(),
6141 rgw_obj_key(obj_name.to_string()));
6142 } else {
6143 /* It's guaranteed here that bucket name is at least one character
6144 * long and is different than slash. */
6145 return std::make_pair(path.substr(start_pos).to_string(),
6146 rgw_obj_key());
6147 }
6148 }
6149
6150 return none;
6151 }
6152
6153 std::pair<std::string, std::string>
6154 RGWBulkUploadOp::handle_upload_path(struct req_state *s)
6155 {
6156 std::string bucket_path, file_prefix;
6157 if (! s->init_state.url_bucket.empty()) {
6158 file_prefix = bucket_path = s->init_state.url_bucket + "/";
6159 if (! s->object.empty()) {
6160 std::string& object_name = s->object.name;
6161
6162 /* As rgw_obj_key::empty() already verified emptiness of s->object.name,
6163 * we can safely examine its last element. */
6164 if (object_name.back() == '/') {
6165 file_prefix.append(object_name);
6166 } else {
6167 file_prefix.append(object_name).append("/");
6168 }
6169 }
6170 }
6171 return std::make_pair(bucket_path, file_prefix);
6172 }
6173
6174 int RGWBulkUploadOp::handle_dir_verify_permission()
6175 {
6176 if (s->user->max_buckets > 0) {
6177 RGWUserBuckets buckets;
6178 std::string marker;
6179 bool is_truncated = false;
6180 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
6181 marker, std::string(), s->user->max_buckets,
6182 false, &is_truncated);
6183 if (op_ret < 0) {
6184 return op_ret;
6185 }
6186
6187 if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) {
6188 return -ERR_TOO_MANY_BUCKETS;
6189 }
6190 }
6191
6192 return 0;
6193 }
6194
6195 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
6196 {
6197 /* the request of container or object level will contain bucket name.
6198 * only at account level need to append the bucket name */
6199 if (info.script_uri.find(bucket_name) != std::string::npos) {
6200 return;
6201 }
6202
6203 ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
6204 info.script_uri.append("/").append(bucket_name);
6205 info.request_uri_aws4 = info.request_uri = info.script_uri;
6206 info.effective_uri = "/" + bucket_name;
6207 }
6208
/* Handle a directory entry from the uploaded tarball by creating the
 * corresponding bucket (Swift container). Mirrors the regular
 * create-bucket flow: quota check, ownership check on a pre-existing
 * bucket, forwarding to the metadata master in multisite setups, bucket
 * creation, and linking the bucket to the user. */
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
{
  ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl;

  op_ret = handle_dir_verify_permission();
  if (op_ret < 0) {
    return op_ret;
  }

  std::string bucket_name;
  rgw_obj_key object_junk;
  std::tie(bucket_name, object_junk) = *parse_path(path);

  rgw_raw_obj obj(store->get_zone_params().domain_root,
                  rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));

  /* we need to make sure we read bucket info, it's not read before for this
   * specific request */
  RGWBucketInfo binfo;
  std::map<std::string, ceph::bufferlist> battrs;
  op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
                                  binfo, NULL, &battrs);
  if (op_ret < 0 && op_ret != -ENOENT) {
    return op_ret;
  }
  const bool bucket_exists = (op_ret != -ENOENT);

  if (bucket_exists) {
    /* A pre-existing bucket is only acceptable when owned by this user
     * (retry of a partial create); otherwise report a name conflict. */
    RGWAccessControlPolicy old_policy(s->cct);
    int r = get_bucket_policy_from_attr(s->cct, store, binfo,
                                        battrs, &old_policy);
    if (r >= 0) {
      if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
        op_ret = -EEXIST;
        return op_ret;
      }
    }
  }

  RGWBucketInfo master_info;
  rgw_bucket *pmaster_bucket = nullptr;
  uint32_t *pmaster_num_shards = nullptr;
  real_time creation_time;
  obj_version objv, ep_objv, *pobjv = nullptr;

  if (! store->is_meta_master()) {
    /* Multisite: the metadata master performs the authoritative create;
     * adopt the bucket identity/versions it hands back. */
    JSONParser jp;
    ceph::bufferlist in_data;
    req_info info = s->info;
    forward_req_info(s->cct, info, bucket_name);
    op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
    if (op_ret < 0) {
      return op_ret;
    }

    JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
    JSONDecoder::decode_json("object_ver", objv, &jp);
    JSONDecoder::decode_json("bucket_info", master_info, &jp);

    ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver="
                      << objv.ver << dendl;
    ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time
                      << dendl;

    pmaster_bucket= &master_info.bucket;
    creation_time = master_info.creation_time;
    pmaster_num_shards = &master_info.num_shards;
    pobjv = &objv;
  } else {
    pmaster_bucket = nullptr;
    pmaster_num_shards = nullptr;
  }


  std::string placement_rule;
  if (bucket_exists) {
    /* Re-creating an existing bucket must not silently change placement. */
    std::string selected_placement_rule;
    rgw_bucket bucket;
    bucket.tenant = s->bucket_tenant;
    bucket.name = s->bucket_name;
    op_ret = store->select_bucket_placement(*(s->user),
                                            store->get_zonegroup().get_id(),
                                            placement_rule,
                                            &selected_placement_rule,
                                            nullptr);
    /* NOTE(review): op_ret from select_bucket_placement() is not checked
     * before selected_placement_rule is compared — confirm a failure here
     * cannot leave selected_placement_rule unset. */
    if (selected_placement_rule != binfo.placement_rule) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl;
      return op_ret;
    }
  }

  /* Create metadata: ACLs. */
  std::map<std::string, ceph::bufferlist> attrs;
  RGWAccessControlPolicy policy;
  policy.create_default(s->user->user_id, s->user->display_name);
  ceph::bufferlist aclbl;
  policy.encode(aclbl);
  attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));

  RGWQuotaInfo quota_info;
  const RGWQuotaInfo * pquota_info = nullptr;

  rgw_bucket bucket;
  bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
  bucket.name = bucket_name;


  RGWBucketInfo out_info;
  op_ret = store->create_bucket(*(s->user),
                                bucket,
                                store->get_zonegroup().get_id(),
                                placement_rule, binfo.swift_ver_location,
                                pquota_info, attrs,
                                out_info, pobjv, &ep_objv, creation_time,
                                pmaster_bucket, pmaster_num_shards, true);
  /* continue if EEXIST and create_bucket will fail below. this way we can
   * recover from a partial create by retrying it. */
  ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret
                    << ", bucket=" << bucket << dendl;

  if (op_ret && op_ret != -EEXIST) {
    return op_ret;
  }

  const bool existed = (op_ret == -EEXIST);
  if (existed) {
    /* bucket already existed, might have raced with another bucket creation, or
     * might be partial bucket creation that never completed. Read existing bucket
     * info, verify that the reported bucket owner is the current user.
     * If all is ok then update the user's list of buckets.
     * Otherwise inform client about a name conflict.
     */
    if (out_info.owner.compare(s->user->user_id) != 0) {
      op_ret = -EEXIST;
      ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl;
      return op_ret;
    }
    bucket = out_info.bucket;
  }

  op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
                           out_info.creation_time, false);
  if (op_ret && !existed && op_ret != -EEXIST) {
    /* if it exists (or previously existed), don't remove it! */
    op_ret = rgw_unlink_bucket(store, s->user->user_id,
                               bucket.tenant, bucket.name);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret="
                       << op_ret << dendl;
    }
  } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
    ldout(s->cct, 20) << "bulk upload: containers already exists"
                      << dendl;
    op_ret = -ERR_BUCKET_EXISTS;
  }

  return op_ret;
}
6368
6369
6370 bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
6371 const rgw_obj& obj,
6372 std::map<std::string, ceph::bufferlist>& battrs,
6373 ACLOwner& bucket_owner /* out */)
6374 {
6375 RGWAccessControlPolicy bacl(store->ctx());
6376 op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
6377 if (op_ret < 0) {
6378 ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket"
6379 << dendl;
6380 return false;
6381 }
6382
6383 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
6384
6385 bucket_owner = bacl.get_owner();
6386 if (policy) {
6387 auto e = policy->eval(s->env, *s->auth.identity,
6388 rgw::IAM::s3PutObject, obj);
6389 if (e == Effect::Allow) {
6390 return true;
6391 } else if (e == Effect::Deny) {
6392 return false;
6393 }
6394 }
6395
6396 return verify_bucket_permission_no_policy(s, s->user_acl.get(),
6397 &bacl, RGW_PERM_WRITE);
6398 }
6399
6400 int RGWBulkUploadOp::handle_file(const boost::string_ref path,
6401 const size_t size,
6402 AlignedStreamGetter& body)
6403 {
6404
6405 ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size
6406 << dendl;
6407
6408 RGWPutObjDataProcessor *filter = nullptr;
6409 boost::optional<RGWPutObj_Compress> compressor;
6410
6411 if (size > static_cast<const size_t>(s->cct->_conf->rgw_max_put_size)) {
6412 op_ret = -ERR_TOO_LARGE;
6413 return op_ret;
6414 }
6415
6416 std::string bucket_name;
6417 rgw_obj_key object;
6418 std::tie(bucket_name, object) = *parse_path(path);
6419
6420 auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
6421 RGWBucketInfo binfo;
6422 std::map<std::string, ceph::bufferlist> battrs;
6423 ACLOwner bowner;
6424 op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
6425 bucket_name, binfo, nullptr, &battrs);
6426 if (op_ret == -ENOENT) {
6427 ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name
6428 << dendl;
6429 } else if (op_ret < 0) {
6430 return op_ret;
6431 }
6432
6433 if (! handle_file_verify_permission(binfo,
6434 rgw_obj(binfo.bucket, object),
6435 battrs, bowner)) {
6436 ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl;
6437 op_ret = -EACCES;
6438 return op_ret;
6439 }
6440
6441 op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
6442 user_quota, bucket_quota, size);
6443 if (op_ret < 0) {
6444 return op_ret;
6445 }
6446
6447 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
6448 if (op_ret < 0) {
6449 return op_ret;
6450 }
6451
6452 RGWPutObjProcessor_Atomic processor(obj_ctx,
6453 binfo,
6454 binfo.bucket,
6455 object.name,
6456 /* part size */
6457 s->cct->_conf->rgw_obj_stripe_size,
6458 s->req_id,
6459 binfo.versioning_enabled());
6460
6461 /* No filters by default. */
6462 filter = &processor;
6463
6464 op_ret = processor.prepare(store, nullptr);
6465 if (op_ret < 0) {
6466 ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret="
6467 << op_ret << dendl;
6468 return op_ret;
6469 }
6470
6471 const auto& compression_type = store->get_zone_params().get_compression_type(
6472 binfo.placement_rule);
6473 CompressorRef plugin;
6474 if (compression_type != "none") {
6475 plugin = Compressor::create(s->cct, compression_type);
6476 if (! plugin) {
6477 ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type "
6478 << compression_type << dendl;
6479 } else {
6480 compressor.emplace(s->cct, plugin, filter);
6481 filter = &*compressor;
6482 }
6483 }
6484
6485 /* Upload file content. */
6486 ssize_t len = 0;
6487 size_t ofs = 0;
6488 MD5 hash;
6489 do {
6490 ceph::bufferlist data;
6491 len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);
6492
6493 ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl;
6494 if (len < 0) {
6495 op_ret = len;
6496 return op_ret;
6497 } else if (len > 0) {
6498 hash.Update((const byte *)data.c_str(), data.length());
6499 op_ret = put_data_and_throttle(filter, data, ofs, false);
6500 if (op_ret < 0) {
6501 ldout(s->cct, 20) << "processor->thottle_data() returned ret="
6502 << op_ret << dendl;
6503 return op_ret;
6504 }
6505
6506 ofs += len;
6507 }
6508
6509 } while (len > 0);
6510
6511 if (ofs != size) {
6512 ldout(s->cct, 10) << "bulk upload: real file size different from declared"
6513 << dendl;
6514 op_ret = -EINVAL;
6515 }
6516
6517 op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
6518 user_quota, bucket_quota, size);
6519 if (op_ret < 0) {
6520 ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path
6521 << dendl;
6522 return op_ret;
6523 }
6524
6525 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
6526 if (op_ret < 0) {
6527 return op_ret;
6528 }
6529
6530 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
6531 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
6532 hash.Final(m);
6533 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
6534
6535 /* Create metadata: ETAG. */
6536 std::map<std::string, ceph::bufferlist> attrs;
6537 std::string etag = calc_md5;
6538 ceph::bufferlist etag_bl;
6539 etag_bl.append(etag.c_str(), etag.size() + 1);
6540 attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));
6541
6542 /* Create metadata: ACLs. */
6543 RGWAccessControlPolicy policy;
6544 policy.create_default(s->user->user_id, s->user->display_name);
6545 ceph::bufferlist aclbl;
6546 policy.encode(aclbl);
6547 attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
6548
6549 /* Create metadata: compression info. */
6550 if (compressor && compressor->is_compressed()) {
6551 ceph::bufferlist tmp;
6552 RGWCompressionInfo cs_info;
6553 cs_info.compression_type = plugin->get_type_name();
6554 cs_info.orig_size = s->obj_size;
6555 cs_info.blocks = std::move(compressor->get_compression_blocks());
6556 ::encode(cs_info, tmp);
6557 attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
6558 }
6559
6560 /* Complete the transaction. */
6561 op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs,
6562 ceph::real_time() /* delete_at */);
6563 if (op_ret < 0) {
6564 ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret="
6565 << op_ret << dendl;
6566 }
6567
6568 return op_ret;
6569 }
6570
6571 void RGWBulkUploadOp::execute()
6572 {
6573 ceph::bufferlist buffer(64 * 1024);
6574
6575 ldout(s->cct, 20) << "bulk upload: start" << dendl;
6576
6577 /* Create an instance of stream-abstracting class. Having this indirection
6578 * allows for easy introduction of decompressors like gzip and bzip2. */
6579 auto stream = create_stream();
6580 if (! stream) {
6581 return;
6582 }
6583
6584 /* Handling the $UPLOAD_PATH accordingly to the Swift's Bulk middleware. See:
6585 * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
6586 std::string bucket_path, file_prefix;
6587 std::tie(bucket_path, file_prefix) = handle_upload_path(s);
6588
6589 auto status = rgw::tar::StatusIndicator::create();
6590 do {
6591 op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
6592 if (op_ret < 0) {
6593 ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl;
6594 return;
6595 }
6596
6597 /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
6598 * must be tracked to detect out end-of-archive. It occurs when both of
6599 * them are empty (zeroed). Tracing this particular inter-block dependency
6600 * is responsibility of the rgw::tar::StatusIndicator class. */
6601 boost::optional<rgw::tar::HeaderView> header;
6602 std::tie(status, header) = rgw::tar::interpret_block(status, buffer);
6603
6604 if (! status.empty() && header) {
6605 /* This specific block isn't empty (entirely zeroed), so we can parse
6606 * it as a TAR header and dispatch. At the moment we do support only
6607 * regular files and directories. Everything else (symlinks, devices)
6608 * will be ignored but won't cease the whole upload. */
6609 switch (header->get_filetype()) {
6610 case rgw::tar::FileType::NORMAL_FILE: {
6611 ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl;
6612
6613 boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
6614 file_prefix + header->get_filename().to_string();
6615 auto body = AlignedStreamGetter(0, header->get_filesize(),
6616 rgw::tar::BLOCK_SIZE, *stream);
6617 op_ret = handle_file(filename,
6618 header->get_filesize(),
6619 body);
6620 if (! op_ret) {
6621 /* Only regular files counts. */
6622 num_created++;
6623 } else {
6624 failures.emplace_back(op_ret, filename.to_string());
6625 }
6626 break;
6627 }
6628 case rgw::tar::FileType::DIRECTORY: {
6629 ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl;
6630
6631 boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
6632 op_ret = handle_dir(dirname);
6633 if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
6634 failures.emplace_back(op_ret, dirname.to_string());
6635 }
6636 break;
6637 }
6638 default: {
6639 /* Not recognized. Skip. */
6640 op_ret = 0;
6641 break;
6642 }
6643 }
6644
6645 /* In case of any problems with sub-request authorization Swift simply
6646 * terminates whole upload immediately. */
6647 if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
6648 terminal_errors)) {
6649 ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret
6650 << dendl;
6651 break;
6652 }
6653 } else {
6654 ldout(s->cct, 2) << "bulk upload: an empty block" << dendl;
6655 op_ret = 0;
6656 }
6657
6658 buffer.clear();
6659 } while (! status.eof());
6660
6661 return;
6662 }
6663
6664 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
6665 {
6666 const size_t aligned_legnth = length + (-length % alignment);
6667 ceph::bufferlist junk;
6668
6669 DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
6670 }
6671
6672 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
6673 ceph::bufferlist& dst)
6674 {
6675 const size_t max_to_read = std::min(want, length - position);
6676 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
6677 if (len > 0) {
6678 position += len;
6679 }
6680 return len;
6681 }
6682
6683 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
6684 ceph::bufferlist& dst)
6685 {
6686 const auto len = DecoratedStreamGetter::get_exactly(want, dst);
6687 if (len > 0) {
6688 position += len;
6689 }
6690 return len;
6691 }
6692
6693 int RGWSetAttrs::verify_permission()
6694 {
6695 // This looks to be part of the RGW-NFS machinery and has no S3 or
6696 // Swift equivalent.
6697 bool perm;
6698 if (!s->object.empty()) {
6699 perm = verify_object_permission_no_policy(s, RGW_PERM_WRITE);
6700 } else {
6701 perm = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
6702 }
6703 if (!perm)
6704 return -EACCES;
6705
6706 return 0;
6707 }
6708
/* Common per-request pre-execution hook shared by bucket/object ops. */
void RGWSetAttrs::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6713
6714 void RGWSetAttrs::execute()
6715 {
6716 op_ret = get_params();
6717 if (op_ret < 0)
6718 return;
6719
6720 rgw_obj obj(s->bucket, s->object);
6721
6722 if (!s->object.empty()) {
6723 store->set_atomic(s->obj_ctx, obj);
6724 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr);
6725 } else {
6726 for (auto& iter : attrs) {
6727 s->bucket_attrs[iter.first] = std::move(iter.second);
6728 }
6729 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs,
6730 &s->bucket_info.objv_tracker);
6731 }
6732 }
6733
/* Common per-request pre-execution hook shared by bucket/object ops. */
void RGWGetObjLayout::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6738
6739 void RGWGetObjLayout::execute()
6740 {
6741 rgw_obj obj(s->bucket, s->object);
6742 RGWRados::Object target(store,
6743 s->bucket_info,
6744 *static_cast<RGWObjectCtx *>(s->obj_ctx),
6745 rgw_obj(s->bucket, s->object));
6746 RGWRados::Object::Read stat_op(&target);
6747
6748 op_ret = stat_op.prepare();
6749 if (op_ret < 0) {
6750 return;
6751 }
6752
6753 head_obj = stat_op.state.head_obj;
6754
6755 op_ret = target.get_manifest(&manifest);
6756 }
6757
6758
6759 int RGWConfigBucketMetaSearch::verify_permission()
6760 {
6761 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6762 return -EACCES;
6763 }
6764
6765 return 0;
6766 }
6767
/* Common per-request pre-execution hook shared by bucket/object ops. */
void RGWConfigBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6772
6773 void RGWConfigBucketMetaSearch::execute()
6774 {
6775 op_ret = get_params();
6776 if (op_ret < 0) {
6777 ldout(s->cct, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
6778 return;
6779 }
6780
6781 s->bucket_info.mdsearch_config = mdsearch_config;
6782
6783 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6784 if (op_ret < 0) {
6785 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6786 return;
6787 }
6788 }
6789
6790 int RGWGetBucketMetaSearch::verify_permission()
6791 {
6792 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6793 return -EACCES;
6794 }
6795
6796 return 0;
6797 }
6798
/* Common per-request pre-execution hook shared by bucket/object ops. */
void RGWGetBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6803
6804 int RGWDelBucketMetaSearch::verify_permission()
6805 {
6806 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6807 return -EACCES;
6808 }
6809
6810 return 0;
6811 }
6812
/* Common per-request pre-execution hook shared by bucket/object ops. */
void RGWDelBucketMetaSearch::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
6817
6818 void RGWDelBucketMetaSearch::execute()
6819 {
6820 s->bucket_info.mdsearch_config.clear();
6821
6822 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6823 if (op_ret < 0) {
6824 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6825 return;
6826 }
6827 }
6828
6829
6830 RGWHandler::~RGWHandler()
6831 {
6832 }
6833
/* Bind this handler to the store and per-request state. The client-IO
 * object `cio` is accepted for interface compatibility but is not stored
 * here — presumably consumed by overriding subclasses (verify in header).
 * Always returns 0. */
int RGWHandler::init(RGWRados *_store,
                     struct req_state *_s,
                     rgw::io::BasicClient *cio)
{
  store = _store;
  s = _s;

  return 0;
}
6843
6844 int RGWHandler::do_init_permissions()
6845 {
6846 int ret = rgw_build_bucket_policies(store, s);
6847 s->env = rgw_build_iam_environment(store, s);
6848
6849 if (ret < 0) {
6850 ldout(s->cct, 10) << "read_permissions on " << s->bucket << " ret=" << ret << dendl;
6851 if (ret == -ENODATA)
6852 ret = -EACCES;
6853 }
6854
6855 return ret;
6856 }
6857
6858 int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
6859 {
6860 if (only_bucket) {
6861 /* already read bucket info */
6862 return 0;
6863 }
6864 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
6865
6866 if (ret < 0) {
6867 ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":"
6868 << s->object << " only_bucket=" << only_bucket
6869 << " ret=" << ret << dendl;
6870 if (ret == -ENODATA)
6871 ret = -EACCES;
6872 }
6873
6874 return ret;
6875 }
6876
/* Delegate error translation to the dialect (protocol-specific) handler. */
int RGWOp::error_handler(int err_no, string *error_content) {
  return dialect_handler->error_handler(err_no, error_content);
}
6880
int RGWHandler::error_handler(int err_no, string *error_content) {
  // This is the do-nothing error handler: the error code passes through
  // unchanged and *error_content is left untouched.
  return err_no;
}
6885
6886
6887 void RGWPutBucketPolicy::send_response()
6888 {
6889 if (op_ret) {
6890 set_req_state_err(s, op_ret);
6891 }
6892 dump_errno(s);
6893 end_header(s);
6894 }
6895
6896 int RGWPutBucketPolicy::verify_permission()
6897 {
6898 if (!verify_bucket_permission(s, rgw::IAM::s3PutBucketPolicy)) {
6899 return -EACCES;
6900 }
6901
6902 return 0;
6903 }
6904
/* Read the whole request body (the policy document) into this->data/len,
 * rejecting bodies larger than rgw_max_put_param_size. Returns the
 * rgw_rest_read_all_input result, also stored in op_ret. */
int RGWPutBucketPolicy::get_params()
{
  const auto max_size = s->cct->_conf->rgw_max_put_param_size;
  // At some point when I have more time I want to make a version of
  // rgw_rest_read_all_input that doesn't use malloc.
  op_ret = rgw_rest_read_all_input(s, &data, &len, max_size, false);
  // And throws exceptions.
  return op_ret;
}
6914
void RGWPutBucketPolicy::execute()
{
  /* Read the policy document into this->data (see get_params()). */
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  /* Zero-copy view over the buffer read by get_params(); `data` must stay
   * alive for as long as in_data is in use. */
  bufferlist in_data = bufferlist::static_from_mem(data, len);

  /* If this gateway is not the metadata master, forward the request there
   * first and abort locally on failure. */
  if (!store->is_meta_master()) {
    op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  try {
    /* Constructing Policy validates the document; malformed JSON throws. */
    const Policy p(s->cct, s->bucket_tenant, in_data);
    /* The lambda may run more than once if a concurrent bucket-metadata
     * write is detected by retry_raced_bucket_write. */
    op_ret = retry_raced_bucket_write(store, s, [&p, this] {
      /* Work on a copy of the attrs so failures leave s->bucket_attrs intact. */
      auto attrs = s->bucket_attrs;
      attrs[RGW_ATTR_IAM_POLICY].clear();
      attrs[RGW_ATTR_IAM_POLICY].append(p.text);
      op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                    &s->bucket_info.objv_tracker);
      return op_ret;
    });
  } catch (rgw::IAM::PolicyParseException& e) {
    ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
    op_ret = -EINVAL;
  }
}
6947
6948 void RGWGetBucketPolicy::send_response()
6949 {
6950 if (op_ret) {
6951 set_req_state_err(s, op_ret);
6952 }
6953 dump_errno(s);
6954 end_header(s, this, "application/json");
6955 dump_body(s, policy);
6956 }
6957
6958 int RGWGetBucketPolicy::verify_permission()
6959 {
6960 if (!verify_bucket_permission(s, rgw::IAM::s3GetBucketPolicy)) {
6961 return -EACCES;
6962 }
6963
6964 return 0;
6965 }
6966
6967 void RGWGetBucketPolicy::execute()
6968 {
6969 auto attrs = s->bucket_attrs;
6970 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
6971 if (aiter == attrs.end()) {
6972 ldout(s->cct, 0) << __func__ << " can't find bucket IAM POLICY attr"
6973 << " bucket_name = " << s->bucket_name << dendl;
6974 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6975 s->err.message = "The bucket policy does not exist";
6976 return;
6977 } else {
6978 policy = attrs[RGW_ATTR_IAM_POLICY];
6979
6980 if (policy.length() == 0) {
6981 ldout(s->cct, 10) << "The bucket policy does not exist, bucket: " << s->bucket_name << dendl;
6982 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6983 s->err.message = "The bucket policy does not exist";
6984 return;
6985 }
6986 }
6987 }
6988
6989 void RGWDeleteBucketPolicy::send_response()
6990 {
6991 if (op_ret) {
6992 set_req_state_err(s, op_ret);
6993 }
6994 dump_errno(s);
6995 end_header(s);
6996 }
6997
6998 int RGWDeleteBucketPolicy::verify_permission()
6999 {
7000 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucketPolicy)) {
7001 return -EACCES;
7002 }
7003
7004 return 0;
7005 }
7006
void RGWDeleteBucketPolicy::execute()
{
  /* Remove the IAM policy attribute and persist the bucket attrs. The
   * lambda may run more than once if a concurrent bucket-metadata write is
   * detected by retry_raced_bucket_write; it writes the result into op_ret
   * directly as well as returning it. */
  op_ret = retry_raced_bucket_write(store, s, [this] {
    /* Work on a copy of the attrs so failures leave s->bucket_attrs intact. */
    auto attrs = s->bucket_attrs;
    attrs.erase(RGW_ATTR_IAM_POLICY);
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                  &s->bucket_info.objv_tracker);
    return op_ret;
  });
}
7017
7018 void RGWGetClusterStat::execute()
7019 {
7020 op_ret = this->store->get_rados_handle()->cluster_stat(stats_op);
7021 }
7022
7023