]> git.proxmox.com Git - ceph.git/blob - ceph/src/rgw/rgw_op.cc
update sources to 12.2.8
[ceph.git] / ceph / src / rgw / rgw_op.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <system_error>
7 #include <unistd.h>
8
9 #include <sstream>
10
11 #include <boost/algorithm/string/predicate.hpp>
12 #include <boost/bind.hpp>
13 #include <boost/optional.hpp>
14 #include <boost/utility/in_place_factory.hpp>
15 #include <boost/utility/string_view.hpp>
16
17 #include "common/Clock.h"
18 #include "common/armor.h"
19 #include "common/backport14.h"
20 #include "common/errno.h"
21 #include "common/mime.h"
22 #include "common/utf8.h"
23 #include "common/ceph_json.h"
24
25 #include "rgw_rados.h"
26 #include "rgw_op.h"
27 #include "rgw_rest.h"
28 #include "rgw_acl.h"
29 #include "rgw_acl_s3.h"
30 #include "rgw_acl_swift.h"
31 #include "rgw_user.h"
32 #include "rgw_bucket.h"
33 #include "rgw_log.h"
34 #include "rgw_multi.h"
35 #include "rgw_multi_del.h"
36 #include "rgw_cors.h"
37 #include "rgw_cors_s3.h"
38 #include "rgw_rest_conn.h"
39 #include "rgw_rest_s3.h"
40 #include "rgw_tar.h"
41 #include "rgw_client_io.h"
42 #include "rgw_compression.h"
43 #include "rgw_role.h"
44 #include "rgw_tag_s3.h"
45 #include "cls/lock/cls_lock_client.h"
46 #include "cls/rgw/cls_rgw_client.h"
47
48
49 #include "include/assert.h"
50
51 #include "compressor/Compressor.h"
52
53 #include "rgw_acl_swift.h"
54
55 #define dout_context g_ceph_context
56 #define dout_subsys ceph_subsys_rgw
57
58 using namespace std;
59 using namespace librados;
60 using ceph::crypto::MD5;
61 using boost::optional;
62 using boost::none;
63
64 using rgw::IAM::ARN;
65 using rgw::IAM::Effect;
66 using rgw::IAM::Policy;
67
68 using rgw::IAM::Policy;
69
70 static string mp_ns = RGW_OBJ_NS_MULTIPART;
71 static string shadow_ns = RGW_OBJ_NS_SHADOW;
72
73 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
74 static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
75 bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
76
77 static MultipartMetaFilter mp_filter;
78
// Parse the HTTP Range header (member range_str) into the member offsets
// ofs/end, setting partial_content/range_parsed as side effects.
// Returns 0 on success or on a tolerated malformed header; -ERANGE on an
// invalid range unless rgw_ignore_get_invalid_range is set.
int RGWGetObj::parse_range(void)
{
  int r = -ERANGE;
  string rs(range_str);
  string ofs_str;
  string end_str;

  // config switch: treat unparseable ranges as "no range" instead of erroring
  ignore_invalid_range = s->cct->_conf->rgw_ignore_get_invalid_range;
  partial_content = false;

  size_t pos = rs.find("bytes=");
  if (pos == string::npos) {
    // "bytes=" not found verbatim; tolerate leading whitespace and a
    // case-insensitive unit token, e.g. "  Bytes = 0-99"
    pos = 0;
    while (isspace(rs[pos]))
      pos++;
    int end = pos;
    while (isalpha(rs[end]))
      end++;
    if (strncasecmp(rs.c_str(), "bytes", end - pos) != 0)
      return 0; // unknown range unit: ignore the header entirely
    while (isspace(rs[end]))
      end++;
    if (rs[end] != '=')
      return 0; // malformed (no '='): ignore the header
    rs = rs.substr(end + 1);
  } else {
    rs = rs.substr(pos + 6); /* size of("bytes=") */
  }
  pos = rs.find('-');
  if (pos == string::npos)
    goto done; // a byte-range-spec must contain '-'

  partial_content = true;

  ofs_str = rs.substr(0, pos);
  end_str = rs.substr(pos + 1);
  if (end_str.length()) {
    // NOTE: 'end' here is the member (off_t), unlike the local above
    end = atoll(end_str.c_str());
    if (end < 0)
      goto done;
  }

  if (ofs_str.length()) {
    ofs = atoll(ofs_str.c_str());
  } else { // RFC2616 suffix-byte-range-spec
    // "-N" means the last N bytes; encoded as negative ofs, open end
    ofs = -end;
    end = -1;
  }

  if (end >= 0 && end < ofs)
    goto done; // last-byte-pos before first-byte-pos is unsatisfiable

  range_parsed = true;
  return 0;

done:
  if (ignore_invalid_range) {
    // fall back to serving the whole object
    partial_content = false;
    ofs = 0;
    end = -1;
    range_parsed = false; // allow retry
    r = 0;
  }

  return r;
}
145
146 static int decode_policy(CephContext *cct,
147 bufferlist& bl,
148 RGWAccessControlPolicy *policy)
149 {
150 bufferlist::iterator iter = bl.begin();
151 try {
152 policy->decode(iter);
153 } catch (buffer::error& err) {
154 ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
155 return -EIO;
156 }
157 if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
158 RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
159 ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
160 s3policy->to_xml(*_dout);
161 *_dout << dendl;
162 }
163 return 0;
164 }
165
166
167 static int get_user_policy_from_attr(CephContext * const cct,
168 RGWRados * const store,
169 map<string, bufferlist>& attrs,
170 RGWAccessControlPolicy& policy /* out */)
171 {
172 auto aiter = attrs.find(RGW_ATTR_ACL);
173 if (aiter != attrs.end()) {
174 int ret = decode_policy(cct, aiter->second, &policy);
175 if (ret < 0) {
176 return ret;
177 }
178 } else {
179 return -ENOENT;
180 }
181
182 return 0;
183 }
184
185 static int get_bucket_instance_policy_from_attr(CephContext *cct,
186 RGWRados *store,
187 RGWBucketInfo& bucket_info,
188 map<string, bufferlist>& bucket_attrs,
189 RGWAccessControlPolicy *policy,
190 rgw_raw_obj& obj)
191 {
192 map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
193
194 if (aiter != bucket_attrs.end()) {
195 int ret = decode_policy(cct, aiter->second, policy);
196 if (ret < 0)
197 return ret;
198 } else {
199 ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
200 RGWUserInfo uinfo;
201 /* object exists, but policy is broken */
202 int r = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
203 if (r < 0)
204 return r;
205
206 policy->create_default(bucket_info.owner, uinfo.display_name);
207 }
208 return 0;
209 }
210
211 static int get_obj_policy_from_attr(CephContext *cct,
212 RGWRados *store,
213 RGWObjectCtx& obj_ctx,
214 RGWBucketInfo& bucket_info,
215 map<string, bufferlist>& bucket_attrs,
216 RGWAccessControlPolicy *policy,
217 rgw_obj& obj)
218 {
219 bufferlist bl;
220 int ret = 0;
221
222 RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
223 RGWRados::Object::Read rop(&op_target);
224
225 ret = rop.get_attr(RGW_ATTR_ACL, bl);
226 if (ret >= 0) {
227 ret = decode_policy(cct, bl, policy);
228 if (ret < 0)
229 return ret;
230 } else if (ret == -ENODATA) {
231 /* object exists, but policy is broken */
232 ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
233 RGWUserInfo uinfo;
234 ret = rgw_get_user_info_by_uid(store, bucket_info.owner, uinfo);
235 if (ret < 0)
236 return ret;
237
238 policy->create_default(bucket_info.owner, uinfo.display_name);
239 }
240 return ret;
241 }
242
243
/**
 * Get the AccessControlPolicy for a bucket off of disk.
 * policy: must point to a valid RGWACL, and will be filled upon return.
 * bucket_info: describes the bucket to get the ACL for.
 * Returns: 0 on success, -ERR# otherwise.
 */
251 static int get_bucket_policy_from_attr(CephContext *cct,
252 RGWRados *store,
253 RGWBucketInfo& bucket_info,
254 map<string, bufferlist>& bucket_attrs,
255 RGWAccessControlPolicy *policy)
256 {
257 rgw_raw_obj instance_obj;
258 store->get_bucket_instance_obj(bucket_info.bucket, instance_obj);
259 return get_bucket_instance_policy_from_attr(cct, store, bucket_info, bucket_attrs,
260 policy, instance_obj);
261 }
262
263 static optional<Policy> get_iam_policy_from_attr(CephContext* cct,
264 RGWRados* store,
265 map<string, bufferlist>& attrs,
266 const string& tenant) {
267 auto i = attrs.find(RGW_ATTR_IAM_POLICY);
268 if (i != attrs.end()) {
269 return Policy(cct, tenant, i->second);
270 } else {
271 return none;
272 }
273 }
274
275 static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
276 {
277 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
278 RGWRados::Object::Read read_op(&op_target);
279
280 read_op.params.attrs = &attrs;
281
282 return read_op.prepare();
283 }
284
285 static int modify_obj_attr(RGWRados *store, struct req_state *s, rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
286 {
287 map<string, bufferlist> attrs;
288 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
289 RGWRados::Object::Read read_op(&op_target);
290
291 read_op.params.attrs = &attrs;
292
293 int r = read_op.prepare();
294 if (r < 0) {
295 return r;
296 }
297 store->set_atomic(s->obj_ctx, read_op.state.obj);
298 attrs[attr_name] = attr_val;
299 return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL);
300 }
301
302 static int get_system_obj_attrs(RGWRados *store, struct req_state *s, rgw_raw_obj& obj, map<string, bufferlist>& attrs,
303 uint64_t *obj_size, RGWObjVersionTracker *objv_tracker)
304 {
305 RGWRados::SystemObject src(store, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
306 RGWRados::SystemObject::Read rop(&src);
307
308 rop.stat_params.attrs = &attrs;
309 rop.stat_params.obj_size = obj_size;
310
311 int ret = rop.stat(objv_tracker);
312 return ret;
313 }
314
315 static int read_bucket_policy(RGWRados *store,
316 struct req_state *s,
317 RGWBucketInfo& bucket_info,
318 map<string, bufferlist>& bucket_attrs,
319 RGWAccessControlPolicy *policy,
320 rgw_bucket& bucket)
321 {
322 if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
323 ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
324 return -ERR_USER_SUSPENDED;
325 }
326
327 if (bucket.name.empty()) {
328 return 0;
329 }
330
331 int ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy);
332 if (ret == -ENOENT) {
333 ret = -ERR_NO_SUCH_BUCKET;
334 }
335
336 return ret;
337 }
338
// Load the object ACL into @acl and the bucket IAM policy into @policy.
// For multipart uploads (uploadId present) the ACL is read from the
// upload's meta object instead of the target object. When the object
// does not exist, the bucket ACL decides whether the caller sees
// -ENOENT or -EACCES (so we don't leak object existence).
static int read_obj_policy(RGWRados *store,
                           struct req_state *s,
                           RGWBucketInfo& bucket_info,
                           map<string, bufferlist>& bucket_attrs,
                           RGWAccessControlPolicy* acl,
                           optional<Policy>& policy,
                           rgw_bucket& bucket,
                           rgw_obj_key& object)
{
  string upload_id;
  upload_id = s->info.args.get("uploadId");
  rgw_obj obj;

  // suspended buckets are inaccessible to everyone but system requests
  if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
    ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
    return -ERR_USER_SUSPENDED;
  }

  if (!upload_id.empty()) {
    /* multipart upload: ACL lives on the upload meta object in the
     * multipart namespace */
    RGWMPObj mp(object.name, upload_id);
    string oid = mp.get_meta();
    obj.init_ns(bucket, oid, mp_ns);
    obj.set_in_extra_data(true);
  } else {
    obj = rgw_obj(bucket, object);
  }
  // bucket IAM policy is loaded regardless of whether the object exists
  policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, bucket.tenant);

  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
  int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx,
                                     bucket_info, bucket_attrs, acl, obj);
  if (ret == -ENOENT) {
    /* object does not exist checking the bucket's ACL to make sure
       that we send a proper error code */
    RGWAccessControlPolicy bucket_policy(s->cct);
    ret = get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy);
    if (ret < 0) {
      return ret;
    }

    // owner/admin (or anyone with bucket READ) may learn the object is
    // missing; everyone else gets a plain access-denied
    const rgw_user& bucket_owner = bucket_policy.get_owner().get_id();
    if (bucket_owner.compare(s->user->user_id) != 0 &&
        ! s->auth.identity->is_admin_of(bucket_owner) &&
        ! bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                          RGW_PERM_READ)) {
      ret = -EACCES;
    } else {
      ret = -ENOENT;
    }
  }

  return ret;
}
393
/**
 * Build the user and bucket ACLs (and the bucket's IAM policy, if any)
 * for the current request.
 * s: The req_state to draw information from.
 * Returns: 0 on success, -ERR# otherwise.
 */
400 int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
401 {
402 int ret = 0;
403 rgw_obj_key obj;
404 RGWUserInfo bucket_owner_info;
405 RGWObjectCtx obj_ctx(store);
406
407 string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
408 if (!bi.empty()) {
409 ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_instance_id, &s->bucket_instance_shard_id);
410 if (ret < 0) {
411 return ret;
412 }
413 }
414
415 if(s->dialect.compare("s3") == 0) {
416 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_S3>(s->cct);
417 } else if(s->dialect.compare("swift") == 0) {
418 /* We aren't allocating the account policy for those operations using
419 * the Swift's infrastructure that don't really need req_state::user.
420 * Typical example here is the implementation of /info. */
421 if (!s->user->user_id.empty()) {
422 s->user_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFTAcct>(s->cct);
423 }
424 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy_SWIFT>(s->cct);
425 } else {
426 s->bucket_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
427 }
428
429 /* check if copy source is within the current domain */
430 if (!s->src_bucket_name.empty()) {
431 RGWBucketInfo source_info;
432
433 if (s->bucket_instance_id.empty()) {
434 ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL);
435 } else {
436 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL);
437 }
438 if (ret == 0) {
439 string& zonegroup = source_info.zonegroup;
440 s->local_source = store->get_zonegroup().equals(zonegroup);
441 }
442 }
443
444 struct {
445 rgw_user uid;
446 std::string display_name;
447 } acct_acl_user = {
448 s->user->user_id,
449 s->user->display_name,
450 };
451
452 if (!s->bucket_name.empty()) {
453 s->bucket_exists = true;
454 if (s->bucket_instance_id.empty()) {
455 ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name, s->bucket_info, NULL, &s->bucket_attrs);
456 } else {
457 ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, s->bucket_info, NULL, &s->bucket_attrs);
458 }
459 if (ret < 0) {
460 if (ret != -ENOENT) {
461 string bucket_log;
462 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
463 ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl;
464 return ret;
465 }
466 s->bucket_exists = false;
467 }
468 s->bucket = s->bucket_info.bucket;
469
470 if (s->bucket_exists) {
471 ret = read_bucket_policy(store, s, s->bucket_info, s->bucket_attrs,
472 s->bucket_acl.get(), s->bucket);
473 acct_acl_user = {
474 s->bucket_info.owner,
475 s->bucket_acl->get_owner().get_display_name(),
476 };
477 } else {
478 s->bucket_acl->create_default(s->user->user_id, s->user->display_name);
479 ret = -ERR_NO_SUCH_BUCKET;
480 }
481
482 s->bucket_owner = s->bucket_acl->get_owner();
483
484 RGWZoneGroup zonegroup;
485 int r = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
486 if (!r) {
487 if (!zonegroup.endpoints.empty()) {
488 s->zonegroup_endpoint = zonegroup.endpoints.front();
489 } else {
490 // use zonegroup's master zone endpoints
491 auto z = zonegroup.zones.find(zonegroup.master_zone);
492 if (z != zonegroup.zones.end() && !z->second.endpoints.empty()) {
493 s->zonegroup_endpoint = z->second.endpoints.front();
494 }
495 }
496 s->zonegroup_name = zonegroup.get_name();
497 }
498 if (r < 0 && ret == 0) {
499 ret = r;
500 }
501
502 if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) {
503 ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl;
504 /* we now need to make sure that the operation actually requires copy source, that is
505 * it's a copy operation
506 */
507 if (store->get_zonegroup().is_master_zonegroup() && s->system_request) {
508 /*If this is the master, don't redirect*/
509 } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
510 /* If op is get bucket location, don't redirect */
511 } else if (!s->local_source ||
512 (s->op != OP_PUT && s->op != OP_COPY) ||
513 s->object.empty()) {
514 return -ERR_PERMANENT_REDIRECT;
515 }
516 }
517 }
518
519 /* handle user ACL only for those APIs which support it */
520 if (s->user_acl) {
521 map<string, bufferlist> uattrs;
522
523 ret = rgw_get_user_attrs_by_uid(store, acct_acl_user.uid, uattrs);
524 if (!ret) {
525 ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
526 }
527 if (-ENOENT == ret) {
528 /* In already existing clusters users won't have ACL. In such case
529 * assuming that only account owner has the rights seems to be
530 * reasonable. That allows to have only one verification logic.
531 * NOTE: there is small compatibility kludge for global, empty tenant:
532 * 1. if we try to reach an existing bucket, its owner is considered
533 * as account owner.
534 * 2. otherwise account owner is identity stored in s->user->user_id. */
535 s->user_acl->create_default(acct_acl_user.uid,
536 acct_acl_user.display_name);
537 ret = 0;
538 } else {
539 ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id="
540 << s->user->user_id
541 << ", ret="
542 << ret
543 << ")" << dendl;
544 return ret;
545 }
546 }
547
548 try {
549 s->iam_policy = get_iam_policy_from_attr(s->cct, store, s->bucket_attrs,
550 s->bucket_tenant);
551 } catch (const std::exception& e) {
552 // Really this is a can't happen condition. We parse the policy
553 // when it's given to us, so perhaps we should abort or otherwise
554 // raise bloody murder.
555 lderr(s->cct) << "Error reading IAM Policy: " << e.what() << dendl;
556 ret = -EACCES;
557 }
558
559 return ret;
560 }
561
/**
 * Get the AccessControlPolicy for the request's object off of disk.
 * s: The req_state to draw information from.
 * prefetch_data: hint that the object's data will be read, so prefetch it.
 * Returns: 0 on success, -ERR# otherwise.
 */
568 int rgw_build_object_policies(RGWRados *store, struct req_state *s,
569 bool prefetch_data)
570 {
571 int ret = 0;
572
573 if (!s->object.empty()) {
574 if (!s->bucket_exists) {
575 return -ERR_NO_SUCH_BUCKET;
576 }
577 s->object_acl = ceph::make_unique<RGWAccessControlPolicy>(s->cct);
578
579 rgw_obj obj(s->bucket, s->object);
580
581 store->set_atomic(s->obj_ctx, obj);
582 if (prefetch_data) {
583 store->set_prefetch_data(s->obj_ctx, obj);
584 }
585 ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
586 s->object_acl.get(), s->iam_policy, s->bucket,
587 s->object);
588 }
589
590 return ret;
591 }
592
593 rgw::IAM::Environment rgw_build_iam_environment(RGWRados* store,
594 struct req_state* s)
595 {
596 rgw::IAM::Environment e;
597 const auto& m = s->info.env->get_map();
598 auto t = ceph::real_clock::now();
599 e.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t)));
600 e.emplace("aws:EpochTime", ceph::to_iso_8601(t));
601 // TODO: This is fine for now, but once we have STS we'll need to
602 // look and see. Also this won't work with the IdentityApplier
603 // model, since we need to know the actual credential.
604 e.emplace("aws:PrincipalType", "User");
605
606 auto i = m.find("HTTP_REFERER");
607 if (i != m.end()) {
608 e.emplace("aws:Referer", i->second);
609 }
610
611 // These seem to be the semantics, judging from rest_rgw_s3.cc
612 i = m.find("SERVER_PORT_SECURE");
613 if (i != m.end()) {
614 e.emplace("aws:SecureTransport", "true");
615 }
616
617 const auto remote_addr_param = s->cct->_conf->rgw_remote_addr_param;
618 if (remote_addr_param.length()) {
619 i = m.find(remote_addr_param);
620 } else {
621 i = m.find("REMOTE_ADDR");
622 }
623 if (i != m.end()) {
624 const string* ip = &(i->second);
625 string temp;
626 if (remote_addr_param == "HTTP_X_FORWARDED_FOR") {
627 const auto comma = ip->find(',');
628 if (comma != string::npos) {
629 temp.assign(*ip, 0, comma);
630 ip = &temp;
631 }
632 }
633 e.emplace("aws:SourceIp", *ip);
634 }
635
636 i = m.find("HTTP_USER_AGENT"); {
637 if (i != m.end())
638 e.emplace("aws:UserAgent", i->second);
639 }
640
641 if (s->user) {
642 // What to do about aws::userid? One can have multiple access
643 // keys so that isn't really suitable. Do we have a durable
644 // identifier that can persist through name changes?
645 e.emplace("aws:username", s->user->user_id.id);
646 }
647 return e;
648 }
649
650 void rgw_bucket_object_pre_exec(struct req_state *s)
651 {
652 if (s->expect_cont)
653 dump_continue(s);
654
655 dump_bucket_from_state(s);
656 }
657
658 // So! Now and then when we try to update bucket information, the
659 // bucket has changed during the course of the operation. (Or we have
660 // a cache consistency problem that Watch/Notify isn't ruling out
661 // completely.)
662 //
663 // When this happens, we need to update the bucket info and try
664 // again. We have, however, to try the right *part* again. We can't
665 // simply re-send, since that will obliterate the previous update.
666 //
667 // Thus, callers of this function should include everything that
668 // merges information to be changed into the bucket information as
669 // well as the call to set it.
670 //
671 // The called function must return an integer, negative on error. In
672 // general, they should just return op_ret.
673 namespace {
674 template<typename F>
675 int retry_raced_bucket_write(RGWRados* g, req_state* s, const F& f) {
676 auto r = f();
677 for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) {
678 r = g->try_refresh_bucket_info(s->bucket_info, nullptr,
679 &s->bucket_attrs);
680 if (r >= 0) {
681 r = f();
682 }
683 }
684 return r;
685 }
686 }
687
688
689 int RGWGetObj::verify_permission()
690 {
691 obj = rgw_obj(s->bucket, s->object);
692 store->set_atomic(s->obj_ctx, obj);
693 if (get_data) {
694 store->set_prefetch_data(s->obj_ctx, obj);
695 }
696
697 if (torrent.get_flag()) {
698 if (obj.key.instance.empty()) {
699 action = rgw::IAM::s3GetObjectTorrent;
700 } else {
701 action = rgw::IAM::s3GetObjectVersionTorrent;
702 }
703 } else {
704 if (obj.key.instance.empty()) {
705 action = rgw::IAM::s3GetObject;
706 } else {
707 action = rgw::IAM::s3GetObjectVersion;
708 }
709 }
710
711 if (!verify_object_permission(s, action)) {
712 return -EACCES;
713 }
714
715 return 0;
716 }
717
718
719 int RGWOp::verify_op_mask()
720 {
721 uint32_t required_mask = op_mask();
722
723 ldout(s->cct, 20) << "required_mask= " << required_mask
724 << " user.op_mask=" << s->user->op_mask << dendl;
725
726 if ((s->user->op_mask & required_mask) != required_mask) {
727 return -EPERM;
728 }
729
730 if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) {
731 ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl;
732 return -EPERM;
733 }
734
735 return 0;
736 }
737
738 int RGWGetObjTags::verify_permission()
739 {
740 if (!verify_object_permission(s,
741 s->object.instance.empty() ?
742 rgw::IAM::s3GetObjectTagging:
743 rgw::IAM::s3GetObjectVersionTagging))
744 return -EACCES;
745
746 return 0;
747 }
748
// Standard pre-exec: 100-Continue handling + bucket header dump.
void RGWGetObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
753
754 void RGWGetObjTags::execute()
755 {
756 rgw_obj obj;
757 map<string,bufferlist> attrs;
758
759 obj = rgw_obj(s->bucket, s->object);
760
761 store->set_atomic(s->obj_ctx, obj);
762
763 op_ret = get_obj_attrs(store, s, obj, attrs);
764 if (op_ret < 0) {
765 ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << obj
766 << " ret=" << op_ret << dendl;
767 return;
768 }
769
770 auto tags = attrs.find(RGW_ATTR_TAGS);
771 if(tags != attrs.end()){
772 has_tags = true;
773 tags_bl.append(tags->second);
774 }
775 send_response_data(tags_bl);
776 }
777
778 int RGWPutObjTags::verify_permission()
779 {
780 if (!verify_object_permission(s,
781 s->object.instance.empty() ?
782 rgw::IAM::s3PutObjectTagging:
783 rgw::IAM::s3PutObjectVersionTagging))
784 return -EACCES;
785 return 0;
786 }
787
788 void RGWPutObjTags::execute()
789 {
790 op_ret = get_params();
791 if (op_ret < 0)
792 return;
793
794 if (s->object.empty()){
795 op_ret= -EINVAL; // we only support tagging on existing objects
796 return;
797 }
798
799 rgw_obj obj;
800 obj = rgw_obj(s->bucket, s->object);
801 store->set_atomic(s->obj_ctx, obj);
802 op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
803 if (op_ret == -ECANCELED){
804 op_ret = -ERR_TAG_CONFLICT;
805 }
806 }
807
// Standard pre-exec: 100-Continue handling + bucket header dump.
void RGWDeleteObjTags::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
812
813
814 int RGWDeleteObjTags::verify_permission()
815 {
816 if (!s->object.empty()) {
817 if (!verify_object_permission(s,
818 s->object.instance.empty() ?
819 rgw::IAM::s3DeleteObjectTagging:
820 rgw::IAM::s3DeleteObjectVersionTagging))
821 return -EACCES;
822 }
823 return 0;
824 }
825
826 void RGWDeleteObjTags::execute()
827 {
828 if (s->object.empty())
829 return;
830
831 rgw_obj obj;
832 obj = rgw_obj(s->bucket, s->object);
833 store->set_atomic(s->obj_ctx, obj);
834 map <string, bufferlist> attrs;
835 map <string, bufferlist> rmattr;
836 bufferlist bl;
837 rmattr[RGW_ATTR_TAGS] = bl;
838 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr);
839 }
840
841 int RGWOp::do_aws4_auth_completion()
842 {
843 ldout(s->cct, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
844 if (s->auth.completer) {
845 if (!s->auth.completer->complete()) {
846 return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
847 } else {
848 dout(10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
849 }
850
851 /* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
852 * call passes, so we disable second one. This is old behaviour, sorry!
853 * Plan for tomorrow: seek and destroy. */
854 s->auth.completer = nullptr;
855 }
856
857 return 0;
858 }
859
860 int RGWOp::init_quota()
861 {
862 /* no quota enforcement for system requests */
863 if (s->system_request)
864 return 0;
865
866 /* init quota related stuff */
867 if (!(s->user->op_mask & RGW_OP_TYPE_MODIFY)) {
868 return 0;
869 }
870
871 /* only interested in object related ops */
872 if (s->object.empty()) {
873 return 0;
874 }
875
876 RGWUserInfo owner_info;
877 RGWUserInfo *uinfo;
878
879 if (s->user->user_id == s->bucket_owner.get_id()) {
880 uinfo = s->user;
881 } else {
882 int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info);
883 if (r < 0)
884 return r;
885 uinfo = &owner_info;
886 }
887
888 if (s->bucket_info.quota.enabled) {
889 bucket_quota = s->bucket_info.quota;
890 } else if (uinfo->bucket_quota.enabled) {
891 bucket_quota = uinfo->bucket_quota;
892 } else {
893 bucket_quota = store->get_bucket_quota();
894 }
895
896 if (uinfo->user_quota.enabled) {
897 user_quota = uinfo->user_quota;
898 } else {
899 user_quota = store->get_user_quota();
900 }
901
902 return 0;
903 }
904
905 static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
906 uint8_t flags = 0;
907
908 if (!req_meth) {
909 dout(5) << "req_meth is null" << dendl;
910 return false;
911 }
912
913 if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
914 else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
915 else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
916 else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE;
917 else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
918
919 if (rule->get_allowed_methods() & flags) {
920 dout(10) << "Method " << req_meth << " is supported" << dendl;
921 } else {
922 dout(5) << "Method " << req_meth << " is not supported" << dendl;
923 return false;
924 }
925
926 return true;
927 }
928
929 static bool validate_cors_rule_header(RGWCORSRule *rule, const char *req_hdrs) {
930 if (req_hdrs) {
931 vector<string> hdrs;
932 get_str_vec(req_hdrs, hdrs);
933 for (const auto& hdr : hdrs) {
934 if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) {
935 dout(5) << "Header " << hdr << " is not registered in this rule" << dendl;
936 return false;
937 }
938 }
939 }
940 return true;
941 }
942
943 int RGWOp::read_bucket_cors()
944 {
945 bufferlist bl;
946
947 map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
948 if (aiter == s->bucket_attrs.end()) {
949 ldout(s->cct, 20) << "no CORS configuration attr found" << dendl;
950 cors_exist = false;
951 return 0; /* no CORS configuration found */
952 }
953
954 cors_exist = true;
955
956 bl = aiter->second;
957
958 bufferlist::iterator iter = bl.begin();
959 try {
960 bucket_cors.decode(iter);
961 } catch (buffer::error& err) {
962 ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
963 return -EIO;
964 }
965 if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
966 RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
967 ldout(s->cct, 15) << "Read RGWCORSConfiguration";
968 s3cors->to_xml(*_dout);
969 *_dout << dendl;
970 }
971 return 0;
972 }
973
974 /** CORS 6.2.6.
975 * If any of the header field-names is not a ASCII case-insensitive match for
976 * any of the values in list of headers do not set any additional headers and
977 * terminate this set of steps.
978 * */
979 static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
980 if (req_hdrs) {
981 list<string> hl;
982 get_str_list(req_hdrs, hl);
983 for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
984 if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
985 dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
986 } else {
987 if (hdrs.length() > 0) hdrs.append(",");
988 hdrs.append((*it));
989 }
990 }
991 }
992 rule->format_exp_headers(exp_hdrs);
993 *max_age = rule->get_max_age();
994 }
995
996 /**
997 * Generate the CORS header response
998 *
999 * This is described in the CORS standard, section 6.2.
1000 */
// Evaluate the request against the bucket's CORS configuration and
// fill the response values (origin/method/headers/exp_headers/max_age).
// Returns true when CORS headers should be emitted; false otherwise.
// Side effect: sets op_ret if reading the bucket CORS config fails.
bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age)
{
  /* CORS 6.2.1. */
  const char *orig = s->info.env->get("HTTP_ORIGIN");
  if (!orig) {
    // not a CORS request at all
    return false;
  }

  /* Custom: */
  origin = orig;
  op_ret = read_bucket_cors();
  if (op_ret < 0) {
    return false;
  }

  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    return false;
  }

  /* CORS 6.2.2. */
  RGWCORSRule *rule = bucket_cors.host_name_rule(orig);
  if (!rule)
    return false;

  /*
   * Set the Allowed-Origin header to a asterisk if this is allowed in the rule
   * and no Authorization was send by the client
   *
   * The origin parameter specifies a URI that may access the resource.  The browser must enforce this.
   * For requests without credentials, the server may specify "*" as a wildcard,
   * thereby allowing any origin to access the resource.
   */
  const char *authorization = s->info.env->get("HTTP_AUTHORIZATION");
  if (!authorization && rule->has_wildcard_origin())
    origin = "*";

  /* CORS 6.2.3. */
  const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    // non-preflight request: validate the actual request method instead
    req_meth = s->info.method;
  }

  if (req_meth) {
    method = req_meth;
    /* CORS 6.2.5. */
    if (!validate_cors_rule_method(rule, req_meth)) {
      return false;
    }
  }

  /* CORS 6.2.4. */
  const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");

  /* CORS 6.2.6. */
  get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);

  return true;
}
1060
1061 int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
1062 const rgw_bucket_dir_entry& ent,
1063 RGWAccessControlPolicy * const bucket_acl,
1064 const optional<Policy>& bucket_policy,
1065 const off_t start_ofs,
1066 const off_t end_ofs)
1067 {
1068 ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl;
1069 RGWGetObj_CB cb(this);
1070 RGWGetDataCB* filter = &cb;
1071 boost::optional<RGWGetObj_Decompress> decompress;
1072
1073 int64_t cur_ofs = start_ofs;
1074 int64_t cur_end = end_ofs;
1075
1076 rgw_obj part(bucket, ent.key);
1077
1078 map<string, bufferlist> attrs;
1079
1080 uint64_t obj_size;
1081 RGWObjectCtx obj_ctx(store);
1082 RGWAccessControlPolicy obj_policy(s->cct);
1083
1084 ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl;
1085
1086 obj_ctx.obj.set_atomic(part);
1087 store->set_prefetch_data(&obj_ctx, part);
1088
1089 RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
1090 RGWRados::Object::Read read_op(&op_target);
1091
1092 read_op.conds.if_match = ent.meta.etag.c_str();
1093 read_op.params.attrs = &attrs;
1094 read_op.params.obj_size = &obj_size;
1095
1096 op_ret = read_op.prepare();
1097 if (op_ret < 0)
1098 return op_ret;
1099 op_ret = read_op.range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end);
1100 if (op_ret < 0)
1101 return op_ret;
1102 bool need_decompress;
1103 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
1104 if (op_ret < 0) {
1105 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
1106 return -EIO;
1107 }
1108
1109 if (need_decompress)
1110 {
1111 if (cs_info.orig_size != ent.meta.accounted_size) {
1112 // hmm.. something wrong, object not as expected, abort!
1113 ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size <<
1114 ", actual read size=" << ent.meta.size << dendl;
1115 return -EIO;
1116 }
1117 decompress.emplace(s->cct, &cs_info, partial_content, filter);
1118 filter = &*decompress;
1119 }
1120 else
1121 {
1122 if (obj_size != ent.meta.size) {
1123 // hmm.. something wrong, object not as expected, abort!
1124 ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl;
1125 return -EIO;
1126 }
1127 }
1128
1129 op_ret = rgw_policy_from_attrset(s->cct, attrs, &obj_policy);
1130 if (op_ret < 0)
1131 return op_ret;
1132
1133 /* We can use global user_acl because LOs cannot have segments
1134 * stored inside different accounts. */
1135 if (s->system_request) {
1136 ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl;
1137 } else if (s->auth.identity->is_admin_of(s->user->user_id)) {
1138 ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl;
1139 } else if (!verify_object_permission(s, part, s->user_acl.get(), bucket_acl,
1140 &obj_policy, bucket_policy, action)) {
1141 return -EPERM;
1142 }
1143
1144 if (ent.meta.size == 0) {
1145 return 0;
1146 }
1147
1148 perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs);
1149 filter->fixup_range(cur_ofs, cur_end);
1150 op_ret = read_op.iterate(cur_ofs, cur_end, filter);
1151 if (op_ret >= 0)
1152 op_ret = filter->flush();
1153 return op_ret;
1154 }
1155
/* Walk all objects in pbucket_info's bucket whose names start with
 * obj_prefix; their concatenation, in listing order, forms one logical
 * DLO object. In a single pass this can compute any combination of:
 *   - ptotal_len: bytes of the concatenation falling inside [ofs, end],
 *   - pobj_size:  total size of the concatenation,
 *   - pobj_sum:   DLO etag (MD5 folded over the parts' etag strings),
 * and/or invoke cb() for every part overlapping [ofs, end], passing the
 * byte sub-range of that part to emit. Any output pointer / cb may be
 * null; returns 0 on success or a negative error from listing/cb. */
static int iterate_user_manifest_parts(CephContext * const cct,
                                       RGWRados * const store,
                                       const off_t ofs,
                                       const off_t end,
                                       RGWBucketInfo *pbucket_info,
                                       const string& obj_prefix,
                                       RGWAccessControlPolicy * const bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       uint64_t * const ptotal_len,
                                       uint64_t * const pobj_size,
                                       string * const pobj_sum,
                                       int (*cb)(rgw_bucket& bucket,
                                                 const rgw_bucket_dir_entry& ent,
                                                 RGWAccessControlPolicy * const bucket_acl,
                                                 const optional<Policy>& bucket_policy,
                                                 off_t start_ofs,
                                                 off_t end_ofs,
                                                 void *param),
                                       void * const cb_param)
{
  rgw_bucket& bucket = pbucket_info->bucket;
  /* obj_ofs: running offset of the current part within the logical
   * object; len_count: bytes accounted to the requested range so far */
  uint64_t obj_ofs = 0, len_count = 0;
  bool found_start = false, found_end = false, handled_end = false;
  string delim;
  bool is_truncated;
  vector<rgw_bucket_dir_entry> objs;

  utime_t start_time = ceph_clock_now();

  RGWRados::Bucket target(store, *pbucket_info);
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = obj_prefix;
  list_op.params.delim = delim;

  MD5 etag_sum;
  /* page through the bucket listing in chunks */
  do {
#define MAX_LIST_OBJS 100
    int r = list_op.list_objects(MAX_LIST_OBJS, &objs, NULL, &is_truncated);
    if (r < 0) {
      return r;
    }

    for (rgw_bucket_dir_entry& ent : objs) {
      const uint64_t cur_total_len = obj_ofs;
      const uint64_t obj_size = ent.meta.accounted_size;
      uint64_t start_ofs = 0, end_ofs = obj_size;

      /* first part that reaches into [ofs, ...]: clip its leading bytes */
      if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) {
        start_ofs = ofs - obj_ofs;
        found_start = true;
      }

      obj_ofs += obj_size;
      if (pobj_sum) {
        /* DLO etag: MD5 over the concatenation of part etag strings */
        etag_sum.Update((const byte *)ent.meta.etag.c_str(),
                        ent.meta.etag.length());
      }

      /* first part extending past `end`: clip its trailing bytes
       * (end is inclusive, hence the +1) */
      if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) {
        end_ofs = end - cur_total_len + 1;
        found_end = true;
      }

      perfcounter->tinc(l_rgw_get_lat,
                        (ceph_clock_now() - start_time));

      /* parts between the clipped start and end contribute fully */
      if (found_start && !handled_end) {
        len_count += end_ofs - start_ofs;

        if (cb) {
          r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, cb_param);
          if (r < 0) {
            return r;
          }
        }
      }

      /* deferred so the part that contains `end` is still processed */
      handled_end = found_end;
      start_time = ceph_clock_now();
    }
  } while (is_truncated);

  if (ptotal_len) {
    *ptotal_len = len_count;
  }
  if (pobj_size) {
    *pobj_size = obj_ofs;
  }
  if (pobj_sum) {
    complete_etag(etag_sum, pobj_sum);
  }

  return 0;
}
1251
/* One segment of a Swift Static Large Object, resolved from the SLO
 * manifest. The ACL/policy pointers are non-owning and must outlive the
 * part (they point into caches built by handle_slo_manifest()). */
struct rgw_slo_part {
  RGWAccessControlPolicy *bucket_acl = nullptr;  // ACL of the segment's bucket
  Policy* bucket_policy = nullptr;               // bucket policy, may be null
  rgw_bucket bucket;
  string obj_name;
  uint64_t size = 0;   // segment size in bytes, from the manifest entry
  string etag;         // expected etag, from the manifest entry
};
1260
/* Invoke cb() for every SLO segment overlapping the byte range
 * [ofs, end] of the logical object. slo_parts maps each segment's
 * starting offset within the logical object to the segment itself, so
 * upper_bound(ofs) - 1 lands on the segment containing ofs. The
 * start_ofs/end_ofs passed to cb clip the first and last segment to the
 * requested range (end is inclusive). Returns 0 or cb's error. */
static int iterate_slo_parts(CephContext *cct,
                             RGWRados *store,
                             off_t ofs,
                             off_t end,
                             map<uint64_t, rgw_slo_part>& slo_parts,
                             int (*cb)(rgw_bucket& bucket,
                                       const rgw_bucket_dir_entry& ent,
                                       RGWAccessControlPolicy *bucket_acl,
                                       const optional<Policy>& bucket_policy,
                                       off_t start_ofs,
                                       off_t end_ofs,
                                       void *param),
                             void *cb_param)
{
  bool found_start = false, found_end = false;

  if (slo_parts.empty()) {
    return 0;
  }

  utime_t start_time = ceph_clock_now();

  /* seek to the segment whose range contains ofs */
  map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
  if (iter != slo_parts.begin()) {
    --iter;
  }

  uint64_t obj_ofs = iter->first;

  for (; iter != slo_parts.end() && !found_end; ++iter) {
    rgw_slo_part& part = iter->second;
    /* synthesize a listing entry so the same callback type can serve
     * both DLO and SLO iteration */
    rgw_bucket_dir_entry ent;

    ent.key.name = part.obj_name;
    ent.meta.accounted_size = ent.meta.size = part.size;
    ent.meta.etag = part.etag;

    uint64_t cur_total_len = obj_ofs;
    uint64_t start_ofs = 0, end_ofs = ent.meta.size;

    /* first overlapping segment: clip leading bytes */
    if (!found_start && cur_total_len + ent.meta.size > (uint64_t)ofs) {
      start_ofs = ofs - obj_ofs;
      found_start = true;
    }

    obj_ofs += ent.meta.size;

    /* segment containing the (inclusive) end: clip trailing bytes */
    if (!found_end && obj_ofs > (uint64_t)end) {
      end_ofs = end - cur_total_len + 1;
      found_end = true;
    }

    perfcounter->tinc(l_rgw_get_lat,
                      (ceph_clock_now() - start_time));

    if (found_start) {
      if (cb) {
        // SLO is a Swift thing, and Swift has no knowledge of S3 Policies.
        int r = cb(part.bucket, ent, part.bucket_acl,
                   (part.bucket_policy ?
                    optional<Policy>(*part.bucket_policy) : none),
                   start_ofs, end_ofs, cb_param);
        if (r < 0)
          return r;
      }
    }

    start_time = ceph_clock_now();
  }

  return 0;
}
1333
1334 static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket,
1335 const rgw_bucket_dir_entry& ent,
1336 RGWAccessControlPolicy * const bucket_acl,
1337 const optional<Policy>& bucket_policy,
1338 const off_t start_ofs,
1339 const off_t end_ofs,
1340 void * const param)
1341 {
1342 RGWGetObj *op = static_cast<RGWGetObj *>(param);
1343 return op->read_user_manifest_part(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs);
1344 }
1345
/* Serve a GET of a DLO (Swift Dynamic Large Object). prefix has the
 * form "<bucket>/<object-prefix>"; the response body is the
 * concatenation of all matching segment objects. Resolves the segment
 * bucket's ACL/policy (which may differ from the request bucket), then
 * iterates the parts up to three times: size/etag computation, range
 * length computation, and finally data transfer. Returns 0 or a
 * negative error. */
int RGWGetObj::handle_user_manifest(const char *prefix)
{
  const boost::string_view prefix_view(prefix);
  ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix="
                   << prefix_view << dendl;

  const size_t pos = prefix_view.find('/');
  if (pos == string::npos) {
    return -EINVAL;
  }

  const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
  const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));

  rgw_bucket bucket;

  RGWAccessControlPolicy _bucket_acl(s->cct);
  RGWAccessControlPolicy *bucket_acl;
  optional<Policy> _bucket_policy;
  optional<Policy>* bucket_policy;
  RGWBucketInfo bucket_info;
  RGWBucketInfo *pbucket_info;

  /* segments may live in a different bucket than the manifest object;
   * in that case fetch that bucket's info, ACL and policy explicitly */
  if (bucket_name.compare(s->bucket.name) != 0) {
    map<string, bufferlist> bucket_attrs;
    RGWObjectCtx obj_ctx(store);
    int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   bucket_name, bucket_info, NULL,
                                   &bucket_attrs);
    if (r < 0) {
      ldout(s->cct, 0) << "could not get bucket info for bucket="
                       << bucket_name << dendl;
      return r;
    }
    bucket = bucket_info.bucket;
    pbucket_info = &bucket_info;
    bucket_acl = &_bucket_acl;
    r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
    if (r < 0) {
      ldout(s->cct, 0) << "failed to read bucket policy" << dendl;
      return r;
    }
    _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
                                              bucket_info.bucket.tenant);
    bucket_policy = &_bucket_policy;
  } else {
    /* same bucket as the request: reuse what was already loaded */
    bucket = s->bucket;
    pbucket_info = &s->bucket_info;
    bucket_acl = s->bucket_acl.get();
    bucket_policy = &s->iam_policy;
  }

  /* dry run to find out:
   * - total length (of the parts we are going to send to client),
   * - overall DLO's content size,
   * - md5 sum of overall DLO's content (for etag of Swift API). */
  int r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, &s->obj_size, &lo_etag,
        nullptr /* cb */, nullptr /* cb arg */);
  if (r < 0) {
    return r;
  }

  /* clamp the requested range to the now-known logical object size */
  r = RGWRados::Object::Read::range_to_ofs(s->obj_size, ofs, end);
  if (r < 0) {
    return r;
  }

  /* second pass: total bytes that will actually be sent */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        &total_len, nullptr, nullptr,
        nullptr, nullptr);
  if (r < 0) {
    return r;
  }

  /* HEAD-style request: headers only, no body */
  if (!get_data) {
    bufferlist bl;
    send_response_data(bl, 0, 0);
    return 0;
  }

  /* third pass: stream the overlapping parts to the client */
  r = iterate_user_manifest_parts(s->cct, store, ofs, end,
        pbucket_info, obj_prefix, bucket_acl, *bucket_policy,
        nullptr, nullptr, nullptr,
        get_obj_user_manifest_iterate_cb, (void *)this);
  if (r < 0) {
    return r;
  }

  /* zero-length result: still emit the response headers */
  if (!total_len) {
    bufferlist bl;
    send_response_data(bl, 0, 0);
  }

  return 0;
}
1444
1445 int RGWGetObj::handle_slo_manifest(bufferlist& bl)
1446 {
1447 RGWSLOInfo slo_info;
1448 bufferlist::iterator bliter = bl.begin();
1449 try {
1450 ::decode(slo_info, bliter);
1451 } catch (buffer::error& err) {
1452 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
1453 return -EIO;
1454 }
1455 ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
1456
1457 vector<RGWAccessControlPolicy> allocated_acls;
1458 map<string, pair<RGWAccessControlPolicy *, optional<Policy>>> policies;
1459 map<string, rgw_bucket> buckets;
1460
1461 map<uint64_t, rgw_slo_part> slo_parts;
1462
1463 MD5 etag_sum;
1464 total_len = 0;
1465
1466 for (const auto& entry : slo_info.entries) {
1467 const string& path = entry.path;
1468
1469 /* If the path starts with slashes, strip them all. */
1470 const size_t pos_init = path.find_first_not_of('/');
1471 /* According to the documentation of std::string::find following check
1472 * is not necessary as we should get the std::string::npos propagation
1473 * here. This might be true with the accuracy to implementation's bugs.
1474 * See following question on SO:
1475 * http://stackoverflow.com/questions/1011790/why-does-stdstring-findtext-stdstringnpos-not-return-npos
1476 */
1477 if (pos_init == string::npos) {
1478 return -EINVAL;
1479 }
1480
1481 const size_t pos_sep = path.find('/', pos_init);
1482 if (pos_sep == string::npos) {
1483 return -EINVAL;
1484 }
1485
1486 string bucket_name = path.substr(pos_init, pos_sep - pos_init);
1487 string obj_name = path.substr(pos_sep + 1);
1488
1489 rgw_bucket bucket;
1490 RGWAccessControlPolicy *bucket_acl;
1491 Policy* bucket_policy;
1492
1493 if (bucket_name.compare(s->bucket.name) != 0) {
1494 const auto& piter = policies.find(bucket_name);
1495 if (piter != policies.end()) {
1496 bucket_acl = piter->second.first;
1497 bucket_policy = piter->second.second.get_ptr();
1498 bucket = buckets[bucket_name];
1499 } else {
1500 allocated_acls.push_back(RGWAccessControlPolicy(s->cct));
1501 RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
1502
1503 RGWBucketInfo bucket_info;
1504 map<string, bufferlist> bucket_attrs;
1505 RGWObjectCtx obj_ctx(store);
1506 int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
1507 bucket_name, bucket_info, nullptr,
1508 &bucket_attrs);
1509 if (r < 0) {
1510 ldout(s->cct, 0) << "could not get bucket info for bucket="
1511 << bucket_name << dendl;
1512 return r;
1513 }
1514 bucket = bucket_info.bucket;
1515 bucket_acl = &_bucket_acl;
1516 r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
1517 bucket);
1518 if (r < 0) {
1519 ldout(s->cct, 0) << "failed to read bucket ACL for bucket "
1520 << bucket << dendl;
1521 return r;
1522 }
1523 auto _bucket_policy = get_iam_policy_from_attr(
1524 s->cct, store, bucket_attrs, bucket_info.bucket.tenant);
1525 bucket_policy = _bucket_policy.get_ptr();
1526 buckets[bucket_name] = bucket;
1527 policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
1528 }
1529 } else {
1530 bucket = s->bucket;
1531 bucket_acl = s->bucket_acl.get();
1532 bucket_policy = s->iam_policy.get_ptr();
1533 }
1534
1535 rgw_slo_part part;
1536 part.bucket_acl = bucket_acl;
1537 part.bucket_policy = bucket_policy;
1538 part.bucket = bucket;
1539 part.obj_name = obj_name;
1540 part.size = entry.size_bytes;
1541 part.etag = entry.etag;
1542 ldout(s->cct, 20) << "slo_part: ofs=" << ofs
1543 << " bucket=" << part.bucket
1544 << " obj=" << part.obj_name
1545 << " size=" << part.size
1546 << " etag=" << part.etag
1547 << dendl;
1548
1549 etag_sum.Update((const byte *)entry.etag.c_str(),
1550 entry.etag.length());
1551
1552 slo_parts[total_len] = part;
1553 total_len += part.size;
1554 }
1555
1556 complete_etag(etag_sum, &lo_etag);
1557
1558 s->obj_size = slo_info.total_size;
1559 ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl;
1560
1561 int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
1562 if (r < 0) {
1563 return r;
1564 }
1565
1566 total_len = end - ofs + 1;
1567
1568 r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
1569 get_obj_user_manifest_iterate_cb, (void *)this);
1570 if (r < 0) {
1571 return r;
1572 }
1573
1574 return 0;
1575 }
1576
1577 int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
1578 {
1579 /* garbage collection related handling */
1580 utime_t start_time = ceph_clock_now();
1581 if (start_time > gc_invalidate_time) {
1582 int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
1583 if (r < 0) {
1584 dout(0) << "WARNING: could not defer gc entry for obj" << dendl;
1585 }
1586 gc_invalidate_time = start_time;
1587 gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
1588 }
1589 return send_response_data(bl, bl_ofs, bl_len);
1590 }
1591
1592 bool RGWGetObj::prefetch_data()
1593 {
1594 /* HEAD request, stop prefetch*/
1595 if (!get_data) {
1596 return false;
1597 }
1598
1599 bool prefetch_first_chunk = true;
1600 range_str = s->info.env->get("HTTP_RANGE");
1601
1602 if (range_str) {
1603 int r = parse_range();
1604 /* error on parsing the range, stop prefetch and will fail in execute() */
1605 if (r < 0) {
1606 return false; /* range_parsed==false */
1607 }
1608 /* range get goes to shadow objects, stop prefetch */
1609 if (ofs >= s->cct->_conf->rgw_max_chunk_size) {
1610 prefetch_first_chunk = false;
1611 }
1612 }
1613
1614 return get_data && prefetch_first_chunk;
1615 }
1616
/* Standard per-op pre-execution hook for bucket/object operations. */
void RGWGetObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
1621
1622 static bool object_is_expired(map<string, bufferlist>& attrs) {
1623 map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_DELETE_AT);
1624 if (iter != attrs.end()) {
1625 utime_t delete_at;
1626 try {
1627 ::decode(delete_at, iter->second);
1628 } catch (buffer::error& err) {
1629 dout(0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
1630 return false;
1631 }
1632
1633 if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
1634 return true;
1635 }
1636 }
1637
1638 return false;
1639 }
1640
/* Main entry point for GET/HEAD object. Prepares the conditional read,
 * then dispatches to the special paths in order: STAT, torrent, DLO
 * (user manifest), SLO manifest; otherwise streams the object through
 * the filter chain (decompress -> decrypt -> client callback). Errors
 * are reported through op_ret and send_response_data_error(). */
void RGWGetObj::execute()
{
  utime_t start_time = s->time;
  bufferlist bl;
  /* next time get_data_cb() should re-defer garbage collection */
  gc_invalidate_time = ceph_clock_now();
  gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);

  bool need_decompress;
  int64_t ofs_x, end_x;

  RGWGetObj_CB cb(this);
  RGWGetDataCB* filter = (RGWGetDataCB*)&cb;
  boost::optional<RGWGetObj_Decompress> decompress;
  std::unique_ptr<RGWGetDataCB> decrypt;
  map<string, bufferlist>::iterator attr_iter;

  perfcounter->inc(l_rgw_get);

  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
  RGWRados::Object::Read read_op(&op_target);

  op_ret = get_params();
  if (op_ret < 0)
    goto done_err;

  op_ret = init_common();
  if (op_ret < 0)
    goto done_err;

  /* wire up the conditional-GET headers parsed by init_common() */
  read_op.conds.mod_ptr = mod_ptr;
  read_op.conds.unmod_ptr = unmod_ptr;
  read_op.conds.high_precision_time = s->system_request; /* system request need to use high precision time */
  read_op.conds.mod_zone_id = mod_zone_id;
  read_op.conds.mod_pg_ver = mod_pg_ver;
  read_op.conds.if_match = if_match;
  read_op.conds.if_nomatch = if_nomatch;
  read_op.params.attrs = &attrs;
  read_op.params.lastmod = &lastmod;
  read_op.params.obj_size = &s->obj_size;

  op_ret = read_op.prepare();
  if (op_ret < 0)
    goto done_err;
  version_id = read_op.state.obj.key.instance;

  /* STAT ops don't need data, and do no i/o */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  /* start gettorrent */
  if (torrent.get_flag())
  {
    /* torrents expose raw object data, which is useless for SSE-C
     * objects the server cannot decrypt */
    attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
    if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
      ldout(s->cct, 0) << "ERROR: torrents are not supported for objects "
          "encrypted with SSE-C" << dendl;
      op_ret = -EINVAL;
      goto done_err;
    }
    torrent.init(s, store);
    op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    op_ret = send_response_data(bl, 0, total_len);
    if (op_ret < 0)
    {
      ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }
  /* end gettorrent */

  op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
  if (op_ret < 0) {
    lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
    goto done_err;
  }
  if (need_decompress) {
    /* the client sees the uncompressed size */
    s->obj_size = cs_info.orig_size;
    decompress.emplace(s->cct, &cs_info, partial_content, filter);
    filter = &*decompress;
  }

  /* DLO: the object is a manifest pointing at prefix-named segments */
  attr_iter = attrs.find(RGW_ATTR_USER_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    op_ret = handle_user_manifest(attr_iter->second.c_str());
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret="
                       << op_ret << dendl;
      goto done_err;
    }
    return;
  }

  /* SLO: the object body is an encoded static manifest */
  attr_iter = attrs.find(RGW_ATTR_SLO_MANIFEST);
  if (attr_iter != attrs.end() && !skip_manifest) {
    is_slo = true;
    op_ret = handle_slo_manifest(attr_iter->second);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
                       << dendl;
      goto done_err;
    }
    return;
  }

  // for range requests with obj size 0
  if (range_str && !(s->obj_size)) {
    total_len = 0;
    op_ret = -ERANGE;
    goto done_err;
  }

  op_ret = read_op.range_to_ofs(s->obj_size, ofs, end);
  if (op_ret < 0)
    goto done_err;
  total_len = (ofs <= end ? end + 1 - ofs : 0);

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(attrs)) {
    op_ret = -ENOENT;
    goto done_err;
  }

  start = ofs;

  /* STAT ops don't need data, and do no i/o */
  /* NOTE(review): unreachable -- the same check already returned right
   * after read_op.prepare() above; kept as-is. */
  if (get_type() == RGW_OP_STAT_OBJ) {
    return;
  }

  attr_iter = attrs.find(RGW_ATTR_MANIFEST);
  op_ret = this->get_decrypt_filter(&decrypt, filter,
                                    attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
  if (decrypt != nullptr) {
    filter = decrypt.get();
  }
  if (op_ret < 0) {
    goto done_err;
  }

  /* HEAD, or an empty effective range: headers only */
  if (!get_data || ofs > end) {
    send_response_data(bl, 0, 0);
    return;
  }

  perfcounter->inc(l_rgw_get_b, end - ofs);

  /* let the filter chain translate the logical range into the raw
   * on-disk range (e.g. compressed extents) */
  ofs_x = ofs;
  end_x = end;
  filter->fixup_range(ofs_x, end_x);
  op_ret = read_op.iterate(ofs_x, end_x, filter);

  if (op_ret >= 0)
    op_ret = filter->flush();

  perfcounter->tinc(l_rgw_get_lat,
                    (ceph_clock_now() - start_time));
  if (op_ret < 0) {
    goto done_err;
  }

  /* zero-length tail call signals end-of-body to the frontend */
  op_ret = send_response_data(bl, 0, 0);
  if (op_ret < 0) {
    goto done_err;
  }
  return;

done_err:
  send_response_data_error();
}
1820
1821 int RGWGetObj::init_common()
1822 {
1823 if (range_str) {
1824 /* range parsed error when prefetch */
1825 if (!range_parsed) {
1826 int r = parse_range();
1827 if (r < 0)
1828 return r;
1829 }
1830 }
1831 if (if_mod) {
1832 if (parse_time(if_mod, &mod_time) < 0)
1833 return -EINVAL;
1834 mod_ptr = &mod_time;
1835 }
1836
1837 if (if_unmod) {
1838 if (parse_time(if_unmod, &unmod_time) < 0)
1839 return -EINVAL;
1840 unmod_ptr = &unmod_time;
1841 }
1842
1843 return 0;
1844 }
1845
1846 int RGWListBuckets::verify_permission()
1847 {
1848 if (!verify_user_permission(s, RGW_PERM_READ)) {
1849 return -EACCES;
1850 }
1851
1852 return 0;
1853 }
1854
1855 int RGWGetUsage::verify_permission()
1856 {
1857 if (s->auth.identity->is_anonymous()) {
1858 return -EACCES;
1859 }
1860
1861 return 0;
1862 }
1863
/* List the requesting user's buckets in chunks of
 * rgw_list_buckets_max_chunk, accumulating global and per-placement
 * statistics, and stream each chunk to the client via
 * handle_listing_chunk(). `limit` appears to be a signed member where a
 * negative value means "no limit" (judging by the `limit >= 0` checks)
 * -- TODO confirm against the op's declaration. */
void RGWListBuckets::execute()
{
  bool done;
  bool started = false;
  uint64_t total_count = 0;

  const uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  op_ret = get_params();
  if (op_ret < 0) {
    goto send_end;
  }

  /* Swift account-metadata listing also needs the user's attrs */
  if (supports_account_metadata()) {
    op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, attrs);
    if (op_ret < 0) {
      goto send_end;
    }
  }

  is_truncated = false;
  do {
    RGWUserBuckets buckets;
    uint64_t read_count;
    if (limit >= 0) {
      /* don't read past the client-requested limit */
      read_count = min(limit - total_count, (uint64_t)max_buckets);
    } else {
      read_count = max_buckets;
    }

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, end_marker, read_count,
                                   should_get_stats(), &is_truncated,
                                   get_default_max());
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
                        << s->user->user_id << dendl;
      break;
    }

    /* We need to have stats for all our policies - even if a given policy
     * isn't actually used in a given account. In such situation its usage
     * stats would be simply full of zeros. */
    for (const auto& policy : store->get_zonegroup().placement_targets) {
      policies_stats.emplace(policy.second.name,
                             decltype(policies_stats)::mapped_type());
    }

    std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
    for (const auto& kv : m) {
      const auto& bucket = kv.second;

      global_stats.bytes_used += bucket.size;
      global_stats.bytes_used_rounded += bucket.size_rounded;
      global_stats.objects_count += bucket.count;

      /* operator[] still can create a new entry for storage policy seen
       * for first time. */
      auto& policy_stats = policies_stats[bucket.placement_rule];
      policy_stats.bytes_used += bucket.size;
      policy_stats.bytes_used_rounded += bucket.size_rounded;
      policy_stats.buckets_count++;
      policy_stats.objects_count += bucket.count;
    }
    global_stats.buckets_count += m.size();
    total_count += m.size();

    /* stop early on a short read or once the client's limit is reached */
    done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit));

    if (!started) {
      send_response_begin(buckets.count() > 0);
      started = true;
    }

    if (!m.empty()) {
      /* advance the pagination marker to the last bucket returned */
      map<string, RGWBucketEnt>::reverse_iterator riter = m.rbegin();
      marker = riter->first;

      handle_listing_chunk(std::move(buckets));
    }
  } while (is_truncated && !done);

send_end:
  /* the response must always be opened and closed, even on error */
  if (!started) {
    send_response_begin(false);
  }
  send_response_end();
}
1954
1955 void RGWGetUsage::execute()
1956 {
1957 uint64_t start_epoch = 0;
1958 uint64_t end_epoch = (uint64_t)-1;
1959 op_ret = get_params();
1960 if (op_ret < 0)
1961 return;
1962
1963 if (!start_date.empty()) {
1964 op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
1965 if (op_ret < 0) {
1966 ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl;
1967 return;
1968 }
1969 }
1970
1971 if (!end_date.empty()) {
1972 op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
1973 if (op_ret < 0) {
1974 ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl;
1975 return;
1976 }
1977 }
1978
1979 uint32_t max_entries = 1000;
1980
1981 bool is_truncated = true;
1982
1983 RGWUsageIter usage_iter;
1984
1985 while (is_truncated) {
1986 op_ret = store->read_usage(s->user->user_id, start_epoch, end_epoch, max_entries,
1987 &is_truncated, usage_iter, usage);
1988
1989 if (op_ret == -ENOENT) {
1990 op_ret = 0;
1991 is_truncated = false;
1992 }
1993
1994 if (op_ret < 0) {
1995 return;
1996 }
1997 }
1998
1999 op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
2000 if (op_ret < 0) {
2001 ldout(store->ctx(), 0) << "ERROR: failed to sync user stats: " << dendl;
2002 return;
2003 }
2004
2005 op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage);
2006 if (op_ret < 0) {
2007 cerr << "ERROR: failed to sync user stats: " << std::endl;
2008 return ;
2009 }
2010
2011 string user_str = s->user->user_id.to_str();
2012 op_ret = store->cls_user_get_header(user_str, &header);
2013 if (op_ret < 0) {
2014 ldout(store->ctx(), 0) << "ERROR: can't read user header: " << dendl;
2015 return;
2016 }
2017
2018 return;
2019 }
2020
2021 int RGWStatAccount::verify_permission()
2022 {
2023 if (!verify_user_permission(s, RGW_PERM_READ)) {
2024 return -EACCES;
2025 }
2026
2027 return 0;
2028 }
2029
/* Aggregate account-wide statistics (bytes, objects, bucket count,
 * broken down per placement policy) by paging through all of the
 * user's buckets with stats enabled. */
void RGWStatAccount::execute()
{
  string marker;
  bool is_truncated = false;
  uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk;

  do {
    RGWUserBuckets buckets;

    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets, marker,
                                   string(), max_buckets, true, &is_truncated);
    if (op_ret < 0) {
      /* hmm.. something wrong here.. the user was authenticated, so it
         should exist */
      ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
                        << s->user->user_id << dendl;
      break;
    } else {
      /* We need to have stats for all our policies - even if a given policy
       * isn't actually used in a given account. In such situation its usage
       * stats would be simply full of zeros. */
      for (const auto& policy : store->get_zonegroup().placement_targets) {
        policies_stats.emplace(policy.second.name,
                               decltype(policies_stats)::mapped_type());
      }

      std::map<std::string, RGWBucketEnt>& m = buckets.get_buckets();
      for (const auto& kv : m) {
        const auto& bucket = kv.second;

        global_stats.bytes_used += bucket.size;
        global_stats.bytes_used_rounded += bucket.size_rounded;
        global_stats.objects_count += bucket.count;

        /* operator[] still can create a new entry for storage policy seen
         * for first time. */
        auto& policy_stats = policies_stats[bucket.placement_rule];
        policy_stats.bytes_used += bucket.size;
        policy_stats.bytes_used_rounded += bucket.size_rounded;
        policy_stats.buckets_count++;
        policy_stats.objects_count += bucket.count;
      }
      global_stats.buckets_count += m.size();

      /* NOTE(review): the pagination marker is never advanced here,
       * unlike RGWListBuckets::execute(); presumably is_truncated going
       * false ends the loop -- verify against rgw_read_user_buckets. */
    }
  } while (is_truncated);
}
2077
/* Allowed for the bucket owner or via s3:GetBucketVersioning policy. */
int RGWGetBucketVersioning::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
}
2082
/* Standard per-op pre-execution hook for bucket/object operations. */
void RGWGetBucketVersioning::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2087
/* Report the bucket's versioning state from the cached bucket info. */
void RGWGetBucketVersioning::execute()
{
  versioned = s->bucket_info.versioned();
  versioning_enabled = s->bucket_info.versioning_enabled();
}
2093
/* Allowed for the bucket owner or via s3:PutBucketVersioning policy. */
int RGWSetBucketVersioning::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
}
2098
/* Standard per-op pre-execution hook for bucket/object operations. */
void RGWSetBucketVersioning::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2103
2104 void RGWSetBucketVersioning::execute()
2105 {
2106 op_ret = get_params();
2107 if (op_ret < 0)
2108 return;
2109
2110 if (!store->is_meta_master()) {
2111 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2112 if (op_ret < 0) {
2113 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
2114 return;
2115 }
2116 }
2117
2118 op_ret = retry_raced_bucket_write(store, s, [this] {
2119 if (enable_versioning) {
2120 s->bucket_info.flags |= BUCKET_VERSIONED;
2121 s->bucket_info.flags &= ~BUCKET_VERSIONS_SUSPENDED;
2122 } else {
2123 s->bucket_info.flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED);
2124 }
2125
2126 return store->put_bucket_instance_info(s->bucket_info, false, real_time(),
2127 &s->bucket_attrs);
2128 });
2129
2130 if (op_ret < 0) {
2131 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
2132 << " returned err=" << op_ret << dendl;
2133 return;
2134 }
2135 }
2136
/* Allowed for the bucket owner or via s3:GetBucketWebsite policy. */
int RGWGetBucketWebsite::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
}
2141
/* Standard per-op pre-execution hook for bucket/object operations. */
void RGWGetBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2146
/* Succeeds only when a website configuration exists; the config itself
 * is already in s->bucket_info.website_conf for the response. */
void RGWGetBucketWebsite::execute()
{
  if (!s->bucket_info.has_website) {
    op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION;
  }
}
2153
/* Allowed for the bucket owner or via s3:PutBucketWebsite policy. */
int RGWSetBucketWebsite::verify_permission()
{
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
}
2158
/* Standard per-op pre-execution hook for bucket/object operations. */
void RGWSetBucketWebsite::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
2163
2164 void RGWSetBucketWebsite::execute()
2165 {
2166 op_ret = get_params();
2167
2168 if (op_ret < 0)
2169 return;
2170
2171 if (!store->is_meta_master()) {
2172 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
2173 if (op_ret < 0) {
2174 ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
2175 return;
2176 }
2177 }
2178
2179 op_ret = retry_raced_bucket_write(store, s, [this] {
2180 s->bucket_info.has_website = true;
2181 s->bucket_info.website_conf = website_conf;
2182 op_ret = store->put_bucket_instance_info(s->bucket_info, false,
2183 real_time(), &s->bucket_attrs);
2184 return op_ret;
2185 });
2186
2187 if (op_ret < 0) {
2188 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
2189 return;
2190 }
2191 }
2192
int RGWDeleteBucketWebsite::verify_permission()
{
  // Allowed for the bucket owner or via an explicit s3:DeleteBucketWebsite
  // grant in the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
}
2197
void RGWDeleteBucketWebsite::pre_exec()
{
  // Shared pre-exec hook used by the bucket/object ops in this file.
  rgw_bucket_object_pre_exec(s);
}
2202
2203 void RGWDeleteBucketWebsite::execute()
2204 {
2205 op_ret = retry_raced_bucket_write(store, s, [this] {
2206 s->bucket_info.has_website = false;
2207 s->bucket_info.website_conf = RGWBucketWebsiteConf();
2208 op_ret = store->put_bucket_instance_info(s->bucket_info, false,
2209 real_time(), &s->bucket_attrs);
2210 return op_ret;
2211 });
2212 if (op_ret < 0) {
2213 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
2214 return;
2215 }
2216 }
2217
2218 int RGWStatBucket::verify_permission()
2219 {
2220 // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission.
2221 if (!verify_bucket_permission(s, rgw::IAM::s3ListBucket)) {
2222 return -EACCES;
2223 }
2224
2225 return 0;
2226 }
2227
void RGWStatBucket::pre_exec()
{
  // Shared pre-exec hook used by the bucket/object ops in this file.
  rgw_bucket_object_pre_exec(s);
}
2232
void RGWStatBucket::execute()
{
  // HEAD bucket: refresh usage statistics for the current bucket into the
  // `bucket` member (consumed by the response writer).
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  RGWUserBuckets buckets;
  bucket.bucket = s->bucket;
  buckets.add(bucket);
  map<string, RGWBucketEnt>& m = buckets.get_buckets();
  // NOTE(review): update_containers_stats() appears to return the number of
  // entries it refreshed (positive on success) -- confirm against RGWRados.
  op_ret = store->update_containers_stats(m);
  if (! op_ret)
    op_ret = -EEXIST;  // zero entries refreshed is mapped to -EEXIST
  if (op_ret > 0) {
    op_ret = 0;
    map<string, RGWBucketEnt>::iterator iter = m.find(bucket.bucket.name);
    if (iter != m.end()) {
      bucket = iter->second;  // pick up the refreshed stats entry
    } else {
      op_ret = -EINVAL;  // stats map unexpectedly missing our bucket
    }
  }
}
2257
int RGWListBucket::verify_permission()
{
  // Parameters are parsed here (not in execute()) because prefix/delimiter/
  // max-keys feed the policy-evaluation environment below.
  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }
  if (!prefix.empty())
    s->env.emplace("s3:prefix", prefix);

  if (!delimiter.empty())
    s->env.emplace("s3:delimiter", delimiter);

  s->env.emplace("s3:max-keys", std::to_string(max));

  // Version listing requires s3:ListBucketVersions; a plain listing needs
  // s3:ListBucket.
  if (!verify_bucket_permission(s,
                                list_versions ?
                                rgw::IAM::s3ListBucketVersions :
                                rgw::IAM::s3ListBucket)) {
    return -EACCES;
  }

  return 0;
}
2281
2282 int RGWListBucket::parse_max_keys()
2283 {
2284 if (!max_keys.empty()) {
2285 char *endptr;
2286 max = strtol(max_keys.c_str(), &endptr, 10);
2287 if (endptr) {
2288 if (endptr == max_keys.c_str()) return -EINVAL;
2289 while (*endptr && isspace(*endptr)) // ignore white space
2290 endptr++;
2291 if (*endptr) {
2292 return -EINVAL;
2293 }
2294 }
2295 } else {
2296 max = default_max;
2297 }
2298
2299 return 0;
2300 }
2301
void RGWListBucket::pre_exec()
{
  // Shared pre-exec hook used by the bucket/object ops in this file.
  rgw_bucket_object_pre_exec(s);
}
2306
void RGWListBucket::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  // Unordered listing cannot honor delimiter-based grouping; reject the
  // combination up front.
  if (allow_unordered && !delimiter.empty()) {
    ldout(s->cct, 0) <<
      "ERROR: unordered bucket listing requested with a delimiter" << dendl;
    op_ret = -EINVAL;
    return;
  }

  // Swift-style container stats, only when the frontend asked for them.
  if (need_container_stats()) {
    map<string, RGWBucketEnt> m;
    m[s->bucket.name] = RGWBucketEnt();
    m.begin()->second.bucket = s->bucket;
    // NOTE(review): a positive return appears to mean "stats refreshed";
    // errors are deliberately ignored here (listing still proceeds).
    op_ret = store->update_containers_stats(m);
    if (op_ret > 0) {
      bucket = m.begin()->second;
    }
  }

  RGWRados::Bucket target(store, s->bucket_info);
  if (shard_id >= 0) {
    // Restrict the listing to a single bucket index shard when requested.
    target.set_shard_id(shard_id);
  }
  RGWRados::Bucket::List list_op(&target);

  list_op.params.prefix = prefix;
  list_op.params.delim = delimiter;
  list_op.params.marker = marker;
  list_op.params.end_marker = end_marker;
  list_op.params.list_versions = list_versions;
  list_op.params.allow_unordered = allow_unordered;

  op_ret = list_op.list_objects(max, &objs, &common_prefixes, &is_truncated);
  if (op_ret >= 0) {
    next_marker = list_op.get_next_marker();
  }
}
2349
int RGWGetBucketLogging::verify_permission()
{
  // Allowed for the bucket owner or via an explicit s3:GetBucketLogging
  // grant in the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
}
2354
int RGWGetBucketLocation::verify_permission()
{
  // Allowed for the bucket owner or via an explicit s3:GetBucketLocation
  // grant in the bucket policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
}
2359
int RGWCreateBucket::verify_permission()
{
  /* This check is mostly needed for S3 that doesn't support account ACL.
   * Swift doesn't allow to delegate any permission to an anonymous user,
   * so it will become an early exit in such case. */
  if (s->auth.identity->is_anonymous()) {
    return -EACCES;
  }

  // Bucket creation requires write permission at the user (account) level.
  if (!verify_user_permission(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  // Cross-tenant bucket creation is not allowed.
  if (s->user->user_id.tenant != s->bucket_tenant) {
    ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
                      << " (user_id.tenant=" << s->user->user_id.tenant
                      << " requested=" << s->bucket_tenant << ")"
                      << dendl;
    return -EACCES;
  }
  // A negative max_buckets means bucket creation is disabled for this user.
  if (s->user->max_buckets < 0) {
    return -EPERM;
  }

  // max_buckets == 0 means unlimited; otherwise enforce the quota by
  // counting the user's existing buckets (reads at most max_buckets entries).
  if (s->user->max_buckets) {
    RGWUserBuckets buckets;
    string marker;
    bool is_truncated = false;
    op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
                                   marker, string(), s->user->max_buckets,
                                   false, &is_truncated);
    if (op_ret < 0) {
      return op_ret;
    }

    if ((int)buckets.count() >= s->user->max_buckets) {
      return -ERR_TOO_MANY_BUCKETS;
    }
  }

  return 0;
}
2402
/* Forward the current request to the metadata master zone's REST endpoint.
 * Used by non-master zones for operations that must be applied on the master
 * first (bucket create/delete, metadata changes, ...).
 *
 * @param objv          optional object version to pass along
 * @param in_data       request body to replay against the master
 * @param jp            if non-null, the master's response is parsed as JSON
 * @param forward_info  optional req_info overriding s->info
 * Returns 0 on success or a negative error code. */
static int forward_request_to_master(struct req_state *s, obj_version *objv,
                                     RGWRados *store, bufferlist& in_data,
                                     JSONParser *jp, req_info *forward_info)
{
  if (!store->rest_master_conn) {
    ldout(s->cct, 0) << "rest connection is invalid" << dendl;
    return -EINVAL;
  }
  ldout(s->cct, 0) << "sending request to master zonegroup" << dendl;
  bufferlist response;
  string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
  int ret = store->rest_master_conn->forward(uid_str, (forward_info ? *forward_info : s->info),
                                             objv, MAX_REST_RESPONSE, &in_data, &response);
  if (ret < 0)
    return ret;

  ldout(s->cct, 20) << "response: " << response.c_str() << dendl;
  if (jp && !jp->parse(response.c_str(), response.length())) {
    ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl;
    return -EINVAL;
  }

  return 0;
}
2428
void RGWCreateBucket::pre_exec()
{
  // Shared pre-exec hook used by the bucket/object ops in this file.
  rgw_bucket_object_pre_exec(s);
}
2433
2434 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2435 map<string, bufferlist>& out_attrs,
2436 map<string, bufferlist>& out_rmattrs)
2437 {
2438 for (const auto& kv : orig_attrs) {
2439 const string& name = kv.first;
2440
2441 /* Check if the attr is user-defined metadata item. */
2442 if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1,
2443 RGW_ATTR_META_PREFIX) == 0) {
2444 /* For the objects all existing meta attrs have to be removed. */
2445 out_rmattrs[name] = kv.second;
2446 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2447 out_attrs[name] = kv.second;
2448 }
2449 }
2450 }
2451
2452 /* Fuse resource metadata basing on original attributes in @orig_attrs, set
2453 * of _custom_ attribute names to remove in @rmattr_names and attributes in
2454 * @out_attrs. Place results in @out_attrs.
2455 *
2456 * NOTE: it's supposed that all special attrs already present in @out_attrs
2457 * will be preserved without any change. Special attributes are those which
2458 * names start with RGW_ATTR_META_PREFIX. They're complement to custom ones
2459 * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta and so on. */
2460 static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
2461 const set<string>& rmattr_names,
2462 map<string, bufferlist>& out_attrs)
2463 {
2464 for (const auto& kv : orig_attrs) {
2465 const string& name = kv.first;
2466
2467 /* Check if the attr is user-defined metadata item. */
2468 if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
2469 RGW_ATTR_META_PREFIX) == 0) {
2470 /* For the buckets all existing meta attrs are preserved,
2471 except those that are listed in rmattr_names. */
2472 if (rmattr_names.find(name) != std::end(rmattr_names)) {
2473 const auto aiter = out_attrs.find(name);
2474
2475 if (aiter != std::end(out_attrs)) {
2476 out_attrs.erase(aiter);
2477 }
2478 } else {
2479 /* emplace() won't alter the map if the key is already present.
2480 * This behaviour is fully intensional here. */
2481 out_attrs.emplace(kv);
2482 }
2483 } else if (out_attrs.find(name) == std::end(out_attrs)) {
2484 out_attrs[name] = kv.second;
2485 }
2486 }
2487 }
2488
2489
2490 static void populate_with_generic_attrs(const req_state * const s,
2491 map<string, bufferlist>& out_attrs)
2492 {
2493 for (const auto& kv : s->generic_attrs) {
2494 bufferlist& attrbl = out_attrs[kv.first];
2495 const string& val = kv.second;
2496 attrbl.clear();
2497 attrbl.append(val.c_str(), val.size() + 1);
2498 }
2499 }
2500
2501
/* Extract Swift-style quota settings out of the attribute maps and apply
 * them to @quota. The quota attrs are consumed (erased) from @add_attrs;
 * names listed in @rmattr_names reset the corresponding limit to unlimited.
 *
 * @param quota_extracted  optional out-flag: set to true when any quota
 *                         attribute was seen (added or removed)
 * Returns 0 on success, -EINVAL when a quota value fails to parse. */
static int filter_out_quota_info(std::map<std::string, bufferlist>& add_attrs,
                                 const std::set<std::string>& rmattr_names,
                                 RGWQuotaInfo& quota,
                                 bool * quota_extracted = nullptr)
{
  bool extracted = false;

  /* Put new limit on max objects. */
  auto iter = add_attrs.find(RGW_ATTR_QUOTA_NOBJS);
  std::string err;
  if (std::end(add_attrs) != iter) {
    quota.max_objects =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  /* Put new limit on bucket (container) size. */
  iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE);
  if (iter != add_attrs.end()) {
    quota.max_size =
      static_cast<int64_t>(strict_strtoll(iter->second.c_str(), 10, &err));
    if (!err.empty()) {
      return -EINVAL;
    }
    add_attrs.erase(iter);
    extracted = true;
  }

  for (const auto& name : rmattr_names) {
    /* Remove limit on max objects. */
    if (name.compare(RGW_ATTR_QUOTA_NOBJS) == 0) {
      quota.max_objects = -1;
      extracted = true;
    }

    /* Remove limit on max bucket size. */
    if (name.compare(RGW_ATTR_QUOTA_MSIZE) == 0) {
      quota.max_size = -1;
      extracted = true;
    }
  }

  /* Swift requries checking on raw usage instead of the 4 KiB rounded one. */
  quota.check_on_raw = true;
  /* The quota is only considered active when at least one positive limit
   * remains after the updates above. */
  quota.enabled = quota.max_size > 0 || quota.max_objects > 0;

  if (quota_extracted) {
    *quota_extracted = extracted;
  }

  return 0;
}
2558
2559
/* Extract the Swift static-website attributes out of @add_attrs and apply
 * them to @ws_conf. Extracted attrs are consumed (erased) from @add_attrs;
 * names listed in @rmattr_names reset the corresponding field to empty. */
static void filter_out_website(std::map<std::string, ceph::bufferlist>& add_attrs,
                               const std::set<std::string>& rmattr_names,
                               RGWBucketWebsiteConf& ws_conf)
{
  // Temporary storage for the listings flag, parsed into a bool at the end.
  std::string lstval;

  /* Let's define a mapping between each custom attribute and the memory where
   * attribute's value should be stored. The memory location is expressed by
   * a non-const reference. */
  const auto mapping = {
    std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)),
    std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)),
    std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)),
    std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)),
    std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))
  };

  for (const auto& kv : mapping) {
    const char * const key = kv.first;
    auto& target = kv.second;

    auto iter = add_attrs.find(key);

    if (std::end(add_attrs) != iter) {
      /* The "target" is a reference to ws_conf. */
      target = iter->second.c_str();
      add_attrs.erase(iter);
    }

    /* Removal wins over addition when the same attr appears in both sets. */
    if (rmattr_names.count(key)) {
      target = std::string();
    }
  }

  /* Only a case-insensitive "true" enables listings. */
  if (! lstval.empty()) {
    ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true");
  }
}
2598
2599
2600 void RGWCreateBucket::execute()
2601 {
2602 RGWAccessControlPolicy old_policy(s->cct);
2603 buffer::list aclbl;
2604 buffer::list corsbl;
2605 bool existed;
2606 string bucket_name;
2607 rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
2608 rgw_raw_obj obj(store->get_zone_params().domain_root, bucket_name);
2609 obj_version objv, *pobjv = NULL;
2610
2611 op_ret = get_params();
2612 if (op_ret < 0)
2613 return;
2614
2615 if (!location_constraint.empty() &&
2616 !store->has_zonegroup_api(location_constraint)) {
2617 ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
2618 << " can't be found." << dendl;
2619 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2620 s->err.message = "The specified location-constraint is not valid";
2621 return;
2622 }
2623
2624 if (!store->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
2625 store->get_zonegroup().api_name != location_constraint) {
2626 ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
2627 << " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")"
2628 << dendl;
2629 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2630 s->err.message = "The specified location-constraint is not valid";
2631 return;
2632 }
2633
2634 const auto& zonegroup = store->get_zonegroup();
2635 if (!placement_rule.empty() &&
2636 !zonegroup.placement_targets.count(placement_rule)) {
2637 ldout(s->cct, 0) << "placement target (" << placement_rule << ")"
2638 << " doesn't exist in the placement targets of zonegroup"
2639 << " (" << store->get_zonegroup().api_name << ")" << dendl;
2640 op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
2641 s->err.message = "The specified placement target does not exist";
2642 return;
2643 }
2644
2645 /* we need to make sure we read bucket info, it's not read before for this
2646 * specific request */
2647 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
2648 op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
2649 s->bucket_info, NULL, &s->bucket_attrs);
2650 if (op_ret < 0 && op_ret != -ENOENT)
2651 return;
2652 s->bucket_exists = (op_ret != -ENOENT);
2653
2654 s->bucket_owner.set_id(s->user->user_id);
2655 s->bucket_owner.set_name(s->user->display_name);
2656 if (s->bucket_exists) {
2657 int r = get_bucket_policy_from_attr(s->cct, store, s->bucket_info,
2658 s->bucket_attrs, &old_policy);
2659 if (r >= 0) {
2660 if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
2661 op_ret = -EEXIST;
2662 return;
2663 }
2664 }
2665 }
2666
2667 RGWBucketInfo master_info;
2668 rgw_bucket *pmaster_bucket;
2669 uint32_t *pmaster_num_shards;
2670 real_time creation_time;
2671
2672 if (!store->is_meta_master()) {
2673 JSONParser jp;
2674 op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
2675 if (op_ret < 0) {
2676 return;
2677 }
2678
2679 JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
2680 JSONDecoder::decode_json("object_ver", objv, &jp);
2681 JSONDecoder::decode_json("bucket_info", master_info, &jp);
2682 ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
2683 ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl;
2684 pmaster_bucket= &master_info.bucket;
2685 creation_time = master_info.creation_time;
2686 pmaster_num_shards = &master_info.num_shards;
2687 pobjv = &objv;
2688 } else {
2689 pmaster_bucket = NULL;
2690 pmaster_num_shards = NULL;
2691 }
2692
2693 string zonegroup_id;
2694
2695 if (s->system_request) {
2696 zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
2697 if (zonegroup_id.empty()) {
2698 zonegroup_id = store->get_zonegroup().get_id();
2699 }
2700 } else {
2701 zonegroup_id = store->get_zonegroup().get_id();
2702 }
2703
2704 if (s->bucket_exists) {
2705 string selected_placement_rule;
2706 rgw_bucket bucket;
2707 bucket.tenant = s->bucket_tenant;
2708 bucket.name = s->bucket_name;
2709 op_ret = store->select_bucket_placement(*(s->user), zonegroup_id,
2710 placement_rule,
2711 &selected_placement_rule, nullptr);
2712 if (selected_placement_rule != s->bucket_info.placement_rule) {
2713 op_ret = -EEXIST;
2714 return;
2715 }
2716 }
2717
2718 /* Encode special metadata first as we're using std::map::emplace under
2719 * the hood. This method will add the new items only if the map doesn't
2720 * contain such keys yet. */
2721 policy.encode(aclbl);
2722 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
2723
2724 if (has_cors) {
2725 cors_config.encode(corsbl);
2726 emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
2727 }
2728
2729 RGWQuotaInfo quota_info;
2730 const RGWQuotaInfo * pquota_info = nullptr;
2731 if (need_metadata_upload()) {
2732 /* It's supposed that following functions WILL NOT change any special
2733 * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
2734 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
2735 if (op_ret < 0) {
2736 return;
2737 }
2738 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
2739 populate_with_generic_attrs(s, attrs);
2740
2741 op_ret = filter_out_quota_info(attrs, rmattr_names, quota_info);
2742 if (op_ret < 0) {
2743 return;
2744 } else {
2745 pquota_info = &quota_info;
2746 }
2747
2748 /* Web site of Swift API. */
2749 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
2750 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
2751 }
2752
2753 s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
2754 s->bucket.name = s->bucket_name;
2755
2756 /* Handle updates of the metadata for Swift's object versioning. */
2757 if (swift_ver_location) {
2758 s->bucket_info.swift_ver_location = *swift_ver_location;
2759 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
2760 }
2761
2762 op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
2763 placement_rule, s->bucket_info.swift_ver_location,
2764 pquota_info, attrs,
2765 info, pobjv, &ep_objv, creation_time,
2766 pmaster_bucket, pmaster_num_shards, true);
2767 /* continue if EEXIST and create_bucket will fail below. this way we can
2768 * recover from a partial create by retrying it. */
2769 ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;
2770
2771 if (op_ret && op_ret != -EEXIST)
2772 return;
2773
2774 existed = (op_ret == -EEXIST);
2775
2776 if (existed) {
2777 /* bucket already existed, might have raced with another bucket creation, or
2778 * might be partial bucket creation that never completed. Read existing bucket
2779 * info, verify that the reported bucket owner is the current user.
2780 * If all is ok then update the user's list of buckets.
2781 * Otherwise inform client about a name conflict.
2782 */
2783 if (info.owner.compare(s->user->user_id) != 0) {
2784 op_ret = -EEXIST;
2785 return;
2786 }
2787 s->bucket = info.bucket;
2788 }
2789
2790 op_ret = rgw_link_bucket(store, s->user->user_id, s->bucket,
2791 info.creation_time, false);
2792 if (op_ret && !existed && op_ret != -EEXIST) {
2793 /* if it exists (or previously existed), don't remove it! */
2794 op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
2795 s->bucket.name);
2796 if (op_ret < 0) {
2797 ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
2798 << dendl;
2799 }
2800 } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
2801 op_ret = -ERR_BUCKET_EXISTS;
2802 }
2803
2804 if (need_metadata_upload() && existed) {
2805 /* OK, it looks we lost race with another request. As it's required to
2806 * handle metadata fusion and upload, the whole operation becomes very
2807 * similar in nature to PutMetadataBucket. However, as the attrs may
2808 * changed in the meantime, we have to refresh. */
2809 short tries = 0;
2810 do {
2811 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
2812 RGWBucketInfo binfo;
2813 map<string, bufferlist> battrs;
2814
2815 op_ret = store->get_bucket_info(obj_ctx, s->bucket_tenant, s->bucket_name,
2816 binfo, nullptr, &battrs);
2817 if (op_ret < 0) {
2818 return;
2819 } else if (binfo.owner.compare(s->user->user_id) != 0) {
2820 /* New bucket doesn't belong to the account we're operating on. */
2821 op_ret = -EEXIST;
2822 return;
2823 } else {
2824 s->bucket_info = binfo;
2825 s->bucket_attrs = battrs;
2826 }
2827
2828 attrs.clear();
2829
2830 op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
2831 if (op_ret < 0) {
2832 return;
2833 }
2834 prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
2835 populate_with_generic_attrs(s, attrs);
2836 op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
2837 if (op_ret < 0) {
2838 return;
2839 }
2840
2841 /* Handle updates of the metadata for Swift's object versioning. */
2842 if (swift_ver_location) {
2843 s->bucket_info.swift_ver_location = *swift_ver_location;
2844 s->bucket_info.swift_versioning = (! swift_ver_location->empty());
2845 }
2846
2847 /* Web site of Swift API. */
2848 filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
2849 s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
2850
2851 /* This will also set the quota on the bucket. */
2852 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
2853 &s->bucket_info.objv_tracker);
2854 } while (op_ret == -ECANCELED && tries++ < 20);
2855
2856 /* Restore the proper return code. */
2857 if (op_ret >= 0) {
2858 op_ret = -ERR_BUCKET_EXISTS;
2859 }
2860 }
2861 }
2862
2863 int RGWDeleteBucket::verify_permission()
2864 {
2865 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucket)) {
2866 return -EACCES;
2867 }
2868
2869 return 0;
2870 }
2871
void RGWDeleteBucket::pre_exec()
{
  // Shared pre-exec hook used by the bucket/object ops in this file.
  rgw_bucket_object_pre_exec(s);
}
2876
void RGWDeleteBucket::execute()
{
  // Delete an (empty) bucket: sync stats, abort stale multipart uploads,
  // forward to the metadata master when needed, then remove and unlink.
  op_ret = -EINVAL;

  if (s->bucket_name.empty())
    return;

  if (!s->bucket_exists) {
    ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }
  RGWObjVersionTracker ot;
  ot.read_version = s->bucket_info.ep_objv;

  // System requests may pin the exact entry-point version to delete, so a
  // stale replay doesn't remove a newer incarnation of the bucket.
  if (s->system_request) {
    string tag = s->info.args.get(RGW_SYS_PARAM_PREFIX "tag");
    string ver_str = s->info.args.get(RGW_SYS_PARAM_PREFIX "ver");
    if (!tag.empty()) {
      ot.read_version.tag = tag;
      uint64_t ver;
      string err;
      ver = strict_strtol(ver_str.c_str(), 10, &err);
      if (!err.empty()) {
        ldout(s->cct, 0) << "failed to parse ver param" << dendl;
        op_ret = -EINVAL;
        return;
      }
      ot.read_version.ver = ver;
    }
  }

  // Best-effort: a failure here is logged but doesn't abort the delete.
  op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
  if ( op_ret < 0) {
     ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
  }

  op_ret = store->check_bucket_empty(s->bucket_info);
  if (op_ret < 0) {
    return;
  }

  // Non-master zones delete on the metadata master first.
  if (!store->is_meta_master()) {
    bufferlist in_data;
    op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       NULL);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        /* adjust error, we want to return with NoSuchBucket and not
         * NoSuchKey */
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return;
    }
  }

  string prefix, delimiter;

  // Swift bulk-delete style "path" argument narrows which multipart uploads
  // get aborted below.
  if (s->prot_flags & RGW_REST_SWIFT) {
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }

  // Abort any in-progress multipart uploads so their parts don't leak.
  op_ret = abort_bucket_multiparts(store, s->cct, s->bucket_info, prefix, delimiter);

  if (op_ret < 0) {
    return;
  }

  op_ret = store->delete_bucket(s->bucket_info, ot, false);

  if (op_ret == -ECANCELED) {
    // lost a race, either with mdlog sync or another delete bucket operation.
    // in either case, we've already called rgw_unlink_bucket()
    op_ret = 0;
    return;
  }

  if (op_ret == 0) {
    // Remove the bucket from the owner's bucket list; failure is only logged.
    op_ret = rgw_unlink_bucket(store, s->bucket_info.owner, s->bucket.tenant,
                               s->bucket.name, false);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
    }
  }

  // NOTE(review): this trailing check is redundant in a void function --
  // kept as-is.
  if (op_ret < 0) {
    return;
  }


}
2978
int RGWPutObj::verify_permission()
{
  // When this PUT is a server-side copy, read access to the copy source is
  // checked first (bucket policy, then ACL), before the write permission on
  // the destination.
  if (! copy_source.empty()) {

    RGWAccessControlPolicy cs_acl(s->cct);
    optional<Policy> policy;
    map<string, bufferlist> cs_attrs;
    rgw_bucket cs_bucket(copy_source_bucket_info.bucket);
    rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);

    rgw_obj obj(cs_bucket, cs_object);
    store->set_atomic(s->obj_ctx, obj);
    store->set_prefetch_data(s->obj_ctx, obj);

    /* check source object permissions */
    if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl,
                        policy, cs_bucket, cs_object) < 0) {
      return -EACCES;
    }

    /* admin request overrides permission checks */
    if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) {
      if (policy) {
        // Versioned reads need s3:GetObjectVersion, plain reads s3:GetObject.
        auto e = policy->eval(s->env, *s->auth.identity,
                              cs_object.instance.empty() ?
                              rgw::IAM::s3GetObject :
                              rgw::IAM::s3GetObjectVersion,
                              rgw::IAM::ARN(obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_READ)) {
          // Policy silent: fall back to the source object's ACL.
          return -EACCES;
        }
      } else if (!cs_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                           RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  // Destination: an explicit Allow in the bucket policy short-circuits the
  // ACL check; an explicit Deny rejects; otherwise fall through to the ACL.
  if (s->iam_policy) {
    auto e = s->iam_policy->eval(s->env, *s->auth.identity,
                                 rgw::IAM::s3PutObject,
                                 rgw_obj(s->bucket, s->object));
    if (e == Effect::Allow) {
      return 0;
    } else if (e == Effect::Deny) {
      return -EACCES;
    }
  }

  if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
    return -EACCES;
  }

  return 0;
}
3038
// Expose a pointer to the processor's multipart object descriptor.
// The pointee remains owned by this processor.
void RGWPutObjProcessor_Multipart::get_mp(RGWMPObj** _mp){
  *_mp = &mp;
}
3042
int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand)
{
  // Prepare writing a single part of a multipart upload: validate the
  // partNumber arg, set up the part's manifest/prefix, and initialize the
  // head object. When @oid_rand is given it replaces the upload id in the
  // naming (NOTE(review): presumably a retry/uniquifier path -- confirm at
  // the call sites).
  string oid = obj_str;
  upload_id = s->info.args.get("uploadId");
  if (!oid_rand) {
    mp.init(oid, upload_id);
  } else {
    mp.init(oid, upload_id, *oid_rand);
  }

  part_num = s->info.args.get("partNumber");
  if (part_num.empty()) {
    ldout(s->cct, 10) << "part number is empty" << dendl;
    return -EINVAL;
  }

  string err;
  uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);

  if (!err.empty()) {
    ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
    return -EINVAL;
  }

  // All of this part's stripes share the "<oid>.<upload_id-or-rand>" prefix.
  string upload_prefix = oid + ".";

  if (!oid_rand) {
    upload_prefix.append(upload_id);
  } else {
    upload_prefix.append(*oid_rand);
  }

  rgw_obj target_obj;
  target_obj.init(bucket, oid);

  manifest.set_prefix(upload_prefix);

  // Stripe the part at rgw_obj_stripe_size; `num` is the part number.
  manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, num);

  int r = manifest_gen.create_begin(store->ctx(), &manifest, s->bucket_info.placement_rule, bucket, target_obj);
  if (r < 0) {
    return r;
  }

  cur_obj = manifest_gen.get_cur_obj(store);
  rgw_raw_obj_to_obj(bucket, cur_obj, &head_obj);
  // Index-shard hashing keys off the base object name, not the part name.
  head_obj.index_hash_source = obj_str;

  r = prepare_init(store, NULL);
  if (r < 0) {
    return r;
  }

  return 0;
}
3098
int RGWPutObjProcessor_Multipart::do_complete(size_t accounted_size,
                                              const string& etag,
                                              real_time *mtime, real_time set_mtime,
                                              map<string, bufferlist>& attrs,
                                              real_time delete_at,
                                              const char *if_match,
                                              const char *if_nomatch, const string *user_data, rgw_zone_set *zones_trace)
{
  // Finish writing one multipart part: flush buffered data, write the part's
  // head-object metadata, then record the part info in the upload's meta
  // object omap. Returns 0 on success or a negative error code.
  complete_writing_data();

  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, head_obj);
  // The part's head object is written with versioning disabled.
  op_target.set_versioning_disabled(true);
  RGWRados::Object::Write head_obj_op(&op_target);

  head_obj_op.meta.set_mtime = set_mtime;
  head_obj_op.meta.mtime = mtime;
  head_obj_op.meta.owner = s->owner.get_id();
  head_obj_op.meta.delete_at = delete_at;
  head_obj_op.meta.zones_trace = zones_trace;
  head_obj_op.meta.modify_tail = true;

  int r = head_obj_op.write_meta(obj_len, accounted_size, attrs);
  if (r < 0)
    return r;

  bufferlist bl;
  RGWUploadPartInfo info;
  string p = "part.";
  bool sorted_omap = is_v2_upload_id(upload_id);

  if (sorted_omap) {
    // v2 upload ids zero-pad the part number so omap keys sort numerically.
    string err;
    int part_num_int = strict_strtol(part_num.c_str(), 10, &err);
    if (!err.empty()) {
      dout(10) << "bad part number specified: " << part_num << dendl;
      return -EINVAL;
    }
    char buf[32];
    snprintf(buf, sizeof(buf), "%08d", part_num_int);
    p.append(buf);
  } else {
    p.append(part_num);
  }
  // NOTE(review): atoi() here, while prepare()/the branch above use
  // strict_strtol -- inconsistent but part_num was already validated.
  info.num = atoi(part_num.c_str());
  info.etag = etag;
  info.size = obj_len;
  info.accounted_size = accounted_size;
  info.modified = real_clock::now();
  info.manifest = manifest;

  bool compressed;
  r = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
  if (r < 0) {
    dout(1) << "cannot get compression info" << dendl;
    return r;
  }

  ::encode(info, bl);

  string multipart_meta_obj = mp.get_meta();

  rgw_obj meta_obj;
  meta_obj.init_ns(bucket, multipart_meta_obj, mp_ns);
  // The multipart meta object lives in the extra-data pool.
  meta_obj.set_in_extra_data(true);

  rgw_raw_obj raw_meta_obj;

  store->obj_to_raw(s->bucket_info.placement_rule, meta_obj, &raw_meta_obj);

  // Record this part under its key on the upload's meta object.
  r = store->omap_set(raw_meta_obj, p, bl);

  return r;
}
3172
// Choose the write path for this PUT: requests carrying an "uploadId" arg
// go through the multipart processor, everything else through the atomic
// one. The caller owns the returned processor (see dispose_processor()).
RGWPutObjProcessor *RGWPutObj::select_processor(RGWObjectCtx& obj_ctx, bool *is_multipart)
{
  RGWPutObjProcessor *processor;

  bool multipart = s->info.args.exists("uploadId");

  uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;

  if (!multipart) {
    processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
    (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_olh_epoch(olh_epoch);
    (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_version_id(version_id);
  } else {
    processor = new RGWPutObjProcessor_Multipart(obj_ctx, s->bucket_info, part_size, s);
  }

  if (is_multipart) {
    // Optional out-param reporting which path was taken.
    *is_multipart = multipart;
  }

  return processor;
}
3195
// Destroy a processor obtained from select_processor().
void RGWPutObj::dispose_processor(RGWPutObjDataProcessor *processor)
{
  delete processor;
}
3200
void RGWPutObj::pre_exec()
{
  // Shared pre-exec hook used by the bucket/object ops in this file.
  rgw_bucket_object_pre_exec(s);
}
3205
/* Thin adapter that forwards RGWGetDataCB data callbacks into
 * RGWPutObj::get_data_cb() on the owning op. */
class RGWPutObj_CB : public RGWGetDataCB
{
  RGWPutObj *op;  // not owned; must outlive this callback
public:
  RGWPutObj_CB(RGWPutObj *_op) : op(_op) {}
  ~RGWPutObj_CB() override {}

  int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override {
    return op->get_data_cb(bl, bl_ofs, bl_len);
  }
};
3217
3218 int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
3219 {
3220 bufferlist bl_tmp;
3221 bl.copy(bl_ofs, bl_len, bl_tmp);
3222
3223 bl_aux.append(bl_tmp);
3224
3225 return bl_len;
3226 }
3227
3228 int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
3229 {
3230 RGWPutObj_CB cb(this);
3231 RGWGetDataCB* filter = &cb;
3232 boost::optional<RGWGetObj_Decompress> decompress;
3233 std::unique_ptr<RGWGetDataCB> decrypt;
3234 RGWCompressionInfo cs_info;
3235 map<string, bufferlist> attrs;
3236 map<string, bufferlist>::iterator attr_iter;
3237 int ret = 0;
3238
3239 uint64_t obj_size;
3240 int64_t new_ofs, new_end;
3241
3242 new_ofs = fst;
3243 new_end = lst;
3244
3245 rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
3246 rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
3247
3248 RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
3249 RGWRados::Object::Read read_op(&op_target);
3250 read_op.params.obj_size = &obj_size;
3251 read_op.params.attrs = &attrs;
3252
3253 ret = read_op.prepare();
3254 if (ret < 0)
3255 return ret;
3256
3257 bool need_decompress;
3258 op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
3259 if (op_ret < 0) {
3260 lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
3261 return -EIO;
3262 }
3263
3264 bool partial_content = true;
3265 if (need_decompress)
3266 {
3267 obj_size = cs_info.orig_size;
3268 decompress.emplace(s->cct, &cs_info, partial_content, filter);
3269 filter = &*decompress;
3270 }
3271
3272 attr_iter = attrs.find(RGW_ATTR_MANIFEST);
3273 op_ret = this->get_decrypt_filter(&decrypt,
3274 filter,
3275 attrs,
3276 attr_iter != attrs.end() ? &(attr_iter->second) : nullptr);
3277 if (decrypt != nullptr) {
3278 filter = decrypt.get();
3279 }
3280 if (op_ret < 0) {
3281 return ret;
3282 }
3283
3284 ret = read_op.range_to_ofs(obj_size, new_ofs, new_end);
3285 if (ret < 0)
3286 return ret;
3287
3288 filter->fixup_range(new_ofs, new_end);
3289 ret = read_op.iterate(new_ofs, new_end, filter);
3290
3291 if (ret >= 0)
3292 ret = filter->flush();
3293
3294 bl.claim_append(bl_aux);
3295
3296 return ret;
3297 }
3298
3299 // special handling for compression type = "random" with multipart uploads
3300 static CompressorRef get_compressor_plugin(const req_state *s,
3301 const std::string& compression_type)
3302 {
3303 if (compression_type != "random") {
3304 return Compressor::create(s->cct, compression_type);
3305 }
3306
3307 bool is_multipart{false};
3308 const auto& upload_id = s->info.args.get("uploadId", &is_multipart);
3309
3310 if (!is_multipart) {
3311 return Compressor::create(s->cct, compression_type);
3312 }
3313
3314 // use a hash of the multipart upload id so all parts use the same plugin
3315 const auto alg = std::hash<std::string>{}(upload_id) % Compressor::COMP_ALG_LAST;
3316 if (alg == Compressor::COMP_ALG_NONE) {
3317 return nullptr;
3318 }
3319 return Compressor::create(s->cct, alg);
3320 }
3321
/* Main PUT-object entry point.
 *
 * Data flow: client (or server-side copy source) -> optional encryption OR
 * compression filter -> RGWPutObjProcessor (atomic or multipart), after
 * which the object is completed with its attributes (etag, ACL,
 * compression info, user metadata).  Any error sets op_ret and jumps to
 * 'done' so the processor is always disposed. */
void RGWPutObj::execute()
{
  RGWPutObjProcessor *processor = NULL;
  RGWPutObjDataProcessor *filter = nullptr;
  std::unique_ptr<RGWPutObjDataProcessor> encrypt;
  char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
  char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
  unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
  MD5 hash;
  bufferlist bl, aclbl, bs;
  int len;
  map<string, string>::iterator iter;
  bool multipart;

  off_t fst;
  off_t lst;
  const auto& compression_type = store->get_zone_params().get_compression_type(
      s->bucket_info.placement_rule);
  CompressorRef plugin;
  boost::optional<RGWPutObj_Compress> compressor;

  /* MD5 is not computed for DLO/SLO manifests (their etag is derived
   * below via complete_etag()), unless a Content-MD5 was supplied. */
  bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
  perfcounter->inc(l_rgw_put);
  op_ret = -EINVAL;
  if (s->object.empty()) {
    goto done;
  }

  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "get_system_versioning_params() returned ret="
                      << op_ret << dendl;
    goto done;
  }

  /* Decode the client-supplied Content-MD5: base64 -> binary -> hex. */
  if (supplied_md5_b64) {
    need_calc_md5 = true;

    ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
    op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
                          supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
    ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
    if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
      op_ret = -ERR_INVALID_DIGEST;
      goto done;
    }

    buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
    ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
  }

  if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
                            we also check sizes at the end anyway */
    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                                user_quota, bucket_quota, s->content_length);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl;
      goto done;
    }
    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
    if (op_ret < 0) {
      ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
      goto done;
    }
  }

  /* A client-supplied etag is stashed in supplied_md5 and compared against
   * the final etag further below. */
  if (supplied_etag) {
    strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1);
    supplied_md5[sizeof(supplied_md5) - 1] = '\0';
  }

  processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);

  // no filters by default
  filter = processor;

  /* Handle object versioning of Swift API. */
  if (! multipart) {
    rgw_obj obj(s->bucket, s->object);
    op_ret = store->swift_versioning_copy(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                                          s->bucket_owner.get_id(),
                                          s->bucket_info,
                                          obj);
    if (op_ret < 0) {
      goto done;
    }
  }

  op_ret = processor->prepare(store, NULL);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret
                      << dendl;
    goto done;
  }

  /* Server-side copy without an explicit range: fetch the source object's
   * state so the whole object [0, accounted_size-1] can be copied. */
  if ((! copy_source.empty()) && !copy_source_range) {
    rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
    rgw_obj obj(copy_source_bucket_info.bucket, obj_key.name);

    RGWObjState *astate;
    op_ret = store->get_obj_state(static_cast<RGWObjectCtx *>(s->obj_ctx),
                                  copy_source_bucket_info, obj, &astate, true, false);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
      goto done;
    }
    if (!astate->exists){
      op_ret = -ENOENT;
      goto done;
    }
    lst = astate->accounted_size - 1;
  } else {
    lst = copy_source_range_lst;
  }

  fst = copy_source_range_fst;

  /* Encryption and compression are mutually exclusive: only when no
   * encrypt filter is installed do we try to compress. */
  op_ret = get_encrypt_filter(&encrypt, filter);
  if (op_ret < 0) {
    goto done;
  }
  if (encrypt != nullptr) {
    filter = encrypt.get();
  } else {
    //no encryption, we can try compression
    if (compression_type != "none") {
      plugin = get_compressor_plugin(s, compression_type);
      if (!plugin) {
        ldout(s->cct, 1) << "Cannot load plugin for compression type "
                         << compression_type << dendl;
      } else {
        compressor.emplace(s->cct, plugin, filter);
        filter = &*compressor;
      }
    }
  }

  /* Main data pump: read one chunk (from the client, or from the copy
   * source range), hash it, and push it through the filter chain. */
  do {
    bufferlist data;
    if (fst > lst)
      break;
    if (copy_source.empty()) {
      len = get_data(data);
    } else {
      uint64_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst);
      op_ret = get_data(fst, cur_lst, data);
      if (op_ret < 0)
        goto done;
      len = data.length();
      s->content_length += len;
      fst += len;
    }
    if (len < 0) {
      op_ret = len;
      goto done;
    }

    if (need_calc_md5) {
      hash.Update((const byte *)data.c_str(), data.length());
    }

    /* update torrrent */
    torrent.update(data);

    /* do we need this operation to be synchronous? if we're dealing with an object with immutable
     * head, e.g., multipart object we need to make sure we're the first one writing to this object
     */
    bool need_to_wait = (ofs == 0) && multipart;

    bufferlist orig_data;

    /* keep a copy: the write below may consume 'data', and on -EEXIST we
     * must replay the same bytes against a fresh processor */
    if (need_to_wait) {
      orig_data = data;
    }

    op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait);
    if (op_ret < 0) {
      if (op_ret != -EEXIST) {
        ldout(s->cct, 20) << "processor->thottle_data() returned ret="
                          << op_ret << dendl;
        goto done;
      }
      /* need_to_wait == true and op_ret == -EEXIST */
      ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl;

      /* restore original data */
      data.swap(orig_data);

      /* restart processing with different oid suffix */

      dispose_processor(processor);
      processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
      filter = processor;

      string oid_rand;
      char buf[33];
      gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
      oid_rand.append(buf);

      op_ret = processor->prepare(store, &oid_rand);
      if (op_ret < 0) {
        ldout(s->cct, 0) << "ERROR: processor->prepare() returned "
                         << op_ret << dendl;
        goto done;
      }

      /* rebuild the filter chain on top of the fresh processor */
      op_ret = get_encrypt_filter(&encrypt, filter);
      if (op_ret < 0) {
        goto done;
      }
      if (encrypt != nullptr) {
        filter = encrypt.get();
      } else {
        if (compressor) {
          /* re-arm the compressor against the new chain tail (emplace on an
           * engaged optional destroys and reconstructs it) */
          compressor.emplace(s->cct, plugin, filter);
          filter = &*compressor;
        }
      }
      op_ret = put_data_and_throttle(filter, data, ofs, false);
      if (op_ret < 0) {
        goto done;
      }
    }

    ofs += len;
  } while (len > 0);

  /* flush whatever the filters still buffer */
  {
    bufferlist flush;
    op_ret = put_data_and_throttle(filter, flush, ofs, false);
    if (op_ret < 0) {
      goto done;
    }
  }

  /* a short read means the client stopped sending before content_length
   * bytes arrived */
  if (!chunked_upload && ofs != s->content_length) {
    op_ret = -ERR_REQUEST_TIMEOUT;
    goto done;
  }
  s->obj_size = ofs;

  perfcounter->inc(l_rgw_put_b, s->obj_size);

  op_ret = do_aws4_auth_completion();
  if (op_ret < 0) {
    goto done;
  }

  /* re-check quota with the now-known actual object size */
  op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
                              user_quota, bucket_quota, s->obj_size);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
    goto done;
  }

  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
  if (op_ret < 0) {
    ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
    goto done;
  }

  hash.Final(m);

  /* record compression metadata so reads know how to inflate the object */
  if (compressor && compressor->is_compressed()) {
    bufferlist tmp;
    RGWCompressionInfo cs_info;
    cs_info.compression_type = plugin->get_type_name();
    cs_info.orig_size = s->obj_size;
    cs_info.blocks = move(compressor->get_compression_blocks());
    ::encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
    ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION
                      << " with type=" << cs_info.compression_type
                      << ", orig_size=" << cs_info.orig_size
                      << ", blocks=" << cs_info.blocks.size() << dendl;
  }

  buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);

  etag = calc_md5;

  if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
    op_ret = -ERR_BAD_DIGEST;
    goto done;
  }

  policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  /* DLO: store the manifest attribute; the etag is re-derived below */
  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      goto done;
    }
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  /* SLO: store the manifest and fold its raw data into the etag */
  if (slo_info) {
    bufferlist manifest_bl;
    ::encode(*slo_info, manifest_bl);
    emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));

    hash.Update((byte *)slo_info->raw_data, slo_info->raw_data_len);
    complete_etag(hash, &etag);
    ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
  }

  if (supplied_etag && etag.compare(supplied_etag) != 0) {
    op_ret = -ERR_UNPROCESSABLE_ENTITY;
    goto done;
  }
  bl.append(etag.c_str(), etag.size() + 1);
  emplace_attr(RGW_ATTR_ETAG, std::move(bl));

  populate_with_generic_attrs(s, attrs);
  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    goto done;
  }
  encode_delete_at_attr(delete_at, attrs);
  encode_obj_tags_attr(obj_tags.get(), attrs);

  /* Add a custom metadata to expose the information whether an object
   * is an SLO or not. Appending the attribute must be performed AFTER
   * processing any input from user in order to prohibit overwriting. */
  if (slo_info) {
    bufferlist slo_userindicator_bl;
    slo_userindicator_bl.append("True", 4);
    emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
  }

  op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
                               (delete_at ? *delete_at : real_time()), if_match, if_nomatch,
                               (user_data.empty() ? nullptr : &user_data));

  /* produce torrent */
  if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
  {
    torrent.init(s, store);
    torrent.set_create_date(mtime);
    op_ret = torrent.complete();
    if (0 != op_ret)
    {
      ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
      goto done;
    }
  }

done:
  dispose_processor(processor);
  perfcounter->tinc(l_rgw_put_lat,
                    (ceph_clock_now() - s->time));
}
3689
/* Always allows at this stage: the actual policy/ACL check for POST
 * uploads is performed inside execute(), after the form parameters have
 * been read. */
int RGWPostObj::verify_permission()
{
  return 0;
}
3694 /*
3695 RGWPutObjProcessor *RGWPostObj::select_processor(RGWObjectCtx& obj_ctx)
3696 {
3697 RGWPutObjProcessor *processor;
3698
3699 uint64_t part_size = s->cct->_conf->rgw_obj_stripe_size;
3700
3701 processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
3702
3703 return processor;
3704 }
3705
3706 void RGWPostObj::dispose_processor(RGWPutObjDataProcessor *processor)
3707 {
3708 delete processor;
3709 }
3710 */
/* Pre-execution hook: delegates to the common bucket/object pre-exec
 * helper. */
void RGWPostObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
3715
3716 void RGWPostObj::execute()
3717 {
3718 RGWPutObjDataProcessor *filter = nullptr;
3719 boost::optional<RGWPutObj_Compress> compressor;
3720 CompressorRef plugin;
3721 char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3722
3723 /* Read in the data from the POST form. */
3724 op_ret = get_params();
3725 if (op_ret < 0) {
3726 return;
3727 }
3728
3729 op_ret = verify_params();
3730 if (op_ret < 0) {
3731 return;
3732 }
3733
3734 if (s->iam_policy) {
3735 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
3736 rgw::IAM::s3PutObject,
3737 rgw_obj(s->bucket, s->object));
3738 if (e == Effect::Deny) {
3739 op_ret = -EACCES;
3740 return;
3741 } else if (e == Effect::Pass && !verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3742 op_ret = -EACCES;
3743 return;
3744 }
3745 } else if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
3746 op_ret = -EACCES;
3747 return;
3748 }
3749
3750 /* Start iteration over data fields. It's necessary as Swift's FormPost
3751 * is capable to handle multiple files in single form. */
3752 do {
3753 std::unique_ptr<RGWPutObjDataProcessor> encrypt;
3754 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
3755 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
3756 MD5 hash;
3757 ceph::buffer::list bl, aclbl;
3758 int len = 0;
3759
3760 op_ret = store->check_quota(s->bucket_owner.get_id(),
3761 s->bucket,
3762 user_quota,
3763 bucket_quota,
3764 s->content_length);
3765 if (op_ret < 0) {
3766 return;
3767 }
3768
3769 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3770 if (op_ret < 0) {
3771 return;
3772 }
3773
3774 if (supplied_md5_b64) {
3775 char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
3776 ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
3777 op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
3778 supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
3779 ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
3780 if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
3781 op_ret = -ERR_INVALID_DIGEST;
3782 return;
3783 }
3784
3785 buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
3786 ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
3787 }
3788
3789 RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx),
3790 s->bucket_info,
3791 s->bucket,
3792 get_current_filename(),
3793 /* part size */
3794 s->cct->_conf->rgw_obj_stripe_size,
3795 s->req_id,
3796 s->bucket_info.versioning_enabled());
3797 /* No filters by default. */
3798 filter = &processor;
3799
3800 op_ret = processor.prepare(store, nullptr);
3801 if (op_ret < 0) {
3802 return;
3803 }
3804
3805 op_ret = get_encrypt_filter(&encrypt, filter);
3806 if (op_ret < 0) {
3807 return;
3808 }
3809 if (encrypt != nullptr) {
3810 filter = encrypt.get();
3811 } else {
3812 const auto& compression_type = store->get_zone_params().get_compression_type(
3813 s->bucket_info.placement_rule);
3814 if (compression_type != "none") {
3815 plugin = Compressor::create(s->cct, compression_type);
3816 if (!plugin) {
3817 ldout(s->cct, 1) << "Cannot load plugin for compression type "
3818 << compression_type << dendl;
3819 } else {
3820 compressor.emplace(s->cct, plugin, filter);
3821 filter = &*compressor;
3822 }
3823 }
3824 }
3825
3826 bool again;
3827 do {
3828 ceph::bufferlist data;
3829 len = get_data(data, again);
3830
3831 if (len < 0) {
3832 op_ret = len;
3833 return;
3834 }
3835
3836 if (!len) {
3837 break;
3838 }
3839
3840 hash.Update((const byte *)data.c_str(), data.length());
3841 op_ret = put_data_and_throttle(filter, data, ofs, false);
3842
3843 ofs += len;
3844
3845 if (ofs > max_len) {
3846 op_ret = -ERR_TOO_LARGE;
3847 return;
3848 }
3849 } while (again);
3850
3851 {
3852 bufferlist flush;
3853 op_ret = put_data_and_throttle(filter, flush, ofs, false);
3854 }
3855
3856 if (len < min_len) {
3857 op_ret = -ERR_TOO_SMALL;
3858 return;
3859 }
3860
3861 s->obj_size = ofs;
3862
3863 if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) {
3864 op_ret = -ERR_BAD_DIGEST;
3865 return;
3866 }
3867
3868 op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
3869 user_quota, bucket_quota, s->obj_size);
3870 if (op_ret < 0) {
3871 return;
3872 }
3873
3874 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
3875 if (op_ret < 0) {
3876 return;
3877 }
3878
3879 hash.Final(m);
3880 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
3881
3882 etag = calc_md5;
3883 bl.append(etag.c_str(), etag.size() + 1);
3884 emplace_attr(RGW_ATTR_ETAG, std::move(bl));
3885
3886 policy.encode(aclbl);
3887 emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
3888
3889 const std::string content_type = get_current_content_type();
3890 if (! content_type.empty()) {
3891 ceph::bufferlist ct_bl;
3892 ct_bl.append(content_type.c_str(), content_type.size() + 1);
3893 emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
3894 }
3895
3896 if (compressor && compressor->is_compressed()) {
3897 ceph::bufferlist tmp;
3898 RGWCompressionInfo cs_info;
3899 cs_info.compression_type = plugin->get_type_name();
3900 cs_info.orig_size = s->obj_size;
3901 cs_info.blocks = move(compressor->get_compression_blocks());
3902 ::encode(cs_info, tmp);
3903 emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp));
3904 }
3905
3906 op_ret = processor.complete(s->obj_size, etag, nullptr, real_time(),
3907 attrs, (delete_at ? *delete_at : real_time()));
3908 } while (is_next_file_to_upload());
3909 }
3910
3911
3912 void RGWPutMetadataAccount::filter_out_temp_url(map<string, bufferlist>& add_attrs,
3913 const set<string>& rmattr_names,
3914 map<int, string>& temp_url_keys)
3915 {
3916 map<string, bufferlist>::iterator iter;
3917
3918 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1);
3919 if (iter != add_attrs.end()) {
3920 temp_url_keys[0] = iter->second.c_str();
3921 add_attrs.erase(iter);
3922 }
3923
3924 iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY2);
3925 if (iter != add_attrs.end()) {
3926 temp_url_keys[1] = iter->second.c_str();
3927 add_attrs.erase(iter);
3928 }
3929
3930 for (const string& name : rmattr_names) {
3931 if (name.compare(RGW_ATTR_TEMPURL_KEY1) == 0) {
3932 temp_url_keys[0] = string();
3933 }
3934 if (name.compare(RGW_ATTR_TEMPURL_KEY2) == 0) {
3935 temp_url_keys[1] = string();
3936 }
3937 }
3938 }
3939
/* Gather everything the op needs before verify_permission()/execute():
 * request params, the user's current attrs, the new ACL, request metadata,
 * TempURL keys and (optionally) quota settings.  Returns 0 or a negative
 * error code. */
int RGWPutMetadataAccount::init_processing()
{
  /* First, go to the base class. At the time of writing the method was
   * responsible only for initializing the quota. This isn't necessary
   * here as we are touching metadata only. I'm putting this call only
   * for the future. */
  op_ret = RGWOp::init_processing();
  if (op_ret < 0) {
    return op_ret;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return op_ret;
  }

  /* current attrs are needed to compute the add/del sets below */
  op_ret = rgw_get_user_attrs_by_uid(store, s->user->user_id, orig_attrs,
                                     &acct_op_tracker);
  if (op_ret < 0) {
    return op_ret;
  }

  if (has_policy) {
    bufferlist acl_bl;
    policy.encode(acl_bl);
    attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return op_ret;
  }
  prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
  populate_with_generic_attrs(s, attrs);

  /* Try extract the TempURL-related stuff now to allow verify_permission
   * evaluate whether we need FULL_CONTROL or not. */
  filter_out_temp_url(attrs, rmattr_names, temp_url_keys);

  /* The same with quota except a client needs to be reseller admin. */
  op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota,
                                 &new_quota_extracted);
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
3988
3989 int RGWPutMetadataAccount::verify_permission()
3990 {
3991 if (s->auth.identity->is_anonymous()) {
3992 return -EACCES;
3993 }
3994
3995 if (!verify_user_permission(s, RGW_PERM_WRITE)) {
3996 return -EACCES;
3997 }
3998
3999 /* Altering TempURL keys requires FULL_CONTROL. */
4000 if (!temp_url_keys.empty() && s->perm_mask != RGW_PERM_FULL_CONTROL) {
4001 return -EPERM;
4002 }
4003
4004 /* We are failing this intensionally to allow system user/reseller admin
4005 * override in rgw_process.cc. This is the way to specify a given RGWOp
4006 * expect extra privileges. */
4007 if (new_quota_extracted) {
4008 return -EACCES;
4009 }
4010
4011 return 0;
4012 }
4013
/* Apply the account-metadata changes gathered in init_processing():
 * TempURL keys, optionally the new user quota, and the merged attrs. */
void RGWPutMetadataAccount::execute()
{
  /* Params have been extracted earlier. See init_processing(). */
  RGWUserInfo new_uinfo;
  op_ret = rgw_get_user_info_by_uid(store, s->user->user_id, new_uinfo,
                                    &acct_op_tracker);
  if (op_ret < 0) {
    return;
  }

  /* Handle the TempURL-related stuff. */
  if (!temp_url_keys.empty()) {
    for (auto& pair : temp_url_keys) {
      new_uinfo.temp_url_keys[pair.first] = std::move(pair.second);
    }
  }

  /* Handle the quota extracted at the verify_permission step. */
  if (new_quota_extracted) {
    new_uinfo.user_quota = std::move(new_quota);
  }

  /* We are passing here the current (old) user info to allow the function
   * optimize-out some operations. */
  op_ret = rgw_store_user_info(store, new_uinfo, s->user,
                               &acct_op_tracker, real_time(), false, &attrs);
}
4041
4042 int RGWPutMetadataBucket::verify_permission()
4043 {
4044 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
4045 return -EACCES;
4046 }
4047
4048 return 0;
4049 }
4050
/* Pre-execution hook: delegates to the common bucket/object pre-exec
 * helper. */
void RGWPutMetadataBucket::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4055
/* Update bucket metadata: ACL (with Swift read/write-mask merging), CORS,
 * quota, Swift versioning location, website configuration and generic
 * attrs.  The mutation is wrapped in retry_raced_bucket_write() so it is
 * retried if another writer raced us on the bucket info. */
void RGWPutMetadataBucket::execute()
{
  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
  if (op_ret < 0) {
    return;
  }

  /* changing the placement rule of an existing bucket is not supported */
  if (!placement_rule.empty() &&
      placement_rule != s->bucket_info.placement_rule) {
    op_ret = -EEXIST;
    return;
  }

  op_ret = retry_raced_bucket_write(store, s, [this] {
    /* Encode special metadata first as we're using std::map::emplace under
     * the hood. This method will add the new items only if the map doesn't
     * contain such keys yet. */
    if (has_policy) {
      if (s->dialect.compare("swift") == 0) {
        /* Swift: merge the new ACL into the old one honouring the
         * read/write mask instead of replacing it wholesale. */
        auto old_policy = \
          static_cast<RGWAccessControlPolicy_SWIFT*>(s->bucket_acl.get());
        auto new_policy = static_cast<RGWAccessControlPolicy_SWIFT*>(&policy);
        new_policy->filter_merge(policy_rw_mask, old_policy);
        policy = *new_policy;
      }
      buffer::list bl;
      policy.encode(bl);
      emplace_attr(RGW_ATTR_ACL, std::move(bl));
    }

    if (has_cors) {
      buffer::list bl;
      cors_config.encode(bl);
      emplace_attr(RGW_ATTR_CORS, std::move(bl));
    }

    /* It's supposed that following functions WILL NOT change any
     * special attributes (like RGW_ATTR_ACL) if they are already
     * present in attrs. */
    prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs);
    populate_with_generic_attrs(s, attrs);

    /* According to the Swift's behaviour and its container_quota
     * WSGI middleware implementation: anyone with write permissions
     * is able to set the bucket quota. This stays in contrast to
     * account quotas that can be set only by clients holding
     * reseller admin privileges. */
    op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket_info.quota);
    if (op_ret < 0) {
      return op_ret;
    }

    if (swift_ver_location) {
      s->bucket_info.swift_ver_location = *swift_ver_location;
      s->bucket_info.swift_versioning = (!swift_ver_location->empty());
    }

    /* Web site of Swift API. */
    filter_out_website(attrs, rmattr_names, s->bucket_info.website_conf);
    s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();

    /* Setting attributes also stores the provided bucket info. Due
     * to this fact, the new quota settings can be serialized with
     * the same call. */
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                  &s->bucket_info.objv_tracker);
    return op_ret;
  });
}
4130
4131 int RGWPutMetadataObject::verify_permission()
4132 {
4133 // This looks to be something specific to Swift. We could add
4134 // operations like swift:PutMetadataObject to the Policy Engine.
4135 if (!verify_object_permission_no_policy(s, RGW_PERM_WRITE)) {
4136 return -EACCES;
4137 }
4138
4139 return 0;
4140 }
4141
/* Pre-execution hook: delegates to the common bucket/object pre-exec
 * helper. */
void RGWPutMetadataObject::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4146
/* Replace the object's user metadata (Swift POST semantics): read the
 * current attrs, compute the add/remove sets, fold in generic attrs,
 * delete-at and an optional DLO manifest, then persist them. */
void RGWPutMetadataObject::execute()
{
  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs, orig_attrs, rmattrs;

  store->set_atomic(s->obj_ctx, obj);

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  /* check if obj exists, read orig attrs */
  op_ret = get_obj_attrs(store, s, obj, orig_attrs);
  if (op_ret < 0) {
    return;
  }

  /* Check whether the object has expired. Swift API documentation
   * stands that we should return 404 Not Found in such case. */
  if (need_object_expiration() && object_is_expired(orig_attrs)) {
    op_ret = -ENOENT;
    return;
  }

  /* Filter currently existing attributes. */
  prepare_add_del_attrs(orig_attrs, attrs, rmattrs);
  populate_with_generic_attrs(s, attrs);
  encode_delete_at_attr(delete_at, attrs);

  if (dlo_manifest) {
    op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
    if (op_ret < 0) {
      ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
      return;
    }
  }

  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattrs);
}
4192
4193 int RGWDeleteObj::handle_slo_manifest(bufferlist& bl)
4194 {
4195 RGWSLOInfo slo_info;
4196 bufferlist::iterator bliter = bl.begin();
4197 try {
4198 ::decode(slo_info, bliter);
4199 } catch (buffer::error& err) {
4200 ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
4201 return -EIO;
4202 }
4203
4204 try {
4205 deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
4206 new RGWBulkDelete::Deleter(store, s));
4207 } catch (std::bad_alloc) {
4208 return -ENOMEM;
4209 }
4210
4211 list<RGWBulkDelete::acct_path_t> items;
4212 for (const auto& iter : slo_info.entries) {
4213 const string& path_str = iter.path;
4214
4215 const size_t sep_pos = path_str.find('/', 1 /* skip first slash */);
4216 if (boost::string_view::npos == sep_pos) {
4217 return -EINVAL;
4218 }
4219
4220 RGWBulkDelete::acct_path_t path;
4221
4222 path.bucket_name = url_decode(path_str.substr(1, sep_pos - 1));
4223 path.obj_key = url_decode(path_str.substr(sep_pos + 1));
4224
4225 items.push_back(path);
4226 }
4227
4228 /* Request removal of the manifest object itself. */
4229 RGWBulkDelete::acct_path_t path;
4230 path.bucket_name = s->bucket_name;
4231 path.obj_key = s->object;
4232 items.push_back(path);
4233
4234 int ret = deleter->delete_chunk(items);
4235 if (ret < 0) {
4236 return ret;
4237 }
4238
4239 return 0;
4240 }
4241
4242 int RGWDeleteObj::verify_permission()
4243 {
4244 if (s->iam_policy) {
4245 auto r = s->iam_policy->eval(s->env, *s->auth.identity,
4246 s->object.instance.empty() ?
4247 rgw::IAM::s3DeleteObject :
4248 rgw::IAM::s3DeleteObjectVersion,
4249 ARN(s->bucket, s->object.name));
4250 if (r == Effect::Allow)
4251 return true;
4252 else if (r == Effect::Deny)
4253 return false;
4254 }
4255
4256 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
4257 return -EACCES;
4258 }
4259
4260 return 0;
4261 }
4262
/* Pre-execution hook: delegates to the common bucket/object pre-exec
 * helper. */
void RGWDeleteObj::pre_exec()
{
  rgw_bucket_object_pre_exec(s);
}
4267
/* Delete a single object (or object version).
 *
 * Special paths:
 *  - multipart_delete: the request targets an SLO manifest and segment
 *    deletion is delegated to handle_slo_manifest();
 *  - Swift object versioning may restore a previous version instead of
 *    performing a plain delete (ver_restored). */
void RGWDeleteObj::execute()
{
  if (!s->bucket_exists) {
    op_ret = -ERR_NO_SUCH_BUCKET;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0) {
    return;
  }

  rgw_obj obj(s->bucket, s->object);
  map<string, bufferlist> attrs;


  if (!s->object.empty()) {
    if (need_object_expiration() || multipart_delete) {
      /* check if obj exists, read orig attrs */
      op_ret = get_obj_attrs(store, s, obj, attrs);
      if (op_ret < 0) {
        return;
      }
    }

    if (multipart_delete) {
      /* only objects carrying an SLO manifest may be deleted this way */
      const auto slo_attr = attrs.find(RGW_ATTR_SLO_MANIFEST);

      if (slo_attr != attrs.end()) {
        op_ret = handle_slo_manifest(slo_attr->second);
        if (op_ret < 0) {
          ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
        }
      } else {
        op_ret = -ERR_NOT_SLO_MANIFEST;
      }

      return;
    }

    RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
    obj_ctx->obj.set_atomic(obj);

    bool ver_restored = false;
    op_ret = store->swift_versioning_restore(*obj_ctx, s->bucket_owner.get_id(),
                                             s->bucket_info, obj, ver_restored);
    if (op_ret < 0) {
      return;
    }

    if (!ver_restored) {
      /* Swift's versioning mechanism hasn't found any previous version of
       * the object that could be restored. This means we should proceed
       * with the regular delete path. */
      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
      RGWRados::Object::Delete del_op(&del_target);

      op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
                                            &del_op.params.marker_version_id);
      if (op_ret < 0) {
        return;
      }

      del_op.params.bucket_owner = s->bucket_owner.get_id();
      del_op.params.versioning_status = s->bucket_info.versioning_status();
      del_op.params.obj_owner = s->owner;
      del_op.params.unmod_since = unmod_since;
      del_op.params.high_precision_time = s->system_request; /* system request uses high precision time */

      op_ret = del_op.delete_obj();
      if (op_ret >= 0) {
        delete_marker = del_op.result.delete_marker;
        version_id = del_op.result.version_id;
      }

      /* Check whether the object has expired. Swift API documentation
       * stands that we should return 404 Not Found in such case. */
      if (need_object_expiration() && object_is_expired(attrs)) {
        op_ret = -ENOENT;
        return;
      }
    }

    /* NOTE(review): -ECANCELED is mapped to success here -- presumably a
     * benign race with a concurrent writer/deleter; confirm against
     * RGWRados::Object::Delete semantics. */
    if (op_ret == -ECANCELED) {
      op_ret = 0;
    }
    if (op_ret == -ERR_PRECONDITION_FAILED && no_precondition_error) {
      op_ret = 0;
    }
  } else {
    op_ret = -EINVAL;
  }
}
4361
4362 bool RGWCopyObj::parse_copy_location(const boost::string_view& url_src,
4363 string& bucket_name,
4364 rgw_obj_key& key)
4365 {
4366 boost::string_view name_str;
4367 boost::string_view params_str;
4368
4369 size_t pos = url_src.find('?');
4370 if (pos == string::npos) {
4371 name_str = url_src;
4372 } else {
4373 name_str = url_src.substr(0, pos);
4374 params_str = url_src.substr(pos + 1);
4375 }
4376
4377 boost::string_view dec_src{name_str};
4378 if (dec_src[0] == '/')
4379 dec_src.remove_prefix(1);
4380
4381 pos = dec_src.find('/');
4382 if (pos ==string::npos)
4383 return false;
4384
4385 boost::string_view bn_view{dec_src.substr(0, pos)};
4386 bucket_name = std::string{bn_view.data(), bn_view.size()};
4387
4388 boost::string_view kn_view{dec_src.substr(pos + 1)};
4389 key.name = std::string{kn_view.data(), kn_view.size()};
4390
4391 if (key.name.empty()) {
4392 return false;
4393 }
4394
4395 if (! params_str.empty()) {
4396 RGWHTTPArgs args;
4397 args.set(params_str.to_string());
4398 args.parse();
4399
4400 key.instance = args.get("versionId", NULL);
4401 }
4402
4403 return true;
4404 }
4405
int RGWCopyObj::verify_permission()
{
  // Authorize a copy: READ on the source object (policy and/or ACL) and
  // WRITE on the destination bucket. Also resolves both bucket infos.
  RGWAccessControlPolicy src_acl(s->cct);
  optional<Policy> src_policy;
  op_ret = get_params();
  if (op_ret < 0)
    return op_ret;

  op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
  if (op_ret < 0) {
    return op_ret;
  }
  map<string, bufferlist> src_attrs;

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  if (s->bucket_instance_id.empty()) {
    op_ret = store->get_bucket_info(obj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, &src_attrs);
  } else {
    /* will only happen in intra region sync where the source and dest bucket is the same */
    op_ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, src_bucket_info, NULL, &src_attrs);
  }
  if (op_ret < 0) {
    if (op_ret == -ENOENT) {
      op_ret = -ERR_NO_SUCH_BUCKET;
    }
    return op_ret;
  }

  src_bucket = src_bucket_info.bucket;

  /* get buckets info (source and dest) */
  if (s->local_source && source_zone.empty()) {
    rgw_obj src_obj(src_bucket, src_object);
    store->set_atomic(s->obj_ctx, src_obj);
    store->set_prefetch_data(s->obj_ctx, src_obj);

    /* check source object permissions */
    op_ret = read_obj_policy(store, s, src_bucket_info, src_attrs, &src_acl,
                             src_policy, src_bucket, src_object);
    if (op_ret < 0) {
      return op_ret;
    }

    /* admin request overrides permission checks */
    if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) {
      if (src_policy) {
        // Policy present: explicit Deny wins; Pass defers to the ACL.
        auto e = src_policy->eval(s->env, *s->auth.identity,
                                  src_object.instance.empty() ?
                                  rgw::IAM::s3GetObject :
                                  rgw::IAM::s3GetObjectVersion,
                                  ARN(src_obj));
        if (e == Effect::Deny) {
          return -EACCES;
        } else if (e == Effect::Pass &&
                   !src_acl.verify_permission(*s->auth.identity, s->perm_mask,
                                              RGW_PERM_READ)) {
          return -EACCES;
        }
      } else if (!src_acl.verify_permission(*s->auth.identity,
                                            s->perm_mask,
                                            RGW_PERM_READ)) {
        return -EACCES;
      }
    }
  }

  RGWAccessControlPolicy dest_bucket_policy(s->cct);
  map<string, bufferlist> dest_attrs;

  if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source
                                                           or intra region sync */
    dest_bucket_info = src_bucket_info;
    dest_attrs = src_attrs;
  } else {
    op_ret = store->get_bucket_info(obj_ctx, dest_tenant_name, dest_bucket_name,
                                    dest_bucket_info, nullptr, &dest_attrs);
    if (op_ret < 0) {
      if (op_ret == -ENOENT) {
        op_ret = -ERR_NO_SUCH_BUCKET;
      }
      return op_ret;
    }
  }

  dest_bucket = dest_bucket_info.bucket;

  rgw_obj dest_obj(dest_bucket, dest_object);
  store->set_atomic(s->obj_ctx, dest_obj);

  /* check dest bucket permissions */
  op_ret = read_bucket_policy(store, s, dest_bucket_info, dest_attrs,
                              &dest_bucket_policy, dest_bucket);
  if (op_ret < 0) {
    return op_ret;
  }

  /* admin request overrides permission checks */
  // NOTE(review): this reads the owner of the member `dest_policy`, which is
  // only populated by init_dest_policy() further down -- confirm this
  // ordering is intentional.
  if (! s->auth.identity->is_admin_of(dest_policy.get_owner().get_id()) &&
      ! dest_bucket_policy.verify_permission(*s->auth.identity, s->perm_mask,
                                             RGW_PERM_WRITE)) {
    return -EACCES;
  }

  op_ret = init_dest_policy();
  if (op_ret < 0) {
    return op_ret;
  }

  return 0;
}
4517
4518
int RGWCopyObj::init_common()
{
  // Parse conditional-copy timestamps (If-Modified/Unmodified-Since) and
  // stage destination attributes (ACL + request metadata).
  // Returns 0 on success; sets and returns op_ret on error.
  if (if_mod) {
    if (parse_time(if_mod, &mod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    mod_ptr = &mod_time;
  }

  if (if_unmod) {
    if (parse_time(if_unmod, &unmod_time) < 0) {
      op_ret = -EINVAL;
      return op_ret;
    }
    unmod_ptr = &unmod_time;
  }

  // Destination ACL travels as an xattr on the copied object.
  bufferlist aclbl;
  dest_policy.encode(aclbl);
  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return op_ret;
  }
  populate_with_generic_attrs(s, attrs);

  return 0;
}
4549
4550 static void copy_obj_progress_cb(off_t ofs, void *param)
4551 {
4552 RGWCopyObj *op = static_cast<RGWCopyObj *>(param);
4553 op->progress_cb(ofs);
4554 }
4555
4556 void RGWCopyObj::progress_cb(off_t ofs)
4557 {
4558 if (!s->cct->_conf->rgw_copy_obj_progress)
4559 return;
4560
4561 if (ofs - last_ofs < s->cct->_conf->rgw_copy_obj_progress_every_bytes)
4562 return;
4563
4564 send_partial_response(ofs);
4565
4566 last_ofs = ofs;
4567 }
4568
void RGWCopyObj::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
4573
void RGWCopyObj::execute()
{
  // Perform the copy via RGWRados::copy_obj after Swift-versioning the
  // destination. Progress is reported through copy_obj_progress_cb.
  if (init_common() < 0)
    return;

  rgw_obj src_obj(src_bucket, src_object);
  rgw_obj dst_obj(dest_bucket, dest_object);

  RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
  obj_ctx.obj.set_atomic(src_obj);
  obj_ctx.obj.set_atomic(dst_obj);

  encode_delete_at_attr(delete_at, attrs);

  bool high_precision_time = (s->system_request);

  /* Handle object versioning of Swift API. In case of copying to remote this
   * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
  op_ret = store->swift_versioning_copy(obj_ctx,
                                        dest_bucket_info.owner,
                                        dest_bucket_info,
                                        dst_obj);
  if (op_ret < 0) {
    return;
  }

  op_ret = store->copy_obj(obj_ctx,
                           s->user->user_id,
                           client_id,
                           op_id,
                           &s->info,
                           source_zone,
                           dst_obj,
                           src_obj,
                           dest_bucket_info,
                           src_bucket_info,
                           &src_mtime,
                           &mtime,
                           mod_ptr,
                           unmod_ptr,
                           high_precision_time,
                           if_match,
                           if_nomatch,
                           attrs_mod,
                           copy_if_newer,
                           attrs, RGW_OBJ_CATEGORY_MAIN,
                           olh_epoch,
                           (delete_at ? *delete_at : real_time()),
                           (version_id.empty() ? NULL : &version_id),
                           &s->req_id, /* use req_id as tag */
                           &etag,
                           copy_obj_progress_cb, (void *)this
                           );
}
4628
4629 int RGWGetACLs::verify_permission()
4630 {
4631 bool perm;
4632 if (!s->object.empty()) {
4633 perm = verify_object_permission(s,
4634 s->object.instance.empty() ?
4635 rgw::IAM::s3GetObjectAcl :
4636 rgw::IAM::s3GetObjectVersionAcl);
4637 } else {
4638 perm = verify_bucket_permission(s, rgw::IAM::s3GetBucketAcl);
4639 }
4640 if (!perm)
4641 return -EACCES;
4642
4643 return 0;
4644 }
4645
void RGWGetACLs::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
4650
4651 void RGWGetACLs::execute()
4652 {
4653 stringstream ss;
4654 RGWAccessControlPolicy* const acl = \
4655 (!s->object.empty() ? s->object_acl.get() : s->bucket_acl.get());
4656 RGWAccessControlPolicy_S3* const s3policy = \
4657 static_cast<RGWAccessControlPolicy_S3*>(acl);
4658 s3policy->to_xml(ss);
4659 acls = ss.str();
4660 }
4661
4662
4663
4664 int RGWPutACLs::verify_permission()
4665 {
4666 bool perm;
4667 if (!s->object.empty()) {
4668 perm = verify_object_permission(s,
4669 s->object.instance.empty() ?
4670 rgw::IAM::s3PutObjectAcl :
4671 rgw::IAM::s3PutObjectVersionAcl);
4672 } else {
4673 perm = verify_bucket_permission(s, rgw::IAM::s3PutBucketAcl);
4674 }
4675 if (!perm)
4676 return -EACCES;
4677
4678 return 0;
4679 }
4680
4681 int RGWGetLC::verify_permission()
4682 {
4683 bool perm;
4684 perm = verify_bucket_permission(s, rgw::IAM::s3GetLifecycleConfiguration);
4685 if (!perm)
4686 return -EACCES;
4687
4688 return 0;
4689 }
4690
4691 int RGWPutLC::verify_permission()
4692 {
4693 bool perm;
4694 perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
4695 if (!perm)
4696 return -EACCES;
4697
4698 return 0;
4699 }
4700
4701 int RGWDeleteLC::verify_permission()
4702 {
4703 bool perm;
4704 perm = verify_bucket_permission(s, rgw::IAM::s3PutLifecycleConfiguration);
4705 if (!perm)
4706 return -EACCES;
4707
4708 return 0;
4709 }
4710
void RGWPutACLs::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
4715
void RGWGetLC::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
4720
void RGWPutLC::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
4725
void RGWDeleteLC::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
4730
void RGWPutACLs::execute()
{
  // Apply a new ACL to the addressed object or bucket. The ACL may come
  // from the request body (XML), a canned ACL, or ACL grant headers.
  bufferlist bl;

  RGWAccessControlPolicy_S3 *policy = NULL;
  RGWACLXMLParser_S3 parser(s->cct);
  RGWAccessControlPolicy_S3 new_policy(s->cct);
  stringstream ss;
  char *new_data = NULL;
  rgw_obj obj;

  op_ret = 0; /* XXX redundant? */

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }


  // Preserve the current owner; the rebuilt policy is re-anchored on it.
  RGWAccessControlPolicy* const existing_policy = \
    (s->object.empty() ? s->bucket_acl.get() : s->object_acl.get());

  owner = existing_policy->get_owner();

  op_ret = get_params();
  if (op_ret < 0) {
    if (op_ret == -ERANGE) {
      ldout(s->cct, 4) << "The size of request xml data is larger than the max limitation, data size = "
                       << s->length << dendl;
      op_ret = -ERR_MALFORMED_XML;
      s->err.message = "The XML you provided was larger than the maximum " +
                       std::to_string(s->cct->_conf->rgw_max_put_param_size) +
                       " bytes allowed.";
    }
    return;
  }

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  // A request body and a canned ACL are mutually exclusive.
  if (!s->canned_acl.empty() && len) {
    op_ret = -EINVAL;
    return;
  }

  if (!s->canned_acl.empty() || s->has_acl_header) {
    // Synthesize an XML policy from the canned ACL / ACL headers and
    // substitute it for the (empty) request body. `data` ownership: the
    // old buffer is freed here and replaced with the strdup'd copy.
    op_ret = get_policy_from_state(store, s, ss);
    if (op_ret < 0)
      return;

    new_data = strdup(ss.str().c_str());
    free(data);
    data = new_data;
    len = ss.str().size();
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    return;
  }
  policy = static_cast<RGWAccessControlPolicy_S3 *>(parser.find_first("AccessControlPolicy"));
  if (!policy) {
    op_ret = -EINVAL;
    return;
  }

  // Enforce the configured cap on number of grants (AWS-style limit).
  const RGWAccessControlList& req_acl = policy->get_acl();
  const multimap<string, ACLGrant>& req_grant_map = req_acl.get_grant_map();
#define ACL_GRANTS_MAX_NUM 100
  int max_num = s->cct->_conf->rgw_acl_grants_max_num;
  if (max_num < 0) {
    max_num = ACL_GRANTS_MAX_NUM;
  }

  int grants_num = req_grant_map.size();
  if (grants_num > max_num) {
    ldout(s->cct, 4) << "An acl can have up to "
                     << max_num
                     << " grants, request acl grants num: "
                     << grants_num << dendl;
    op_ret = -ERR_MALFORMED_ACL_ERROR;
    s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
                     + std::to_string(max_num)
                     + " grants allowed in an acl.";
    return;
  }

  // forward bucket acl requests to meta master zone
  if (s->object.empty() && !store->is_meta_master()) {
    bufferlist in_data;
    // include acl data unless it was generated from a canned_acl
    if (s->canned_acl.empty()) {
      in_data.append(data, len);
    }
    op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
    if (op_ret < 0) {
      ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
      return;
    }
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old AccessControlPolicy";
    policy->to_xml(*_dout);
    *_dout << dendl;
  }

  // Validate/canonicalize the requested policy against real users and the
  // preserved owner.
  op_ret = policy->rebuild(store, &owner, new_policy);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New AccessControlPolicy:";
    new_policy.to_xml(*_dout);
    *_dout << dendl;
  }

  new_policy.encode(bl);
  map<string, bufferlist> attrs;

  if (!s->object.empty()) {
    obj = rgw_obj(s->bucket, s->object);
    store->set_atomic(s->obj_ctx, obj);
    //if instance is empty, we should modify the latest object
    op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
  } else {
    attrs = s->bucket_attrs;
    attrs[RGW_ATTR_ACL] = bl;
    op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  }
  if (op_ret == -ECANCELED) {
    op_ret = 0; /* lost a race, but it's ok because acls are immutable */
  }
}
4864
4865 static void get_lc_oid(struct req_state *s, string& oid)
4866 {
4867 string shard_id = s->bucket.name + ':' +s->bucket.bucket_id;
4868 int max_objs = (s->cct->_conf->rgw_lc_max_objs > HASH_PRIME)?HASH_PRIME:s->cct->_conf->rgw_lc_max_objs;
4869 int index = ceph_str_hash_linux(shard_id.c_str(), shard_id.size()) % HASH_PRIME % max_objs;
4870 oid = lc_oid_prefix;
4871 char buf[32];
4872 snprintf(buf, 32, ".%d", index);
4873 oid.append(buf);
4874 return;
4875 }
4876
void RGWPutLC::execute()
{
  // Store a new lifecycle configuration: verify the mandatory Content-MD5,
  // parse and canonicalize the XML, persist it as a bucket attr, then
  // register the bucket in its lifecycle shard (under a cls lock).
  bufferlist bl;

  RGWLifecycleConfiguration_S3 *config = NULL;
  RGWLCXMLParser_S3 parser(s->cct);
  RGWLifecycleConfiguration_S3 new_config(s->cct);

  content_md5 = s->info.env->get("HTTP_CONTENT_MD5");
  if (content_md5 == nullptr) {
    op_ret = -ERR_INVALID_REQUEST;
    s->err.message = "Missing required header for this request: Content-MD5";
    ldout(s->cct, 5) << s->err.message << dendl;
    return;
  }

  std::string content_md5_bin;
  try {
    content_md5_bin = rgw::from_base64(boost::string_view(content_md5));
  } catch (...) {
    s->err.message = "Request header Content-MD5 contains character "
                     "that is not base64 encoded.";
    ldout(s->cct, 5) << s->err.message << dendl;
    op_ret = -ERR_BAD_DIGEST;
    return;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    return;
  }

  op_ret = get_params();
  if (op_ret < 0)
    return;

  ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;

  // Verify the body against the client-supplied Content-MD5.
  MD5 data_hash;
  unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
  data_hash.Update(reinterpret_cast<const byte*>(data), len);
  data_hash.Final(data_hash_res);

  if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
    op_ret = -ERR_BAD_DIGEST;
    s->err.message = "The Content-MD5 you specified did not match what we received.";
    // NOTE(review): data_hash_res is raw digest bytes, logged unencoded --
    // consider hex-encoding for readability.
    ldout(s->cct, 5) << s->err.message
                     << " Specified content md5: " << content_md5
                     << ", calculated content md5: " << data_hash_res
                     << dendl;
    return;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }
  config = static_cast<RGWLifecycleConfiguration_S3 *>(parser.find_first("LifecycleConfiguration"));
  if (!config) {
    op_ret = -ERR_MALFORMED_XML;
    return;
  }

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "Old LifecycleConfiguration:";
    config->to_xml(*_dout);
    *_dout << dendl;
  }

  // Validate/canonicalize the parsed configuration.
  op_ret = config->rebuild(store, new_config);
  if (op_ret < 0)
    return;

  if (s->cct->_conf->subsys.should_gather(ceph_subsys_rgw, 15)) {
    ldout(s->cct, 15) << "New LifecycleConfiguration:";
    new_config.to_xml(*_dout);
    *_dout << dendl;
  }

  new_config.encode(bl);
  map<string, bufferlist> attrs;
  attrs = s->bucket_attrs;
  attrs[RGW_ATTR_LC] = bl;
  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
  if (op_ret < 0)
    return;
  // Register this bucket in its lc shard so the lifecycle daemon picks it
  // up; the shard object is protected by an exclusive cls lock, retried
  // (with sleep) while another writer holds it.
  string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
  string oid;
  get_lc_oid(s, oid);
  pair<string, int> entry(shard_id, lc_uninitial);
  int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
  rados::cls::lock::Lock l(lc_index_lock_name);
  utime_t time(max_lock_secs, 0);
  l.set_duration(time);
  l.set_cookie(cookie);
  librados::IoCtx *ctx = store->get_lc_pool_ctx();
  do {
    op_ret = l.lock_exclusive(ctx, oid);
    if (op_ret == -EBUSY) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
      sleep(5);
      continue;
    }
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to acquire lock " << oid << op_ret << dendl;
      break;
    }
    op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry);
    if (op_ret < 0) {
      dout(0) << "RGWLC::RGWPutLC() failed to set entry " << oid << op_ret << dendl;
    }
    break;
  }while(1);
  l.unlock(ctx, oid);
  return;
}
4993
4994 void RGWDeleteLC::execute()
4995 {
4996 bufferlist bl;
4997 map<string, bufferlist> orig_attrs, attrs;
4998 map<string, bufferlist>::iterator iter;
4999 rgw_raw_obj obj;
5000 store->get_bucket_instance_obj(s->bucket, obj);
5001 store->set_prefetch_data(s->obj_ctx, obj);
5002 op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
5003 if (op_ret < 0)
5004 return;
5005
5006 for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
5007 const string& name = iter->first;
5008 dout(10) << "DeleteLC : attr: " << name << dendl;
5009 if (name.compare(0, (sizeof(RGW_ATTR_LC) - 1), RGW_ATTR_LC) != 0) {
5010 if (attrs.find(name) == attrs.end()) {
5011 attrs[name] = iter->second;
5012 }
5013 }
5014 }
5015 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
5016 string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
5017 pair<string, int> entry(shard_id, lc_uninitial);
5018 string oid;
5019 get_lc_oid(s, oid);
5020 int max_lock_secs = s->cct->_conf->rgw_lc_lock_max_time;
5021 librados::IoCtx *ctx = store->get_lc_pool_ctx();
5022 rados::cls::lock::Lock l(lc_index_lock_name);
5023 utime_t time(max_lock_secs, 0);
5024 l.set_duration(time);
5025 do {
5026 op_ret = l.lock_exclusive(ctx, oid);
5027 if (op_ret == -EBUSY) {
5028 dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on, sleep 5, try again" << oid << dendl;
5029 sleep(5);
5030 continue;
5031 }
5032 if (op_ret < 0) {
5033 dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock " << oid << op_ret << dendl;
5034 break;
5035 }
5036 op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry);
5037 if (op_ret < 0) {
5038 dout(0) << "RGWLC::RGWDeleteLC() failed to set entry " << oid << op_ret << dendl;
5039 }
5040 break;
5041 }while(1);
5042 l.unlock(ctx, oid);
5043 return;
5044 }
5045
int RGWGetCORS::verify_permission()
{
  // Bucket owner, or anyone granted s3GetBucketCORS by policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
}
5050
5051 void RGWGetCORS::execute()
5052 {
5053 op_ret = read_bucket_cors();
5054 if (op_ret < 0)
5055 return ;
5056
5057 if (!cors_exist) {
5058 dout(2) << "No CORS configuration set yet for this bucket" << dendl;
5059 op_ret = -ENOENT;
5060 return;
5061 }
5062 }
5063
int RGWPutCORS::verify_permission()
{
  // Bucket owner, or anyone granted s3PutBucketCORS by policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
5068
5069 void RGWPutCORS::execute()
5070 {
5071 rgw_raw_obj obj;
5072
5073 op_ret = get_params();
5074 if (op_ret < 0)
5075 return;
5076
5077 if (!store->is_meta_master()) {
5078 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
5079 if (op_ret < 0) {
5080 ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
5081 return;
5082 }
5083 }
5084
5085 op_ret = retry_raced_bucket_write(store, s, [this] {
5086 map<string, bufferlist> attrs = s->bucket_attrs;
5087 attrs[RGW_ATTR_CORS] = cors_bl;
5088 return rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
5089 });
5090 }
5091
int RGWDeleteCORS::verify_permission()
{
  // No separate delete permission
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
}
5097
void RGWDeleteCORS::execute()
{
  // Remove the bucket's CORS configuration by rewriting the bucket attrs
  // without any RGW_ATTR_CORS entries, retrying on racing writers.
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  bufferlist bl;
  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  op_ret = retry_raced_bucket_write(store, s, [this] {
      rgw_raw_obj obj;
      store->get_bucket_instance_obj(s->bucket, obj);
      store->set_prefetch_data(s->obj_ctx, obj);
      map<string, bufferlist> orig_attrs, attrs, rmattrs;
      map<string, bufferlist>::iterator iter;

      // Re-read attrs inside the retry lambda so each attempt starts from
      // the current bucket state.
      op_ret = get_system_obj_attrs(store, s, obj, orig_attrs, NULL, &s->bucket_info.objv_tracker);
      if (op_ret < 0)
        return op_ret;

      /* only remove meta attrs */
      for (iter = orig_attrs.begin(); iter != orig_attrs.end(); ++iter) {
        const string& name = iter->first;
        dout(10) << "DeleteCORS : attr: " << name << dendl;
        if (name.compare(0, (sizeof(RGW_ATTR_CORS) - 1), RGW_ATTR_CORS) == 0) {
          rmattrs[name] = iter->second;
        } else if (attrs.find(name) == attrs.end()) {
          attrs[name] = iter->second;
        }
      }
      return rgw_bucket_set_attrs(store, s->bucket_info, attrs,
                                  &s->bucket_info.objv_tracker);
    });
}
5135
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
  // Fill the preflight response headers from the matched CORS rule.
  get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
5139
5140 int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
5141 rule = cc->host_name_rule(origin);
5142 if (!rule) {
5143 dout(10) << "There is no cors rule present for " << origin << dendl;
5144 return -ENOENT;
5145 }
5146
5147 if (!validate_cors_rule_method(rule, req_meth)) {
5148 return -ENOENT;
5149 }
5150
5151 if (!validate_cors_rule_header(rule, req_hdrs)) {
5152 return -ENOENT;
5153 }
5154
5155 return 0;
5156 }
5157
void RGWOptionsCORS::execute()
{
  // Handle a CORS preflight (OPTIONS) request: requires Origin and
  // Access-Control-Request-Method headers and an existing CORS config.
  op_ret = read_bucket_cors();
  if (op_ret < 0)
    return;

  origin = s->info.env->get("HTTP_ORIGIN");
  if (!origin) {
    dout(0) <<
    "Preflight request without mandatory Origin header"
    << dendl;
    op_ret = -EINVAL;
    return;
  }
  req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
  if (!req_meth) {
    dout(0) <<
    "Preflight request without mandatory Access-control-request-method header"
    << dendl;
    op_ret = -EINVAL;
    return;
  }
  if (!cors_exist) {
    dout(2) << "No CORS configuration set yet for this bucket" << dendl;
    op_ret = -ENOENT;
    return;
  }
  req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
  op_ret = validate_cors_request(&bucket_cors);
  if (!rule) {
    // No rule matched the Origin: clear the echo-back fields so the
    // response layer does not emit CORS headers. op_ret keeps the
    // validation error.
    origin = req_meth = NULL;
    return;
  }
  return;
}
5193
int RGWGetRequestPayment::verify_permission()
{
  // Bucket owner, or anyone granted s3GetBucketRequestPayment by policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
}
5198
void RGWGetRequestPayment::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
5203
void RGWGetRequestPayment::execute()
{
  // Expose the bucket's requester-pays flag to the response layer.
  requester_pays = s->bucket_info.requester_pays;
}
5208
int RGWSetRequestPayment::verify_permission()
{
  // Bucket owner, or anyone granted s3PutBucketRequestPayment by policy.
  return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
}
5213
void RGWSetRequestPayment::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
5218
5219 void RGWSetRequestPayment::execute()
5220 {
5221 op_ret = get_params();
5222
5223 if (op_ret < 0)
5224 return;
5225
5226 s->bucket_info.requester_pays = requester_pays;
5227 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
5228 &s->bucket_attrs);
5229 if (op_ret < 0) {
5230 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
5231 << " returned err=" << op_ret << dendl;
5232 return;
5233 }
5234 }
5235
5236 int RGWInitMultipart::verify_permission()
5237 {
5238 if (s->iam_policy) {
5239 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5240 rgw::IAM::s3PutObject,
5241 rgw_obj(s->bucket, s->object));
5242 if (e == Effect::Allow) {
5243 return 0;
5244 } else if (e == Effect::Deny) {
5245 return -EACCES;
5246 }
5247 }
5248
5249 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5250 return -EACCES;
5251 }
5252
5253 return 0;
5254 }
5255
void RGWInitMultipart::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
5260
void RGWInitMultipart::execute()
{
  // Initiate a multipart upload: generate a unique upload id and create the
  // corresponding multipart meta object (exclusive create, retried on id
  // collision).
  bufferlist aclbl;
  map<string, bufferlist> attrs;
  rgw_obj obj;

  if (get_params() < 0)
    return;

  if (s->object.empty())
    return;

  policy.encode(aclbl);
  attrs[RGW_ATTR_ACL] = aclbl;

  populate_with_generic_attrs(s, attrs);

  /* select encryption mode */
  op_ret = prepare_encryption(attrs);
  if (op_ret != 0)
    return;

  op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
  if (op_ret < 0) {
    return;
  }

  do {
    char buf[33];
    gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
    upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
    upload_id.append(buf);

    string tmp_obj_name;
    RGWMPObj mp(s->object.name, upload_id);
    tmp_obj_name = mp.get_meta();

    obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
    // the meta object will be indexed with 0 size, we c
    obj.set_in_extra_data(true);
    obj.index_hash_source = s->object.name;

    RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
    op_target.set_versioning_disabled(true); /* no versioning for multipart meta */

    RGWRados::Object::Write obj_op(&op_target);

    obj_op.meta.owner = s->owner.get_id();
    obj_op.meta.category = RGW_OBJ_CATEGORY_MULTIMETA;
    // PUT_OBJ_CREATE_EXCL: fail with -EEXIST if the upload id is already
    // taken, in which case we loop and generate a fresh one.
    obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;

    op_ret = obj_op.write_meta(0, 0, attrs);
  } while (op_ret == -EEXIST);
}
5315
5316 static int get_multipart_info(RGWRados *store, struct req_state *s,
5317 string& meta_oid,
5318 RGWAccessControlPolicy *policy,
5319 map<string, bufferlist>& attrs)
5320 {
5321 map<string, bufferlist>::iterator iter;
5322 bufferlist header;
5323
5324 rgw_obj obj;
5325 obj.init_ns(s->bucket, meta_oid, mp_ns);
5326 obj.set_in_extra_data(true);
5327
5328 int op_ret = get_obj_attrs(store, s, obj, attrs);
5329 if (op_ret < 0) {
5330 if (op_ret == -ENOENT) {
5331 return -ERR_NO_SUCH_UPLOAD;
5332 }
5333 return op_ret;
5334 }
5335
5336 if (policy) {
5337 for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
5338 string name = iter->first;
5339 if (name.compare(RGW_ATTR_ACL) == 0) {
5340 bufferlist& bl = iter->second;
5341 bufferlist::iterator bli = bl.begin();
5342 try {
5343 ::decode(*policy, bli);
5344 } catch (buffer::error& err) {
5345 ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
5346 return -EIO;
5347 }
5348 break;
5349 }
5350 }
5351 }
5352
5353 return 0;
5354 }
5355
5356 int RGWCompleteMultipart::verify_permission()
5357 {
5358 if (s->iam_policy) {
5359 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5360 rgw::IAM::s3PutObject,
5361 rgw_obj(s->bucket, s->object));
5362 if (e == Effect::Allow) {
5363 return 0;
5364 } else if (e == Effect::Deny) {
5365 return -EACCES;
5366 }
5367 }
5368
5369 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5370 return -EACCES;
5371 }
5372
5373 return 0;
5374 }
5375
void RGWCompleteMultipart::pre_exec()
{
  // Delegate to the shared bucket+object pre-execution helper.
  rgw_bucket_object_pre_exec(s);
}
5380
5381 void RGWCompleteMultipart::execute()
5382 {
5383 RGWMultiCompleteUpload *parts;
5384 map<int, string>::iterator iter;
5385 RGWMultiXMLParser parser;
5386 string meta_oid;
5387 map<uint32_t, RGWUploadPartInfo> obj_parts;
5388 map<uint32_t, RGWUploadPartInfo>::iterator obj_iter;
5389 map<string, bufferlist> attrs;
5390 off_t ofs = 0;
5391 MD5 hash;
5392 char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
5393 char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
5394 bufferlist etag_bl;
5395 rgw_obj meta_obj;
5396 rgw_obj target_obj;
5397 RGWMPObj mp;
5398 RGWObjManifest manifest;
5399 uint64_t olh_epoch = 0;
5400 string version_id;
5401
5402 op_ret = get_params();
5403 if (op_ret < 0)
5404 return;
5405 op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
5406 if (op_ret < 0) {
5407 return;
5408 }
5409
5410 if (!data || !len) {
5411 op_ret = -ERR_MALFORMED_XML;
5412 return;
5413 }
5414
5415 if (!parser.init()) {
5416 op_ret = -EIO;
5417 return;
5418 }
5419
5420 if (!parser.parse(data, len, 1)) {
5421 op_ret = -ERR_MALFORMED_XML;
5422 return;
5423 }
5424
5425 parts = static_cast<RGWMultiCompleteUpload *>(parser.find_first("CompleteMultipartUpload"));
5426 if (!parts || parts->parts.empty()) {
5427 op_ret = -ERR_MALFORMED_XML;
5428 return;
5429 }
5430
5431 if ((int)parts->parts.size() >
5432 s->cct->_conf->rgw_multipart_part_upload_limit) {
5433 op_ret = -ERANGE;
5434 return;
5435 }
5436
5437 mp.init(s->object.name, upload_id);
5438 meta_oid = mp.get_meta();
5439
5440 int total_parts = 0;
5441 int handled_parts = 0;
5442 int max_parts = 1000;
5443 int marker = 0;
5444 bool truncated;
5445 RGWCompressionInfo cs_info;
5446 bool compressed = false;
5447 uint64_t accounted_size = 0;
5448
5449 uint64_t min_part_size = s->cct->_conf->rgw_multipart_min_part_size;
5450
5451 list<rgw_obj_index_key> remove_objs; /* objects to be removed from index listing */
5452
5453 bool versioned_object = s->bucket_info.versioning_enabled();
5454
5455 iter = parts->parts.begin();
5456
5457 meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
5458 meta_obj.set_in_extra_data(true);
5459 meta_obj.index_hash_source = s->object.name;
5460
5461 /*take a cls lock on meta_obj to prevent racing completions (or retries)
5462 from deleting the parts*/
5463 rgw_pool meta_pool;
5464 rgw_raw_obj raw_obj;
5465 int max_lock_secs_mp =
5466 s->cct->_conf->get_val<int64_t>("rgw_mp_lock_max_time");
5467 utime_t dur(max_lock_secs_mp, 0);
5468
5469 store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
5470 store->get_obj_data_pool((s->bucket_info).placement_rule,
5471 meta_obj,&meta_pool);
5472 store->open_pool_ctx(meta_pool, serializer.ioctx);
5473
5474 op_ret = serializer.try_lock(raw_obj.oid, dur);
5475 if (op_ret < 0) {
5476 dout(0) << "RGWCompleteMultipart::execute() failed to acquire lock " << dendl;
5477 op_ret = -ERR_INTERNAL_ERROR;
5478 s->err.message = "This multipart completion is already in progress";
5479 return;
5480 }
5481
5482 op_ret = get_obj_attrs(store, s, meta_obj, attrs);
5483
5484 if (op_ret < 0) {
5485 ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
5486 << " ret=" << op_ret << dendl;
5487 return;
5488 }
5489
5490 do {
5491 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
5492 marker, obj_parts, &marker, &truncated);
5493 if (op_ret == -ENOENT) {
5494 op_ret = -ERR_NO_SUCH_UPLOAD;
5495 }
5496 if (op_ret < 0)
5497 return;
5498
5499 total_parts += obj_parts.size();
5500 if (!truncated && total_parts != (int)parts->parts.size()) {
5501 ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts
5502 << " expected: " << parts->parts.size() << dendl;
5503 op_ret = -ERR_INVALID_PART;
5504 return;
5505 }
5506
5507 for (obj_iter = obj_parts.begin(); iter != parts->parts.end() && obj_iter != obj_parts.end(); ++iter, ++obj_iter, ++handled_parts) {
5508 uint64_t part_size = obj_iter->second.accounted_size;
5509 if (handled_parts < (int)parts->parts.size() - 1 &&
5510 part_size < min_part_size) {
5511 op_ret = -ERR_TOO_SMALL;
5512 return;
5513 }
5514
5515 char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
5516 if (iter->first != (int)obj_iter->first) {
5517 ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: "
5518 << iter->first << " next uploaded: "
5519 << obj_iter->first << dendl;
5520 op_ret = -ERR_INVALID_PART;
5521 return;
5522 }
5523 string part_etag = rgw_string_unquote(iter->second);
5524 if (part_etag.compare(obj_iter->second.etag) != 0) {
5525 ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first
5526 << " etag: " << iter->second << dendl;
5527 op_ret = -ERR_INVALID_PART;
5528 return;
5529 }
5530
5531 hex_to_buf(obj_iter->second.etag.c_str(), petag,
5532 CEPH_CRYPTO_MD5_DIGESTSIZE);
5533 hash.Update((const byte *)petag, sizeof(petag));
5534
5535 RGWUploadPartInfo& obj_part = obj_iter->second;
5536
5537 /* update manifest for part */
5538 string oid = mp.get_part(obj_iter->second.num);
5539 rgw_obj src_obj;
5540 src_obj.init_ns(s->bucket, oid, mp_ns);
5541
5542 if (obj_part.manifest.empty()) {
5543 ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj="
5544 << src_obj << dendl;
5545 op_ret = -ERR_INVALID_PART;
5546 return;
5547 } else {
5548 manifest.append(obj_part.manifest, store);
5549 }
5550
5551 if (obj_part.cs_info.compression_type != "none") {
5552 if (compressed && cs_info.compression_type != obj_part.cs_info.compression_type) {
5553 ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload ("
5554 << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
5555 op_ret = -ERR_INVALID_PART;
5556 return;
5557 }
5558 int64_t new_ofs; // offset in compression data for new part
5559 if (cs_info.blocks.size() > 0)
5560 new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
5561 else
5562 new_ofs = 0;
5563 for (const auto& block : obj_part.cs_info.blocks) {
5564 compression_block cb;
5565 cb.old_ofs = block.old_ofs + cs_info.orig_size;
5566 cb.new_ofs = new_ofs;
5567 cb.len = block.len;
5568 cs_info.blocks.push_back(cb);
5569 new_ofs = cb.new_ofs + cb.len;
5570 }
5571 if (!compressed)
5572 cs_info.compression_type = obj_part.cs_info.compression_type;
5573 cs_info.orig_size += obj_part.cs_info.orig_size;
5574 compressed = true;
5575 }
5576
5577 rgw_obj_index_key remove_key;
5578 src_obj.key.get_index_key(&remove_key);
5579
5580 remove_objs.push_back(remove_key);
5581
5582 ofs += obj_part.size;
5583 accounted_size += obj_part.accounted_size;
5584 }
5585 } while (truncated);
5586 hash.Final((byte *)final_etag);
5587
5588 buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
5589 snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
5590 "-%lld", (long long)parts->parts.size());
5591 etag = final_etag_str;
5592 ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl;
5593
5594 etag_bl.append(final_etag_str, strlen(final_etag_str) + 1);
5595
5596 attrs[RGW_ATTR_ETAG] = etag_bl;
5597
5598 if (compressed) {
5599 // write compression attribute to full object
5600 bufferlist tmp;
5601 ::encode(cs_info, tmp);
5602 attrs[RGW_ATTR_COMPRESSION] = tmp;
5603 }
5604
5605 target_obj.init(s->bucket, s->object.name);
5606 if (versioned_object) {
5607 store->gen_rand_obj_instance_name(&target_obj);
5608 }
5609
5610 RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
5611
5612 obj_ctx.obj.set_atomic(target_obj);
5613
5614 RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
5615 RGWRados::Object::Write obj_op(&op_target);
5616
5617 obj_op.meta.manifest = &manifest;
5618 obj_op.meta.remove_objs = &remove_objs;
5619
5620 obj_op.meta.ptag = &s->req_id; /* use req_id as operation tag */
5621 obj_op.meta.owner = s->owner.get_id();
5622 obj_op.meta.flags = PUT_OBJ_CREATE;
5623 obj_op.meta.modify_tail = true;
5624 obj_op.meta.completeMultipart = true;
5625 op_ret = obj_op.write_meta(ofs, accounted_size, attrs);
5626 if (op_ret < 0)
5627 return;
5628
5629 // remove the upload obj
5630 int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
5631 s->bucket_info, meta_obj, 0);
5632 if (r >= 0) {
5633 /* serializer's exclusive lock is released */
5634 serializer.clear_locked();
5635 } else {
5636 ldout(store->ctx(), 0) << "WARNING: failed to remove object "
5637 << meta_obj << dendl;
5638 }
5639 }
5640
5641 int RGWCompleteMultipart::MPSerializer::try_lock(
5642 const std::string& _oid,
5643 utime_t dur)
5644 {
5645 oid = _oid;
5646 op.assert_exists();
5647 lock.set_duration(dur);
5648 lock.lock_exclusive(&op);
5649 int ret = ioctx.operate(oid, &op);
5650 if (! ret) {
5651 locked = true;
5652 }
5653 return ret;
5654 }
5655
5656 void RGWCompleteMultipart::complete()
5657 {
5658 /* release exclusive lock iff not already */
5659 if (unlikely(serializer.locked)) {
5660 int r = serializer.unlock();
5661 if (r < 0) {
5662 ldout(store->ctx(), 0) << "WARNING: failed to unlock "
5663 << serializer.oid << dendl;
5664 }
5665 }
5666 send_response();
5667 }
5668
5669 int RGWAbortMultipart::verify_permission()
5670 {
5671 if (s->iam_policy) {
5672 auto e = s->iam_policy->eval(s->env, *s->auth.identity,
5673 rgw::IAM::s3AbortMultipartUpload,
5674 rgw_obj(s->bucket, s->object));
5675 if (e == Effect::Allow) {
5676 return 0;
5677 } else if (e == Effect::Deny) {
5678 return -EACCES;
5679 }
5680 }
5681
5682 if (!verify_bucket_permission_no_policy(s, RGW_PERM_WRITE)) {
5683 return -EACCES;
5684 }
5685
5686 return 0;
5687 }
5688
void RGWAbortMultipart::pre_exec()
{
  /* shared bucket/object pre-execution bookkeeping (helper defined
   * elsewhere in this file) */
  rgw_bucket_object_pre_exec(s);
}
5693
5694 void RGWAbortMultipart::execute()
5695 {
5696 op_ret = -EINVAL;
5697 string upload_id;
5698 string meta_oid;
5699 upload_id = s->info.args.get("uploadId");
5700 map<string, bufferlist> attrs;
5701 rgw_obj meta_obj;
5702 RGWMPObj mp;
5703
5704 if (upload_id.empty() || s->object.empty())
5705 return;
5706
5707 mp.init(s->object.name, upload_id);
5708 meta_oid = mp.get_meta();
5709
5710 op_ret = get_multipart_info(store, s, meta_oid, NULL, attrs);
5711 if (op_ret < 0)
5712 return;
5713
5714 RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
5715 op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket_info, mp);
5716 }
5717
5718 int RGWListMultipart::verify_permission()
5719 {
5720 if (!verify_object_permission(s, rgw::IAM::s3ListMultipartUploadParts))
5721 return -EACCES;
5722
5723 return 0;
5724 }
5725
void RGWListMultipart::pre_exec()
{
  /* shared bucket/object pre-execution bookkeeping (helper defined
   * elsewhere in this file) */
  rgw_bucket_object_pre_exec(s);
}
5730
5731 void RGWListMultipart::execute()
5732 {
5733 map<string, bufferlist> xattrs;
5734 string meta_oid;
5735 RGWMPObj mp;
5736
5737 op_ret = get_params();
5738 if (op_ret < 0)
5739 return;
5740
5741 mp.init(s->object.name, upload_id);
5742 meta_oid = mp.get_meta();
5743
5744 op_ret = get_multipart_info(store, s, meta_oid, &policy, xattrs);
5745 if (op_ret < 0)
5746 return;
5747
5748 op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts,
5749 marker, parts, NULL, &truncated);
5750 }
5751
5752 int RGWListBucketMultiparts::verify_permission()
5753 {
5754 if (!verify_bucket_permission(s,
5755 rgw::IAM::s3ListBucketMultipartUploads))
5756 return -EACCES;
5757
5758 return 0;
5759 }
5760
void RGWListBucketMultiparts::pre_exec()
{
  /* shared bucket/object pre-execution bookkeeping (helper defined
   * elsewhere in this file) */
  rgw_bucket_object_pre_exec(s);
}
5765
/*
 * List the in-progress multipart uploads of a bucket.  Index entries in
 * the multipart namespace are listed, filtered down to those whose key
 * parses as an upload meta object, and collected into `uploads` for the
 * response.
 */
void RGWListBucketMultiparts::execute()
{
  vector<rgw_bucket_dir_entry> objs;
  string marker_meta;

  op_ret = get_params();
  if (op_ret < 0)
    return;

  /* Swift can express the listing scope via a "path" query arg; it is
   * mutually exclusive with an explicit prefix/delimiter. */
  if (s->prot_flags & RGW_REST_SWIFT) {
    string path_args;
    path_args = s->info.args.get("path");
    if (!path_args.empty()) {
      if (!delimiter.empty() || !prefix.empty()) {
        op_ret = -EINVAL;
        return;
      }
      prefix = path_args;
      delimiter="/";
    }
  }
  /* convert the client-visible marker into the meta-object key form the
   * index listing uses */
  marker_meta = marker.get_meta();

  op_ret = list_bucket_multiparts(store, s->bucket_info, prefix, marker_meta, delimiter,
                                  max_uploads, &objs, &common_prefixes, &is_truncated);
  if (op_ret < 0) {
    return;
  }

  if (!objs.empty()) {
    vector<rgw_bucket_dir_entry>::iterator iter;
    RGWMultipartUploadEntry entry;
    for (iter = objs.begin(); iter != objs.end(); ++iter) {
      rgw_obj_key key(iter->key);
      /* skip index entries that don't parse as upload meta objects */
      if (!entry.mp.from_meta(key.name))
        continue;
      entry.obj = *iter;
      uploads.push_back(entry);
    }
    /* NOTE(review): next_marker is whatever `entry` holds after the loop;
     * if no key parsed successfully it may be a default/partially-filled
     * entry — confirm the response serializer tolerates that. */
    next_marker = entry;
  }
}
5808
5809 void RGWGetHealthCheck::execute()
5810 {
5811 if (!g_conf->rgw_healthcheck_disabling_path.empty() &&
5812 (::access(g_conf->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) {
5813 /* Disabling path specified & existent in the filesystem. */
5814 op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */
5815 } else {
5816 op_ret = 0; /* 200 OK */
5817 }
5818 }
5819
5820 int RGWDeleteMultiObj::verify_permission()
5821 {
5822 acl_allowed = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
5823 if (!acl_allowed && !s->iam_policy)
5824 return -EACCES;
5825
5826 return 0;
5827 }
5828
void RGWDeleteMultiObj::pre_exec()
{
  /* shared bucket/object pre-execution bookkeeping (helper defined
   * elsewhere in this file) */
  rgw_bucket_object_pre_exec(s);
}
5833
/*
 * S3 multi-object delete (POST /?delete): parse the XML body, then delete
 * each listed key (up to max_to_delete), streaming a per-object result
 * into the response as we go.  Parse failures take the `error` path
 * (status-only response); once begin_response() has run, all per-object
 * errors are reported inside the body and the request itself returns 0.
 */
void RGWDeleteMultiObj::execute()
{
  RGWMultiDelDelete *multi_delete;
  vector<rgw_obj_key>::iterator iter;
  RGWMultiDelXMLParser parser;
  int num_processed = 0;
  RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);

  op_ret = get_params();
  if (op_ret < 0) {
    goto error;
  }

  if (!data) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.init()) {
    op_ret = -EINVAL;
    goto error;
  }

  if (!parser.parse(data, len, 1)) {
    op_ret = -EINVAL;
    goto error;
  }

  /* the request body must contain a top-level <Delete> element */
  multi_delete = static_cast<RGWMultiDelDelete *>(parser.find_first("Delete"));
  if (!multi_delete) {
    op_ret = -EINVAL;
    goto error;
  }

  /* quiet mode suppresses per-object success entries in the response */
  if (multi_delete->is_quiet())
    quiet = true;

  begin_response();
  if (multi_delete->objects.empty()) {
    goto done;
  }

  for (iter = multi_delete->objects.begin();
       iter != multi_delete->objects.end() && num_processed < max_to_delete;
       ++iter, num_processed++) {
    rgw_obj obj(bucket, *iter);
    /* per-object IAM evaluation: a versioned delete is a distinct action */
    if (s->iam_policy) {
      auto e = s->iam_policy->eval(s->env,
                                   *s->auth.identity,
                                   iter->instance.empty() ?
                                   rgw::IAM::s3DeleteObject :
                                   rgw::IAM::s3DeleteObjectVersion,
                                   obj);
      if ((e == Effect::Deny) ||
          (e == Effect::Pass && !acl_allowed)) {
        /* denied: report EACCES for this key and keep going */
        send_partial_response(*iter, false, "", -EACCES);
        continue;
      }
    }

    obj_ctx->obj.set_atomic(obj);

    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = s->bucket_owner.get_id();
    del_op.params.versioning_status = s->bucket_info.versioning_status();
    del_op.params.obj_owner = s->owner;

    op_ret = del_op.delete_obj();
    /* deleting a non-existent key is reported as success, matching S3 */
    if (op_ret == -ENOENT) {
      op_ret = 0;
    }

    send_partial_response(*iter, del_op.result.delete_marker,
                          del_op.result.version_id, op_ret);
  }

  /* set the return code to zero, errors at this point will be
     dumped to the response */
  op_ret = 0;

done:
  // will likely segfault if begin_response() has not been called
  end_response();
  free(data);
  return;

error:
  send_status();
  free(data);
  return;

}
5928
5929 bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
5930 map<string, bufferlist>& battrs,
5931 ACLOwner& bucket_owner /* out */)
5932 {
5933 RGWAccessControlPolicy bacl(store->ctx());
5934 int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
5935 if (ret < 0) {
5936 return false;
5937 }
5938
5939 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
5940
5941 bucket_owner = bacl.get_owner();
5942
5943 /* We can use global user_acl because each BulkDelete request is allowed
5944 * to work on entities from a single account only. */
5945 return verify_bucket_permission(s, binfo.bucket, s->user_acl.get(),
5946 &bacl, policy, rgw::IAM::s3DeleteBucket);
5947 }
5948
/*
 * Delete one item of a Swift bulk-delete request: an object when
 * path.obj_key is non-empty, otherwise the bucket itself (delete +
 * unlink from the owner + forward to the metadata master when this zone
 * is not the master).  Failures never abort the request — they are
 * tallied in num_unfound or appended to `failures` and false is
 * returned; true means the item was deleted and num_deleted bumped.
 */
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
{
  auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);

  RGWBucketInfo binfo;
  map<string, bufferlist> battrs;
  ACLOwner bowner;

  int ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                   path.bucket_name, binfo, nullptr,
                                   &battrs);
  if (ret < 0) {
    goto binfo_fail;
  }

  if (!verify_permission(binfo, battrs, bowner)) {
    ret = -EACCES;
    goto auth_fail;
  }

  if (!path.obj_key.empty()) {
    /* object deletion */
    rgw_obj obj(binfo.bucket, path.obj_key);
    obj_ctx.obj.set_atomic(obj);

    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
    RGWRados::Object::Delete del_op(&del_target);

    del_op.params.bucket_owner = binfo.owner;
    del_op.params.versioning_status = binfo.versioning_status();
    del_op.params.obj_owner = bowner;

    ret = del_op.delete_obj();
    if (ret < 0) {
      goto delop_fail;
    }
  } else {
    /* bucket deletion */
    RGWObjVersionTracker ot;
    ot.read_version = binfo.ep_objv;

    ret = store->delete_bucket(binfo, ot);
    if (0 == ret) {
      ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
                              binfo.bucket.name, false);
      if (ret < 0) {
        /* non-fatal: the bucket is gone, only the owner's bucket list is
         * left stale */
        ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret
                         << dendl;
      }
    }
    if (ret < 0) {
      goto delop_fail;
    }

    if (!store->is_meta_master()) {
      /* propagate the bucket removal to the metadata master zone */
      bufferlist in_data;
      ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                      nullptr);
      if (ret < 0) {
        if (ret == -ENOENT) {
          /* adjust error, we want to return with NoSuchBucket and not
           * NoSuchKey */
          ret = -ERR_NO_SUCH_BUCKET;
        }
        goto delop_fail;
      }
    }
  }

  num_deleted++;
  return true;


binfo_fail:
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl;
    num_unfound++;
  } else {
    ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret
                            << dendl;

    fail_desc_t failed_item = {
      .err = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

auth_fail:
  ldout(store->ctx(), 20) << "wrong auth for " << path << dendl;
  {
    fail_desc_t failed_item = {
      .err = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;

delop_fail:
  if (-ENOENT == ret) {
    ldout(store->ctx(), 20) << "cannot find entry " << path << dendl;
    num_unfound++;
  } else {
    fail_desc_t failed_item = {
      .err = ret,
      .path = path
    };
    failures.push_back(failed_item);
  }
  return false;
}
6060
6061 bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
6062 {
6063 ldout(store->ctx(), 20) << "in delete_chunk" << dendl;
6064 for (auto path : paths) {
6065 ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl;
6066 delete_single(path);
6067 }
6068
6069 return true;
6070 }
6071
int RGWBulkDelete::verify_permission()
{
  /* No request-level check here: each path in the request body is
   * authorized individually in Deleter::verify_permission(). */
  return 0;
}
6076
void RGWBulkDelete::pre_exec()
{
  /* shared bucket/object pre-execution bookkeeping (helper defined
   * elsewhere in this file) */
  rgw_bucket_object_pre_exec(s);
}
6081
6082 void RGWBulkDelete::execute()
6083 {
6084 deleter = std::unique_ptr<Deleter>(new Deleter(store, s));
6085
6086 bool is_truncated = false;
6087 do {
6088 list<RGWBulkDelete::acct_path_t> items;
6089
6090 int ret = get_data(items, &is_truncated);
6091 if (ret < 0) {
6092 return;
6093 }
6094
6095 ret = deleter->delete_chunk(items);
6096 } while (!op_ret && is_truncated);
6097
6098 return;
6099 }
6100
6101
6102 constexpr std::array<int, 2> RGWBulkUploadOp::terminal_errors;
6103
6104 int RGWBulkUploadOp::verify_permission()
6105 {
6106 if (s->auth.identity->is_anonymous()) {
6107 return -EACCES;
6108 }
6109
6110 if (! verify_user_permission(s, RGW_PERM_WRITE)) {
6111 return -EACCES;
6112 }
6113
6114 if (s->user->user_id.tenant != s->bucket_tenant) {
6115 ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
6116 << " (user_id.tenant=" << s->user->user_id.tenant
6117 << " requested=" << s->bucket_tenant << ")"
6118 << dendl;
6119 return -EACCES;
6120 }
6121
6122 if (s->user->max_buckets < 0) {
6123 return -EPERM;
6124 }
6125
6126 return 0;
6127 }
6128
void RGWBulkUploadOp::pre_exec()
{
  /* shared bucket/object pre-execution bookkeeping (helper defined
   * elsewhere in this file) */
  rgw_bucket_object_pre_exec(s);
}
6133
6134 boost::optional<std::pair<std::string, rgw_obj_key>>
6135 RGWBulkUploadOp::parse_path(const boost::string_ref& path)
6136 {
6137 /* We need to skip all slashes at the beginning in order to preserve
6138 * compliance with Swift. */
6139 const size_t start_pos = path.find_first_not_of('/');
6140
6141 if (boost::string_ref::npos != start_pos) {
6142 /* Seperator is the first slash after the leading ones. */
6143 const size_t sep_pos = path.substr(start_pos).find('/');
6144
6145 if (boost::string_ref::npos != sep_pos) {
6146 const auto bucket_name = path.substr(start_pos, sep_pos - start_pos);
6147 const auto obj_name = path.substr(sep_pos + 1);
6148
6149 return std::make_pair(bucket_name.to_string(),
6150 rgw_obj_key(obj_name.to_string()));
6151 } else {
6152 /* It's guaranteed here that bucket name is at least one character
6153 * long and is different than slash. */
6154 return std::make_pair(path.substr(start_pos).to_string(),
6155 rgw_obj_key());
6156 }
6157 }
6158
6159 return none;
6160 }
6161
6162 std::pair<std::string, std::string>
6163 RGWBulkUploadOp::handle_upload_path(struct req_state *s)
6164 {
6165 std::string bucket_path, file_prefix;
6166 if (! s->init_state.url_bucket.empty()) {
6167 file_prefix = bucket_path = s->init_state.url_bucket + "/";
6168 if (! s->object.empty()) {
6169 std::string& object_name = s->object.name;
6170
6171 /* As rgw_obj_key::empty() already verified emptiness of s->object.name,
6172 * we can safely examine its last element. */
6173 if (object_name.back() == '/') {
6174 file_prefix.append(object_name);
6175 } else {
6176 file_prefix.append(object_name).append("/");
6177 }
6178 }
6179 }
6180 return std::make_pair(bucket_path, file_prefix);
6181 }
6182
6183 int RGWBulkUploadOp::handle_dir_verify_permission()
6184 {
6185 if (s->user->max_buckets > 0) {
6186 RGWUserBuckets buckets;
6187 std::string marker;
6188 bool is_truncated = false;
6189 op_ret = rgw_read_user_buckets(store, s->user->user_id, buckets,
6190 marker, std::string(), s->user->max_buckets,
6191 false, &is_truncated);
6192 if (op_ret < 0) {
6193 return op_ret;
6194 }
6195
6196 if (buckets.count() >= static_cast<size_t>(s->user->max_buckets)) {
6197 return -ERR_TOO_MANY_BUCKETS;
6198 }
6199 }
6200
6201 return 0;
6202 }
6203
6204 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
6205 {
6206 /* the request of container or object level will contain bucket name.
6207 * only at account level need to append the bucket name */
6208 if (info.script_uri.find(bucket_name) != std::string::npos) {
6209 return;
6210 }
6211
6212 ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
6213 info.script_uri.append("/").append(bucket_name);
6214 info.request_uri_aws4 = info.request_uri = info.script_uri;
6215 info.effective_uri = "/" + bucket_name;
6216 }
6217
6218 int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
6219 {
6220 ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl;
6221
6222 op_ret = handle_dir_verify_permission();
6223 if (op_ret < 0) {
6224 return op_ret;
6225 }
6226
6227 std::string bucket_name;
6228 rgw_obj_key object_junk;
6229 std::tie(bucket_name, object_junk) = *parse_path(path);
6230
6231 rgw_raw_obj obj(store->get_zone_params().domain_root,
6232 rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));
6233
6234 /* we need to make sure we read bucket info, it's not read before for this
6235 * specific request */
6236 RGWBucketInfo binfo;
6237 std::map<std::string, ceph::bufferlist> battrs;
6238 op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
6239 binfo, NULL, &battrs);
6240 if (op_ret < 0 && op_ret != -ENOENT) {
6241 return op_ret;
6242 }
6243 const bool bucket_exists = (op_ret != -ENOENT);
6244
6245 if (bucket_exists) {
6246 RGWAccessControlPolicy old_policy(s->cct);
6247 int r = get_bucket_policy_from_attr(s->cct, store, binfo,
6248 battrs, &old_policy);
6249 if (r >= 0) {
6250 if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
6251 op_ret = -EEXIST;
6252 return op_ret;
6253 }
6254 }
6255 }
6256
6257 RGWBucketInfo master_info;
6258 rgw_bucket *pmaster_bucket = nullptr;
6259 uint32_t *pmaster_num_shards = nullptr;
6260 real_time creation_time;
6261 obj_version objv, ep_objv, *pobjv = nullptr;
6262
6263 if (! store->is_meta_master()) {
6264 JSONParser jp;
6265 ceph::bufferlist in_data;
6266 req_info info = s->info;
6267 forward_req_info(s->cct, info, bucket_name);
6268 op_ret = forward_request_to_master(s, nullptr, store, in_data, &jp, &info);
6269 if (op_ret < 0) {
6270 return op_ret;
6271 }
6272
6273 JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
6274 JSONDecoder::decode_json("object_ver", objv, &jp);
6275 JSONDecoder::decode_json("bucket_info", master_info, &jp);
6276
6277 ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver="
6278 << objv.ver << dendl;
6279 ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time
6280 << dendl;
6281
6282 pmaster_bucket= &master_info.bucket;
6283 creation_time = master_info.creation_time;
6284 pmaster_num_shards = &master_info.num_shards;
6285 pobjv = &objv;
6286 } else {
6287 pmaster_bucket = nullptr;
6288 pmaster_num_shards = nullptr;
6289 }
6290
6291
6292 std::string placement_rule;
6293 if (bucket_exists) {
6294 std::string selected_placement_rule;
6295 rgw_bucket bucket;
6296 bucket.tenant = s->bucket_tenant;
6297 bucket.name = s->bucket_name;
6298 op_ret = store->select_bucket_placement(*(s->user),
6299 store->get_zonegroup().get_id(),
6300 placement_rule,
6301 &selected_placement_rule,
6302 nullptr);
6303 if (selected_placement_rule != binfo.placement_rule) {
6304 op_ret = -EEXIST;
6305 ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl;
6306 return op_ret;
6307 }
6308 }
6309
6310 /* Create metadata: ACLs. */
6311 std::map<std::string, ceph::bufferlist> attrs;
6312 RGWAccessControlPolicy policy;
6313 policy.create_default(s->user->user_id, s->user->display_name);
6314 ceph::bufferlist aclbl;
6315 policy.encode(aclbl);
6316 attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
6317
6318 RGWQuotaInfo quota_info;
6319 const RGWQuotaInfo * pquota_info = nullptr;
6320
6321 rgw_bucket bucket;
6322 bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
6323 bucket.name = bucket_name;
6324
6325
6326 RGWBucketInfo out_info;
6327 op_ret = store->create_bucket(*(s->user),
6328 bucket,
6329 store->get_zonegroup().get_id(),
6330 placement_rule, binfo.swift_ver_location,
6331 pquota_info, attrs,
6332 out_info, pobjv, &ep_objv, creation_time,
6333 pmaster_bucket, pmaster_num_shards, true);
6334 /* continue if EEXIST and create_bucket will fail below. this way we can
6335 * recover from a partial create by retrying it. */
6336 ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret
6337 << ", bucket=" << bucket << dendl;
6338
6339 if (op_ret && op_ret != -EEXIST) {
6340 return op_ret;
6341 }
6342
6343 const bool existed = (op_ret == -EEXIST);
6344 if (existed) {
6345 /* bucket already existed, might have raced with another bucket creation, or
6346 * might be partial bucket creation that never completed. Read existing bucket
6347 * info, verify that the reported bucket owner is the current user.
6348 * If all is ok then update the user's list of buckets.
6349 * Otherwise inform client about a name conflict.
6350 */
6351 if (out_info.owner.compare(s->user->user_id) != 0) {
6352 op_ret = -EEXIST;
6353 ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl;
6354 return op_ret;
6355 }
6356 bucket = out_info.bucket;
6357 }
6358
6359 op_ret = rgw_link_bucket(store, s->user->user_id, bucket,
6360 out_info.creation_time, false);
6361 if (op_ret && !existed && op_ret != -EEXIST) {
6362 /* if it exists (or previously existed), don't remove it! */
6363 op_ret = rgw_unlink_bucket(store, s->user->user_id,
6364 bucket.tenant, bucket.name);
6365 if (op_ret < 0) {
6366 ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret="
6367 << op_ret << dendl;
6368 }
6369 } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
6370 ldout(s->cct, 20) << "bulk upload: containers already exists"
6371 << dendl;
6372 op_ret = -ERR_BUCKET_EXISTS;
6373 }
6374
6375 return op_ret;
6376 }
6377
6378
6379 bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
6380 const rgw_obj& obj,
6381 std::map<std::string, ceph::bufferlist>& battrs,
6382 ACLOwner& bucket_owner /* out */)
6383 {
6384 RGWAccessControlPolicy bacl(store->ctx());
6385 op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
6386 if (op_ret < 0) {
6387 ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket"
6388 << dendl;
6389 return false;
6390 }
6391
6392 auto policy = get_iam_policy_from_attr(s->cct, store, battrs, binfo.bucket.tenant);
6393
6394 bucket_owner = bacl.get_owner();
6395 if (policy) {
6396 auto e = policy->eval(s->env, *s->auth.identity,
6397 rgw::IAM::s3PutObject, obj);
6398 if (e == Effect::Allow) {
6399 return true;
6400 } else if (e == Effect::Deny) {
6401 return false;
6402 }
6403 }
6404
6405 return verify_bucket_permission_no_policy(s, s->user_acl.get(),
6406 &bacl, RGW_PERM_WRITE);
6407 }
6408
6409 int RGWBulkUploadOp::handle_file(const boost::string_ref path,
6410 const size_t size,
6411 AlignedStreamGetter& body)
6412 {
6413
6414 ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size
6415 << dendl;
6416
6417 RGWPutObjDataProcessor *filter = nullptr;
6418 boost::optional<RGWPutObj_Compress> compressor;
6419
6420 if (size > static_cast<const size_t>(s->cct->_conf->rgw_max_put_size)) {
6421 op_ret = -ERR_TOO_LARGE;
6422 return op_ret;
6423 }
6424
6425 std::string bucket_name;
6426 rgw_obj_key object;
6427 std::tie(bucket_name, object) = *parse_path(path);
6428
6429 auto& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
6430 RGWBucketInfo binfo;
6431 std::map<std::string, ceph::bufferlist> battrs;
6432 ACLOwner bowner;
6433 op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
6434 bucket_name, binfo, nullptr, &battrs);
6435 if (op_ret == -ENOENT) {
6436 ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name
6437 << dendl;
6438 } else if (op_ret < 0) {
6439 return op_ret;
6440 }
6441
6442 if (! handle_file_verify_permission(binfo,
6443 rgw_obj(binfo.bucket, object),
6444 battrs, bowner)) {
6445 ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl;
6446 op_ret = -EACCES;
6447 return op_ret;
6448 }
6449
6450 op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
6451 user_quota, bucket_quota, size);
6452 if (op_ret < 0) {
6453 return op_ret;
6454 }
6455
6456 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
6457 if (op_ret < 0) {
6458 return op_ret;
6459 }
6460
6461 RGWPutObjProcessor_Atomic processor(obj_ctx,
6462 binfo,
6463 binfo.bucket,
6464 object.name,
6465 /* part size */
6466 s->cct->_conf->rgw_obj_stripe_size,
6467 s->req_id,
6468 binfo.versioning_enabled());
6469
6470 /* No filters by default. */
6471 filter = &processor;
6472
6473 op_ret = processor.prepare(store, nullptr);
6474 if (op_ret < 0) {
6475 ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret="
6476 << op_ret << dendl;
6477 return op_ret;
6478 }
6479
6480 const auto& compression_type = store->get_zone_params().get_compression_type(
6481 binfo.placement_rule);
6482 CompressorRef plugin;
6483 if (compression_type != "none") {
6484 plugin = Compressor::create(s->cct, compression_type);
6485 if (! plugin) {
6486 ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type "
6487 << compression_type << dendl;
6488 } else {
6489 compressor.emplace(s->cct, plugin, filter);
6490 filter = &*compressor;
6491 }
6492 }
6493
6494 /* Upload file content. */
6495 ssize_t len = 0;
6496 size_t ofs = 0;
6497 MD5 hash;
6498 do {
6499 ceph::bufferlist data;
6500 len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);
6501
6502 ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl;
6503 if (len < 0) {
6504 op_ret = len;
6505 return op_ret;
6506 } else if (len > 0) {
6507 hash.Update((const byte *)data.c_str(), data.length());
6508 op_ret = put_data_and_throttle(filter, data, ofs, false);
6509 if (op_ret < 0) {
6510 ldout(s->cct, 20) << "processor->thottle_data() returned ret="
6511 << op_ret << dendl;
6512 return op_ret;
6513 }
6514
6515 ofs += len;
6516 }
6517
6518 } while (len > 0);
6519
6520 if (ofs != size) {
6521 ldout(s->cct, 10) << "bulk upload: real file size different from declared"
6522 << dendl;
6523 op_ret = -EINVAL;
6524 }
6525
6526 op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
6527 user_quota, bucket_quota, size);
6528 if (op_ret < 0) {
6529 ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path
6530 << dendl;
6531 return op_ret;
6532 }
6533
6534 op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
6535 if (op_ret < 0) {
6536 return op_ret;
6537 }
6538
6539 char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
6540 unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
6541 hash.Final(m);
6542 buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
6543
6544 /* Create metadata: ETAG. */
6545 std::map<std::string, ceph::bufferlist> attrs;
6546 std::string etag = calc_md5;
6547 ceph::bufferlist etag_bl;
6548 etag_bl.append(etag.c_str(), etag.size() + 1);
6549 attrs.emplace(RGW_ATTR_ETAG, std::move(etag_bl));
6550
6551 /* Create metadata: ACLs. */
6552 RGWAccessControlPolicy policy;
6553 policy.create_default(s->user->user_id, s->user->display_name);
6554 ceph::bufferlist aclbl;
6555 policy.encode(aclbl);
6556 attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
6557
6558 /* Create metadata: compression info. */
6559 if (compressor && compressor->is_compressed()) {
6560 ceph::bufferlist tmp;
6561 RGWCompressionInfo cs_info;
6562 cs_info.compression_type = plugin->get_type_name();
6563 cs_info.orig_size = s->obj_size;
6564 cs_info.blocks = std::move(compressor->get_compression_blocks());
6565 ::encode(cs_info, tmp);
6566 attrs.emplace(RGW_ATTR_COMPRESSION, std::move(tmp));
6567 }
6568
6569 /* Complete the transaction. */
6570 op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs,
6571 ceph::real_time() /* delete_at */);
6572 if (op_ret < 0) {
6573 ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret="
6574 << op_ret << dendl;
6575 }
6576
6577 return op_ret;
6578 }
6579
void RGWBulkUploadOp::execute()
{
  /* Drive a Swift bulk upload: read the request body as a TAR archive,
   * one 512-byte block at a time, creating a bucket per directory entry
   * and an object per regular-file entry. Results are accumulated in
   * num_created/failures for the final report; op_ret carries the last
   * (or terminal) status. */
  ceph::bufferlist buffer(64 * 1024);

  ldout(s->cct, 20) << "bulk upload: start" << dendl;

  /* Create an instance of stream-abstracting class. Having this indirection
   * allows for easy introduction of decompressors like gzip and bzip2. */
  auto stream = create_stream();
  if (! stream) {
    return;
  }

  /* Handling the $UPLOAD_PATH according to Swift's Bulk middleware. See:
   * https://github.com/openstack/swift/blob/2.13.0/swift/common/middleware/bulk.py#L31-L41 */
  std::string bucket_path, file_prefix;
  std::tie(bucket_path, file_prefix) = handle_upload_path(s);

  auto status = rgw::tar::StatusIndicator::create();
  do {
    op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
    if (op_ret < 0) {
      ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl;
      return;
    }

    /* We need to re-interpret the buffer as a TAR block. Exactly two blocks
     * must be tracked to detect our end-of-archive. It occurs when both of
     * them are empty (zeroed). Tracking this particular inter-block dependency
     * is the responsibility of the rgw::tar::StatusIndicator class. */
    boost::optional<rgw::tar::HeaderView> header;
    std::tie(status, header) = rgw::tar::interpret_block(status, buffer);

    if (! status.empty() && header) {
      /* This specific block isn't empty (entirely zeroed), so we can parse
       * it as a TAR header and dispatch. At the moment we do support only
       * regular files and directories. Everything else (symlinks, devices)
       * will be ignored but won't abort the whole upload. */
      switch (header->get_filetype()) {
        case rgw::tar::FileType::NORMAL_FILE: {
          ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl;

          /* NOTE(review): in the prefixed case the ternary's right arm builds
           * a temporary std::string; `filename` is a view into it and the
           * temporary dies at the end of this full expression — looks like a
           * dangling view by the time handle_file() runs. Verify. */
          boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
                                       file_prefix + header->get_filename().to_string();
          /* AlignedStreamGetter's destructor drains up to the next 512-byte
           * block boundary, keeping the outer loop block-aligned. */
          auto body = AlignedStreamGetter(0, header->get_filesize(),
                                          rgw::tar::BLOCK_SIZE, *stream);
          op_ret = handle_file(filename,
                               header->get_filesize(),
                               body);
          if (! op_ret) {
            /* Only regular files count. */
            num_created++;
          } else {
            failures.emplace_back(op_ret, filename.to_string());
          }
          break;
        }
        case rgw::tar::FileType::DIRECTORY: {
          ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl;

          boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
          op_ret = handle_dir(dirname);
          if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) {
            failures.emplace_back(op_ret, dirname.to_string());
          }
          break;
        }
        default: {
          /* Not recognized. Skip. */
          op_ret = 0;
          break;
        }
      }

      /* In case of any problems with sub-request authorization Swift simply
       * terminates whole upload immediately. */
      if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
                                     terminal_errors)) {
        ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret
                         << dendl;
        break;
      }
    } else {
      ldout(s->cct, 2) << "bulk upload: an empty block" << dendl;
      op_ret = 0;
    }

    buffer.clear();
  } while (! status.eof());

  return;
}
6672
6673 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
6674 {
6675 const size_t aligned_legnth = length + (-length % alignment);
6676 ceph::bufferlist junk;
6677
6678 DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
6679 }
6680
6681 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
6682 ceph::bufferlist& dst)
6683 {
6684 const size_t max_to_read = std::min(want, length - position);
6685 const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst);
6686 if (len > 0) {
6687 position += len;
6688 }
6689 return len;
6690 }
6691
6692 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want,
6693 ceph::bufferlist& dst)
6694 {
6695 const auto len = DecoratedStreamGetter::get_exactly(want, dst);
6696 if (len > 0) {
6697 position += len;
6698 }
6699 return len;
6700 }
6701
6702 int RGWSetAttrs::verify_permission()
6703 {
6704 // This looks to be part of the RGW-NFS machinery and has no S3 or
6705 // Swift equivalent.
6706 bool perm;
6707 if (!s->object.empty()) {
6708 perm = verify_object_permission_no_policy(s, RGW_PERM_WRITE);
6709 } else {
6710 perm = verify_bucket_permission_no_policy(s, RGW_PERM_WRITE);
6711 }
6712 if (!perm)
6713 return -EACCES;
6714
6715 return 0;
6716 }
6717
void RGWSetAttrs::pre_exec()
{
  /* Common pre-execution hook: delegates to the shared bucket/object helper. */
  rgw_bucket_object_pre_exec(s);
}
6722
6723 void RGWSetAttrs::execute()
6724 {
6725 op_ret = get_params();
6726 if (op_ret < 0)
6727 return;
6728
6729 rgw_obj obj(s->bucket, s->object);
6730
6731 if (!s->object.empty()) {
6732 store->set_atomic(s->obj_ctx, obj);
6733 op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr);
6734 } else {
6735 for (auto& iter : attrs) {
6736 s->bucket_attrs[iter.first] = std::move(iter.second);
6737 }
6738 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, s->bucket_attrs,
6739 &s->bucket_info.objv_tracker);
6740 }
6741 }
6742
void RGWGetObjLayout::pre_exec()
{
  /* Common pre-execution hook: delegates to the shared bucket/object helper. */
  rgw_bucket_object_pre_exec(s);
}
6747
6748 void RGWGetObjLayout::execute()
6749 {
6750 rgw_obj obj(s->bucket, s->object);
6751 RGWRados::Object target(store,
6752 s->bucket_info,
6753 *static_cast<RGWObjectCtx *>(s->obj_ctx),
6754 rgw_obj(s->bucket, s->object));
6755 RGWRados::Object::Read stat_op(&target);
6756
6757 op_ret = stat_op.prepare();
6758 if (op_ret < 0) {
6759 return;
6760 }
6761
6762 head_obj = stat_op.state.head_obj;
6763
6764 op_ret = target.get_manifest(&manifest);
6765 }
6766
6767
6768 int RGWConfigBucketMetaSearch::verify_permission()
6769 {
6770 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6771 return -EACCES;
6772 }
6773
6774 return 0;
6775 }
6776
void RGWConfigBucketMetaSearch::pre_exec()
{
  /* Common pre-execution hook: delegates to the shared bucket/object helper. */
  rgw_bucket_object_pre_exec(s);
}
6781
6782 void RGWConfigBucketMetaSearch::execute()
6783 {
6784 op_ret = get_params();
6785 if (op_ret < 0) {
6786 ldout(s->cct, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
6787 return;
6788 }
6789
6790 s->bucket_info.mdsearch_config = mdsearch_config;
6791
6792 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6793 if (op_ret < 0) {
6794 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6795 return;
6796 }
6797 }
6798
6799 int RGWGetBucketMetaSearch::verify_permission()
6800 {
6801 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6802 return -EACCES;
6803 }
6804
6805 return 0;
6806 }
6807
void RGWGetBucketMetaSearch::pre_exec()
{
  /* Common pre-execution hook: delegates to the shared bucket/object helper. */
  rgw_bucket_object_pre_exec(s);
}
6812
6813 int RGWDelBucketMetaSearch::verify_permission()
6814 {
6815 if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) {
6816 return -EACCES;
6817 }
6818
6819 return 0;
6820 }
6821
void RGWDelBucketMetaSearch::pre_exec()
{
  /* Common pre-execution hook: delegates to the shared bucket/object helper. */
  rgw_bucket_object_pre_exec(s);
}
6826
6827 void RGWDelBucketMetaSearch::execute()
6828 {
6829 s->bucket_info.mdsearch_config.clear();
6830
6831 op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
6832 if (op_ret < 0) {
6833 ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
6834 return;
6835 }
6836 }
6837
6838
RGWHandler::~RGWHandler()
{
  /* Intentionally empty: nothing is released here. */
}
6842
int RGWHandler::init(RGWRados *_store,
		     struct req_state *_s,
		     rgw::io::BasicClient *cio)
{
  /* Stash the store and request state for later use. `cio` is accepted
   * for interface compatibility but not used by this base implementation. */
  store = _store;
  s = _s;

  return 0;
}
6852
6853 int RGWHandler::do_init_permissions()
6854 {
6855 int ret = rgw_build_bucket_policies(store, s);
6856 s->env = rgw_build_iam_environment(store, s);
6857
6858 if (ret < 0) {
6859 ldout(s->cct, 10) << "read_permissions on " << s->bucket << " ret=" << ret << dendl;
6860 if (ret == -ENODATA)
6861 ret = -EACCES;
6862 }
6863
6864 return ret;
6865 }
6866
6867 int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket)
6868 {
6869 if (only_bucket) {
6870 /* already read bucket info */
6871 return 0;
6872 }
6873 int ret = rgw_build_object_policies(store, s, op->prefetch_data());
6874
6875 if (ret < 0) {
6876 ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":"
6877 << s->object << " only_bucket=" << only_bucket
6878 << " ret=" << ret << dendl;
6879 if (ret == -ENODATA)
6880 ret = -EACCES;
6881 }
6882
6883 return ret;
6884 }
6885
int RGWOp::error_handler(int err_no, string *error_content) {
  /* Delegate error translation to the protocol (dialect) handler. */
  return dialect_handler->error_handler(err_no, error_content);
}
6889
int RGWHandler::error_handler(int err_no, string *error_content) {
  // This is the do-nothing error handler: the error code passes through
  // unchanged and no error body is produced.
  return err_no;
}
6894
6895
6896 void RGWPutBucketPolicy::send_response()
6897 {
6898 if (op_ret) {
6899 set_req_state_err(s, op_ret);
6900 }
6901 dump_errno(s);
6902 end_header(s);
6903 }
6904
6905 int RGWPutBucketPolicy::verify_permission()
6906 {
6907 if (!verify_bucket_permission(s, rgw::IAM::s3PutBucketPolicy)) {
6908 return -EACCES;
6909 }
6910
6911 return 0;
6912 }
6913
6914 int RGWPutBucketPolicy::get_params()
6915 {
6916 const auto max_size = s->cct->_conf->rgw_max_put_param_size;
6917 // At some point when I have more time I want to make a version of
6918 // rgw_rest_read_all_input that doesn't use malloc.
6919 op_ret = rgw_rest_read_all_input(s, &data, &len, max_size, false);
6920 // And throws exceptions.
6921 return op_ret;
6922 }
6923
6924 void RGWPutBucketPolicy::execute()
6925 {
6926 op_ret = get_params();
6927 if (op_ret < 0) {
6928 return;
6929 }
6930
6931 bufferlist in_data = bufferlist::static_from_mem(data, len);
6932
6933 if (!store->is_meta_master()) {
6934 op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
6935 if (op_ret < 0) {
6936 ldout(s->cct, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
6937 return;
6938 }
6939 }
6940
6941 try {
6942 const Policy p(s->cct, s->bucket_tenant, in_data);
6943 op_ret = retry_raced_bucket_write(store, s, [&p, this] {
6944 auto attrs = s->bucket_attrs;
6945 attrs[RGW_ATTR_IAM_POLICY].clear();
6946 attrs[RGW_ATTR_IAM_POLICY].append(p.text);
6947 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
6948 &s->bucket_info.objv_tracker);
6949 return op_ret;
6950 });
6951 } catch (rgw::IAM::PolicyParseException& e) {
6952 ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
6953 op_ret = -EINVAL;
6954 }
6955 }
6956
6957 void RGWGetBucketPolicy::send_response()
6958 {
6959 if (op_ret) {
6960 set_req_state_err(s, op_ret);
6961 }
6962 dump_errno(s);
6963 end_header(s, this, "application/json");
6964 dump_body(s, policy);
6965 }
6966
6967 int RGWGetBucketPolicy::verify_permission()
6968 {
6969 if (!verify_bucket_permission(s, rgw::IAM::s3GetBucketPolicy)) {
6970 return -EACCES;
6971 }
6972
6973 return 0;
6974 }
6975
6976 void RGWGetBucketPolicy::execute()
6977 {
6978 auto attrs = s->bucket_attrs;
6979 map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
6980 if (aiter == attrs.end()) {
6981 ldout(s->cct, 0) << __func__ << " can't find bucket IAM POLICY attr"
6982 << " bucket_name = " << s->bucket_name << dendl;
6983 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6984 s->err.message = "The bucket policy does not exist";
6985 return;
6986 } else {
6987 policy = attrs[RGW_ATTR_IAM_POLICY];
6988
6989 if (policy.length() == 0) {
6990 ldout(s->cct, 10) << "The bucket policy does not exist, bucket: " << s->bucket_name << dendl;
6991 op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
6992 s->err.message = "The bucket policy does not exist";
6993 return;
6994 }
6995 }
6996 }
6997
6998 void RGWDeleteBucketPolicy::send_response()
6999 {
7000 if (op_ret) {
7001 set_req_state_err(s, op_ret);
7002 }
7003 dump_errno(s);
7004 end_header(s);
7005 }
7006
7007 int RGWDeleteBucketPolicy::verify_permission()
7008 {
7009 if (!verify_bucket_permission(s, rgw::IAM::s3DeleteBucketPolicy)) {
7010 return -EACCES;
7011 }
7012
7013 return 0;
7014 }
7015
7016 void RGWDeleteBucketPolicy::execute()
7017 {
7018 op_ret = retry_raced_bucket_write(store, s, [this] {
7019 auto attrs = s->bucket_attrs;
7020 attrs.erase(RGW_ATTR_IAM_POLICY);
7021 op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
7022 &s->bucket_info.objv_tracker);
7023 return op_ret;
7024 });
7025 }
7026
void RGWGetClusterStat::execute()
{
  /* Query cluster-wide statistics straight from the RADOS handle. */
  op_ret = this->store->get_rados_handle()->cluster_stat(stats_op);
}
7031
7032